index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/broker/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A {@link org.apache.gobblin.broker.iface.SharedResourcesBroker} is an object that provides and manages objects that are accessed
* from multiple points in the application, allowing disconnected components to use shared objects, as well as easy user
* configuration for those objects.
*
* As a model, consider file handles for emitting logs. Multiple tasks in the application might need to access a global log
* file, or each task might have its own log file. To use a {@link org.apache.gobblin.broker.iface.SharedResourcesBroker}, a task
* creates a factory (see {@link org.apache.gobblin.broker.iface.SharedResourceFactory}), in this case a log file handle factory.
* To acquire the file handle, the task sends a request to
* the broker providing the log file handle factory and a {@link org.apache.gobblin.broker.iface.SharedResourceKey} (a discriminator between
* different objects created by the same factory, in the case of the log file handle, the key could specify whether we
* need an error log handle or an info file handle). The broker has a cache of already created objects, and will either
* return the same object if one matches the task's request, or will use the factory to create a new object.
*
* Brokers and the objects cached in them are scoped (see {@link org.apache.gobblin.broker.iface.ScopeType} and
* {@link org.apache.gobblin.broker.iface.ScopeInstance}). Scoping allows the application to provide information to the broker
* about its topology, and allows different scopes to get different objects. In the log file handle example, there might
 * be a different handle per task, so all calls within the same task will get the same handle, while calls from different
 * tasks will get a different handle. In the most common use case, the task need not worry about scopes, as the
* factory automatically determines which scope the handle should be created on. However, scoped requests are also
* available, where a task can request an object at a specified scope.
*
* When creating a new object, the broker passes a configuration to the factory (see {@link org.apache.gobblin.broker.iface.ConfigView}
* and {@link org.apache.gobblin.broker.iface.ScopedConfigView}), allowing users to globally change the
* behavior of shared resources transparently to the task. Users can specify configurations for specific factories, scopes,
* and keys (for example, the location of the log file could be settable through configuration, and user can specify
* a different location for global and task scope logs).
*/
package org.apache.gobblin.broker;
| 4,100 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/SerializationUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.io.BaseEncoding;
import org.apache.gobblin.configuration.State;
/**
 * A utility class for serializing and deserializing objects to/from Strings, byte arrays,
 * and Hadoop files (for {@link State} instances).
 */
public class SerializationUtils {

  private static final BaseEncoding DEFAULT_ENCODING = BaseEncoding.base64();

  /** Utility class with only static members; not meant to be instantiated. */
  private SerializationUtils() {
  }

  /**
   * Serialize an object into a String. The object is first serialized into a byte array,
   * which is converted into a String using {@link BaseEncoding#base64()}.
   *
   * @param obj A {@link Serializable} object
   * @return A String representing the input object
   * @throws IOException if it fails to serialize the object
   */
  public static <T extends Serializable> String serialize(T obj) throws IOException {
    return serialize(obj, DEFAULT_ENCODING);
  }

  /**
   * Serialize an object into a String. The object is first serialized into a byte array,
   * which is converted into a String using the given {@link BaseEncoding}.
   *
   * @param obj A {@link Serializable} object
   * @param enc The {@link BaseEncoding} used to encode a byte array.
   * @return A String representing the input object
   * @throws IOException if it fails to serialize the object
   */
  public static <T extends Serializable> String serialize(T obj, BaseEncoding enc) throws IOException {
    return enc.encode(serializeIntoBytes(obj));
  }

  /**
   * Serialize an object into a byte array using Java native serialization.
   *
   * @param obj A {@link Serializable} object
   * @return Byte serialization of input object
   * @throws IOException if it fails to serialize the object
   */
  public static <T extends Serializable> byte[] serializeIntoBytes(T obj) throws IOException {
    try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
        ObjectOutputStream oos = new ObjectOutputStream(bos)) {
      oos.writeObject(obj);
      oos.flush();
      return bos.toByteArray();
    }
  }

  /**
   * Deserialize a String obtained via {@link #serialize(Serializable)} into an object, using
   * {@link BaseEncoding#base64()}.
   *
   * @param serialized The serialized String
   * @param clazz The class the deserialized object should be cast to.
   * @return The deserialized object
   * @throws IOException if it fails to deserialize the object
   */
  public static <T extends Serializable> T deserialize(String serialized, Class<T> clazz) throws IOException {
    return deserialize(serialized, clazz, DEFAULT_ENCODING);
  }

  /**
   * Deserialize a String obtained via {@link #serialize(Serializable)} into an object, using the
   * given {@link BaseEncoding}, which must be the same {@link BaseEncoding} used to serialize the object.
   *
   * @param serialized The serialized String
   * @param clazz The class the deserialized object should be cast to.
   * @param enc The {@link BaseEncoding} used to decode the String.
   * @return The deserialized object
   * @throws IOException if it fails to deserialize the object
   */
  public static <T extends Serializable> T deserialize(String serialized, Class<T> clazz, BaseEncoding enc)
      throws IOException {
    return deserializeFromBytes(enc.decode(serialized), clazz);
  }

  /**
   * Deserialize bytes obtained via {@link #serializeIntoBytes(Serializable)} into an object.
   *
   * <p>SECURITY NOTE(review): this uses Java native deserialization ({@link ObjectInputStream}),
   * which is unsafe on untrusted input; callers must only feed it data produced by
   * {@link #serializeIntoBytes(Serializable)} from a trusted source.
   *
   * @param serialized The serialized bytes
   * @param clazz The class the deserialized object should be cast to.
   * @return The deserialized object
   * @throws IOException if it fails to deserialize the object (including when the serialized
   *         class is not on the classpath, in which case the {@link ClassNotFoundException}
   *         is wrapped as the cause)
   */
  public static <T extends Serializable> T deserializeFromBytes(byte[] serialized, Class<T> clazz)
      throws IOException {
    try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(serialized))) {
      return clazz.cast(ois.readObject());
    } catch (ClassNotFoundException e) {
      throw new IOException(e);
    }
  }

  /**
   * Serialize a {@link State} instance to a file, using the file system's default replication
   * for the target path.
   *
   * @param fs the {@link FileSystem} instance for creating the file
   * @param jobStateFilePath the path to the file
   * @param state the {@link State} to serialize
   * @param <T> the {@link State} object type
   * @throws IOException if it fails to serialize the {@link State} instance
   */
  public static <T extends State> void serializeState(FileSystem fs, Path jobStateFilePath, T state)
      throws IOException {
    serializeState(fs, jobStateFilePath, state, fs.getDefaultReplication(jobStateFilePath));
  }

  /**
   * Serialize a {@link State} instance to a file.
   *
   * @param fs the {@link FileSystem} instance for creating the file
   * @param jobStateFilePath the path to the file
   * @param state the {@link State} to serialize
   * @param replication replication of the serialized file.
   * @param <T> the {@link State} object type
   * @throws IOException if it fails to serialize the {@link State} instance
   */
  public static <T extends State> void serializeState(FileSystem fs, Path jobStateFilePath, T state, short replication)
      throws IOException {
    try (DataOutputStream dataOutputStream = new DataOutputStream(fs.create(jobStateFilePath, replication))) {
      state.write(dataOutputStream);
    }
  }

  /**
   * Deserialize/read a {@link State} instance from a file.
   *
   * @param fs the {@link FileSystem} instance for opening the file
   * @param jobStateFilePath the path to the file
   * @param state an empty {@link State} instance to deserialize into
   * @param <T> the {@link State} object type
   * @throws IOException if it fails to deserialize the {@link State} instance
   */
  public static <T extends State> void deserializeState(FileSystem fs, Path jobStateFilePath, T state)
      throws IOException {
    try (InputStream is = fs.open(jobStateFilePath)) {
      deserializeStateFromInputStream(is, state);
    }
  }

  /**
   * Deserialize/read a {@link State} instance from an {@link InputStream}.
   *
   * <p>The stream is wrapped in a {@link DataInputStream} and closed when done (closing the
   * wrapper also closes {@code is}).
   *
   * @param is {@link InputStream} containing the state.
   * @param state an empty {@link State} instance to deserialize into
   * @param <T> the {@link State} object type
   * @throws IOException if it fails to deserialize the {@link State} instance
   */
  public static <T extends State> void deserializeStateFromInputStream(InputStream is, T state) throws IOException {
    try (DataInputStream dis = new DataInputStream(is)) {
      state.readFields(dis);
    }
  }
}
| 4,101 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/HiveAvroTypeConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.Map;
import java.util.Set;
import org.apache.avro.Schema;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
/**
 * Constants describing how Avro schema types map to Hive column types, and which Hive
 * type promotions are treated as compatible for schema evolution.
 */
public class HiveAvroTypeConstants {

  // Avro to Hive schema mapping
  public static final Map<Schema.Type, String> AVRO_TO_HIVE_COLUMN_MAPPING_V_12 = ImmutableMap
      .<Schema.Type, String>builder()
      .put(Schema.Type.NULL, "void")
      .put(Schema.Type.BOOLEAN, "boolean")
      .put(Schema.Type.INT, "int")
      .put(Schema.Type.LONG, "bigint")
      .put(Schema.Type.FLOAT, "float")
      .put(Schema.Type.DOUBLE, "double")
      .put(Schema.Type.BYTES, "binary")
      .put(Schema.Type.STRING, "string")
      .put(Schema.Type.RECORD, "struct")
      .put(Schema.Type.MAP, "map")
      .put(Schema.Type.ARRAY, "array")
      .put(Schema.Type.UNION, "uniontype")
      .put(Schema.Type.ENUM, "string")   // enums have no Hive equivalent; stored as string
      .put(Schema.Type.FIXED, "binary")  // fixed-length byte types are stored as binary
      .build();

  // Hive evolution types supported: for each Hive type, the set of Hive types a column
  // may evolve to without being considered an incompatible schema change.
  // NOTE(review): "varchar" lists itself as a target, and "string" <-> "varchar"
  // promotions are asymmetric with "double"/"decimal" — looks intentional for Hive's
  // coercion rules, but worth confirming against the Hive version in use.
  public static final Map<String, Set<String>> HIVE_COMPATIBLE_TYPES = ImmutableMap
      .<String, Set<String>>builder()
      .put("tinyint", ImmutableSet.<String>builder()
          .add("smallint", "int", "bigint", "float", "double", "decimal", "string", "varchar").build())
      .put("smallint", ImmutableSet.<String>builder().add("int", "bigint", "float", "double", "decimal", "string",
          "varchar").build())
      .put("int", ImmutableSet.<String>builder().add("bigint", "float", "double", "decimal", "string", "varchar")
          .build())
      .put("bigint", ImmutableSet.<String>builder().add("float", "double", "decimal", "string", "varchar").build())
      .put("float", ImmutableSet.<String>builder().add("double", "decimal", "string", "varchar").build())
      .put("double", ImmutableSet.<String>builder().add("decimal", "string", "varchar").build())
      .put("decimal", ImmutableSet.<String>builder().add("string", "varchar").build())
      .put("string", ImmutableSet.<String>builder().add("double", "decimal", "varchar").build())
      .put("varchar", ImmutableSet.<String>builder().add("double", "string", "varchar").build())
      .put("timestamp", ImmutableSet.<String>builder().add("string", "varchar").build())
      .put("date", ImmutableSet.<String>builder().add("string", "varchar").build())
      .put("binary", Sets.<String>newHashSet())   // binary cannot evolve to any other type
      .put("boolean", Sets.<String>newHashSet()).build();  // boolean cannot evolve to any other type

  /**
   * Supported Avro logical type names, mapped to Hive types as follows:
   *   decimal     -> "decimal"
   *   date        -> "date"
   *   time-millis -> "timestamp"
   */
  public static final String DECIMAL = "decimal";
  public static final String DATE = "date";
  public static final String TIME_MILLIS = "time-millis";
}
| 4,102 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/JobLauncherUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
 * Utility class for the job scheduler and job launchers.
 *
 * @author Yinan Li
 */
@Slf4j
public class JobLauncherUtils {

  public static final String WORK_UNIT_FILE_EXTENSION = ".wu";
  public static final String MULTI_WORK_UNIT_FILE_EXTENSION = ".mwu";

  // A cache for proxied FileSystems by owners
  private static Cache<String, FileSystem> fileSystemCacheByOwners = CacheBuilder.newBuilder().build();

  /** Utility class with only static members; not meant to be instantiated. */
  private JobLauncherUtils() {
  }

  /**
   * Create a new job ID.
   *
   * @param jobName job name
   * @return new job ID
   */
  public static String newJobId(String jobName) {
    return Id.Job.create(jobName, System.currentTimeMillis()).toString();
  }

  /**
   * Create a new job ID from a flow execution ID.
   *
   * @param jobName job name
   * @param executionId flow execution ID used as the job ID's timestamp component
   * @return new job ID
   */
  public static String newJobId(String jobName, long executionId) {
    return Id.Job.create(jobName, executionId).toString();
  }

  /**
   * Create a new task ID for the job with the given job ID.
   *
   * @param jobId job ID
   * @param sequence task sequence number
   * @return new task ID
   */
  public static String newTaskId(String jobId, int sequence) {
    return Id.Task.create(Id.parse(jobId).get(Id.Parts.INSTANCE_NAME), sequence).toString();
  }

  /**
   * Create an ID for a new multi-task (corresponding to a {@link org.apache.gobblin.source.workunit.MultiWorkUnit})
   * for the job with the given job ID.
   *
   * @param jobId job ID
   * @param sequence multi-task sequence number
   * @return new multi-task ID
   */
  public static String newMultiTaskId(String jobId, int sequence) {
    return Id.MultiTask.create(Id.parse(jobId).get(Id.Parts.INSTANCE_NAME), sequence).toString();
  }

  /**
   * Utility method that takes in a {@link List} of {@link WorkUnit}s, and flattens them. It builds up
   * the flattened list by checking each element of the given list, and seeing if it is an instance of
   * {@link MultiWorkUnit}. If it is then it calls itself on the {@link WorkUnit}s returned by
   * {@link MultiWorkUnit#getWorkUnits()}. If not, then it simply adds the {@link WorkUnit} to the
   * flattened list.
   *
   * @param workUnits is a {@link List} containing either {@link WorkUnit}s or {@link MultiWorkUnit}s
   * @return a {@link List} of flattened {@link WorkUnit}s
   */
  public static List<WorkUnit> flattenWorkUnits(Collection<WorkUnit> workUnits) {
    List<WorkUnit> flattenedWorkUnits = Lists.newArrayList();
    for (WorkUnit workUnit : workUnits) {
      if (workUnit.isMultiWorkUnit()) {
        // recurse: a MultiWorkUnit may itself contain nested MultiWorkUnits
        flattenedWorkUnits.addAll(flattenWorkUnits(((MultiWorkUnit) workUnit).getWorkUnits()));
      } else {
        flattenedWorkUnits.add(workUnit);
      }
    }
    return flattenedWorkUnits;
  }

  /** @return flattened list of {@link WorkUnit}s loaded from `path`, which may possibly hold a multi-work unit */
  public static List<WorkUnit> loadFlattenedWorkUnits(FileSystem fs, Path path) throws IOException {
    WorkUnit workUnit = JobLauncherUtils.createEmptyWorkUnitPerExtension(path);
    SerializationUtils.deserializeState(fs, path, workUnit);
    return JobLauncherUtils.flattenWorkUnits(Lists.newArrayList(workUnit));
  }

  /** @return an empty {@link WorkUnit}, potentially an empty {@link MultiWorkUnit}, based on the {@link Path} extension */
  public static WorkUnit createEmptyWorkUnitPerExtension(Path p) {
    return JobLauncherUtils.hasMultiWorkUnitExtension(p) ? MultiWorkUnit.createEmpty() : WorkUnit.createEmpty();
  }

  /** @return whether {@link Path} ends with {@link JobLauncherUtils#MULTI_WORK_UNIT_FILE_EXTENSION} */
  public static boolean hasMultiWorkUnitExtension(Path p) {
    return p.getName().endsWith(JobLauncherUtils.MULTI_WORK_UNIT_FILE_EXTENSION);
  }

  /** @return whether {@link Path} ends with {@link JobLauncherUtils#MULTI_WORK_UNIT_FILE_EXTENSION} or {@link JobLauncherUtils#WORK_UNIT_FILE_EXTENSION} */
  public static boolean hasAnyWorkUnitExtension(Path p) {
    return p.getName().endsWith(JobLauncherUtils.MULTI_WORK_UNIT_FILE_EXTENSION)
        || p.getName().endsWith(JobLauncherUtils.WORK_UNIT_FILE_EXTENSION);
  }

  /**
   * Cleanup the staging data for a list of Gobblin tasks. This method calls the
   * {@link #cleanTaskStagingData(State, Logger)} method.
   *
   * @param states a {@link List} of {@link State}s that need their staging data cleaned
   * @param logger a {@link Logger} used for logging
   * @throws IOException if any task's staging data fails to be cleaned
   */
  public static void cleanStagingData(List<? extends State> states, Logger logger) throws IOException {
    for (State state : states) {
      JobLauncherUtils.cleanTaskStagingData(state, logger);
    }
  }

  /**
   * Cleanup staging data of all tasks of a job.
   *
   * <p>Deletes the writer staging and output directories (and their parents, if left empty),
   * plus the row-level error directory when configured and cleaning of it is enabled.
   *
   * @param state a {@link State} instance storing job configuration properties
   * @param logger a {@link Logger} used for logging
   * @throws IOException if it fails to delete any of the directories
   */
  public static void cleanJobStagingData(State state, Logger logger) throws IOException {
    // Nothing to do when neither staging nor output dir is configured.
    if (!state.contains(ConfigurationKeys.WRITER_STAGING_DIR) || !state.contains(ConfigurationKeys.WRITER_OUTPUT_DIR)) {
      return;
    }
    String writerFsUri = state.getProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, ConfigurationKeys.LOCAL_FS_URI);
    FileSystem fs = getFsWithProxy(state, writerFsUri, WriterUtils.getFsConfiguration(state));
    Path jobStagingPath = new Path(state.getProp(ConfigurationKeys.WRITER_STAGING_DIR));
    logger.info("Cleaning up staging directory " + jobStagingPath);
    HadoopUtils.deletePath(fs, jobStagingPath, true);
    // Remove the now-empty parent directory as well to avoid leaving stale job dirs behind.
    if (fs.exists(jobStagingPath.getParent()) && fs.listStatus(jobStagingPath.getParent()).length == 0) {
      logger.debug("Deleting directory " + jobStagingPath.getParent());
      HadoopUtils.deletePath(fs, jobStagingPath.getParent(), true);
    }
    Path jobOutputPath = new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR));
    logger.info("Cleaning up output directory " + jobOutputPath);
    HadoopUtils.deletePath(fs, jobOutputPath, true);
    if (fs.exists(jobOutputPath.getParent()) && fs.listStatus(jobOutputPath.getParent()).length == 0) {
      logger.debug("Deleting directory " + jobOutputPath.getParent());
      HadoopUtils.deletePath(fs, jobOutputPath.getParent(), true);
    }
    if (state.contains(ConfigurationKeys.ROW_LEVEL_ERR_FILE)) {
      if (state.getPropAsBoolean(ConfigurationKeys.CLEAN_ERR_DIR, ConfigurationKeys.DEFAULT_CLEAN_ERR_DIR)) {
        Path jobErrPath = new Path(state.getProp(ConfigurationKeys.ROW_LEVEL_ERR_FILE));
        // Use the caller-provided logger (was the static @Slf4j log, inconsistent with
        // the rest of this method).
        logger.debug("Cleaning up err directory : " + jobErrPath);
        HadoopUtils.deleteIfExists(fs, jobErrPath, true);
      }
    }
  }

  /**
   * Cleanup staging data of a Gobblin task.
   *
   * @param state a {@link State} instance storing task configuration properties
   * @param logger a {@link Logger} used for logging
   * @throws IOException if it fails to delete the staging or output directory of any branch
   */
  public static void cleanTaskStagingData(State state, Logger logger) throws IOException {
    int numBranches = state.getPropAsInt(ConfigurationKeys.FORK_BRANCHES_KEY, 1);
    for (int branchId = 0; branchId < numBranches; branchId++) {
      String writerFsUri = state.getProp(
          ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, numBranches, branchId),
          ConfigurationKeys.LOCAL_FS_URI);
      FileSystem fs = getFsWithProxy(state, writerFsUri, WriterUtils.getFsConfiguration(state));
      Path stagingPath = WriterUtils.getWriterStagingDir(state, numBranches, branchId);
      if (fs.exists(stagingPath)) {
        logger.info("Cleaning up staging directory " + stagingPath.toUri().getPath());
        if (!fs.delete(stagingPath, true)) {
          throw new IOException("Clean up staging directory " + stagingPath.toUri().getPath() + " failed");
        }
      }
      Path outputPath = WriterUtils.getWriterOutputDir(state, numBranches, branchId);
      if (fs.exists(outputPath)) {
        logger.info("Cleaning up output directory " + outputPath.toUri().getPath());
        if (!fs.delete(outputPath, true)) {
          throw new IOException("Clean up output directory " + outputPath.toUri().getPath() + " failed");
        }
      }
    }
  }

  /**
   * Cleanup staging data of a Gobblin task using a {@link ParallelRunner}.
   *
   * @param state workunit state.
   * @param logger a {@link Logger} used for logging.
   * @param closer a closer that registers the given map of ParallelRunners. The caller is responsible
   *               for closing the closer after the cleaning is done.
   * @param parallelRunners a map from FileSystem URI to ParallelRunner.
   * @throws IOException if it fails to cleanup the task staging data.
   */
  public static void cleanTaskStagingData(State state, Logger logger, Closer closer,
      Map<String, ParallelRunner> parallelRunners) throws IOException {
    int numBranches = state.getPropAsInt(ConfigurationKeys.FORK_BRANCHES_KEY, 1);
    int parallelRunnerThreads =
        state.getPropAsInt(ParallelRunner.PARALLEL_RUNNER_THREADS_KEY, ParallelRunner.DEFAULT_PARALLEL_RUNNER_THREADS);
    for (int branchId = 0; branchId < numBranches; branchId++) {
      String writerFsUri = state.getProp(
          ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, numBranches, branchId),
          ConfigurationKeys.LOCAL_FS_URI);
      FileSystem fs = getFsWithProxy(state, writerFsUri, WriterUtils.getFsConfiguration(state));
      ParallelRunner parallelRunner = getParallelRunner(fs, closer, parallelRunnerThreads, parallelRunners);
      Path stagingPath = WriterUtils.getWriterStagingDir(state, numBranches, branchId);
      if (fs.exists(stagingPath)) {
        logger.info("Cleaning up staging directory " + stagingPath.toUri().getPath());
        parallelRunner.deletePath(stagingPath, true);
      }
      Path outputPath = WriterUtils.getWriterOutputDir(state, numBranches, branchId);
      if (fs.exists(outputPath)) {
        logger.info("Cleaning up output directory " + outputPath.toUri().getPath());
        parallelRunner.deletePath(outputPath, true);
      }
    }
  }

  /**
   * Delete the job-level staging/output directories left over from previous runs.
   *
   * <p>When a staging (resp. output) dir is user-provided, its immediate parent is the job dir;
   * otherwise the job dir is one level higher — hence the extra {@code getParent()}.
   *
   * <p>NOTE(review): unlike the other cleanup methods, this one does not go through
   * {@link #getFsWithProxy(State, String, Configuration)} — confirm proxying is not needed here.
   *
   * @param state a {@link State} instance storing job configuration properties
   * @param logger a {@link Logger} used for logging
   * @param stagingDirProvided whether the writer staging dir was explicitly provided
   * @param outputDirProvided whether the writer output dir was explicitly provided
   * @throws IOException if it fails to delete an old job directory
   */
  public static void cleanUpOldJobData(State state, Logger logger, boolean stagingDirProvided, boolean outputDirProvided) throws IOException {
    Set<Path> jobPaths = new HashSet<>();
    String writerFsUri = state.getProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, ConfigurationKeys.LOCAL_FS_URI);
    FileSystem fs = FileSystem.get(URI.create(writerFsUri), WriterUtils.getFsConfiguration(state));
    Path jobPath;
    if (stagingDirProvided) {
      jobPath = new Path(state.getProp(ConfigurationKeys.WRITER_STAGING_DIR)).getParent();
    } else {
      jobPath = new Path(state.getProp(ConfigurationKeys.WRITER_STAGING_DIR)).getParent().getParent();
    }
    jobPaths.add(jobPath);
    if (outputDirProvided) {
      jobPath = new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR)).getParent();
    } else {
      jobPath = new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR)).getParent().getParent();
    }
    jobPaths.add(jobPath);
    for (Path jobPathToDelete : jobPaths) {
      logger.info("Cleaning up old job directory " + jobPathToDelete);
      HadoopUtils.deletePath(fs, jobPathToDelete, true);
    }
  }

  /**
   * Get a {@link FileSystem} for the given URI, proxying as the user configured via
   * {@link ConfigurationKeys#FS_PROXY_AS_USER_NAME} when
   * {@link ConfigurationKeys#SHOULD_FS_PROXY_AS_USER} is enabled. Proxied instances are
   * cached per proxy user in {@link #fileSystemCacheByOwners}.
   *
   * @param state a {@link State} carrying the proxy configuration
   * @param fsUri the {@link FileSystem} URI
   * @param conf the Hadoop {@link Configuration} used to instantiate the {@link FileSystem}
   * @return a (possibly proxied) {@link FileSystem} instance
   * @throws IOException if the (proxied) {@link FileSystem} cannot be created
   */
  public static FileSystem getFsWithProxy(final State state, final String fsUri, final Configuration conf) throws IOException {
    if (!state.getPropAsBoolean(ConfigurationKeys.SHOULD_FS_PROXY_AS_USER,
        ConfigurationKeys.DEFAULT_SHOULD_FS_PROXY_AS_USER)) {
      return FileSystem.get(URI.create(fsUri), conf);
    }
    Preconditions.checkArgument(!Strings.isNullOrEmpty(state.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME)),
        "State does not contain a proper proxy user name");
    String owner = state.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME);
    try {
      return fileSystemCacheByOwners.get(owner, new Callable<FileSystem>() {
        @Override
        public FileSystem call()
            throws Exception {
          return new ProxiedFileSystemWrapper().getProxiedFileSystem(state, ProxiedFileSystemWrapper.AuthType.KEYTAB,
              state.getProp(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION), fsUri, conf);
        }
      });
    } catch (ExecutionException ee) {
      // Unwrap the cache-loader failure so callers see the real cause as an IOException.
      throw new IOException(ee.getCause());
    }
  }

  /** @return the {@link ParallelRunner} for {@code fs}, creating and registering one (with the closer) if absent */
  private static ParallelRunner getParallelRunner(FileSystem fs, Closer closer, int parallelRunnerThreads,
      Map<String, ParallelRunner> parallelRunners) {
    // Key by URI + home dir so distinct users/filesystems get distinct runners.
    String uriAndHomeDir = new Path(new Path(fs.getUri()), fs.getHomeDirectory()).toString();
    if (!parallelRunners.containsKey(uriAndHomeDir)) {
      parallelRunners.put(uriAndHomeDir, closer.register(new ParallelRunner(parallelRunnerThreads, fs)));
    }
    return parallelRunners.get(uriAndHomeDir);
  }
}
| 4,103 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/PortUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import com.google.common.collect.Maps;
import java.net.ServerSocket;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class PortUtils {
public static final int MINIMUM_PORT = 1025;
public static final int MAXIMUM_PORT = 65535;
private static final Pattern PORT_REGEX =
Pattern.compile("\\$\\{PORT_(?>(?>\\?(\\d+))|(?>(\\d+)\\?)|(\\d+|\\?))\\}");
private final PortLocator portLocator;
private final ConcurrentMap<Integer, Boolean> assignedPorts;
public PortUtils() {
this(new ServerSocketPortLocator());
}
@VisibleForTesting
PortUtils(PortLocator locator) {
this.portLocator = locator;
this.assignedPorts = Maps.newConcurrentMap();
}
/**
* Replaces any port tokens in the specified string.
*
* NOTE: Tokens can be in the following forms:
* 1. ${PORT_123}
* 2. ${PORT_?123}
* 3. ${PORT_123?}
* 4. ${PORT_?}
*
* @param value The string in which to replace port tokens.
* @return The replaced string.
*/
public String replacePortTokens(String value) {
BiMap<String, Optional<Integer>> portMappings = HashBiMap.create();
Matcher regexMatcher = PORT_REGEX.matcher(value);
while (regexMatcher.find()) {
String token = regexMatcher.group(0);
if (!portMappings.containsKey(token)) {
Optional<Integer> portStart = Optional.absent();
Optional<Integer> portEnd = Optional.absent();
String unboundedStart = regexMatcher.group(1);
if (unboundedStart != null) {
int requestedEndPort = Integer.parseInt(unboundedStart);
Preconditions.checkArgument(requestedEndPort <= PortUtils.MAXIMUM_PORT);
portEnd = Optional.of(requestedEndPort);
} else {
String unboundedEnd = regexMatcher.group(2);
if (unboundedEnd != null) {
int requestedStartPort = Integer.parseInt(unboundedEnd);
Preconditions.checkArgument(requestedStartPort >= PortUtils.MINIMUM_PORT);
portStart = Optional.of(requestedStartPort);
} else {
String absolute = regexMatcher.group(3);
if (!"?".equals(absolute)) {
int requestedPort = Integer.parseInt(absolute);
Preconditions.checkArgument(requestedPort >= PortUtils.MINIMUM_PORT &&
requestedPort <= PortUtils.MAXIMUM_PORT);
portStart = Optional.of(requestedPort);
portEnd = Optional.of(requestedPort);
}
}
}
Optional<Integer> port = takePort(portStart, portEnd);
portMappings.put(token, port);
}
}
for (Map.Entry<String, Optional<Integer>> port : portMappings.entrySet()) {
if (port.getValue().isPresent()) {
value = value.replace(port.getKey(), port.getValue().get().toString());
}
}
return value;
}
/**
 * Finds an open port within the inclusive range [portStart, portEnd]; either bound may be absent.
 *
 * ______________________________________________________
 * | portStart | portEnd  | takenPort                   |
 * |-----------|----------|-----------------------------|
 * | absent    | absent   | random                      |
 * | absent    | provided | 1024 < port <= portEnd      |
 * | provided  | absent   | portStart <= port <= 65535  |
 * | provided  | provided | portStart = port = portEnd  |
 * ------------------------------------------------------
 *
 * Synchronized so concurrent callers cannot race on the shared {@code assignedPorts} map.
 *
 * @param portStart the inclusive starting port (absent means the minimum allowed port)
 * @param portEnd the inclusive ending port (absent means the maximum allowed port)
 * @return The selected open port.
 * @throws RuntimeException if no open, unclaimed port can be found in the range
 */
private synchronized Optional<Integer> takePort(Optional<Integer> portStart, Optional<Integer> portEnd) {
  // Fully unbounded request: try up to 65535 random probes first.
  if (!portStart.isPresent() && !portEnd.isPresent()) {
    for (int i = 0; i < 65535; i++) {
      try {
        int port = this.portLocator.random();
        // putIfAbsent returns null when the port had not been claimed yet by this process.
        Boolean wasAssigned = assignedPorts.putIfAbsent(port, true);
        if (wasAssigned == null || !wasAssigned) {
          return Optional.of(port);
        }
      } catch (Exception ignored) {
        // Locator failed to bind; retry with another random port.
      }
    }
    // NOTE(review): if every random probe fails we fall through to the exhaustive
    // linear scan below over the full [MINIMUM_PORT, MAXIMUM_PORT] range.
  }
  for (int port = portStart.or(MINIMUM_PORT); port <= portEnd.or(MAXIMUM_PORT); port++) {
    try {
      // Probe the specific port (throws when the port is already in use).
      this.portLocator.specific(port);
      Boolean wasAssigned = assignedPorts.putIfAbsent(port, true);
      if (wasAssigned == null || !wasAssigned) {
        return Optional.of(port);
      }
    } catch (Exception ignored) {
      // Port is in use; advance to the next candidate.
    }
  }
  throw new RuntimeException(String.format("No open port could be found for %s to %s", portStart, portEnd));
}
/** Strategy for locating open ports; injectable so tests can avoid real socket binds. */
@VisibleForTesting
interface PortLocator {
  /** Returns an arbitrary open port chosen by the implementation. */
  int random() throws Exception;
  /** Probes the given {@code port}; returns it when available, otherwise throws. */
  int specific(int port) throws Exception;
}
/**
 * {@link PortLocator} that tests port availability by briefly binding a {@link ServerSocket}.
 * The socket is released immediately, so the port is only known to have been free at probe time.
 */
public static class ServerSocketPortLocator implements PortLocator {
  @Override
  public int random() throws Exception {
    // Port 0 asks the OS to select any free ephemeral port.
    return bindAndRelease(0);
  }
  @Override
  public int specific(int port) throws Exception {
    return bindAndRelease(port);
  }
  /** Binds {@code port}, captures the actual local port, and releases the socket. */
  private static int bindAndRelease(int port) throws Exception {
    try (ServerSocket probe = new ServerSocket(port)) {
      return probe.getLocalPort();
    }
  }
}
}
| 4,104 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/AutoReturnableObject.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.Closeable;
import java.io.IOException;
import org.apache.commons.pool2.impl.GenericObjectPool;
/**
 * Borrows an object from a {@link GenericObjectPool} on construction and returns it to the pool
 * automatically on {@link #close()}. Intended for use in a try-with-resources statement.
 */
public class AutoReturnableObject<T> implements Closeable {
  private final T object;
  private final GenericObjectPool<T> pool;
  private boolean returned;

  /**
   * Borrows an object from the given {@code pool}.
   * @throws IOException if borrowing fails for any reason (the cause is wrapped).
   */
  public AutoReturnableObject(GenericObjectPool<T> pool) throws IOException {
    this.pool = pool;
    this.returned = false;
    try {
      this.object = pool.borrowObject();
    } catch (Exception exc) {
      throw new IOException(exc);
    }
  }

  /**
   * @return the object borrowed from {@link GenericObjectPool}.
   * @throws IOException if this wrapper has already been closed.
   */
  public T get() throws IOException {
    if (!this.returned) {
      return this.object;
    }
    throw new IOException(this.getClass().getCanonicalName() + " has already been closed.");
  }

  /**
   * Return the borrowed object to the pool. The wrapper is marked closed even when the return fails.
   * @throws IOException if returning the object fails (the cause is wrapped).
   */
  @Override
  public void close() throws IOException {
    try {
      this.pool.returnObject(this.object);
    } catch (Exception exc) {
      throw new IOException(exc);
    } finally {
      this.returned = true;
    }
  }
}
| 4,105 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/PublisherUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.Collection;
import java.util.Map;
import lombok.AllArgsConstructor;
import lombok.Getter;
import com.google.common.base.Predicate;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Multimap;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.configuration.WorkUnitState.WorkingState;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.source.workunit.Extract;
/**
 * Utility class for {@link DataPublisher}.
 */
public class PublisherUtils {
  /**
   * Creates a {@link Multimap} grouping the given {@link WorkUnitState}s by their {@link Extract}.
   *
   * @see Multimap
   */
  public static Multimap<Extract, WorkUnitState> createExtractToWorkUnitStateMap(
      Collection<? extends WorkUnitState> workUnitStates) {
    Multimap<Extract, WorkUnitState> statesByExtract = ArrayListMultimap.create();
    for (WorkUnitState state : workUnitStates) {
      statesByExtract.put(state.getExtract(), state);
    }
    return statesByExtract;
  }

  /**
   * Given a {@link Multimap} of {@link Extract}s to {@link WorkUnitState}s, keeps only those
   * {@link Extract}s whose {@link WorkUnitState}s ALL satisfy the given {@link Predicate};
   * every other {@link Extract} is dropped.
   */
  public static Multimap<Extract, WorkUnitState> getExtractsForPredicate(
      Multimap<Extract, WorkUnitState> extractToWorkUnitStateMap, Predicate<WorkUnitState> predicate) {
    Multimap<Extract, WorkUnitState> matchingExtracts = ArrayListMultimap.create();
    for (Map.Entry<Extract, Collection<WorkUnitState>> entry : extractToWorkUnitStateMap.asMap().entrySet()) {
      // Iterables.all(...) is vacuously true for an empty collection.
      boolean everyStateMatches = Iterables.all(entry.getValue(), predicate);
      if (everyStateMatches) {
        matchingExtracts.putAll(entry.getKey(), entry.getValue());
      }
    }
    return matchingExtracts;
  }

  /**
   * Given a {@link Multimap} of {@link Extract}s to {@link WorkUnitState}s, partitions the entries
   * by whether ALL corresponding {@link WorkUnitState}s satisfy the given {@link Predicate}.
   * <ul>
   * <li> The filtered {@link Extract}s will be available in {@link SplitExtractsResult#getFiltered()}</li>
   * <li> The {@link Extract}s satisfying the predicate will be available in {@link SplitExtractsResult#getRetained()}</li>
   * </ul>
   *
   */
  public static SplitExtractsResult splitExtractsByPredicate(
      Multimap<Extract, WorkUnitState> extractToWorkUnitStateMap, Predicate<WorkUnitState> predicate) {
    Multimap<Extract, WorkUnitState> retainedExtracts = ArrayListMultimap.create();
    Multimap<Extract, WorkUnitState> filteredExtracts = ArrayListMultimap.create();
    for (Map.Entry<Extract, Collection<WorkUnitState>> entry : extractToWorkUnitStateMap.asMap().entrySet()) {
      // Route the whole entry to one side depending on whether every state satisfies the predicate.
      Multimap<Extract, WorkUnitState> destination =
          Iterables.all(entry.getValue(), predicate) ? retainedExtracts : filteredExtracts;
      destination.putAll(entry.getKey(), entry.getValue());
    }
    return new SplitExtractsResult(retainedExtracts, filteredExtracts);
  }

  /**
   * Implementation of {@link Predicate} that checks if a given {@link WorkUnitState} has a
   * {@link WorkingState} equal to {@link WorkingState#SUCCESSFUL}.
   */
  public static class WorkUnitStateSuccess implements Predicate<WorkUnitState> {
    @Override
    public boolean apply(WorkUnitState state) {
      return state.getWorkingState().equals(WorkingState.SUCCESSFUL);
    }
  }

  /** Result holder for {@link #splitExtractsByPredicate}: retained vs. filtered extract mappings. */
  @AllArgsConstructor
  @Getter
  public static class SplitExtractsResult {
    private Multimap<Extract, WorkUnitState> retained;
    private Multimap<Extract, WorkUnitState> filtered;
  }
}
| 4,106 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/FileListUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Comparator;
import java.util.List;
import java.util.Stack;
import org.apache.commons.lang.ArrayUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
import com.google.common.primitives.Longs;
import org.apache.gobblin.util.filters.HiddenFilter;
/**
 * Utility class for listing files on a {@link FileSystem}.
 *
 * @see FileSystem
 */
public class FileListUtils {
  private static final Logger LOG = LoggerFactory.getLogger(FileListUtils.class);

  /** Orders {@link FileStatus}es newest-first by modification time. */
  public static final Comparator<FileStatus> LATEST_MOD_TIME_ORDER = new Comparator<FileStatus>() {
    @Override
    public int compare(FileStatus file1, FileStatus file2) {
      // Arguments are deliberately reversed so newer files sort first.
      // Longs.compare takes primitive longs, so no explicit boxing is needed.
      return Longs.compare(file2.getModificationTime(), file1.getModificationTime());
    }
  };

  /** A {@link PathFilter} that accepts every path. */
  public static final PathFilter NO_OP_PATH_FILTER = new PathFilter() {
    @Override
    public boolean accept(Path path) {
      return true;
    }
  };

  /** Lists all files under {@code path} recursively, with no filtering. */
  public static List<FileStatus> listFilesRecursively(FileSystem fs, Path path)
      throws IOException {
    return listFilesRecursively(fs, path, NO_OP_PATH_FILTER);
  }

  /** Lists all files under each of {@code paths} recursively, with no filtering. */
  public static List<FileStatus> listFilesRecursively(FileSystem fs, Iterable<Path> paths)
      throws IOException {
    List<FileStatus> results = Lists.newArrayList();
    for (Path path : paths) {
      results.addAll(listFilesRecursively(fs, path));
    }
    return results;
  }

  /**
   * Given a path to copy, list all files rooted at the given path to copy
   *
   * @param fs the file system of the path
   * @param path root path to copy
   * @param fileFilter a filter only applied to root
   * @param includeEmptyDirectories a control to include empty directories for copy
   */
  public static List<FileStatus> listFilesToCopyAtPath(FileSystem fs, Path path, PathFilter fileFilter,
      boolean includeEmptyDirectories) throws IOException {
    return listFilesToCopyAtPath(fs, path, fileFilter, false, includeEmptyDirectories);
  }

  /**
   * Given a path to copy, list all files rooted at the given path to copy
   *
   * @param fs the file system of the path
   * @param path root path to copy
   * @param fileFilter a filter only applied to root
   * @param applyFilterToDirectories a control to decide whether to apply filter to directories
   * @param includeEmptyDirectories a control to include empty directories for copy
   */
  public static List<FileStatus> listFilesToCopyAtPath(FileSystem fs, Path path, PathFilter fileFilter,
      boolean applyFilterToDirectories, boolean includeEmptyDirectories) throws IOException {
    List<FileStatus> files = Lists.newArrayList();
    FileStatus rootFile = fs.getFileStatus(path);
    listFilesRecursivelyHelper(fs, files, rootFile, fileFilter, applyFilterToDirectories, includeEmptyDirectories);
    // Copy the empty root directory: nothing was collected below it, so it needs explicit inclusion.
    if (files.size() == 0 && rootFile.isDirectory() && includeEmptyDirectories) {
      files.add(rootFile);
    }
    return files;
  }

  /**
   * Helper method to list out all files under a specified path. The specified {@link PathFilter} is treated as a file
   * filter, that is it is only applied to file {@link Path}s.
   */
  public static List<FileStatus> listFilesRecursively(FileSystem fs, Path path, PathFilter fileFilter)
      throws IOException {
    return listFilesRecursively(fs, path, fileFilter, false);
  }

  /**
   * Helper method to list out all files under a specified path. If applyFilterToDirectories is false, the supplied
   * {@link PathFilter} will only be applied to files.
   */
  public static List<FileStatus> listFilesRecursively(FileSystem fs, Path path, PathFilter fileFilter,
      boolean applyFilterToDirectories)
      throws IOException {
    return listFilesRecursivelyHelper(fs, Lists.newArrayList(), fs.getFileStatus(path), fileFilter,
        applyFilterToDirectories, false);
  }

  /**
   * Depth-first accumulation of file statuses into {@code files}. Directories that end up
   * contributing no files are added themselves when {@code includeEmptyDirectories} is set.
   */
  private static List<FileStatus> listFilesRecursivelyHelper(FileSystem fs, List<FileStatus> files,
      FileStatus fileStatus, PathFilter fileFilter, boolean applyFilterToDirectories, boolean includeEmptyDirectories)
      throws FileNotFoundException, IOException {
    if (fileStatus.isDirectory()) {
      for (FileStatus status : fs
          .listStatus(fileStatus.getPath(), applyFilterToDirectories ? fileFilter : NO_OP_PATH_FILTER)) {
        if (status.isDirectory()) {
          // Number of files collected before diving into the directory
          int numFilesBefore = files.size();
          listFilesRecursivelyHelper(fs, files, status, fileFilter, applyFilterToDirectories, includeEmptyDirectories);
          // Number of files collected after diving into the directory
          int numFilesAfter = files.size();
          if (numFilesAfter == numFilesBefore && includeEmptyDirectories) {
            /*
             * This is effectively an empty directory, which needs explicit copying. Had there been any data file
             * in the directory, the directory would be created as a side-effect of copying the data file
             */
            files.add(status);
          }
        } else {
          listFilesRecursivelyHelper(fs, files, status, fileFilter, applyFilterToDirectories, includeEmptyDirectories);
        }
      }
    } else if (fileFilter.accept(fileStatus.getPath())) {
      files.add(fileStatus);
    }
    return files;
  }

  /**
   * Method to list out all files, or directory if no file exists, under a specified path.
   */
  public static List<FileStatus> listMostNestedPathRecursively(FileSystem fs, Path path)
      throws IOException {
    return listMostNestedPathRecursively(fs, path, NO_OP_PATH_FILTER);
  }

  /** Variant of {@link #listMostNestedPathRecursively(FileSystem, Path)} over multiple roots. */
  public static List<FileStatus> listMostNestedPathRecursively(FileSystem fs, Iterable<Path> paths)
      throws IOException {
    List<FileStatus> results = Lists.newArrayList();
    for (Path path : paths) {
      results.addAll(listMostNestedPathRecursively(fs, path));
    }
    return results;
  }

  /**
   * Method to list out all files, or directory if no file exists, under a specified path.
   * The specified {@link PathFilter} is treated as a file filter, that is it is only applied to file {@link Path}s.
   */
  public static List<FileStatus> listMostNestedPathRecursively(FileSystem fs, Path path, PathFilter fileFilter)
      throws IOException {
    return listMostNestedPathRecursivelyHelper(fs, Lists.<FileStatus>newArrayList(), fs.getFileStatus(path),
        fileFilter);
  }

  /** Recursive helper: empty directories are themselves collected in place of missing files. */
  private static List<FileStatus> listMostNestedPathRecursivelyHelper(FileSystem fs, List<FileStatus> files,
      FileStatus fileStatus, PathFilter fileFilter)
      throws IOException {
    if (fileStatus.isDirectory()) {
      FileStatus[] curFileStatus = fs.listStatus(fileStatus.getPath());
      if (ArrayUtils.isEmpty(curFileStatus)) {
        files.add(fileStatus);
      } else {
        for (FileStatus status : curFileStatus) {
          listMostNestedPathRecursivelyHelper(fs, files, status, fileFilter);
        }
      }
    } else if (fileFilter.accept(fileStatus.getPath())) {
      files.add(fileStatus);
    }
    return files;
  }

  /**
   * Helper method to list out all paths under a specified path. If the {@link org.apache.hadoop.fs.FileSystem} is
   * unable to list the contents of a relevant directory, will log an error and skip.
   */
  public static List<FileStatus> listPathsRecursively(FileSystem fs, Path path, PathFilter fileFilter)
      throws IOException {
    return listPathsRecursivelyHelper(fs, Lists.<FileStatus>newArrayList(), fs.getFileStatus(path), fileFilter);
  }

  /** Recursive helper: collects both files and directories accepted by {@code fileFilter}. */
  private static List<FileStatus> listPathsRecursivelyHelper(FileSystem fs, List<FileStatus> files,
      FileStatus fileStatus, PathFilter fileFilter) {
    if (fileFilter.accept(fileStatus.getPath())) {
      files.add(fileStatus);
    }
    if (fileStatus.isDirectory()) {
      try {
        for (FileStatus status : fs.listStatus(fileStatus.getPath())) {
          listPathsRecursivelyHelper(fs, files, status, fileFilter);
        }
      } catch (IOException ioe) {
        // Fix: pass the exception to the logger so the root cause (permissions, transient
        // HDFS error, ...) is not silently lost; previously only the path was logged.
        LOG.error("Could not list contents of path " + fileStatus.getPath(), ioe);
      }
    }
    return files;
  }

  /**
   * Get any data file, which is not hidden or a directory, from the given path
   */
  public static FileStatus getAnyNonHiddenFile(FileSystem fs, Path path)
      throws IOException {
    HiddenFilter hiddenFilter = new HiddenFilter();
    FileStatus root = fs.getFileStatus(path);
    if (!root.isDirectory()) {
      return hiddenFilter.accept(path) ? root : null;
    }
    // DFS to get the first data file
    Stack<FileStatus> folders = new Stack<>();
    folders.push(root);
    while (!folders.empty()) {
      FileStatus curFolder = folders.pop();
      try {
        for (FileStatus status : fs.listStatus(curFolder.getPath(), hiddenFilter)) {
          if (status.isDirectory()) {
            folders.push(status);
          } else {
            return status;
          }
        }
      } catch (FileNotFoundException exc) {
        // Folder disappeared between listing and visiting; skip it and continue the search.
      }
    }
    return null;
  }
}
| 4,107 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/ConfigUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.io.StringReader;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.gson.Gson;
import com.opencsv.CSVReader;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigException;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigList;
import com.typesafe.config.ConfigValue;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.password.PasswordManager;
/**
* Utility class for dealing with {@link Config} objects.
*/
public class ConfigUtils {
private final FileUtils fileUtils;
/**
* List of keys that should be excluded when converting to typesafe config.
* Usually, it is the key that is both the parent object of a value and a value, which is disallowed by Typesafe.
*/
private static final String GOBBLIN_CONFIG_BLACKLIST_KEYS = "gobblin.config.blacklistKeys";
/**
* A suffix that is automatically appended to property keys that are prefixes of other
* property keys. This is used during Properties -> Config -> Properties conversion since
* typesafe config does not allow such properties. */
public static final String STRIP_SUFFIX = ".ROOT_VALUE";
/**
* Available TimeUnit values that can be parsed from a given String
*/
private static final Set<String> validTimeUnits = Arrays.stream(TimeUnit.values())
.map(TimeUnit::name)
.collect(Collectors.toSet());
/** Creates a {@link ConfigUtils} whose file-writing methods delegate to the given {@link FileUtils}. */
public ConfigUtils(FileUtils fileUtils) {
  this.fileUtils = fileUtils;
}
/**
 * Renders the given {@link Config} as a HOCON string and writes it to {@code destPath}.
 *
 * @param config the config to persist
 * @param destPath the file path to write the rendered config to
 * @throws IOException if writing the file fails
 */
public void saveConfigToFile(final Config config, final Path destPath)
    throws IOException {
  final String configAsHoconString = config.root().render();
  this.fileUtils.saveToFile(configAsHoconString, destPath);
}
/**
 * Convert a given {@link Config} instance to a {@link Properties} instance, including all keys
 * (no prefix filtering).
 *
 * @param config the given {@link Config} instance
 * @return a {@link Properties} instance
 */
public static Properties configToProperties(Config config) {
  return configToProperties(config, Optional.absent());
}
/**
 * Convert a given {@link Config} instance to a {@link Properties} instance.
 * If a config value is not of String type, it is fetched as a generic object via
 * {@see com.typesafe.config.Config#getAnyRef()} and stored as its JSON representation.
 *
 * @param config the given {@link Config} instance
 * @param prefix an optional prefix; if present, only properties whose name starts with the prefix
 *               will be returned.
 * @return a {@link Properties} instance
 */
public static Properties configToProperties(Config config, Optional<String> prefix) {
  Properties properties = new Properties();
  if (config != null) {
    Config resolvedConfig = config.resolve();
    // Create the Gson serializer once instead of once per non-string entry (previously a
    // new Gson instance was constructed inside the loop for every wrong-typed value).
    Gson gson = new Gson();
    for (Map.Entry<String, ConfigValue> entry : resolvedConfig.entrySet()) {
      if (!prefix.isPresent() || entry.getKey().startsWith(prefix.get())) {
        String propKey = desanitizeKey(entry.getKey());
        String propVal;
        try {
          propVal = resolvedConfig.getString(entry.getKey());
        } catch (ConfigException.WrongType wrongType) {
          // Non-string value (list, object, number, ...): fall back to its JSON rendering.
          propVal = gson.toJson(resolvedConfig.getAnyRef(entry.getKey()));
        }
        properties.setProperty(propKey, propVal);
      }
    }
  }
  return properties;
}
/**
 * Convert a given {@link Config} instance to a {@link Properties} instance, keeping only keys
 * starting with {@code prefix}.
 *
 * @param config the given {@link Config} instance
 * @param prefix only properties whose name starts with the prefix will be returned.
 * @return a {@link Properties} instance
 */
public static Properties configToProperties(Config config, String prefix) {
  return configToProperties(config, Optional.of(prefix));
}
/**
 * @return the subconfig under {@code key} if it exists and is an object; otherwise an empty config
 *         (also when the path holds a scalar or list rather than a subconfig).
 */
public static Config getConfigOrEmpty(Config config, String key) {
  if (!config.hasPath(key)) {
    return ConfigFactory.empty();
  }
  try {
    return config.getConfig(key);
  } catch (ConfigException.WrongType wrongType) {
    // The path exists, but it is not a subconfig.
    return ConfigFactory.empty();
  }
}
/**
 * Convert a given {@link Config} to a {@link State} instance by going through {@link Properties}.
 *
 * @param config the given {@link Config} instance
 * @return a {@link State} instance
 */
public static State configToState(Config config) {
  return new State(configToProperties(config));
}
/**
 * Convert a given {@link Properties} to a {@link Config} instance, with no prefix filtering.
 *
 * <p>
 * This method will throw an exception if (1) the {@link Object#toString()} method of any two keys in the
 * {@link Properties} objects returns the same {@link String}, or (2) if any two keys are prefixes of one another,
 * see the Java Docs of {@link ConfigFactory#parseMap(Map)} for more details.
 * </p>
 *
 * @param properties the given {@link Properties} instance
 * @return a {@link Config} instance
 */
public static Config propertiesToConfig(Properties properties) {
  return propertiesToConfig(properties, Optional.absent());
}
/**
 * Finds the property keys that are complete (dot-separated) prefixes of other keys. This function is
 * meant to be used during conversion from Properties to typesafe Config, as the latter does not
 * allow a key to be both a value and the parent of other values.
 *
 * @param properties the Properties collection to inspect
 * @param keyPrefix an optional key prefix which limits which properties are inspected.
 */
public static Set<String> findFullPrefixKeys(Properties properties,
    Optional<String> keyPrefix) {
  String requiredPrefix = keyPrefix.or(StringUtils.EMPTY);
  // Sort the candidate keys so any full-prefix key immediately precedes its extensions.
  TreeSet<String> sortedKeys = new TreeSet<>();
  for (Map.Entry<Object, Object> entry : properties.entrySet()) {
    String key = entry.getKey().toString();
    if (StringUtils.startsWith(key, requiredPrefix)) {
      sortedKeys.add(key);
    }
  }
  Set<String> fullPrefixKeys = new HashSet<>();
  String previousKey = null;
  for (String key : sortedKeys) {
    // "a.b" is a full prefix of "a.b.c"; the dot guard rejects lookalikes such as "a.bc".
    if (previousKey != null && key.startsWith(previousKey + ".")) {
      fullPrefixKeys.add(previousKey);
    }
    previousKey = key;
  }
  return fullPrefixKeys;
}
/**
 * Convert all the keys that start with a <code>prefix</code> in {@link Properties} to a {@link Config} instance.
 *
 * <p>
 * This method will throw an exception if (1) the {@link Object#toString()} method of any two keys in the
 * {@link Properties} objects returns the same {@link String}, or (2) if any two keys are prefixes of one another,
 * see the Java Docs of {@link ConfigFactory#parseMap(Map)} for more details.
 * </p>
 *
 * @param properties the given {@link Properties} instance
 * @param prefix of keys to be converted
 * @return a {@link Config} instance
 */
public static Config propertiesToConfig(Properties properties, Optional<String> prefix) {
  // Keys listed under GOBBLIN_CONFIG_BLACKLIST_KEYS are excluded entirely from the conversion.
  Set<String> blacklistedKeys = new HashSet<>(0);
  if (properties.containsKey(GOBBLIN_CONFIG_BLACKLIST_KEYS)) {
    blacklistedKeys = new HashSet<>(Splitter.on(',').omitEmptyStrings().trimResults()
        .splitToList(properties.getProperty(GOBBLIN_CONFIG_BLACKLIST_KEYS)));
  }
  // Keys that are full prefixes of other keys must be suffixed (see STRIP_SUFFIX), since
  // typesafe Config forbids a key being both a value and a parent object.
  Set<String> fullPrefixKeys = findFullPrefixKeys(properties, prefix);
  ImmutableMap.Builder<String, Object> immutableMapBuilder = ImmutableMap.builder();
  for (Map.Entry<Object, Object> entry : properties.entrySet()) {
    String entryKey = entry.getKey().toString();
    if (StringUtils.startsWith(entryKey, prefix.or(StringUtils.EMPTY)) &&
        !blacklistedKeys.contains(entryKey)) {
      if (fullPrefixKeys.contains(entryKey)) {
        entryKey = sanitizeFullPrefixKey(entryKey);
      } else if (sanitizedKey(entryKey)) {
        // User-supplied keys must never collide with the reserved sanitization suffix.
        throw new RuntimeException("Properties are not allowed to end in " + STRIP_SUFFIX);
      }
      immutableMapBuilder.put(entryKey, entry.getValue());
    }
  }
  return ConfigFactory.parseMap(immutableMapBuilder.build());
}
/** Appends {@link #STRIP_SUFFIX} to mark {@code propKey} as a sanitized full-prefix key. */
public static String sanitizeFullPrefixKey(String propKey) {
  return propKey.concat(STRIP_SUFFIX);
}
/**
 * Returns true if {@code propKey} is a sanitized key, i.e. it ends with {@link #STRIP_SUFFIX}.
 */
public static boolean sanitizedKey(String propKey) {
  return propKey.endsWith(STRIP_SUFFIX);
}
/**
 * Reverses {@link #sanitizeFullPrefixKey(String)}: strips a trailing {@link #STRIP_SUFFIX} when
 * present, and removes any double quotes that TypeSafe Config may have introduced around key parts.
 */
public static String desanitizeKey(String propKey) {
  String key = propKey;
  if (sanitizedKey(key)) {
    key = key.substring(0, key.length() - STRIP_SUFFIX.length());
  }
  // Also strip quotes that can get introduced by TypeSafe.Config
  return key.replace("\"", "");
}
/**
 * Convert all the keys that start with a <code>prefix</code> in {@link Properties} to a
 * {@link Config} instance. The method also tries to guess the types of properties
 * (longs, doubles, booleans) via {@link #guessPropertiesTypes(Map)}.
 *
 * <p>
 * This method will throw an exception if (1) the {@link Object#toString()} method of any two keys in the
 * {@link Properties} objects returns the same {@link String}, or (2) if any two keys are prefixes of one another,
 * see the Java Docs of {@link ConfigFactory#parseMap(Map)} for more details.
 * </p>
 *
 * @param properties the given {@link Properties} instance
 * @param prefix of keys to be converted
 * @return a {@link Config} instance
 */
public static Config propertiesToTypedConfig(Properties properties, Optional<String> prefix) {
  Map<String, Object> typedProps = guessPropertiesTypes(properties);
  ImmutableMap.Builder<String, Object> immutableMapBuilder = ImmutableMap.builder();
  for (Map.Entry<String, Object> entry : typedProps.entrySet()) {
    if (StringUtils.startsWith(entry.getKey(), prefix.or(StringUtils.EMPTY))) {
      immutableMapBuilder.put(entry.getKey(), entry.getValue());
    }
  }
  return ConfigFactory.parseMap(immutableMapBuilder.build());
}
/** Attempts to guess the types of a Properties' values. By default, typesafe makes all property
 * values Strings. This implementation recognizes longs, doubles and booleans
 * ("true"/"yes" and "false"/"no", case-insensitive). All keys are treated as strings. */
private static Map<String, Object> guessPropertiesTypes(Map<Object, Object> srcProperties) {
  Map<String, Object> res = Maps.newHashMapWithExpectedSize(srcProperties.size());
  for (Map.Entry<Object, Object> prop : srcProperties.entrySet()) {
    Object value = prop.getValue();
    // instanceof is null-safe, so the previous explicit null check was redundant.
    if (value instanceof String && !Strings.isNullOrEmpty(value.toString())) {
      // Cache the string form once instead of calling toString() in every branch.
      String stringValue = value.toString();
      try {
        value = Long.parseLong(stringValue);
      } catch (NumberFormatException e) {
        try {
          value = Double.parseDouble(stringValue);
        } catch (NumberFormatException e2) {
          if (stringValue.equalsIgnoreCase("true") || stringValue.equalsIgnoreCase("yes")) {
            value = Boolean.TRUE;
          } else if (stringValue.equalsIgnoreCase("false") || stringValue.equalsIgnoreCase("no")) {
            value = Boolean.FALSE;
          }
          // Otherwise keep the original string value untouched.
        }
      }
    }
    res.put(prop.getKey().toString(), value);
  }
  return res;
}
/**
 * Return string value at <code>path</code> if <code>config</code> has path. If not return an empty string.
 * Delegates to {@link #getString(Config, String, String)} with an empty-string default.
 *
 * @param config in which the path may be present
 * @param path key to look for in the config object
 * @return string value at <code>path</code> if <code>config</code> has path. If not return an empty string
 */
public static String emptyIfNotPresent(Config config, String path) {
  return getString(config, path, StringUtils.EMPTY);
}
/**
 * Returns the string value at {@code path} if present in {@code config}; otherwise {@code def}.
 * If the value is not of String type, it is fetched as a generic object via
 * {@see com.typesafe.config.Config#getAnyRef()} and returned as its JSON representation.
 *
 * @param config in which the path may be present
 * @param path key to look for in the config object
 * @param def the fallback value returned when {@code path} is absent
 * @return the string (or JSON) value at {@code path}, or {@code def} when the path is absent
 */
public static String getString(Config config, String path, String def) {
  if (!config.hasPath(path)) {
    return def;
  }
  try {
    return config.getString(path);
  } catch (ConfigException.WrongType wrongType) {
    // The value exists but is not a string (e.g. a list or object): render it as JSON.
    return new Gson().toJson(config.getAnyRef(path));
  }
}
/**
 * Return TimeUnit value at <code>path</code> if <code>config</code> has path. If not return <code>def</code>.
 * The configured value is matched case-insensitively against {@link TimeUnit} constant names.
 *
 * @param config in which the path may be present
 * @param path key to look for in the config object
 * @param def the fallback value returned when {@code path} is absent
 * @return TimeUnit value at <code>path</code> if <code>config</code> has path. If not return <code>def</code>
 * @throws IllegalArgumentException if the configured value is not a valid {@link TimeUnit} name
 */
public static TimeUnit getTimeUnit(Config config, String path, TimeUnit def) {
  if (config.hasPath(path)) {
    String timeUnit = config.getString(path).toUpperCase();
    // Bug fix: the previous code used "literal".format(timeUnit), which statically resolves to
    // String.format(timeUnit) — the intended message was discarded and the raw value became the
    // format string. Use Preconditions' built-in %s message template instead.
    Preconditions.checkArgument(validTimeUnits.contains(timeUnit),
        "Passed invalid TimeUnit for documentTTLUnits: '%s'", timeUnit);
    return TimeUnit.valueOf(timeUnit);
  }
  return def;
}
/**
 * Return {@link Long} value at <code>path</code> if <code>config</code> has path. If not return <code>def</code>.
 *
 * @param config in which the path may be present
 * @param path key to look for in the config object
 * @param def the fallback value (may be null)
 * @return {@link Long} value at <code>path</code> if <code>config</code> has path. If not return <code>def</code>
 */
public static Long getLong(Config config, String path, Long def) {
  if (!config.hasPath(path)) {
    return def;
  }
  // Autoboxing performs the Long.valueOf conversion implicitly.
  return config.getLong(path);
}
/**
 * Return {@link Integer} value at <code>path</code> if <code>config</code> has path. If not return <code>def</code>.
 *
 * @param config in which the path may be present
 * @param path key to look for in the config object
 * @param def the fallback value (may be null)
 * @return {@link Integer} value at <code>path</code> if <code>config</code> has path. If not return <code>def</code>
 */
public static Integer getInt(Config config, String path, Integer def) {
  if (!config.hasPath(path)) {
    return def;
  }
  // Autoboxing performs the Integer.valueOf conversion implicitly.
  return config.getInt(path);
}
/**
 * Return boolean value at <code>path</code> if <code>config</code> has path. If not return <code>def</code>.
 *
 * @param config in which the path may be present
 * @param path key to look for in the config object
 * @param def the fallback value
 * @return boolean value at <code>path</code> if <code>config</code> has path. If not return <code>def</code>
 */
public static boolean getBoolean(Config config, String path, boolean def) {
  // Primitive operands, so the conditional expression carries no unboxing hazard.
  return config.hasPath(path) ? config.getBoolean(path) : def;
}
/**
 * Look up a double at <code>path</code>, falling back to <code>def</code> when absent.
 *
 * @param config in which the path may be present
 * @param path key to look for in the config object
 * @param def value returned when <code>path</code> is missing
 * @return the double value at <code>path</code>, or <code>def</code> if not set
 */
public static double getDouble(Config config, String path, double def) {
  return config.hasPath(path) ? config.getDouble(path) : def;
}
/**
 * Look up a nested {@link Config} at <code>path</code>, falling back to <code>def</code> when absent.
 *
 * @param config in which the path may be present
 * @param path key to look for in the config object
 * @param def value returned when <code>path</code> is missing
 * @return the sub-config at <code>path</code>, or <code>def</code> if not set
 */
public static Config getConfig(Config config, String path, Config def) {
  if (!config.hasPath(path)) {
    return def;
  }
  return config.getConfig(path);
}
/**
 * <p>
 * An extension to {@link Config#getStringList(String)}. The value at <code>path</code> may be a
 * TypeSafe {@link ConfigList} of strings (delegates to {@link Config#getStringList(String)}) or a
 * single comma-separated string, which is split here.
 * </p>
 * Additionally
 * <li>Returns an empty list if <code>path</code> does not exist
 * <li>removes any leading and lagging quotes from each string in the returned list.
 *
 * Examples below will all return a list [1,2,3] without quotes
 *
 * <ul>
 * <li> a.b=[1,2,3]
 * <li> a.b=["1","2","3"]
 * <li> a.b=1,2,3
 * <li> a.b="1","2","3"
 * </ul>
 *
 * @param config in which the path may be present
 * @param path key to look for in the config object
 * @return list of strings
 */
public static List<String> getStringList(Config config, String path) {
  if (!config.hasPath(path)) {
    return Collections.emptyList();
  }
  List<String> rawValues;
  try {
    rawValues = config.getStringList(path);
  } catch (ConfigException.WrongType e) {
    // Not a native list: the value is a single comma-separated string.
    String rawString = config.getString(path);
    if (StringUtils.isEmpty(rawString)) {
      return Collections.emptyList();
    }
    // CSV parsing honors quoting, so "a","false","b","10,12" splits into 4 values, not 5.
    try (CSVReader csvReader = new CSVReader(new StringReader(rawString))) {
      rawValues = Lists.newArrayList(csvReader.readNext());
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
  // Strip a single leading and trailing double quote from each entry: ["a","b"] ---> [a,b]
  List<String> unquoted = Lists.newArrayList();
  for (String value : rawValues) {
    unquoted.add(value == null ? null : value.replaceAll("^\"|\"$", ""));
  }
  return unquoted;
}
/**
 * Check if the given <code>key</code> exists in <code>config</code> and its value is neither null
 * nor blank (per {@link StringUtils#isNotBlank(CharSequence)}).
 *
 * @param config which may have the key
 * @param key to look for in the config
 * @return true if key exists and has a non-blank value, false otherwise
 */
public static boolean hasNonEmptyPath(Config config, String key) {
  if (!config.hasPath(key)) {
    return false;
  }
  return StringUtils.isNotBlank(config.getString(key));
}
/**
 * Check that every key-value pair in {@code subConfig} is also present in {@code superConfig}
 * with an equal (unwrapped) value, i.e. that subConfig is a subset of superConfig.
 * (The previous wording had super/sub reversed relative to what the code actually checks.)
 *
 * @param superConfig the config expected to contain all of subConfig's entries
 * @param subConfig the config whose entries are verified against superConfig
 * @return true if subConfig is a subset of superConfig, false otherwise
 */
public static boolean verifySubset(Config superConfig, Config subConfig) {
  for (Map.Entry<String, ConfigValue> entry : subConfig.entrySet()) {
    // Compare unwrapped (plain Java) values so equivalent values match regardless of config origin.
    if (!superConfig.hasPath(entry.getKey()) || !superConfig.getValue(entry.getKey()).unwrapped()
        .equals(entry.getValue().unwrapped())) {
      return false;
    }
  }
  return true;
}
/**
 * Resolves encrypted config value(s) by treating everything under "encConfigPath" as encrypted.
 * (If encConfigPath is absent, or does not exist in config, config is returned untouched.)
 * Decryption uses the {@link PasswordManager} built from this config, so PasswordManager's
 * conventions must be followed for values to decrypt.
 * Note that the "encConfigPath" prefix is stripped from the resulting keys, leaving only the
 * child path, e.g. with encConfigPath = enc.conf:
 * - Before : { enc.conf.secret_key : ENC(rOF43721f0pZqAXg#63a) }
 * - After  : { secret_key : decrypted_val }
 *
 * @param config the config possibly holding encrypted entries
 * @param encConfigPath optional path under which all entries are treated as encrypted
 * @return config with decrypted entries layered over the original
 */
public static Config resolveEncrypted(Config config, Optional<String> encConfigPath) {
  if (!encConfigPath.isPresent() || !config.hasPath(encConfigPath.get())) {
    return config;
  }
  Config encrypted = config.getConfig(encConfigPath.get());
  PasswordManager passwordManager = PasswordManager.getInstance(configToProperties(config));
  Map<String, String> decrypted = Maps.newHashMapWithExpectedSize(encrypted.entrySet().size());
  for (Map.Entry<String, ConfigValue> entry : encrypted.entrySet()) {
    String cipherText = entry.getValue().unwrapped().toString();
    decrypted.put(entry.getKey(), passwordManager.readPassword(cipherText));
  }
  // Decrypted (prefix-stripped) keys take priority over any identically named original keys.
  return ConfigFactory.parseMap(decrypted).withFallback(config);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.nio.file.AccessDeniedException;
import java.util.Collection;
import java.util.List;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Queue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Queues;
import com.google.common.io.BaseEncoding;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValue;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.deprecation.DeprecationUtils;
import org.apache.gobblin.util.executors.ScalingThreadPoolExecutor;
import org.apache.gobblin.writer.DataWriter;
/**
* A utility class for working with Hadoop.
*/
@Slf4j
public class HadoopUtils {
// Regex matching characters not allowed in HDFS tokens: whitespace, ':' and '\'.
public static final String HDFS_ILLEGAL_TOKEN_REGEX = "[\\s:\\\\]";
/**
 * A {@link Collection} of all known {@link FileSystem} schemes that do not support atomic renames or copies.
 *
 * <p>
 * The following important properties are useful to remember when writing code that is compatible with S3:
 * <ul>
 * <li>Renames are not atomic, and require copying the entire source file to the destination file</li>
 * <li>Writes to S3 using {@link FileSystem#create(Path)} will first go to the local filesystem, when the stream
 * is closed the local file will be uploaded to S3</li>
 * </ul>
 * </p>
 */
public static final Collection<String> FS_SCHEMES_NON_ATOMIC =
    ImmutableSortedSet.orderedBy(String.CASE_INSENSITIVE_ORDER).add("s3").add("s3a").add("s3n").build();
// Config key bounding queries-per-second against a FileSystem; see getOptionallyThrottledFileSystem.
public static final String MAX_FILESYSTEM_QPS = "filesystem.throttling.max.filesystem.qps";
// Legacy spellings of MAX_FILESYSTEM_QPS, still honored via DeprecationUtils.renameDeprecatedKeys.
private static final List<String> DEPRECATED_KEYS = Lists.newArrayList("gobblin.copy.max.filesystem.qps");
// Retry budget for FileNotFoundException races in safeRenameRecursively.
private static final int MAX_RENAME_TRIES = 3;
/**
 * Creates a Hadoop {@link Configuration} pre-populated with AWS credentials from the environment
 * (so s3/s3n URLs work) and with Gobblin's lightweight SFTP filesystem registered.
 *
 * @return a new, customized {@link Configuration}
 */
public static Configuration newConfiguration() {
  Configuration conf = new Configuration();
  // Explicitly check for S3 environment variables, so that Hadoop can access s3 and s3n URLs.
  // h/t https://github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
  String accessKeyId = System.getenv("AWS_ACCESS_KEY_ID");
  String secretAccessKey = System.getenv("AWS_SECRET_ACCESS_KEY");
  if (accessKeyId != null && secretAccessKey != null) {
    for (String prefix : new String[] { "fs.s3", "fs.s3n" }) {
      conf.set(prefix + ".awsAccessKeyId", accessKeyId);
      conf.set(prefix + ".awsSecretAccessKey", secretAccessKey);
    }
  }
  // Add a new custom filesystem mapping; disable caching so each lookup gets a fresh instance.
  conf.set("fs.sftp.impl", "org.apache.gobblin.source.extractor.extract.sftp.SftpLightWeightFileSystem");
  conf.set("fs.sftp.impl.disable.cache", "true");
  return conf;
}
/**
 * Recursively collects the {@link FileStatus} of all files (not directories) under {@code path}.
 *
 * @deprecated Use {@link FileListUtils#listFilesRecursively(FileSystem, Path)}.
 */
@Deprecated
public static List<FileStatus> listStatusRecursive(FileSystem fileSystem, Path path) throws IOException {
  List<FileStatus> accumulator = Lists.newArrayList();
  walk(accumulator, fileSystem, path);
  return accumulator;
}
/**
 * Get the path as a string without schema or authority.
 *
 * E.g. Converts sftp://user/data/file.txt to /user/data/file.txt
 *
 * @param path the path to strip
 * @return the path component of the path's URI
 */
public static String toUriPath(Path path) {
  URI uri = path.toUri();
  return uri.getPath();
}
/**
 * A wrapper around {@link FileSystem#delete(Path, boolean)} which throws {@link IOException} if the
 * given {@link Path} exists and {@link FileSystem#delete(Path, boolean)} returns false.
 * Deleting a nonexistent path is a no-op.
 */
public static void deletePath(FileSystem fs, Path f, boolean recursive) throws IOException {
  if (!fs.exists(f)) {
    return;
  }
  if (!fs.delete(f, recursive)) {
    throw new IOException("Failed to delete: " + f);
  }
}
/**
 * Deletes (or trashes) each directory in the given list.
 * When {@code moveToTrash} is set, each directory is moved to trash per the filesystem trash
 * policy; otherwise it is deleted via {@link #deletePath(FileSystem, Path, boolean)}.
 */
public static void deleteDirectories(FileSystem fs, List<String> directoriesToDelete, boolean recursive, boolean moveToTrash) throws IOException {
  for (String directory : directoriesToDelete) {
    Path target = new Path(directory);
    if (moveToTrash) {
      moveToTrash(fs, target);
    } else {
      deletePath(fs, target, recursive);
    }
  }
}
/**
 * A wrapper around {@link FileSystem#delete(Path, boolean)} that only deletes the given
 * {@link Path} when it is present on the given {@link FileSystem}.
 */
public static void deleteIfExists(FileSystem fs, Path path, boolean recursive) throws IOException {
  if (!fs.exists(path)) {
    return;
  }
  deletePath(fs, path, recursive);
}
/**
 * Deletes {@code f}, then walks up its ancestor chain deleting each directory that is left empty,
 * stopping at the first ancestor that is missing or non-empty.
 */
public static void deletePathAndEmptyAncestors(FileSystem fs, Path f, boolean recursive) throws IOException {
  deletePath(fs, f, recursive);
  for (Path ancestor = f.getParent(); ancestor != null; ancestor = ancestor.getParent()) {
    if (!fs.exists(ancestor) || fs.listStatus(ancestor).length != 0) {
      break;
    }
    deletePath(fs, ancestor, true);
  }
}
/**
 * Delete the direct children of {@code path} whose names match the given regular expression.
 *
 * @param fs Filesystem object
 * @param path base path whose children are examined
 * @param regex regular expression to select files to delete
 * @throws IOException if listing or deleting fails
 */
public static void deletePathByRegex(FileSystem fs, final Path path, final String regex) throws IOException {
  for (FileStatus match : fs.listStatus(path, candidate -> candidate.getName().matches(regex))) {
    HadoopUtils.deletePath(fs, match.getPath(), true);
  }
}
/**
 * Moves the object to the filesystem trash according to the file system policy, using a
 * freshly created default {@link Configuration}.
 *
 * @param fs FileSystem object
 * @param path Path to the object to be moved to trash.
 * @throws IOException if the move fails
 */
public static void moveToTrash(FileSystem fs, Path path) throws IOException {
  Configuration defaultConf = new Configuration();
  moveToTrash(fs, path, defaultConf);
}
/**
 * Moves the object to the filesystem trash according to the file system policy.
 * Delegates to {@link Trash#moveToAppropriateTrash}, which picks the trash location
 * appropriate for the path's filesystem.
 *
 * @param fs FileSystem object
 * @param path Path to the object to be moved to trash.
 * @param conf Configurations
 * @throws IOException if the move fails
 */
public static void moveToTrash(FileSystem fs, Path path, Configuration conf) throws IOException {
  Trash.moveToAppropriateTrash(fs, path, conf);
}
/**
 * Renames a src {@link Path} on fs {@link FileSystem} to a dst {@link Path}. If fs is a {@link LocalFileSystem} and
 * src is a directory then {@link File#renameTo} is called directly to avoid a directory rename race condition where
 * {@link org.apache.hadoop.fs.RawLocalFileSystem#rename} copies the conflicting src directory into dst resulting in
 * an extra nested level, such as /root/a/b/c/e/e where e is repeated.
 *
 * @param fs the {@link FileSystem} where the src {@link Path} exists
 * @param src the source {@link Path} which will be renamed
 * @param dst the {@link Path} to rename to
 * @return true if rename succeeded, false if rename failed.
 * @throws IOException if rename failed for reasons other than target exists.
 */
public static boolean renamePathHandleLocalFSRace(FileSystem fs, Path src, Path dst) throws IOException {
  Object underlyingFs = DecoratorUtils.resolveUnderlyingObject(fs);
  if (underlyingFs instanceof LocalFileSystem && fs.isDirectory(src)) {
    LocalFileSystem localFs = (LocalFileSystem) underlyingFs;
    return localFs.pathToFile(src).renameTo(localFs.pathToFile(dst));
  }
  return fs.rename(src, dst);
}
/**
 * A wrapper around {@link FileContext#rename(Path, Path, Options.Rename...)}.
 * Delegates with overwrite disabled; rename fails if {@code newName} exists.
 */
public static void renamePath(FileContext fc, Path oldName, Path newName) throws IOException {
  renamePath(fc, oldName, newName, false);
}
/**
 * A wrapper around {@link FileContext#rename(Path, Path, Options.Rename...)}, translating the
 * boolean {@code overwrite} flag into the corresponding {@link Options.Rename} option.
 */
public static void renamePath(FileContext fc, Path oldName, Path newName, boolean overwrite)
    throws IOException {
  if (overwrite) {
    fc.rename(oldName, newName, Options.Rename.OVERWRITE);
  } else {
    fc.rename(oldName, newName, Options.Rename.NONE);
  }
}
/**
 * A wrapper around {@link FileSystem#rename(Path, Path)} which throws {@link IOException} if
 * {@link FileSystem#rename(Path, Path)} returns False.
 * Delegates with overwrite disabled; rename fails if {@code newName} exists.
 */
public static void renamePath(FileSystem fs, Path oldName, Path newName) throws IOException {
  renamePath(fs, oldName, newName, false);
}
/**
 * A wrapper around {@link FileSystem#rename(Path, Path)} which throws {@link IOException} if
 * {@link FileSystem#rename(Path, Path)} returns False.
 * @param fs FileSystem object
 * @param oldName old name of the path
 * @param newName new name of the path
 * @param overwrite whether an existing destination may be replaced (moved to trash)
 * @throws IOException if the rename fails
 */
public static void renamePath(FileSystem fs, Path oldName, Path newName, boolean overwrite) throws IOException {
  renamePath(fs, oldName, newName, overwrite, new Configuration());
}
/**
 * A wrapper around {@link FileSystem#rename(Path, Path)} which throws {@link IOException} if
 * {@link FileSystem#rename(Path, Path)} returns False.
 *
 * @param fs FileSystem object
 * @param oldName source path to rename
 * @param newName destination path
 * @param overwrite if true, an existing destination is moved to trash before renaming
 * @param conf used to locate the appropriate trash directory when overwriting
 * @throws FileNotFoundException if oldName does not exist (non-HDFS branch only)
 * @throws FileAlreadyExistsException if newName exists and overwrite is false (non-HDFS branch only)
 * @throws IOException if the rename fails
 */
public static void renamePath(FileSystem fs, Path oldName, Path newName, boolean overwrite, Configuration conf) throws IOException {
  //In default implementation of rename with rewrite option in FileSystem, if the parent dir of dst does not exist, it will throw exception,
  //Which will fail some of our job unintentionally. So we only call that method when fs is an instance of DistributedFileSystem to avoid inconsistency problem
  if(fs instanceof DistributedFileSystem) {
    // HDFS supports rename with an overwrite option natively.
    Options.Rename renameOptions = (overwrite) ? Options.Rename.OVERWRITE : Options.Rename.NONE;
    ((DistributedFileSystem) fs).rename(oldName, newName, renameOptions);
  } else {
    // Emulate overwrite semantics: check src, move an existing dst to trash (not a hard delete),
    // then perform a plain rename.
    if (!fs.exists(oldName)) {
      throw new FileNotFoundException(String.format("Failed to rename %s to %s: src not found", oldName, newName));
    }
    if (fs.exists(newName)) {
      if (overwrite) {
        HadoopUtils.moveToTrash(fs, newName, conf);
      } else {
        throw new FileAlreadyExistsException(String.format("Failed to rename %s to %s: dst already exists", oldName, newName));
      }
    }
    if (!fs.rename(oldName, newName)) {
      throw new IOException(String.format("Failed to rename %s to %s", oldName, newName));
    }
  }
}
/**
 * Moves a src {@link Path} from a srcFs {@link FileSystem} to a dst {@link Path} on a dstFs {@link FileSystem}. If
 * the srcFs and the dstFs have the same scheme, and neither of them are S3 schemes, then the {@link Path} is simply
 * renamed. Otherwise, the data is copied from the src {@link Path} to the dst {@link Path}. So this method can handle
 * moving data between different {@link FileSystem} implementations.
 * Delegates with overwrite disabled.
 *
 * @param srcFs the source {@link FileSystem} where the src {@link Path} exists
 * @param src the source {@link Path} which will be moved
 * @param dstFs the destination {@link FileSystem} where the dst {@link Path} should be created
 * @param dst the {@link Path} to move data to
 */
public static void movePath(FileSystem srcFs, Path src, FileSystem dstFs, Path dst, Configuration conf)
    throws IOException {
  movePath(srcFs, src, dstFs, dst, false, conf);
}
/**
 * Moves a src {@link Path} from a srcFs {@link FileSystem} to a dst {@link Path} on a dstFs
 * {@link FileSystem}. If the srcFs and the dstFs share a scheme and neither scheme is in
 * {@link #FS_SCHEMES_NON_ATOMIC}, the {@link Path} is simply renamed. Otherwise the data is
 * copied from src to dst (with source deletion), so this method can move data between different
 * {@link FileSystem} implementations.
 *
 * @param srcFs the source {@link FileSystem} where the src {@link Path} exists
 * @param src the source {@link Path} which will be moved
 * @param dstFs the destination {@link FileSystem} where the dst {@link Path} should be created
 * @param dst the {@link Path} to move data to
 * @param overwrite true if the destination should be overwritten; otherwise, false
 */
public static void movePath(FileSystem srcFs, Path src, FileSystem dstFs, Path dst, boolean overwrite,
    Configuration conf) throws IOException {
  String srcScheme = srcFs.getUri().getScheme();
  String dstScheme = dstFs.getUri().getScheme();
  boolean sameScheme = srcScheme.equals(dstScheme);
  boolean anyNonAtomic = FS_SCHEMES_NON_ATOMIC.contains(srcScheme) || FS_SCHEMES_NON_ATOMIC.contains(dstScheme);
  if (sameScheme && !anyNonAtomic) {
    renamePath(srcFs, src, dst);
  } else {
    // Cross-filesystem (or non-atomic) move: copy with deleteSource=true.
    copyPath(srcFs, src, dstFs, dst, true, overwrite, conf);
  }
}
/**
 * Copies data from a src {@link Path} to a dst {@link Path}, neither deleting the source nor
 * overwriting an existing destination.
 *
 * <p>
 * This method should be used in preference to
 * {@link FileUtil#copy(FileSystem, Path, FileSystem, Path, boolean, boolean, Configuration)}, which does not handle
 * clean up of incomplete files if there is an error while copying data.
 * </p>
 *
 * <p>
 * TODO this method does not handle cleaning up any local files leftover by writing to S3.
 * </p>
 *
 * @param srcFs the source {@link FileSystem} where the src {@link Path} exists
 * @param src the {@link Path} to copy from the source {@link FileSystem}
 * @param dstFs the destination {@link FileSystem} where the dst {@link Path} should be created
 * @param dst the {@link Path} to copy data to
 */
public static void copyPath(FileSystem srcFs, Path src, FileSystem dstFs, Path dst, Configuration conf)
    throws IOException {
  copyPath(srcFs, src, dstFs, dst, false, false, conf);
}
/**
 * Copies data from a src {@link Path} to a dst {@link Path} without deleting the source,
 * optionally overwriting an existing destination.
 *
 * <p>
 * This method should be used in preference to
 * {@link FileUtil#copy(FileSystem, Path, FileSystem, Path, boolean, boolean, Configuration)}, which does not handle
 * clean up of incomplete files if there is an error while copying data.
 * </p>
 *
 * <p>
 * TODO this method does not handle cleaning up any local files leftover by writing to S3.
 * </p>
 *
 * @param srcFs the source {@link FileSystem} where the src {@link Path} exists
 * @param src the {@link Path} to copy from the source {@link FileSystem}
 * @param dstFs the destination {@link FileSystem} where the dst {@link Path} should be created
 * @param dst the {@link Path} to copy data to
 * @param overwrite true if the destination should be overwritten; otherwise, false
 */
public static void copyPath(FileSystem srcFs, Path src, FileSystem dstFs, Path dst, boolean overwrite,
    Configuration conf) throws IOException {
  copyPath(srcFs, src, dstFs, dst, false, overwrite, conf);
}
/**
 * Core copy implementation backing the public copyPath overloads: validates preconditions,
 * copies src to dst (optionally deleting the source), and deletes any partially written
 * destination if the copy fails.
 */
private static void copyPath(FileSystem srcFs, Path src, FileSystem dstFs, Path dst, boolean deleteSource,
    boolean overwrite, Configuration conf) throws IOException {
  Preconditions.checkArgument(srcFs.exists(src),
      String.format("Cannot copy from %s to %s because src does not exist", src, dst));
  Preconditions.checkArgument(overwrite || !dstFs.exists(dst),
      String.format("Cannot copy from %s to %s because dst exists", src, dst));
  try {
    // Local sources go through copyFromLocalFile; everything else through FileUtil.copy.
    boolean isSourceFileSystemLocal = srcFs instanceof LocalFileSystem || srcFs instanceof RawLocalFileSystem;
    if (isSourceFileSystemLocal) {
      try {
        dstFs.copyFromLocalFile(deleteSource, overwrite, src, dst);
      } catch (IOException e) {
        throw new IOException(String.format("Failed to copy %s to %s", src, dst), e);
      }
    } else if (!FileUtil.copy(srcFs, src, dstFs, dst, deleteSource, overwrite, conf)) {
      throw new IOException(String.format("Failed to copy %s to %s", src, dst));
    }
  } catch (Throwable t1) {
    // Best-effort removal of a partially written destination; the original failure is what
    // propagates, so any secondary cleanup failure is deliberately swallowed.
    try {
      deleteIfExists(dstFs, dst, true);
    } catch (Throwable t2) {
      // Do nothing
    }
    throw t1;
  }
}
/**
 * Copies a src {@link Path} from a srcFs {@link FileSystem} to a dst {@link Path} on a dstFs {@link FileSystem}. If
 * either the srcFs or dstFs are S3 {@link FileSystem}s (as dictated by {@link #FS_SCHEMES_NON_ATOMIC}) then data is directly
 * copied from the src to the dst. Otherwise data is first copied to a tmp {@link Path}, which is then renamed to the
 * dst.
 *
 * @param srcFs the source {@link FileSystem} where the src {@link Path} exists
 * @param src the {@link Path} to copy from the source {@link FileSystem}
 * @param dstFs the destination {@link FileSystem} where the dst {@link Path} should be created
 * @param dst the {@link Path} to copy data to
 * @param tmp the temporary {@link Path} to use when copying data
 * @param overwriteDst true if the destination and tmp path should be overwritten, false otherwise
 */
public static void copyFile(FileSystem srcFs, Path src, FileSystem dstFs, Path dst, Path tmp, boolean overwriteDst,
    Configuration conf) throws IOException {
  Preconditions.checkArgument(srcFs.isFile(src),
      String.format("Cannot copy from %s to %s because src is not a file", src, dst));
  if (FS_SCHEMES_NON_ATOMIC.contains(srcFs.getUri().getScheme())
      || FS_SCHEMES_NON_ATOMIC.contains(dstFs.getUri().getScheme())) {
    // Non-atomic filesystems gain nothing from staging + rename: copy straight to dst.
    copyFile(srcFs, src, dstFs, dst, overwriteDst, conf);
  } else {
    // Stage at tmp first, then promote to dst with an atomic rename.
    copyFile(srcFs, src, dstFs, tmp, overwriteDst, conf);
    try {
      boolean renamed = false;
      if (overwriteDst && dstFs.exists(dst)) {
        try {
          deletePath(dstFs, dst, true);
        } finally {
          // NOTE(review): the rename runs even if the delete above throws — presumably so the
          // staged data is promoted whenever possible; confirm this is intentional.
          renamePath(dstFs, tmp, dst);
          renamed = true;
        }
      }
      if (!renamed) {
        renamePath(dstFs, tmp, dst);
      }
    } finally {
      // Clean up the staging file whether or not the promotion succeeded.
      deletePath(dstFs, tmp, true);
    }
  }
}
/**
 * Copy a file from a srcFs {@link FileSystem} to a dstFs {@link FileSystem}. The src {@link Path} must be a file,
 * that is {@link FileSystem#isFile(Path)} must return true for src.
 *
 * <p>
 * If overwrite is specified to true, this method may delete the dst directory even if the copy from src to dst fails.
 * </p>
 *
 * @param srcFs the src {@link FileSystem} to copy the file from
 * @param src the src {@link Path} to copy
 * @param dstFs the destination {@link FileSystem} to write to
 * @param dst the destination {@link Path} to write to
 * @param overwrite true if the dst {@link Path} should be overwritten, false otherwise
 */
public static void copyFile(FileSystem srcFs, Path src, FileSystem dstFs, Path dst, boolean overwrite,
    Configuration conf) throws IOException {
  Preconditions.checkArgument(srcFs.isFile(src),
      String.format("Cannot copy from %s to %s because src is not a file", src, dst));
  Preconditions.checkArgument(overwrite || !dstFs.exists(dst),
      String.format("Cannot copy from %s to %s because dst exists", src, dst));
  // Stream the bytes; try-with-resources closes both ends even on failure.
  try (InputStream in = srcFs.open(src); OutputStream out = dstFs.create(dst, overwrite)) {
    IOUtils.copyBytes(in, out, conf, false);
  } catch (Throwable t1) {
    // Best-effort removal of a partially written destination; the original error propagates.
    try {
      deleteIfExists(dstFs, dst, true);
    } catch (Throwable t2) {
      // Do nothing
    }
    throw t1;
  }
}
/**
 * Recursively accumulates the {@link FileStatus} of every non-directory entry under {@code path}
 * into {@code results}.
 */
private static void walk(List<FileStatus> results, FileSystem fileSystem, Path path) throws IOException {
  for (FileStatus status : fileSystem.listStatus(path)) {
    if (status.isDirectory()) {
      walk(results, fileSystem, status.getPath());
    } else {
      results.add(status);
    }
  }
}
/**
 * This method is an additive implementation of the {@link FileSystem#rename(Path, Path)} method. It moves all the
 * files/directories under 'from' path to the 'to' path without overwriting existing directories in the 'to' path.
 *
 * <p>
 * The rename operation happens at the first non-existent sub-directory. If a directory at destination path already
 * exists, it recursively tries to move sub-directories. If all the sub-directories also exist at the destination,
 * a file level move is done
 * </p>
 *
 * @param fileSystem on which the data needs to be moved
 * @param from path of the data to be moved
 * @param to path of the data to be moved
 * @throws IOException if the source does not exist or any rename task fails
 */
public static void renameRecursively(FileSystem fileSystem, Path from, Path to) throws IOException {
  log.info(String.format("Recursively renaming %s in %s to %s.", from, fileSystem.getUri(), to));
  // Throttle FS calls so the parallel rename tasks below cannot overwhelm the namenode.
  FileSystem throttledFS = getOptionallyThrottledFileSystem(fileSystem, 10000);
  ExecutorService executorService = ScalingThreadPoolExecutor.newScalingThreadPool(1, 100, 100,
      ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("rename-thread-%d")));
  // Work queue shared with RenameRecursively tasks: each task may enqueue child tasks,
  // and only this thread polls, so draining until empty awaits the whole tree.
  Queue<Future<?>> futures = Queues.newConcurrentLinkedQueue();
  try {
    if (!fileSystem.exists(from)) {
      throw new IOException("Trying to rename a path that does not exist! " + from);
    }
    futures.add(executorService
        .submit(new RenameRecursively(throttledFS, fileSystem.getFileStatus(from), to, executorService, futures)));
    int futuresUsed = 0;
    while (!futures.isEmpty()) {
      try {
        futures.poll().get();
        futuresUsed++;
      } catch (ExecutionException | InterruptedException ee) {
        throw new IOException(ee.getCause());
      }
    }
    log.info(String.format("Recursive renaming of %s to %s. (details: used %d futures)", from, to, futuresUsed));
  } finally {
    ExecutorsUtils.shutdownExecutorService(executorService, Optional.of(log), 1, TimeUnit.SECONDS);
  }
}
/**
 * Calls {@link #getOptionallyThrottledFileSystem(FileSystem, int)} parsing the qps from the input {@link State}
 * at key {@link #MAX_FILESYSTEM_QPS} (after migrating any deprecated key spellings).
 * @throws IOException if the throttled filesystem cannot be created
 */
public static FileSystem getOptionallyThrottledFileSystem(FileSystem fs, State state) throws IOException {
  DeprecationUtils.renameDeprecatedKeys(state, MAX_FILESYSTEM_QPS, DEPRECATED_KEYS);
  if (!state.contains(MAX_FILESYSTEM_QPS)) {
    return fs;
  }
  return getOptionallyThrottledFileSystem(fs, state.getPropAsInt(MAX_FILESYSTEM_QPS));
}
/**
 * Get a throttled {@link FileSystem} that limits the number of queries per second to a
 * {@link FileSystem}. If the input qps is <= 0, or the filesystem is already rate controlled
 * somewhere in its decorator lineage, the input filesystem is returned unchanged.
 * @throws IOException if the throttled filesystem cannot be created
 */
public static FileSystem getOptionallyThrottledFileSystem(FileSystem fs, int qpsLimit) throws IOException {
  if (fs instanceof Decorator) {
    for (Object decorator : DecoratorUtils.getDecoratorLineage(fs)) {
      if (decorator instanceof RateControlledFileSystem) {
        // Already rate controlled somewhere in the decorator chain.
        return fs;
      }
    }
  }
  if (qpsLimit <= 0) {
    return fs;
  }
  try {
    RateControlledFileSystem throttled = new RateControlledFileSystem(fs, qpsLimit);
    throttled.startRateControl();
    return throttled;
  } catch (ExecutionException ee) {
    throw new IOException("Could not create throttled FileSystem.", ee);
  }
}
/**
 * Task that renames a single {@link FileStatus} to {@code to}; when the target directory already
 * exists, it fans out one child task per entry (via {@code executorService}/{@code futures}) so
 * the recursion proceeds in parallel. Submitted futures are drained by
 * {@link #renameRecursively(FileSystem, Path, Path)}.
 */
@AllArgsConstructor
private static class RenameRecursively implements Runnable {
  private final FileSystem fileSystem;
  private final FileStatus from;
  private final Path to;
  private final ExecutorService executorService;
  private final Queue<Future<?>> futures;
  @Override
  public void run() {
    try {
      // Attempt to move safely if directory, unsafely if file (for performance, files are much less likely to collide on target)
      boolean moveSucessful;
      try {
        moveSucessful = this.from.isDirectory() ? safeRenameIfNotExists(this.fileSystem, this.from.getPath(), this.to) : unsafeRenameIfNotExists(this.fileSystem, this.from.getPath(), this.to);
      } catch (AccessDeniedException e) {
        // If an AccessDeniedException occurs for a directory then assume that it exists and continue the
        // recursive renaming. If the error occurs for a file then re-raise the exception since the existence check
        // is required to determine whether to copy the file.
        if (this.from.isDirectory()) {
          moveSucessful = false;
        } else {
          throw e;
        }
      }
      if (!moveSucessful) {
        if (this.from.isDirectory()) {
          // Target directory exists: recurse into children, preserving the path suffix under 'from'.
          for (FileStatus fromFile : this.fileSystem.listStatus(this.from.getPath())) {
            Path relativeFilePath = new Path(StringUtils.substringAfter(fromFile.getPath().toString(),
                this.from.getPath().toString() + Path.SEPARATOR));
            Path toFilePath = new Path(this.to, relativeFilePath);
            this.futures.add(this.executorService.submit(
                new RenameRecursively(this.fileSystem, fromFile, toFilePath, this.executorService, this.futures)));
          }
        } else {
          log.info(String.format("File already exists %s. Will not rewrite", this.to));
        }
      }
    } catch (IOException ioe) {
      // Runnable cannot throw checked exceptions; the wrapping RuntimeException surfaces through Future.get().
      throw new RuntimeException("Failed to rename " + this.from.getPath() + " to " + this.to, ioe);
    }
  }
}
/**
 * Renames from to to if to doesn't exist in a thread-safe way. This method is necessary because
 * {@link FileSystem#rename} is inconsistent across file system implementations, e.g. in some of them rename(foo, bar)
 * will create bar/foo if bar already existed, but it will only create bar if it didn't.
 *
 * <p>
 * The thread-safety is only guaranteed among calls to this method. An external modification to the relevant
 * target directory could still cause unexpected results in the renaming.
 * </p>
 *
 * @param fs filesystem where rename will be executed.
 * @param from origin {@link Path}.
 * @param to target {@link Path}.
 * @return true if rename succeeded, false if the target already exists.
 * @throws IOException if rename failed for reasons other than target exists.
 */
public synchronized static boolean safeRenameIfNotExists(FileSystem fs, Path from, Path to) throws IOException {
  // Class-level lock serializes the exists/mkdirs/rename sequence done by the unsafe variant.
  return unsafeRenameIfNotExists(fs, from, to);
}
/**
 * Renames from to to if to doesn't exist in a non-thread-safe way.
 *
 * @param fs filesystem where rename will be executed.
 * @param from origin {@link Path}.
 * @param to target {@link Path}.
 * @return true if rename succeeded, false if the target already exists.
 * @throws IOException if rename failed for reasons other than target exists.
 */
public static boolean unsafeRenameIfNotExists(FileSystem fs, Path from, Path to) throws IOException {
  if (!fs.exists(to)) {
    // Some implementations fail to rename when the target's parent is missing; create it first.
    if (!fs.exists(to.getParent())) {
      fs.mkdirs(to.getParent());
    }
    if (!renamePathHandleLocalFSRace(fs, from, to)) {
      // Rename returned false: if the target appeared concurrently, report "already exists"
      // (false); if it is still absent, the rename genuinely failed.
      if (!fs.exists(to)) {
        throw new IOException(String.format("Failed to rename %s to %s.", from, to));
      }
      return false;
    }
    return true;
  }
  return false;
}
/**
 * A thread safe variation of {@link #renamePath(FileSystem, Path, Path)} which can be used in
 * multi-threaded/multi-mapper environment. The rename operation always happens at file level hence directories are
 * not overwritten under the 'to' path.
 *
 * <p>
 * If the contents of destination 'to' path is not expected to be modified concurrently, use
 * {@link #renamePath(FileSystem, Path, Path)} which is faster and more optimized
 * </p>
 *
 * <b>NOTE: This does not seem to be working for all {@link FileSystem} implementations. Use
 * {@link #renameRecursively(FileSystem, Path, Path)}</b>
 *
 * @param fileSystem on which the data needs to be moved
 * @param from path of the data to be moved
 * @param to path of the data to be moved
 *
 */
public static void safeRenameRecursively(FileSystem fileSystem, Path from, Path to) throws IOException {
  for (FileStatus fromFile : FileListUtils.listFilesRecursively(fileSystem, from)) {
    // Preserve each file's path suffix relative to 'from' under the 'to' root.
    Path relativeFilePath =
        new Path(StringUtils.substringAfter(fromFile.getPath().toString(), from.toString() + Path.SEPARATOR));
    Path toFilePath = new Path(to, relativeFilePath);
    if (!fileSystem.exists(toFilePath)) {
      boolean renamed = false;
      // underlying file open can fail with file not found error due to some race condition
      // when the parent directory is created in another thread, so retry a few times
      for (int i = 0; !renamed && i < MAX_RENAME_TRIES; i++) {
        try {
          renamed = fileSystem.rename(fromFile.getPath(), toFilePath);
          // Unconditional break: only a FileNotFoundException triggers another attempt;
          // a clean false from rename() falls through to the failure below.
          break;
        } catch (FileNotFoundException e) {
          if (i + 1 >= MAX_RENAME_TRIES) {
            throw e;
          }
        }
      }
      if (!renamed) {
        throw new IOException(String.format("Failed to rename %s to %s.", fromFile.getPath(), toFilePath));
      }
      log.info(String.format("Renamed %s to %s", fromFile.getPath(), toFilePath));
    } else {
      log.info(String.format("File already exists %s. Will not rewrite", toFilePath));
    }
  }
}
/**
 * Builds a Hadoop {@link Configuration} from all properties of the given {@link State},
 * performing no encrypted-path resolution.
 */
public static Configuration getConfFromState(State state) {
  Optional<String> noEncryptedPath = Optional.absent();
  return getConfFromState(state, noEncryptedPath);
}
/**
 * Provides Hadoop configuration given state.
 * It also supports decrypting values on "encryptedPath".
 * Note that this encryptedPath path will be removed from full path of each config key and leaving only child path on the key(s).
 * If there's same config path as child path, the one stripped will have higher priority.
 *
 * e.g:
 * - encryptedPath: writer.fs.encrypted
 *   before: writer.fs.encrypted.secret
 *   after: secret
 *
 * Common use case for these encryptedPath:
 * When there's have encrypted credential in job property but you'd like Filesystem to get decrypted value.
 *
 * @param state source state whose properties are copied into the returned configuration.
 * @param encryptedPath Optional. If provided, config that is on this path will be decrypted. @see ConfigUtils.resolveEncrypted
 *                      Note that config on encryptedPath will be included in the end result even it's not part of includeOnlyPath
 * @return Hadoop Configuration.
 */
public static Configuration getConfFromState(State state, Optional<String> encryptedPath) {
  Config config = ConfigFactory.parseProperties(state.getProperties());
  if (encryptedPath.isPresent()) {
    config = ConfigUtils.resolveEncrypted(config, encryptedPath);
  }
  Configuration conf = newConfiguration();
  config.entrySet()
      .forEach(entry -> conf.set(entry.getKey(), entry.getValue().unwrapped().toString()));
  return conf;
}
/**
 * Builds a Hadoop {@link Configuration} containing every string property of the given
 * {@link Properties} object.
 */
public static Configuration getConfFromProperties(Properties properties) {
  Configuration conf = newConfiguration();
  properties.stringPropertyNames()
      .forEach(name -> conf.set(name, properties.getProperty(name)));
  return conf;
}
/**
 * Copies every key/value pair of the given Hadoop {@link Configuration} into a fresh
 * {@link State}.
 */
public static State getStateFromConf(Configuration conf) {
  State state = new State();
  conf.forEach(entry -> state.setProp(entry.getKey(), entry.getValue()));
  return state;
}
/**
 * Set the group associated with a given path, preserving the current owner.
 *
 * @param fs the {@link FileSystem} instance used to perform the file operation
 * @param path the given path
 * @param group the group associated with the path
 * @throws IOException if the file status cannot be read or the owner cannot be set
 */
public static void setGroup(FileSystem fs, Path path, String group) throws IOException {
  String currentOwner = fs.getFileStatus(path).getOwner();
  fs.setOwner(path, currentOwner, group);
}
/**
 * Serialize a {@link Writable} object into a base64-encoded string.
 *
 * @param writable the {@link Writable} object to be serialized
 * @return a string serialized from the {@link Writable} object
 * @throws IOException if there's something wrong with the serialization
 */
public static String serializeToString(Writable writable) throws IOException {
  ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
  // Closing the DataOutputStream also closes the wrapped byte stream (a no-op for byte arrays).
  try (DataOutputStream dataStream = new DataOutputStream(byteStream)) {
    writable.write(dataStream);
  }
  return BaseEncoding.base64().encode(byteStream.toByteArray());
}
/**
 * Deserialize a {@link Writable} object from a string, using a default Hadoop
 * {@link Configuration}.
 *
 * @param writableClass the {@link Writable} implementation class
 * @param serializedWritableStr the string containing a serialized {@link Writable} object
 * @return a {@link Writable} deserialized from the string
 * @throws IOException if there's something wrong with the deserialization
 */
public static Writable deserializeFromString(Class<? extends Writable> writableClass, String serializedWritableStr)
    throws IOException {
  Configuration defaultConfiguration = new Configuration();
  return deserializeFromString(writableClass, serializedWritableStr, defaultConfiguration);
}
/**
 * Deserialize a {@link Writable} object from a base64-encoded string.
 *
 * @param writableClass the {@link Writable} implementation class
 * @param serializedWritableStr the string containing a serialized {@link Writable} object
 * @param configuration a {@link Configuration} object containing Hadoop configuration properties
 * @return a {@link Writable} deserialized from the string
 * @throws IOException if there's something wrong with the deserialization
 */
public static Writable deserializeFromString(Class<? extends Writable> writableClass, String serializedWritableStr,
    Configuration configuration) throws IOException {
  byte[] writableBytes = BaseEncoding.base64().decode(serializedWritableStr);
  // Instantiate first, then populate the fields from the decoded byte stream.
  Writable writable = ReflectionUtils.newInstance(writableClass, configuration);
  try (DataInputStream dataInputStream = new DataInputStream(new ByteArrayInputStream(writableBytes))) {
    writable.readFields(dataInputStream);
  }
  return writable;
}
/**
 * Given a {@link FsPermission} objects, set a key, value pair in the given {@link State} for the writer to
 * use when creating files. This method should be used in conjunction with {@link #deserializeWriterFilePermissions(State, int, int)}.
 */
public static void serializeWriterFilePermissions(State state, int numBranches, int branchId,
    FsPermission fsPermissions) {
  String branchedKey =
      ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_FILE_PERMISSIONS, numBranches, branchId);
  serializeFsPermissions(state, branchedKey, fsPermissions);
}
/**
 * Given a {@link FsPermission} objects, set a key, value pair in the given {@link State} for the writer to
 * use when creating directories. This method should be used in conjunction with {@link #deserializeWriterDirPermissions(State, int, int)}.
 */
public static void serializeWriterDirPermissions(State state, int numBranches, int branchId,
    FsPermission fsPermissions) {
  String branchedKey =
      ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_DIR_PERMISSIONS, numBranches, branchId);
  serializeFsPermissions(state, branchedKey, fsPermissions);
}
/**
 * Helper method that serializes a {@link FsPermission} object as a 4-digit octal string.
 */
private static void serializeFsPermissions(State state, String key, FsPermission fsPermissions) {
  String octalPermissions = String.format("%04o", fsPermissions.toShort());
  state.setProp(key, octalPermissions);
}
/**
 * Given a {@link String} in octal notation, set a key, value pair in the given {@link State} for the writer to
 * use when creating files. This method should be used in conjunction with {@link #deserializeWriterFilePermissions(State, int, int)}.
 */
public static void setWriterFileOctalPermissions(State state, int numBranches, int branchId,
    String octalPermissions) {
  String branchedKey =
      ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_FILE_PERMISSIONS, numBranches, branchId);
  state.setProp(branchedKey, octalPermissions);
}
/**
 * Given a {@link String} in octal notation, set a key, value pair in the given {@link State} for the writer to
 * use when creating directories. This method should be used in conjunction with {@link #deserializeWriterDirPermissions(State, int, int)}.
 */
public static void setWriterDirOctalPermissions(State state, int numBranches, int branchId, String octalPermissions) {
  String branchedKey =
      ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_DIR_PERMISSIONS, numBranches, branchId);
  state.setProp(branchedKey, octalPermissions);
}
/**
 * Deserializes a {@link FsPermission}s object that should be used when a {@link DataWriter} is writing a file.
 * Falls back to {@link FsPermission#getDefault()} when the property is absent.
 */
public static FsPermission deserializeWriterFilePermissions(State state, int numBranches, int branchId) {
  String branchedKey =
      ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_FILE_PERMISSIONS, numBranches, branchId);
  short mode = state.getPropAsShortWithRadix(branchedKey, FsPermission.getDefault().toShort(),
      ConfigurationKeys.PERMISSION_PARSING_RADIX);
  return new FsPermission(mode);
}
/**
 * Deserializes a {@link FsPermission}s object that should be used when a {@link DataWriter} is creating directories.
 * Falls back to {@link FsPermission#getDefault()} when the property is absent.
 */
public static FsPermission deserializeWriterDirPermissions(State state, int numBranches, int branchId) {
  String branchedKey =
      ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_DIR_PERMISSIONS, numBranches, branchId);
  short mode = state.getPropAsShortWithRadix(branchedKey, FsPermission.getDefault().toShort(),
      ConfigurationKeys.PERMISSION_PARSING_RADIX);
  return new FsPermission(mode);
}
/**
 * Get {@link FsPermission} from a {@link State} object.
 *
 * @param props A {@link State} containing properties.
 * @param propName The property name for the permission. If not contained in the given state,
 *                 defaultPermission will be used.
 * @param defaultPermission default permission if propName is not contained in props.
 * @return An {@link FsPermission} object.
 */
public static FsPermission deserializeFsPermission(State props, String propName, FsPermission defaultPermission) {
  short fallback = defaultPermission.toShort();
  return new FsPermission(
      props.getPropAsShortWithRadix(propName, fallback, ConfigurationKeys.PERMISSION_PARSING_RADIX));
}
/**
 * Remove illegal HDFS path characters from the given path. Illegal characters will be replaced
 * with the given substitute, which itself must not contain any illegal character.
 */
public static String sanitizePath(String path, String substitute) {
  boolean substituteIsClean = substitute.replaceAll(HDFS_ILLEGAL_TOKEN_REGEX, "").equals(substitute);
  Preconditions.checkArgument(substituteIsClean, "substitute contains illegal characters: " + substitute);
  return path.replaceAll(HDFS_ILLEGAL_TOKEN_REGEX, substitute);
}
/**
 * Remove illegal HDFS path characters from the given path. Illegal characters will be replaced
 * with the given substitute.
 */
public static Path sanitizePath(Path path, String substitute) {
  String sanitized = sanitizePath(path.toString(), substitute);
  return new Path(sanitized);
}
/**
 * Try to set owner and permissions for the path, recursing into directories. Will not throw
 * exception: any {@link IOException} is caught and logged.
 *
 * <p>If either owner or group is absent, the method is a no-op.</p>
 *
 * @param location the path whose owner/permissions are set
 * @param owner the new owner; nothing happens if absent
 * @param group the new group; nothing happens if absent
 * @param fs the {@link FileSystem} on which the path lives
 * @param permission the permissions to apply to the path (and, recursively, its children)
 */
public static void setPermissions(Path location, Optional<String> owner, Optional<String> group, FileSystem fs,
    FsPermission permission) {
  try {
    if (!owner.isPresent() || !group.isPresent()) {
      return;
    }
    fs.setOwner(location, owner.get(), group.get());
    fs.setPermission(location, permission);
    if (!fs.isDirectory(location)) {
      return;
    }
    for (FileStatus fileStatus : fs.listStatus(location)) {
      setPermissions(fileStatus.getPath(), owner, group, fs, permission);
    }
  } catch (IOException e) {
    // Log the full exception (not just the message) so the failure is diagnosable.
    log.warn("Exception occurred while trying to change permissions : " + e.getMessage(), e);
  }
}
/**
 * Returns true if the path is a file, or a directory that (transitively) contains at least one
 * file; returns false for a directory tree with no files.
 */
public static boolean hasContent(FileSystem fs, Path path)
    throws IOException {
  if (!fs.isDirectory(path)) {
    return true;
  }
  for (FileStatus child : fs.listStatus(path)) {
    if (hasContent(fs, child.getPath())) {
      return true;
    }
  }
  return false;
}
/**
 * Add "gobblin-site.xml" as a {@link Configuration} resource.
 *
 * <p>After this call, subsequently created {@link Configuration} instances also load properties
 * from a "gobblin-site.xml" file on the classpath.</p>
 */
public static void addGobblinSite() {
  Configuration.addDefaultResource("gobblin-site.xml");
}
/**
 * Get a {@link FileSystem} object for the uri specified at {@link ConfigurationKeys#SOURCE_FILEBASED_FS_URI},
 * defaulting to the local filesystem, with values under the source encrypted-config path decrypted.
 * @throws IOException if the filesystem cannot be obtained
 */
public static FileSystem getSourceFileSystem(State state) throws IOException {
  Optional<String> encryptedConfigPath = Optional.of(ConfigurationKeys.SOURCE_FILEBASED_ENCRYPTED_CONFIG_PATH);
  Configuration conf = getConfFromState(state, encryptedConfigPath);
  URI fsUri = URI.create(state.getProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, ConfigurationKeys.LOCAL_FS_URI));
  return getOptionallyThrottledFileSystem(FileSystem.get(fsUri, conf), state);
}
/**
 * Get a {@link FileSystem} for `fsUri`, wrapped with throttling when the state requests it.
 * @throws IOException if the filesystem cannot be obtained
 */
public static FileSystem getFileSystem(URI fsUri, State state) throws IOException {
  Configuration conf = getConfFromState(state, Optional.absent());
  FileSystem rawFs = FileSystem.get(fsUri, conf);
  return getOptionallyThrottledFileSystem(rawFs, state);
}
/**
 * Get a {@link FileSystem} object for the uri specified at {@link ConfigurationKeys#WRITER_FILE_SYSTEM_URI},
 * wrapped with throttling when the state requests it.
 * @throws IOException if the filesystem cannot be obtained
 */
public static FileSystem getWriterFileSystem(State state, int numBranches, int branchId)
    throws IOException {
  FileSystem writerFs = WriterUtils.getWriterFS(state, numBranches, branchId);
  return getOptionallyThrottledFileSystem(writerFs, state);
}
}
| 4,109 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/AvroUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.Schema.Type;
import org.apache.avro.SchemaCompatibility;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.SeekableInput;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.Encoder;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.mapred.FsInput;
import org.apache.avro.util.Utf8;
import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.math3.util.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.io.Closer;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import lombok.Builder;
import lombok.Getter;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;
/**
 * A Utils class for dealing with Avro objects
 */
@Slf4j
public class AvroUtils {
private static final Logger LOG = LoggerFactory.getLogger(AvroUtils.class);
// Delimiter separating the segments of a nested field location, e.g. "field1.nestedField1".
public static final String FIELD_LOCATION_DELIMITER = ".";
// File name suffix identifying Avro data files.
public static final String AVRO_SUFFIX = ".avro";
// Avro schema property key under which a schema's creation time is stored.
public static final String SCHEMA_CREATION_TIME_KEY = "CreatedOn";
/**
 * Validates that the provided reader schema can be used to decode avro data written with the
 * provided writer schema.
 * @param readerSchema schema to check.
 * @param writerSchema schema to check.
 * @param ignoreNamespace whether name and namespace should be ignored in validation
 * @return true if validation passes
 */
public static boolean checkReaderWriterCompatibility(Schema readerSchema, Schema writerSchema, boolean ignoreNamespace) {
  Schema effectiveReader = readerSchema;
  if (ignoreNamespace) {
    // Rebuild the reader schema under the writer's name/namespace so that naming differences
    // do not affect the compatibility result.
    List<Schema.Field> copiedFields = deepCopySchemaFields(readerSchema);
    effectiveReader = Schema.createRecord(writerSchema.getName(), writerSchema.getDoc(), writerSchema.getNamespace(),
        readerSchema.isError());
    effectiveReader.setFields(copiedFields);
  }
  SchemaCompatibility.SchemaPairCompatibility result =
      SchemaCompatibility.checkReaderWriterCompatibility(effectiveReader, writerSchema);
  return result.getType() == SchemaCompatibility.SchemaCompatibilityType.COMPATIBLE;
}
/**
 * Copies the creation-time property from the input schema onto the output schema, but only when
 * the input has one and the output does not already. Returns the (possibly mutated) output schema.
 */
public static Schema addSchemaCreationTime(Schema inputSchema, Schema outputSchema) {
  String creationTime = inputSchema.getProp(SCHEMA_CREATION_TIME_KEY);
  if (creationTime != null && outputSchema.getProp(SCHEMA_CREATION_TIME_KEY) == null) {
    outputSchema.addProp(SCHEMA_CREATION_TIME_KEY, creationTime);
  }
  return outputSchema;
}
/**
 * Returns the creation-time value stored on the schema under {@value #SCHEMA_CREATION_TIME_KEY},
 * or null if the property is absent.
 */
public static String getSchemaCreationTime(Schema inputSchema) {
  return inputSchema.getProp(SCHEMA_CREATION_TIME_KEY);
}
/**
 * Stamps the given creation time onto the schema under {@value #SCHEMA_CREATION_TIME_KEY} and
 * returns the same (mutated) schema instance.
 */
public static Schema setSchemaCreationTime(Schema inputSchema, String creationTime) {
  inputSchema.addProp(SCHEMA_CREATION_TIME_KEY, creationTime);
  return inputSchema;
}
/**
 * Deep-copies every field of the given schema, preserving name, schema, doc, default value,
 * sort order, and field properties. The copies are detached and can be attached to a new schema.
 */
public static List<Field> deepCopySchemaFields(Schema readerSchema) {
  List<Field> copies = new ArrayList<>();
  for (Field field : readerSchema.getFields()) {
    Field copy = AvroCompatibilityHelper.createSchemaField(field.name(), field.schema(), field.doc(),
        getCompatibleDefaultValue(field), field.order());
    AvroSchemaUtils.copyFieldProperties(field, copy);
    copies.add(copy);
  }
  return copies;
}
/** A {@link PathFilter} accepting only paths whose file name ends with the ".avro" suffix. */
public static class AvroPathFilter implements PathFilter {
  @Override
  public boolean accept(Path path) {
    return path.getName().endsWith(AVRO_SUFFIX);
  }
}
/**
 * Given a GenericRecord, this method will return the schema of the field specified by the path parameter. The
 * fieldLocation parameter is an ordered string specifying the location of the nested field to retrieve. For example,
 * field1.nestedField1 takes the the schema of the field "field1", and retrieves the schema "nestedField1" from it.
 * @param schema is the record to retrieve the schema from
 * @param fieldLocation is the location of the field
 * @return the schema of the field
 */
public static Optional<Schema> getFieldSchema(Schema schema, String fieldLocation) {
  Preconditions.checkNotNull(schema);
  Preconditions.checkArgument(!Strings.isNullOrEmpty(fieldLocation));
  List<String> pathList = Lists.newArrayList(
      Splitter.on(FIELD_LOCATION_DELIMITER).omitEmptyStrings().trimResults().split(fieldLocation));
  if (pathList.isEmpty()) {
    return Optional.absent();
  }
  return getFieldSchemaHelper(schema, pathList, 0);
}
/**
 * Helper method that does the actual work for {@link #getFieldSchema(Schema, String)}
 * @param schema passed from {@link #getFieldSchema(Schema, String)}
 * @param pathList passed from {@link #getFieldSchema(Schema, String)}
 * @param field keeps track of the index used to access the list pathList
 * @return the schema of the field, or {@link Optional#absent()} if a record lacks the requested field
 * @throws AvroRuntimeException for non-nullable unions or types that cannot be traversed
 */
private static Optional<Schema> getFieldSchemaHelper(Schema schema, List<String> pathList, int field) {
  if (schema.getType() == Type.RECORD && schema.getField(pathList.get(field)) == null) {
    return Optional.absent();
  }
  switch (schema.getType()) {
    case UNION:
      // Only nullable unions ([null, X]) are supported: unwrap to X and retry at the same index.
      if (AvroSerdeUtils.isNullableType(schema)) {
        return AvroUtils.getFieldSchemaHelper(AvroSerdeUtils.getOtherTypeFromNullableType(schema), pathList, field);
      }
      throw new AvroRuntimeException("Union of complex types cannot be handled : " + schema);
    case MAP:
      // For maps the path segment selects a key; the schema of any value is the map's value type.
      if ((field + 1) == pathList.size()) {
        return Optional.fromNullable(schema.getValueType());
      }
      return AvroUtils.getFieldSchemaHelper(schema.getValueType(), pathList, ++field);
    case RECORD:
      if ((field + 1) == pathList.size()) {
        return Optional.fromNullable(schema.getField(pathList.get(field)).schema());
      }
      return AvroUtils.getFieldSchemaHelper(schema.getField(pathList.get(field)).schema(), pathList, ++field);
    default:
      throw new AvroRuntimeException("Invalid type in schema : " + schema);
  }
}
/**
 * Given a GenericRecord, this method will return the field specified by the path parameter. The
 * fieldLocation parameter is an ordered string specifying the location of the nested field to retrieve. For example,
 * field1.nestedField1 takes field "field1", and retrieves "nestedField1" from it.
 * @param schema is the record to retrieve the schema from
 * @param fieldLocation is the location of the field
 * @return the field
 */
public static Optional<Field> getField(Schema schema, String fieldLocation) {
  Preconditions.checkNotNull(schema);
  Preconditions.checkArgument(!Strings.isNullOrEmpty(fieldLocation));
  List<String> pathList = Lists.newArrayList(
      Splitter.on(FIELD_LOCATION_DELIMITER).omitEmptyStrings().trimResults().split(fieldLocation));
  if (pathList.isEmpty()) {
    return Optional.absent();
  }
  return getFieldHelper(schema, pathList, 0);
}
/**
 * Helper method that does the actual work for {@link #getField(Schema, String)}
 * @param schema passed from {@link #getFieldSchema(Schema, String)}
 * @param pathList passed from {@link #getFieldSchema(Schema, String)}
 * @param field keeps track of the index used to access the list pathList
 * @return the field, or {@link Optional#absent()} when the terminal segment names no field
 * @throws AvroRuntimeException for unions or types that cannot be traversed
 */
private static Optional<Field> getFieldHelper(Schema schema, List<String> pathList, int field) {
  Field curField = schema.getField(pathList.get(field));
  if (field + 1 == pathList.size()) {
    // Terminal path segment: return whatever field (possibly null) the schema has.
    return Optional.fromNullable(curField);
  }
  Schema fieldSchema = curField.schema();
  switch (fieldSchema.getType()) {
    case UNION:
      throw new AvroRuntimeException("Union of complex types cannot be handled : " + schema);
    case MAP:
      // Descend into the value schema of the map for the next path segment.
      return AvroUtils.getFieldHelper(fieldSchema.getValueType(), pathList, ++field);
    case RECORD:
      return AvroUtils.getFieldHelper(fieldSchema, pathList, ++field);
    case ARRAY:
      // Descend into the element schema of the array for the next path segment.
      return AvroUtils.getFieldHelper(fieldSchema.getElementType(), pathList, ++field);
    default:
      throw new AvroRuntimeException("Invalid type " + fieldSchema.getType() + " in schema : " + schema);
  }
}
/**
 * Given a GenericRecord, this method will return the field specified by the path parameter. The fieldLocation
 * parameter is an ordered string specifying the location of the nested field to retrieve. For example,
 * field1.nestedField1 takes the the value of the field "field1", and retrieves the field "nestedField1" from it.
 * @param record is the record to retrieve the field from
 * @param fieldLocation is the location of the field
 * @return the value of the field
 */
public static Optional<Object> getFieldValue(GenericRecord record, String fieldLocation) {
  Map<String, Object> values = getMultiFieldValue(record, fieldLocation);
  return Optional.fromNullable(values.get(fieldLocation));
}
/**
 * Resolves a (possibly wildcarded) field location against the record and returns a map from the
 * fully-resolved dotted path to the value found there. Paths that resolve to null are omitted.
 */
public static Map<String, Object> getMultiFieldValue(GenericRecord record, String fieldLocation) {
  Preconditions.checkNotNull(record);
  Preconditions.checkArgument(!Strings.isNullOrEmpty(fieldLocation));
  List<String> pathList =
      Splitter.on(FIELD_LOCATION_DELIMITER).omitEmptyStrings().trimResults().splitToList(fieldLocation);
  if (pathList.isEmpty()) {
    return Collections.emptyMap();
  }
  Map<String, Object> resolvedValues = new HashMap<>();
  getFieldHelper(resolvedValues, record, pathList, 0);
  return resolvedValues;
}
/**
 * Helper method that does the actual work for {@link #getFieldValue(GenericRecord, String)}
 * @param retVal accumulator mapping the resolved dotted path to the value found there
 * @param data passed from {@link #getFieldValue(GenericRecord, String)}; a GenericRecord, Map, or List
 * @param pathList passed from {@link #getFieldValue(GenericRecord, String)}
 * @param field keeps track of the index used to access the list pathList
 */
private static void getFieldHelper(Map<String, Object> retVal,
    Object data, List<String> pathList, int field) {
  if (data == null) {
    return;
  }
  if ((field + 1) == pathList.size()) {
    // Terminal segment: read the value and record it under the full (resolved) dotted path.
    Object val = null;
    Joiner joiner = Joiner.on(".");
    String key = joiner.join(pathList.iterator());
    if (data instanceof Map) {
      val = getObjectFromMap((Map)data, pathList.get(field));
    } else if (data instanceof List) {
      // For lists the segment is expected to be a numeric index at this point.
      val = getObjectFromArray((List)data, Integer.parseInt(pathList.get(field)));
    } else {
      val = ((GenericRecord)data).get(pathList.get(field));
    }
    if (val != null) {
      retVal.put(key, val);
    }
    return;
  }
  if (data instanceof Map) {
    AvroUtils.getFieldHelper(retVal, getObjectFromMap((Map) data, pathList.get(field)), pathList, ++field);
    return;
  }
  if (data instanceof List) {
    if (pathList.get(field).trim().equals("*")) {
      // Wildcard over a list: fan out to every element, substituting the concrete index into a
      // copy of the path so the reported key names the actual element position.
      List arr = (List)data;
      Iterator it = arr.iterator();
      int i = 0;
      while (it.hasNext()) {
        Object val = it.next();
        List<String> newPathList = new ArrayList<>(pathList);
        newPathList.set(field, String.valueOf(i));
        AvroUtils.getFieldHelper(retVal, val, newPathList, field + 1);
        i++;
      }
    } else {
      AvroUtils
          .getFieldHelper(retVal, getObjectFromArray((List) data, Integer.parseInt(pathList.get(field))), pathList, ++field);
    }
    return;
  }
  AvroUtils.getFieldHelper(retVal, ((GenericRecord) data).get(pathList.get(field)), pathList, ++field);
  return;
}
/**
 * Given a map: key -> value, return a map: key.toString() -> value.toString(). Avro serializer wraps a String
 * into {@link Utf8}. This method helps to restore the original string map object
 *
 * @param map a map object
 * @return a map of strings, or null when the input is null
 * @throws AvroRuntimeException when the input is non-null but not a {@link Map}
 */
@SuppressWarnings("unchecked")
public static Map<String, String> toStringMap(Object map) {
  if (map == null) {
    return null;
  }
  if (!(map instanceof Map)) {
    throw new AvroRuntimeException("value must be a map");
  }
  Map<String, String> stringMap = new HashMap<>();
  ((Map<Object, Object>) map).forEach((key, value) -> stringMap.put(key.toString(), value.toString()));
  return stringMap;
}
/**
 * This method is to get object from map given a key as string.
 * Avro persists string keys as {@link Utf8}, so the {@link Utf8} form is tried first and the
 * plain {@link String} form is used as a fallback.
 * @param map passed from {@link #getFieldHelper(Map, Object, List, int)}
 * @param key passed from {@link #getFieldHelper(Map, Object, List, int)}
 * @return This could again be a GenericRecord
 */
private static Object getObjectFromMap(Map map, String key) {
  Object value = map.get(new Utf8(key));
  return value != null ? value : map.get(key);
}
/**
 * Get an object from an array given an index.
 *
 * @param array the list to read from
 * @param index zero-based position; out-of-range values propagate {@link IndexOutOfBoundsException}
 */
private static Object getObjectFromArray(List array, int index) {
  return array.get(index);
}
/**
 * Change the schema of an Avro record.
 * @param record The Avro record whose schema is to be changed.
 * @param newSchema The target schema. It must be compatible as reader schema with record.getSchema() as writer schema.
 * @return a new Avro record with the new schema, or the input record itself when the schemas are equal.
 * @throws IOException if conversion failed.
 */
public static GenericRecord convertRecordSchema(GenericRecord record, Schema newSchema) throws IOException {
  if (record.getSchema().equals(newSchema)) {
    return record;
  }
  try {
    // Use the shared factory instance: the DecoderFactory constructor is deprecated in Avro,
    // and this class already uses EncoderFactory.get() in recordToByteArray.
    BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(recordToByteArray(record), null);
    DatumReader<GenericRecord> reader = new GenericDatumReader<>(record.getSchema(), newSchema);
    return reader.read(null, decoder);
  } catch (IOException e) {
    throw new IOException(
        String.format("Cannot convert avro record to new schema. Original schema = %s, new schema = %s",
            record.getSchema(), newSchema),
        e);
  }
}
/**
 * Convert a GenericRecord to a byte array using direct binary encoding of the record's own schema.
 */
public static byte[] recordToByteArray(GenericRecord record) throws IOException {
  try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
    DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(record.getSchema());
    datumWriter.write(record, EncoderFactory.get().directBinaryEncoder(out, null));
    return out.toByteArray();
  }
}
/**
 * Get the Avro schema embedded in the header of an Avro data file.
 */
public static Schema getSchemaFromDataFile(Path dataFile, FileSystem fs) throws IOException {
  // Both resources are closed in reverse order; closing the reader also finishes with the input.
  try (SeekableInput seekableInput = new FsInput(dataFile, fs.getConf());
      DataFileReader<GenericRecord> dataFileReader =
          new DataFileReader<>(seekableInput, new GenericDatumReader<GenericRecord>())) {
    return dataFileReader.getSchema();
  }
}
/**
 * Parse an Avro schema from a schema (JSON) file; the file must exist.
 */
public static Schema parseSchemaFromFile(Path filePath, FileSystem fs) throws IOException {
  Preconditions.checkArgument(fs.exists(filePath), filePath + " does not exist");
  try (FSDataInputStream inputStream = fs.open(filePath)) {
    Schema.Parser parser = new Schema.Parser();
    return parser.parse(inputStream);
  }
}
/**
 * Write a schema to a file without a staging file; see
 * {@link #writeSchemaToFile(Schema, Path, Path, FileSystem, boolean, FsPermission)}.
 */
public static void writeSchemaToFile(Schema schema, Path filePath, FileSystem fs, boolean overwrite)
    throws IOException {
  writeSchemaToFile(schema, filePath, null, fs, overwrite);
}
/**
 * Write a schema to a file using default permissions (owner/group: all; other: read); see
 * {@link #writeSchemaToFile(Schema, Path, Path, FileSystem, boolean, FsPermission)}.
 */
public static void writeSchemaToFile(Schema schema, Path filePath, Path tempFilePath, FileSystem fs, boolean overwrite)
    throws IOException {
  writeSchemaToFile(schema, filePath, tempFilePath, fs, overwrite, new FsPermission(FsAction.ALL, FsAction.ALL,
      FsAction.READ));
}
/**
 * Write a schema to a file with explicit permissions and no staging file; see
 * {@link #writeSchemaToFile(Schema, Path, Path, FileSystem, boolean, FsPermission)}.
 */
public static void writeSchemaToFile(Schema schema, Path filePath, FileSystem fs, boolean overwrite, FsPermission perm)
    throws IOException {
  writeSchemaToFile(schema, filePath, null, fs, overwrite, perm);
}
/**
 * Write a schema to a file
 * @param schema the schema
 * @param filePath the target file
 * @param tempFilePath if not null then this path is used for a temporary file used to stage the write
 * @param fs a {@link FileSystem}
 * @param overwrite should any existing target file be overwritten?
 * @param perm permissions
 * @throws IOException if the target exists and overwrite is false (as IllegalStateException), or on any filesystem failure
 */
public static void writeSchemaToFile(Schema schema, Path filePath, Path tempFilePath, FileSystem fs, boolean overwrite,
    FsPermission perm)
    throws IOException {
  boolean fileExists = fs.exists(filePath);
  if (!overwrite) {
    Preconditions.checkState(!fileExists, filePath + " already exists");
  } else {
    // delete the target file now if not using a staging file
    if (fileExists && null == tempFilePath) {
      HadoopUtils.deletePath(fs, filePath, true);
      // file has been removed
      fileExists = false;
    }
  }
  // If the file exists then write to a temp file to make the replacement as close to atomic as possible
  Path writeFilePath = fileExists ? tempFilePath : filePath;
  // NOTE(review): writeChars emits UTF-16 code units (two bytes per char), not UTF-8; consumers
  // of this file must decode accordingly -- confirm against the readers of these schema files.
  try (DataOutputStream dos = fs.create(writeFilePath)) {
    dos.writeChars(schema.toString());
  }
  fs.setPermission(writeFilePath, perm);
  // Replace existing file with the staged file
  if (fileExists) {
    // Delete-then-move is not atomic: a concurrent reader may briefly observe a missing file.
    if (!fs.delete(filePath, true)) {
      throw new IOException(
          String.format("Failed to delete %s while renaming %s to %s", filePath, tempFilePath, filePath));
    }
    HadoopUtils.movePath(fs, tempFilePath, fs, filePath, true, fs.getConf());
  }
}
/**
 * Get the latest avro schema for a directory
 * @param directory the input dir that contains avro files
 * @param fs the {@link FileSystem} for the given directory.
 * @param latest true to return latest schema, false to return oldest schema
 * @return the latest/oldest schema in the directory, or null when the directory holds no avro files
 * @throws IOException if reading the directory or an avro file header fails
 */
public static Schema getDirectorySchema(Path directory, FileSystem fs, boolean latest) throws IOException {
  Schema schema = null;
  try (Closer closer = Closer.create()) {
    // Files come back sorted newest-first (see getDirectorySchemaHelper), so index 0 is latest.
    List<FileStatus> files = getDirectorySchemaHelper(directory, fs);
    if (files == null || files.size() == 0) {
      LOG.warn("There is no previous avro file in the directory: " + directory);
    } else {
      FileStatus file = latest ? files.get(0) : files.get(files.size() - 1);
      LOG.debug("Path to get the avro schema: " + file);
      FsInput fi = new FsInput(file.getPath(), fs.getConf());
      // Register the reader with the closer so it is closed even if getSchema() throws.
      GenericDatumReader<GenericRecord> genReader = new GenericDatumReader<>();
      schema = closer.register(new DataFileReader<>(fi, genReader)).getSchema();
    }
  } catch (IOException ioe) {
    throw new IOException("Cannot get the schema for directory " + directory, ioe);
  }
  return schema;
}
/**
 * Get the latest avro schema for a directory
 * @param directory the input dir that contains avro files
 * @param conf configuration
 * @param latest true to return latest schema, false to return oldest schema
 * @return the latest/oldest schema in the directory
 * @throws IOException if the filesystem or the schema cannot be obtained
 */
public static Schema getDirectorySchema(Path directory, Configuration conf, boolean latest) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  return getDirectorySchema(directory, fs, latest);
}
private static List<FileStatus> getDirectorySchemaHelper(Path directory, FileSystem fs) throws IOException {
List<FileStatus> files = Lists.newArrayList();
if (fs.exists(directory)) {
getAllNestedAvroFiles(fs.getFileStatus(directory), files, fs);
if (files.size() > 0) {
Collections.sort(files, FileListUtils.LATEST_MOD_TIME_ORDER);
}
}
return files;
}
private static void getAllNestedAvroFiles(FileStatus dir, List<FileStatus> files, FileSystem fs) throws IOException {
if (dir.isDirectory()) {
FileStatus[] filesInDir = fs.listStatus(dir.getPath());
if (filesInDir != null) {
for (FileStatus f : filesInDir) {
getAllNestedAvroFiles(f, files, fs);
}
}
} else if (dir.getPath().getName().endsWith(AVRO_SUFFIX)) {
files.add(dir);
}
}
/**
* Merge oldSchema and newSchame. Set a field default value to null, if this field exists in the old schema but not in the new schema.
* @param oldSchema
* @param newSchema
* @return schema that contains all the fields in both old and new schema.
*/
public static Schema nullifyFieldsForSchemaMerge(Schema oldSchema, Schema newSchema) {
if (oldSchema == null) {
LOG.warn("No previous schema available, use the new schema instead.");
return newSchema;
}
if (!(oldSchema.getType().equals(Type.RECORD) && newSchema.getType().equals(Type.RECORD))) {
LOG.warn("Both previous schema and new schema need to be record type. Quit merging schema.");
return newSchema;
}
List<Field> combinedFields = Lists.newArrayList();
for (Field newFld : newSchema.getFields()) {
combinedFields.add(AvroCompatibilityHelper.createSchemaField(newFld.name(), newFld.schema(), newFld.doc(),
getCompatibleDefaultValue(newFld)));
}
for (Field oldFld : oldSchema.getFields()) {
if (newSchema.getField(oldFld.name()) == null) {
List<Schema> union = Lists.newArrayList();
Schema oldFldSchema = oldFld.schema();
if (oldFldSchema.getType().equals(Type.UNION)) {
union.add(Schema.create(Type.NULL));
for (Schema itemInUion : oldFldSchema.getTypes()) {
if (!itemInUion.getType().equals(Type.NULL)) {
union.add(itemInUion);
}
}
Schema newFldSchema = Schema.createUnion(union);
combinedFields.add(AvroCompatibilityHelper.createSchemaField(oldFld.name(), newFldSchema, oldFld.doc(),
getCompatibleDefaultValue(oldFld)));
} else {
union.add(Schema.create(Type.NULL));
union.add(oldFldSchema);
Schema newFldSchema = Schema.createUnion(union);
Object obj = getCompatibleDefaultValue(oldFld);
combinedFields.add(AvroCompatibilityHelper.createSchemaField(oldFld.name(), newFldSchema, oldFld.doc(),
getCompatibleDefaultValue(oldFld)));
}
}
}
Schema mergedSchema =
Schema.createRecord(newSchema.getName(), newSchema.getDoc(), newSchema.getNamespace(), newSchema.isError());
mergedSchema.setFields(combinedFields);
return mergedSchema;
}
/**
* Remove map, array, enum fields, as well as union fields that contain map, array or enum,
* from an Avro schema. A schema with these fields cannot be used as Mapper key in a
* MapReduce job.
*/
public static Optional<Schema> removeUncomparableFields(Schema schema) {
return removeUncomparableFields(schema, Maps.newHashMap());
}
private static Optional<Schema> removeUncomparableFields(Schema schema, Map<Schema, Optional<Schema>> processed) {
switch (schema.getType()) {
case RECORD:
return removeUncomparableFieldsFromRecord(schema, processed);
case UNION:
return removeUncomparableFieldsFromUnion(schema, processed);
case MAP:
return Optional.absent();
case ARRAY:
return Optional.absent();
case ENUM:
return Optional.absent();
default:
return Optional.of(schema);
}
}
private static Optional<Schema> removeUncomparableFieldsFromRecord(Schema record, Map<Schema, Optional<Schema>> processed) {
Preconditions.checkArgument(record.getType() == Schema.Type.RECORD);
Optional<Schema> result = processed.get(record);
if (null != result) {
return result;
}
List<Field> fields = Lists.newArrayList();
for (Field field : record.getFields()) {
Optional<Schema> newFieldSchema = removeUncomparableFields(field.schema(), processed);
if (newFieldSchema.isPresent()) {
fields.add(AvroCompatibilityHelper.createSchemaField(field.name(), newFieldSchema.get(), field.doc(),
getCompatibleDefaultValue(field)));
}
}
Schema newSchema = Schema.createRecord(record.getName(), record.getDoc(), record.getNamespace(), false);
newSchema.setFields(fields);
result = Optional.of(newSchema);
processed.put(record, result);
return result;
}
private static Optional<Schema> removeUncomparableFieldsFromUnion(Schema union, Map<Schema, Optional<Schema>> processed) {
Preconditions.checkArgument(union.getType() == Schema.Type.UNION);
Optional<Schema> result = processed.get(union);
if (null != result) {
return result;
}
List<Schema> newUnion = Lists.newArrayList();
for (Schema unionType : union.getTypes()) {
Optional<Schema> newType = removeUncomparableFields(unionType, processed);
if (newType.isPresent()) {
newUnion.add(newType.get());
}
}
// Discard the union field if one or more types are removed from the union.
if (newUnion.size() != union.getTypes().size()) {
result = Optional.absent();
} else {
result = Optional.of(Schema.createUnion(newUnion));
}
processed.put(union, result);
return result;
}
/**
* Copies the input {@link org.apache.avro.Schema} but changes the schema name.
* @param schema {@link org.apache.avro.Schema} to copy.
* @param newName name for the copied {@link org.apache.avro.Schema}.
* @return A {@link org.apache.avro.Schema} that is a copy of schema, but has the name newName.
*/
public static Schema switchName(Schema schema, String newName) {
if (schema.getName().equals(newName)) {
return schema;
}
Schema newSchema = Schema.createRecord(newName, schema.getDoc(), schema.getNamespace(), schema.isError());
List<Field> fields = schema.getFields();
Iterable<Field> fieldsNew = Iterables.transform(fields, new Function<Field, Field>() {
@Override
public Schema.Field apply(Field input) {
//this should never happen but the API has marked input as Nullable
if (null == input) {
return null;
}
Field field = AvroCompatibilityHelper.createSchemaField(input.name(), input.schema(), input.doc(),
getCompatibleDefaultValue(input), input.order());
return field;
}
});
newSchema.setFields(Lists.newArrayList(fieldsNew));
return newSchema;
}
/**
* Copies the input {@link org.apache.avro.Schema} but changes the schema namespace.
* @param schema {@link org.apache.avro.Schema} to copy.
* @param namespaceOverride namespace for the copied {@link org.apache.avro.Schema}.
* @return A {@link org.apache.avro.Schema} that is a copy of schema, but has the new namespace.
*/
public static Schema switchNamespace(Schema schema, Map<String, String> namespaceOverride) {
Schema newSchema;
String newNamespace = StringUtils.EMPTY;
// Process all Schema Types
// (Primitives are simply cloned)
switch (schema.getType()) {
case ENUM:
newNamespace = namespaceOverride.containsKey(schema.getNamespace()) ? namespaceOverride.get(schema.getNamespace())
: schema.getNamespace();
newSchema =
Schema.createEnum(schema.getName(), schema.getDoc(), newNamespace, schema.getEnumSymbols());
break;
case FIXED:
newNamespace = namespaceOverride.containsKey(schema.getNamespace()) ? namespaceOverride.get(schema.getNamespace())
: schema.getNamespace();
newSchema =
Schema.createFixed(schema.getName(), schema.getDoc(), newNamespace, schema.getFixedSize());
break;
case MAP:
newSchema = Schema.createMap(switchNamespace(schema.getValueType(), namespaceOverride));
break;
case RECORD:
newNamespace = namespaceOverride.containsKey(schema.getNamespace()) ? namespaceOverride.get(schema.getNamespace())
: schema.getNamespace();
List<Schema.Field> newFields = new ArrayList<>();
if (schema.getFields().size() > 0) {
for (Schema.Field oldField : schema.getFields()) {
Field newField = AvroCompatibilityHelper.createSchemaField(oldField.name(), switchNamespace(oldField.schema(),
namespaceOverride), oldField.doc(), getCompatibleDefaultValue(oldField), oldField.order());
// Copy field level properties
copyFieldProperties(oldField, newField);
newFields.add(newField);
}
}
newSchema = Schema.createRecord(schema.getName(), schema.getDoc(), newNamespace,
schema.isError());
newSchema.setFields(newFields);
break;
case UNION:
List<Schema> newUnionMembers = new ArrayList<>();
if (null != schema.getTypes() && schema.getTypes().size() > 0) {
for (Schema oldUnionMember : schema.getTypes()) {
newUnionMembers.add(switchNamespace(oldUnionMember, namespaceOverride));
}
}
newSchema = Schema.createUnion(newUnionMembers);
break;
case ARRAY:
newSchema = Schema.createArray(switchNamespace(schema.getElementType(), namespaceOverride));
break;
case BOOLEAN:
case BYTES:
case DOUBLE:
case FLOAT:
case INT:
case LONG:
case NULL:
case STRING:
newSchema = Schema.create(schema.getType());
break;
default:
String exceptionMessage = String.format("Schema namespace replacement failed for \"%s\" ", schema);
LOG.error(exceptionMessage);
throw new AvroRuntimeException(exceptionMessage);
}
// Copy schema metadata
copyProperties(schema, newSchema);
return newSchema;
}
/***
* Copy properties from old Avro Schema to new Avro Schema
* @param oldSchema Old Avro Schema to copy properties from
* @param newSchema New Avro Schema to copy properties to
*/
private static void copyProperties(Schema oldSchema, Schema newSchema) {
Preconditions.checkNotNull(oldSchema);
Preconditions.checkNotNull(newSchema);
// Avro 1.9 compatible change - replaced deprecated public api getJsonProps using AvroCompatibilityHelper methods
AvroSchemaUtils.copySchemaProperties(oldSchema, newSchema);
}
/**
* Serialize a generic record as a relative {@link Path}. Useful for converting {@link GenericRecord} type keys
* into file system locations. For example {field1=v1, field2=v2} returns field1=v1/field2=v2 if includeFieldNames
* is true, or v1/v2 if it is false. Illegal HDFS tokens such as ':' and '\\' will be replaced with '_'.
* Additionally, parameter replacePathSeparators controls whether to replace path separators ('/') with '_'.
*
* @param record {@link GenericRecord} to serialize.
* @param includeFieldNames If true, each token in the path will be of the form key=value, otherwise, only the value
* will be included.
* @param replacePathSeparators If true, path separators ('/') in each token will be replaced with '_'.
* @return A relative path where each level is a field in the input record.
*/
public static Path serializeAsPath(GenericRecord record, boolean includeFieldNames, boolean replacePathSeparators) {
if (record == null) {
return new Path("");
}
List<String> tokens = Lists.newArrayList();
for (Schema.Field field : record.getSchema().getFields()) {
String sanitizedName = HadoopUtils.sanitizePath(field.name(), "_");
String sanitizedValue = HadoopUtils.sanitizePath(record.get(field.name()).toString(), "_");
if (replacePathSeparators) {
sanitizedName = sanitizedName.replaceAll(Path.SEPARATOR, "_");
sanitizedValue = sanitizedValue.replaceAll(Path.SEPARATOR, "_");
}
if (includeFieldNames) {
tokens.add(String.format("%s=%s", sanitizedName, sanitizedValue));
} else if (!Strings.isNullOrEmpty(sanitizedValue)) {
tokens.add(sanitizedValue);
}
}
return new Path(Joiner.on(Path.SEPARATOR).join(tokens));
}
/**
* Escaping "\", """, ";" and "'" character in the schema string when it is being used in DDL.
* These characters are not allowed to show as part of column name but could possibly appear in documentation field.
* Therefore the escaping behavior won't cause correctness issues.
*/
public static String sanitizeSchemaString(String schemaString) {
return schemaString.replace("\\\\", "\\\\\\\\").replace("\\\"", "\\\\\\\"")
.replace(";", "\\;").replace("'", "\\'");
}
/**
* Deserialize a {@link GenericRecord} from a byte array. This method is not intended for high performance.
*/
public static GenericRecord slowDeserializeGenericRecord(byte[] serializedRecord, Schema schema) throws IOException {
Decoder decoder = DecoderFactory.get().binaryDecoder(serializedRecord, null);
GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
return reader.read(null, decoder);
}
/**
* Decorate the {@link Schema} for a record with additional {@link Field}s.
* @param inputSchema: must be a {@link Record} schema.
* @return the decorated Schema. Fields are appended to the inputSchema.
*/
public static Schema decorateRecordSchema(Schema inputSchema, @Nonnull List<Field> fieldList) {
Preconditions.checkState(inputSchema.getType().equals(Type.RECORD));
List<Field> outputFields = deepCopySchemaFields(inputSchema);
List<Field> newOutputFields = Stream.concat(outputFields.stream(), fieldList.stream()).collect(Collectors.toList());
Schema outputSchema = Schema.createRecord(inputSchema.getName(), inputSchema.getDoc(),
inputSchema.getNamespace(), inputSchema.isError());
outputSchema.setFields(newOutputFields);
copyProperties(inputSchema, outputSchema);
return outputSchema;
}
/**
* Decorate a {@link GenericRecord} with additional fields and make it conform to an extended Schema
* It is the caller's responsibility to ensure that the outputSchema is the merge of the inputRecord's schema
* and the additional fields. The method does not check this for performance reasons, because it is expected to be called in the
* critical path of processing a record.
* Use {@link AvroUtils#decorateRecordSchema(Schema, List)} to generate such a Schema before calling this method.
* @param inputRecord: record with data to be copied into the output record
* @param fieldMap: values can be primitive types or GenericRecords if nested
* @param outputSchema: the schema that the decoratedRecord will conform to
* @return an outputRecord that contains a union of the fields in the inputRecord and the field-values in the fieldMap
*/
public static GenericRecord decorateRecord(GenericRecord inputRecord, @Nonnull Map<String, Object> fieldMap,
Schema outputSchema) {
GenericRecord outputRecord = new GenericData.Record(outputSchema);
inputRecord.getSchema().getFields().forEach(f -> outputRecord.put(f.name(), inputRecord.get(f.name())));
fieldMap.forEach((key, value) -> outputRecord.put(key, value));
return outputRecord;
}
/**
* Given a generic record, Override the name and namespace of the schema and return a new generic record
* @param input input record who's name and namespace need to be overridden
* @param nameOverride new name for the record schema
* @param namespaceOverride Optional map containing namespace overrides
* @return an output record with overridden name and possibly namespace
*/
public static GenericRecord overrideNameAndNamespace(GenericRecord input, String nameOverride, Optional<Map<String, String>> namespaceOverride) {
GenericRecord output = input;
Schema newSchema = switchName(input.getSchema(), nameOverride);
if(namespaceOverride.isPresent()) {
newSchema = switchNamespace(newSchema, namespaceOverride.get());
}
try {
output = convertRecordSchema(output, newSchema);
} catch (Exception e){
log.error("Unable to generate generic data record", e);
}
return output;
}
/**
* Given a input schema, Override the name and namespace of the schema and return a new schema
* @param input
* @param nameOverride
* @param namespaceOverride
* @return a schema with overridden name and possibly namespace
*/
public static Schema overrideNameAndNamespace(Schema input, String nameOverride, Optional<Map<String, String>> namespaceOverride) {
Schema newSchema = switchName(input, nameOverride);
if(namespaceOverride.isPresent()) {
newSchema = switchNamespace(newSchema, namespaceOverride.get());
}
return newSchema;
}
  // A (field name, schema) pair used while walking a schema tree; fieldName is the dotted
  // path to the field and may be null for the root schema.
  @Builder
  @ToString
  public static class SchemaEntry {
    @Getter
    final String fieldName;
    final Schema schema;

    // Fully qualified name of this entry's type, used to detect references back to an ancestor type.
    String fullyQualifiedType() {
      return schema.getFullName();
    }
  }
/**
* Check if a schema has recursive fields inside it
* @param schema
* @param logger : Optional logger if you want the method to log why it thinks the schema was recursive
* @return true / false
*/
public static boolean isSchemaRecursive(Schema schema, Optional<Logger> logger) {
List<SchemaEntry> recursiveFields = new ArrayList<>();
dropRecursive(new SchemaEntry(null, schema), Collections.EMPTY_LIST, recursiveFields);
if (recursiveFields.isEmpty()) {
return false;
} else {
if (logger.isPresent()) {
logger.get().info("Found recursive fields [{}] in schema {}", recursiveFields.stream().map(f -> f.fieldName).collect(Collectors.joining(",")),
schema.getFullName());
}
return true;
}
}
/**
* Drop recursive fields from a Schema. Recursive fields are fields that refer to types that are part of the
* parent tree.
* e.g. consider this Schema for a User
* {
* "type": "record",
* "name": "User",
* "fields": [
* {"name": "name", "type": "string",
* {"name": "friend", "type": "User"}
* ]
* }
* the friend field is a recursive field. After recursion has been eliminated we expect the output Schema to look like
* {
* "type": "record",
* "name": "User",
* "fields": [
* {"name": "name", "type": "string"}
* ]
* }
*
* @param schema
* @return a Pair of (The transformed schema with recursion eliminated, A list of @link{SchemaEntry} objects which
* represent the fields that were removed from the original schema)
*/
public static Pair<Schema, List<SchemaEntry>> dropRecursiveFields(Schema schema) {
List<SchemaEntry> recursiveFields = new ArrayList<>();
return new Pair(dropRecursive(new SchemaEntry(null, schema), Collections.EMPTY_LIST, recursiveFields),
recursiveFields);
}
/**
* Inner recursive method called by {@link #dropRecursiveFields(Schema)}
* @param schemaEntry
* @param parents
* @param fieldsWithRecursion
* @return the transformed Schema, null if schema is recursive w.r.t parent schema traversed so far
*/
private static Schema dropRecursive(SchemaEntry schemaEntry, List<SchemaEntry> parents, List<SchemaEntry> fieldsWithRecursion) {
Schema schema = schemaEntry.schema;
// ignore primitive fields
switch (schema.getType()) {
case UNION:{
List<Schema> unionTypes = schema.getTypes();
List<Schema> copiedUnionTypes = new ArrayList<Schema>();
for (Schema unionSchema: unionTypes) {
SchemaEntry unionSchemaEntry = new SchemaEntry(
schemaEntry.fieldName, unionSchema);
copiedUnionTypes.add(dropRecursive(unionSchemaEntry, parents, fieldsWithRecursion));
}
if (copiedUnionTypes.stream().anyMatch(x -> x == null)) {
// one or more types in the union are referring to a parent type (directly recursive),
// entire union must be dropped
return null;
}
else {
Schema copySchema = Schema.createUnion(copiedUnionTypes);
copyProperties(schema, copySchema);
return copySchema;
}
}
case RECORD:{
// check if the type of this schema matches any in the parents list
if (parents.stream().anyMatch(parent -> parent.fullyQualifiedType().equals(schemaEntry.fullyQualifiedType()))) {
fieldsWithRecursion.add(schemaEntry);
return null;
}
List<SchemaEntry> newParents = new ArrayList<>(parents);
newParents.add(schemaEntry);
List<Schema.Field> copiedSchemaFields = new ArrayList<>();
for (Schema.Field field: schema.getFields()) {
String fieldName = schemaEntry.fieldName != null ? schemaEntry.fieldName + "." + field.name() : field.name();
SchemaEntry fieldSchemaEntry = new SchemaEntry(fieldName, field.schema());
Schema copiedFieldSchema = dropRecursive(fieldSchemaEntry, newParents, fieldsWithRecursion);
if (copiedFieldSchema == null) {
} else {
Schema.Field copiedField = AvroCompatibilityHelper.createSchemaField(field.name(), copiedFieldSchema,
field.doc(), getCompatibleDefaultValue(field), field.order());
copyFieldProperties(field, copiedField);
copiedSchemaFields.add(copiedField);
}
}
if (copiedSchemaFields.size() > 0) {
Schema copiedRecord = Schema.createRecord(schema.getName(), schema.getDoc(), schema.getNamespace(),
schema.isError());
copiedRecord.setFields(copiedSchemaFields);
copyProperties(schema, copiedRecord);
return copiedRecord;
} else {
return null;
}
}
case ARRAY: {
Schema itemSchema = schema.getElementType();
SchemaEntry itemSchemaEntry = new SchemaEntry(schemaEntry.fieldName, itemSchema);
Schema copiedItemSchema = dropRecursive(itemSchemaEntry, parents, fieldsWithRecursion);
if (copiedItemSchema == null) {
return null;
} else {
Schema copiedArraySchema = Schema.createArray(copiedItemSchema);
copyProperties(schema, copiedArraySchema);
return copiedArraySchema;
}
}
case MAP: {
Schema valueSchema = schema.getValueType();
SchemaEntry valueSchemaEntry = new SchemaEntry(schemaEntry.fieldName, valueSchema);
Schema copiedValueSchema = dropRecursive(valueSchemaEntry, parents, fieldsWithRecursion);
if (copiedValueSchema == null) {
return null;
} else {
Schema copiedMapSchema = Schema.createMap(copiedValueSchema);
copyProperties(schema, copiedMapSchema);
return copiedMapSchema;
}
}
default: {
return schema;
}
}
}
  /**
   * Annoyingly, Avro doesn't provide a field constructor where you can pass in "unknown to Avro" properties
   * to attach to the field object in the schema even though the Schema language and object model supports it.
   * This method allows for such copiers to explicitly copy the properties from a source field to a destination field.
   * @param sourceField field whose properties are read
   * @param copiedField field the properties are written onto
   */
  private static void copyFieldProperties(Schema.Field sourceField, Schema.Field copiedField) {
    // Delegates to the version-compatibility shim so this works across Avro releases.
    AvroSchemaUtils.copyFieldProperties(sourceField, copiedField);
  }
@Nullable
public static Object getCompatibleDefaultValue(Schema.Field field) {
return AvroCompatibilityHelper.fieldHasDefault(field)
? AvroCompatibilityHelper.getGenericDefaultValue(field)
: null;
}
}
| 4,110 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/Id.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import java.util.EnumSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import lombok.Getter;
/**
 * Structured identifiers of the form {@code <prefix>_<name>_<sequence>} used for jobs, tasks and
 * multi-tasks. Sub-classes supply the prefix; {@link #parse(String)} reconstructs the matching
 * sub-class from an id's string form.
 */
public abstract class Id {
  public static final String SEPARATOR = "_";
  private static final Joiner JOINER = Joiner.on(SEPARATOR).skipNulls();
  // prefix (no separators), then name (which may itself contain separators), then a numeric sequence
  static final Pattern PATTERN = Pattern.compile("([^" + SEPARATOR + "]+)" + SEPARATOR + "(.+)" + SEPARATOR + "(\\d+)");

  /** The individual components an id string can be rendered from. */
  public enum Parts {
    PREFIX,
    NAME,
    SEQUENCE;

    public static final EnumSet<Parts> INSTANCE_NAME = EnumSet.of(NAME, SEQUENCE);
    public static final EnumSet<Parts> ALL = EnumSet.allOf(Parts.class);
  }

  public Id(String name) {
    Preconditions.checkArgument(!Strings.isNullOrEmpty(name), "Name is null or empty.");
    this.name = name;
  }

  public Id(String name, long sequence) {
    Preconditions.checkArgument(!Strings.isNullOrEmpty(name), "Name is null or empty.");
    this.name = name;
    this.sequence = sequence;
  }

  protected abstract String getPrefix();

  @Getter
  private String name;
  @Getter
  private Long sequence;

  /**
   * Render the selected {@code parts} of this id joined by {@link #SEPARATOR}; absent parts
   * (and a null sequence) are skipped by the null-skipping joiner.
   */
  public String get(EnumSet<Parts> parts) {
    Object[] values = new Object[3];
    if (parts.contains(Parts.PREFIX)) {
      values[0] = getPrefix();
    }
    if (parts.contains(Parts.NAME)) {
      values[1] = name;
    }
    if (parts.contains(Parts.SEQUENCE)) {
      values[2] = sequence;
    }
    return JOINER.join(values);
  }

  @Override
  public String toString() {
    return get(Parts.ALL);
  }

  /**
   * Parse an id string back into the matching {@link Id} sub-class.
   * @throws RuntimeException if the string does not match {@link #PATTERN} or has an unknown prefix
   */
  public static Id parse(String id) {
    Matcher matcher = PATTERN.matcher(id);
    if (matcher.find()) {
      if (Job.PREFIX.equals(matcher.group(1))) {
        return new Job(matcher.group(2), matcher.group(3));
      }
      if (Task.PREFIX.equals(matcher.group(1))) {
        return new Task(matcher.group(2), matcher.group(3));
      }
      if (MultiTask.PREFIX.equals(matcher.group(1))) {
        return new MultiTask(matcher.group(2), matcher.group(3));
      }
    }
    throw new RuntimeException("Invalid id: " + id);
  }

  /** Id for a job; string form {@code job_<name>_<sequence>}. */
  public static class Job extends Id {
    public static final String PREFIX = "job";

    private Job(String name) {
      super(name);
    }

    private Job(String name, long sequence) {
      super(name, sequence);
    }

    private Job(String name, String sequence) {
      super(name, Long.parseLong(sequence));
    }

    @Override
    protected String getPrefix() {
      return PREFIX;
    }

    public static Job create(String name) {
      return new Job(name);
    }

    public static Job create(String name, long sequence) {
      return new Job(name, sequence);
    }
  }

  /** Id for a task; string form {@code task_<name>_<sequence>}. */
  public static class Task extends Id {
    public static final String PREFIX = "task";

    private Task(String name) {
      super(name);
    }

    private Task(String name, int sequence) {
      super(name, sequence);
    }

    private Task(String name, String sequence) {
      // Parse as long for consistency with Job and the Long sequence field: the id pattern
      // places no upper bound on the digits, so Integer.parseInt could overflow on valid ids.
      super(name, Long.parseLong(sequence));
    }

    @Override
    protected String getPrefix() {
      return PREFIX;
    }

    public static Task create(String name) {
      return new Task(name);
    }

    public static Task create(String name, int sequence) {
      return new Task(name, sequence);
    }
  }

  /** Id for a multi-task; string form {@code multitask_<name>_<sequence>}. */
  public static class MultiTask extends Id {
    public static final String PREFIX = "multitask";

    private MultiTask(String name) {
      super(name);
    }

    private MultiTask(String name, int sequence) {
      super(name, sequence);
    }

    private MultiTask(String name, String sequence) {
      // See Task: parse as long to match the Long sequence field and avoid int overflow.
      super(name, Long.parseLong(sequence));
    }

    @Override
    protected String getPrefix() {
      return PREFIX;
    }

    public static MultiTask create(String name) {
      return new MultiTask(name);
    }

    public static MultiTask create(String name, int sequence) {
      return new MultiTask(name, sequence);
    }
  }
}
| 4,111 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/ProxiedFileSystemWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.io.Closer;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
/**
 * A wrapper class for generating a file system as a proxy user.
 * Deprecated in favor of {@link org.apache.gobblin.util.ProxiedFileSystemUtils}.
 */
@Deprecated
public class ProxiedFileSystemWrapper {
  private static final Logger LOG = LoggerFactory.getLogger(ProxiedFileSystemWrapper.class);

  // Populated inside the doAs block of getProxiedFileSystem; holds the FS created as the proxy user.
  private FileSystem proxiedFs;

  /**
   * Two authentication types for Hadoop Security, through TOKEN or KEYTAB.
   * @deprecated Use {@link org.apache.gobblin.util.ProxiedFileSystemUtils.AuthType}.
   */
  @Deprecated
  public enum AuthType {
    TOKEN,
    KEYTAB;
  }

  /**
   * Setter for proxiedFs.
   * @param currentProxiedFs the {@link FileSystem} instance this wrapper should expose
   */
  public void setProxiedFileSystem(FileSystem currentProxiedFs) {
    this.proxiedFs = currentProxiedFs;
  }

  /**
   * Same as @see #getProxiedFileSystem(State, AuthType, String, String, Configuration) where state properties will be copied
   * into Configuration.
   *
   * @param properties state carrying the proxy-user settings; copied into a fresh Configuration
   * @param authType TOKEN or KEYTAB
   * @param authPath keytab location (KEYTAB) or token file location (TOKEN)
   * @param uri File system URI
   * @return the proxied {@link FileSystem}
   * @throws IOException
   * @throws InterruptedException
   * @throws URISyntaxException
   */
  public FileSystem getProxiedFileSystem(State properties, AuthType authType, String authPath, String uri)
      throws IOException, InterruptedException, URISyntaxException {
    Configuration conf = new Configuration();
    JobConfigurationUtils.putStateIntoConfiguration(properties, conf);
    return getProxiedFileSystem(properties, authType, authPath, uri, conf);
  }

  /**
   * Getter for proxiedFs, using the passed parameters to create an instance of a proxiedFs.
   * @param properties must contain {@code ConfigurationKeys.FS_PROXY_AS_USER_NAME}
   * @param authType is either TOKEN or KEYTAB.
   * @param authPath is the KEYTAB location if the authType is KEYTAB; otherwise, it is the token file.
   * @param uri File system URI.
   * @throws IOException
   * @throws InterruptedException
   * @throws URISyntaxException
   * @return proxiedFs
   */
  public FileSystem getProxiedFileSystem(State properties, AuthType authType, String authPath, String uri, final Configuration conf)
      throws IOException, InterruptedException, URISyntaxException {
    Preconditions.checkArgument(StringUtils.isNotBlank(properties.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME)),
        "State does not contain a proper proxy user name");
    String proxyUserName = properties.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME);

    UserGroupInformation proxyUser;
    switch (authType) {
      case KEYTAB: // If the authentication type is KEYTAB, log in a super user first before creating a proxy user.
        Preconditions.checkArgument(
            StringUtils.isNotBlank(properties.getProp(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS)),
            "State does not contain a proper proxy token file name");
        String superUser = properties.getProp(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS);
        // Order matters: the keytab login must happen before createProxyUser so the proxy
        // user is backed by the freshly logged-in super user.
        UserGroupInformation.loginUserFromKeytab(superUser, authPath);
        proxyUser = UserGroupInformation.createProxyUser(proxyUserName, UserGroupInformation.getLoginUser());
        break;
      case TOKEN: // If the authentication type is TOKEN, create a proxy user and then add the token to the user.
        proxyUser = UserGroupInformation.createProxyUser(proxyUserName, UserGroupInformation.getLoginUser());
        // The token file is a local sequence file keyed by user name; a missing entry is tolerated.
        Optional<Token<?>> proxyToken = getTokenFromSeqFile(authPath, proxyUserName);
        if (proxyToken.isPresent()) {
          proxyUser.addToken(proxyToken.get());
        } else {
          LOG.warn("No delegation token found for the current proxy user.");
        }
        break;
      default:
        LOG.warn("Creating a proxy user without authentication, which could not perform File system operations.");
        proxyUser = UserGroupInformation.createProxyUser(proxyUserName, UserGroupInformation.getLoginUser());
        break;
    }

    final URI fsURI = URI.create(uri);
    // The FileSystem handle must be created inside doAs so it carries the proxy user's credentials.
    proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws IOException {
        LOG.debug("Now performing file system operations as :" + UserGroupInformation.getCurrentUser());
        proxiedFs = FileSystem.get(fsURI, conf);
        return null;
      }
    });
    return this.proxiedFs;
  }

  /**
   * Get token from the token sequence file.
   * @param authPath local path of the token sequence file
   * @param proxyUserName user whose token is looked up
   * @return Token for proxyUserName if it exists.
   * @throws IOException
   */
  private static Optional<Token<?>> getTokenFromSeqFile(String authPath, String proxyUserName) throws IOException {
    try (Closer closer = Closer.create()) {
      FileSystem localFs = FileSystem.getLocal(new Configuration());
      SequenceFile.Reader tokenReader =
          closer.register(new SequenceFile.Reader(localFs, new Path(authPath), localFs.getConf()));
      Text key = new Text();
      Token<?> value = new Token<>();
      // Linear scan: the file maps user name (key) -> delegation token (value).
      while (tokenReader.next(key, value)) {
        LOG.info("Found token for " + key);
        if (key.toString().equals(proxyUserName)) {
          return Optional.<Token<?>> of(value);
        }
      }
    }
    return Optional.absent();
  }
}
| 4,112 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/StringParsingUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class StringParsingUtils {

  /** Accepts a number, an optional binary-unit prefix (k/m/g/t/p), and an optional trailing b/B. */
  public static final Pattern HUMAN_READABLE_SIZE_PATTERN = Pattern.compile("([0-9\\.]+)\\s*([kKmMgGtTpP]?)[bB]?");

  /** Unit prefixes ordered so that index i corresponds to 2^(10 * (i + 1)) bytes. */
  private static final String UNIT_PREFIXES = "KMGTP";

  /**
   * Convert a human readable string (e.g. 10kb) into the number of bytes.
   *
   * Examples: 10b, 10kb, 10mb, 10gb, 10tb, 10pb, 1.2m, ...
   *
   * @param string the human readable size, optionally surrounded by whitespace
   * @return the equivalent number of bytes (fractional results are truncated)
   * @throws FormatException if the string cannot be interpreted as a size
   */
  public static long humanReadableToByteCount(String string) throws FormatException {
    Matcher matcher = HUMAN_READABLE_SIZE_PATTERN.matcher(string.trim());
    if (!matcher.matches()) {
      throw new FormatException("Could not parse human readable size string " + string);
    }
    // Map the (case-insensitive) unit prefix to its power-of-two exponent via a lookup table.
    String unit = matcher.group(2).toUpperCase();
    int exponent = 0;
    if (!unit.isEmpty()) {
      int unitIndex = UNIT_PREFIXES.indexOf(unit.charAt(0));
      if (unitIndex < 0) {
        throw new FormatException("Could not parse human readable size string " + string);
      }
      exponent = 10 * (unitIndex + 1);
    }
    try {
      double base = Double.parseDouble(matcher.group(1));
      return (long) (base * (1L << exponent));
    } catch (NumberFormatException nfe) {
      throw new FormatException("Could not parse human readable size string " + string);
    }
  }

  /** Thrown when a size string cannot be parsed. */
  public static class FormatException extends Exception {

    public FormatException(String message) {
      super(message);
    }

    public FormatException(String message, Throwable cause) {
      super(message, cause);
    }

    public FormatException(Throwable cause) {
      super(cause);
    }
  }
}
| 4,113 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/SystemPropertiesWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
/**
 * Thin, mockable wrapper around {@link System#getProperty(String)} lookups.
 */
public class SystemPropertiesWrapper {

  /** Looks up a single JVM system property; returns {@code null} when the property is unset. */
  private String lookup(final String name) {
    return System.getProperty(name);
  }

  /**
   * The path to the JRE that is used to run the program which does the lookup. It is not
   * related to JAVA_HOME.
   * e.g.
   * /Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk/Contents/Home/jre
   */
  public String getJavaHome() {
    return lookup("java.home");
  }

  /** The class path the current JVM was started with. */
  public String getJavaClassPath() {
    return lookup("java.class.path");
  }
}
| 4,114 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/DatasetFilterUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.List;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.commons.lang3.StringUtils;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.gobblin.configuration.State;
/**
 * A utility class for filtering datasets through blacklist and whitelist.
 */
public class DatasetFilterUtils {

  public static List<Pattern> getPatternList(State state, String propKey) {
    return getPatternList(state, propKey, StringUtils.EMPTY);
  }

  public static List<Pattern> getPatternList(State state, String propKey, String def) {
    return getPatternsFromStrings(state.getPropAsList(propKey, def));
  }

  /**
   * Compile each string in the given list into a {@link Pattern}.
   */
  public static List<Pattern> getPatternsFromStrings(List<String> strings) {
    List<Pattern> compiled = Lists.newArrayList();
    for (String regex : strings) {
      compiled.add(Pattern.compile(regex));
    }
    return compiled;
  }

  public static List<String> filter(List<String> topics, List<Pattern> blacklist, List<Pattern> whitelist) {
    List<String> survivors = Lists.newArrayList();
    for (String candidate : topics) {
      if (survived(candidate, blacklist, whitelist)) {
        survivors.add(candidate);
      }
    }
    return survivors;
  }

  public static Set<String> filter(Set<String> topics, List<Pattern> blacklist, List<Pattern> whitelist) {
    Set<String> survivors = Sets.newHashSet();
    for (String candidate : topics) {
      if (survived(candidate, blacklist, whitelist)) {
        survivors.add(candidate);
      }
    }
    return survivors;
  }

  /**
   * A topic survives if (1) it doesn't match the blacklist, and (2) either the whitelist is
   * empty, or it matches the whitelist. Whitelist and blacklist use regex patterns
   * (NOT glob patterns).
   */
  public static boolean survived(String topic, List<Pattern> blacklist, List<Pattern> whitelist) {
    return !stringInPatterns(topic, blacklist) && (whitelist.isEmpty() || stringInPatterns(topic, whitelist));
  }

  /**
   * Determines whether a string fully matches at least one of the regex patterns.
   */
  public static boolean stringInPatterns(String s, List<Pattern> patterns) {
    for (Pattern candidate : patterns) {
      if (candidate.matcher(s).matches()) {
        return true;
      }
    }
    return false;
  }
}
| 4,115 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/EmailUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.net.InetAddress;
import java.net.UnknownHostException;
import org.apache.commons.mail.Email;
import org.apache.commons.mail.EmailException;
import org.apache.commons.mail.SimpleEmail;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Splitter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.password.PasswordManager;
/**
 * A utility class for sending emails.
 *
 * @author Yinan Li
 */
public class EmailUtils {

  private static final Logger LOGGER = LoggerFactory.getLogger(EmailUtils.class);

  /**
   * A general method for sending emails.
   *
   * @param state a {@link State} object containing configuration properties
   * @param subject email subject
   * @param message email message
   * @throws EmailException if there is anything wrong sending the email
   */
  public static void sendEmail(State state, String subject, String message) throws EmailException {
    Email email = new SimpleEmail();
    email.setHostName(state.getProp(ConfigurationKeys.EMAIL_HOST_KEY, ConfigurationKeys.DEFAULT_EMAIL_HOST));
    if (state.contains(ConfigurationKeys.EMAIL_SMTP_PORT_KEY)) {
      email.setSmtpPort(state.getPropAsInt(ConfigurationKeys.EMAIL_SMTP_PORT_KEY));
    }
    email.setFrom(state.getProp(ConfigurationKeys.EMAIL_FROM_KEY));
    // SMTP authentication is optional: only configured when both user and password are present.
    boolean hasCredentials =
        state.contains(ConfigurationKeys.EMAIL_USER_KEY) && state.contains(ConfigurationKeys.EMAIL_PASSWORD_KEY);
    if (hasCredentials) {
      email.setAuthentication(state.getProp(ConfigurationKeys.EMAIL_USER_KEY),
          PasswordManager.getInstance(state).readPassword(state.getProp(ConfigurationKeys.EMAIL_PASSWORD_KEY)));
    }
    // Recipients are a comma-separated list; blanks are ignored.
    for (String recipient : Splitter.on(',').trimResults().omitEmptyStrings()
        .split(state.getProp(ConfigurationKeys.EMAIL_TOS_KEY))) {
      email.addTo(recipient);
    }
    email.setSubject(subject);
    String fromHostLine = String.format("This email was sent from host: %s%n%n", getLocalHostName());
    email.setMsg(fromHostLine + message);
    email.send();
  }

  /** Best-effort lookup of the local host name; falls back to "unknown" on failure. */
  private static String getLocalHostName() {
    try {
      return InetAddress.getLocalHost().getHostName();
    } catch (UnknownHostException uhe) {
      LOGGER.error("Failed to get the host name", uhe);
      return "unknown";
    }
  }

  /**
   * Send a job completion notification email.
   *
   * @param jobId job name
   * @param message email message
   * @param state job state
   * @param jobState a {@link State} object carrying job configuration properties
   * @throws EmailException if there is anything wrong sending the email
   */
  public static void sendJobCompletionEmail(String jobId, String message, String state, State jobState)
      throws EmailException {
    String subject = String.format("Gobblin notification: job %s has completed with state %s", jobId, state);
    sendEmail(jobState, subject, message);
  }

  /**
   * Send a job cancellation notification email.
   *
   * @param jobId job name
   * @param message email message
   * @param jobState a {@link State} object carrying job configuration properties
   * @throws EmailException if there is anything wrong sending the email
   */
  public static void sendJobCancellationEmail(String jobId, String message, State jobState) throws EmailException {
    String subject = String.format("Gobblin notification: job %s has been cancelled", jobId);
    sendEmail(jobState, subject, message);
  }

  /**
   * Send a job failure alert email.
   *
   * @param jobName job name
   * @param message email message
   * @param failures number of consecutive job failures
   * @param jobState a {@link State} object carrying job configuration properties
   * @throws EmailException if there is anything wrong sending the email
   */
  public static void sendJobFailureAlertEmail(String jobName, String message, int failures, State jobState)
      throws EmailException {
    String subject = String.format("Gobblin alert: job %s has failed %d %s consecutively in the past", jobName,
        failures, failures > 1 ? "times" : "time");
    sendEmail(jobState, subject, message);
  }
}
| 4,116 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/WritableShimSerialization.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.Serialization;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.gobblin.compat.hadoop.WritableShim;
/**
 * A serializer that understands how to write objects that implement {@link WritableShim} out to a Hadoop
 * stream. This class must be present in the {@code io.serializations} key of a Hadoop config for the
 * Hadoop runtime to find and instantiate it.
 */
public class WritableShimSerialization implements Serialization<WritableShim> {

  /**
   * Helper method to add this serializer to an existing Hadoop config, preserving any serializers
   * already registered under {@code io.serializations}.
   */
  public static void addToHadoopConfiguration(Configuration conf) {
    final String SERIALIZATION_KEY = "io.serializations";
    String existingSerializers = conf.get(SERIALIZATION_KEY);
    if (existingSerializers != null) {
      conf.set(SERIALIZATION_KEY, existingSerializers + "," + WritableShimSerialization.class.getName());
    } else {
      // Nothing configured yet: keep Hadoop's default Writable serialization available as well.
      conf.set(SERIALIZATION_KEY,
          "org.apache.hadoop.io.serializer.WritableSerialization," + WritableShimSerialization.class.getName());
    }
  }

  @Override
  public boolean accept(Class<?> c) {
    return WritableShim.class.isAssignableFrom(c);
  }

  @Override
  public Serializer<WritableShim> getSerializer(Class<WritableShim> c) {
    return new WritableShimSerializer();
  }

  @Override
  public Deserializer<WritableShim> getDeserializer(Class<WritableShim> c) {
    return new WritableShimDeserializer(c);
  }

  /** Writes {@link WritableShim} objects to the stream supplied via {@link #open(OutputStream)}. */
  private static class WritableShimSerializer implements Serializer<WritableShim> {
    private DataOutputStream out;

    public WritableShimSerializer() {
      out = null;
    }

    @Override
    public void open(OutputStream out)
        throws IOException {
      this.out = new DataOutputStream(out);
    }

    @Override
    public void serialize(WritableShim writableShim)
        throws IOException {
      writableShim.write(this.out);
    }

    @Override
    public void close()
        throws IOException {
      // Guard against close() before open() or a double close, which previously caused an NPE.
      if (this.out != null) {
        this.out.flush();
        this.out.close();
        this.out = null;
      }
    }
  }

  /** Reads {@link WritableShim} objects of a fixed class from the stream supplied via {@link #open(InputStream)}. */
  private static class WritableShimDeserializer implements Deserializer<WritableShim> {
    private final Class<WritableShim> clazz;
    private DataInputStream in;

    public WritableShimDeserializer(Class<WritableShim> c) {
      this.clazz = c;
      this.in = null;
    }

    @Override
    public void open(InputStream in)
        throws IOException {
      this.in = new DataInputStream(in);
    }

    @Override
    public WritableShim deserialize(WritableShim writableShim)
        throws IOException {
      try {
        if (writableShim == null) {
          // Instantiate via the no-arg constructor. getDeclaredConstructor().newInstance() replaces
          // the deprecated Class.newInstance(), which rethrows checked constructor exceptions unchecked.
          writableShim = this.clazz.getDeclaredConstructor().newInstance();
        }
        writableShim.readFields(in);
        return writableShim;
      } catch (ReflectiveOperationException e) {
        throw new IOException("Error creating new object", e);
      }
    }

    @Override
    public void close()
        throws IOException {
      // Fix: release the wrapped stream; previously this was a no-op and leaked the stream.
      if (this.in != null) {
        this.in.close();
        this.in = null;
      }
    }
  }
}
| 4,117 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/WriterUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.IOException;
import java.net.URI;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.function.BiConsumer;
import lombok.SneakyThrows;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileConstants;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.token.Token;
import com.github.rholder.retry.Retryer;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.retry.RetryerFactory;
/**
* Utility class for use with the {@link org.apache.gobblin.writer.DataWriter} class.
*/
@Slf4j
public class WriterUtils {
// Config subtree holding encrypted writer settings ("<writer prefix>.encrypted.*").
public static final String WRITER_ENCRYPTED_CONFIG_PATH = ConfigurationKeys.WRITER_PREFIX + ".encrypted";

// Sentinel meaning "do not wait/retry"; compared by reference in getWaitPolicy().
public static final Config NO_RETRY_CONFIG = ConfigFactory.empty();

// Strategies for deriving the relative writer file path when WRITER_FILE_PATH is not set explicitly.
public enum WriterFilePathType {
  /**
   * Write records into namespace/table folder. If namespace has multiple components, each component will be
   * a folder in the path. For example: the write file path for namespace 'org.apache.gobblin' and table 'tableName'
   * will be 'org/apache/gobblin/tableName'
   */
  NAMESPACE_TABLE,
  /**
   * TABLENAME should be used for jobs that pull from multiple tables/topics and intend to write the records
   * in each table/topic to a separate folder.
   */
  TABLENAME,
  /** Write records into the output file decided by {@link org.apache.gobblin.source.workunit.Extract}*/
  DEFAULT
}
/**
 * Get the {@link Path} corresponding to the directory a given {@link org.apache.gobblin.writer.DataWriter} should be
 * writing its staging data. The staging data directory is determined by combining the
 * {@link ConfigurationKeys#WRITER_STAGING_DIR} and the {@link ConfigurationKeys#WRITER_FILE_PATH}.
 * @param state is the {@link State} corresponding to a specific {@link org.apache.gobblin.writer.DataWriter}.
 * @param numBranches is the total number of branches for the given {@link State}.
 * @param branchId is the id for the specific branch that the {@link org.apache.gobblin.writer.DataWriter} will write to.
 * @return a {@link Path} specifying the directory where the {@link org.apache.gobblin.writer.DataWriter} will write to.
 */
public static Path getWriterStagingDir(State state, int numBranches, int branchId) {
  String writerStagingDirKey =
      ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_STAGING_DIR, numBranches, branchId);
  Preconditions.checkArgument(state.contains(writerStagingDirKey),
      "Missing required property " + writerStagingDirKey);
  // Reuse the branch-specific key computed above instead of recomputing it for the lookup.
  return new Path(state.getProp(writerStagingDirKey),
      WriterUtils.getWriterFilePath(state, numBranches, branchId));
}
/**
 * Get the staging {@link Path} for a {@link org.apache.gobblin.writer.DataWriter}, scoped under the given attempt id.
 */
public static Path getWriterStagingDir(State state, int numBranches, int branchId, String attemptId) {
  Preconditions.checkArgument(attemptId != null && !attemptId.isEmpty(), "AttemptId cannot be null or empty: " + attemptId);
  Path branchStagingDir = getWriterStagingDir(state, numBranches, branchId);
  return new Path(branchStagingDir, attemptId);
}
/**
 * Get the {@link Path} corresponding to the directory a given {@link org.apache.gobblin.writer.DataWriter} should be
 * writing its output data. The output data directory is determined by combining the
 * {@link ConfigurationKeys#WRITER_OUTPUT_DIR} and the {@link ConfigurationKeys#WRITER_FILE_PATH}.
 * @param state is the {@link State} corresponding to a specific {@link org.apache.gobblin.writer.DataWriter}.
 * @param numBranches is the total number of branches for the given {@link State}.
 * @param branchId is the id for the specific branch that the {@link org.apache.gobblin.writer.DataWriter} will write to.
 * @return a {@link Path} specifying the directory where the {@link org.apache.gobblin.writer.DataWriter} will write to.
 */
public static Path getWriterOutputDir(State state, int numBranches, int branchId) {
  String writerOutputDirKey =
      ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_OUTPUT_DIR, numBranches, branchId);
  Preconditions.checkArgument(state.contains(writerOutputDirKey), "Missing required property " + writerOutputDirKey);
  Path relativeFilePath = WriterUtils.getWriterFilePath(state, numBranches, branchId);
  return new Path(state.getProp(writerOutputDirKey), relativeFilePath);
}
/**
 * Get the {@link Path} corresponding to the directory a given {@link org.apache.gobblin.publisher.BaseDataPublisher} should
 * commit its output data to. The final output data directory is determined by combining the
 * {@link ConfigurationKeys#DATA_PUBLISHER_FINAL_DIR} and, when
 * {@link ConfigurationKeys#DATA_PUBLISHER_APPEND_EXTRACT_TO_FINAL_DIR} is enabled, the
 * {@link ConfigurationKeys#WRITER_FILE_PATH}.
 * @param state is the {@link State} corresponding to a specific {@link org.apache.gobblin.writer.DataWriter}.
 * @param numBranches is the total number of branches for the given {@link State}.
 * @param branchId is the id for the specific branch that the {@link org.apache.gobblin.publisher.BaseDataPublisher} will publish.
 * @return a {@link Path} specifying the directory where the {@link org.apache.gobblin.publisher.BaseDataPublisher} will publish.
 */
public static Path getDataPublisherFinalDir(State state, int numBranches, int branchId) {
  String dataPublisherFinalDirKey =
      ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, numBranches, branchId);
  Preconditions.checkArgument(state.contains(dataPublisherFinalDirKey),
      "Missing required property " + dataPublisherFinalDirKey);
  // Reuse the branch-specific key computed above instead of recomputing it in each branch.
  if (state.getPropAsBoolean(ConfigurationKeys.DATA_PUBLISHER_APPEND_EXTRACT_TO_FINAL_DIR,
      ConfigurationKeys.DEFAULT_DATA_PUBLISHER_APPEND_EXTRACT_TO_FINAL_DIR)) {
    return new Path(state.getProp(dataPublisherFinalDirKey),
        WriterUtils.getWriterFilePath(state, numBranches, branchId));
  } else {
    return new Path(state.getProp(dataPublisherFinalDirKey));
  }
}
/**
 * Get the {@link Path} corresponding to the relative file path for a given {@link org.apache.gobblin.writer.DataWriter}.
 * This method retrieves the value of {@link ConfigurationKeys#WRITER_FILE_PATH} from the given {@link State}; when it
 * is not specified, the path is derived from the configured {@link WriterFilePathType}.
 * @param state is the {@link State} corresponding to a specific {@link org.apache.gobblin.writer.DataWriter}.
 * @param numBranches is the total number of branches for the given {@link State}.
 * @param branchId is the id for the specific branch that the {@link org.apache.gobblin.writer.DataWriter} will write to.
 * @return a {@link Path} specifying the relative directory where the {@link org.apache.gobblin.writer.DataWriter} will write to.
 */
public static Path getWriterFilePath(State state, int numBranches, int branchId) {
  String writerFilePathKey =
      ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_FILE_PATH, numBranches, branchId);
  // An explicitly configured path always wins.
  if (state.contains(writerFilePathKey)) {
    return new Path(state.getProp(writerFilePathKey));
  }
  switch (getWriterFilePathType(state)) {
    case NAMESPACE_TABLE:
      return getNamespaceTableWriterFilePath(state);
    case TABLENAME:
      return WriterUtils.getTableNameWriterFilePath(state);
    default:
      return WriterUtils.getDefaultWriterFilePath(state, numBranches, branchId);
  }
}
/** Resolve the configured {@link WriterFilePathType}, defaulting when unset; value is case-insensitive. */
private static WriterFilePathType getWriterFilePathType(State state) {
  return WriterFilePathType.valueOf(
      state.getProp(ConfigurationKeys.WRITER_FILE_PATH_TYPE, ConfigurationKeys.DEFAULT_WRITER_FILE_PATH_TYPE)
          .toUpperCase());
}
/**
 * Creates the {@link Path} for case {@link WriterFilePathType#NAMESPACE_TABLE} from
 * {@link ConfigurationKeys#EXTRACT_NAMESPACE_NAME_KEY} and {@link ConfigurationKeys#EXTRACT_TABLE_NAME_KEY};
 * each dot-separated namespace component becomes a directory level.
 * @param state state containing the namespace and table properties
 * @return the namespace/table path
 */
public static Path getNamespaceTableWriterFilePath(State state) {
  Preconditions.checkArgument(state.contains(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY));
  Preconditions.checkArgument(state.contains(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY));
  String namespaceAsDirs =
      state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY).replaceAll("\\.", Path.SEPARATOR);
  String tableName = state.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY);
  return new Path(namespaceAsDirs + Path.SEPARATOR + tableName);
}
/**
 * Creates the {@link Path} for the {@link ConfigurationKeys#WRITER_FILE_PATH} key from
 * {@link ConfigurationKeys#EXTRACT_TABLE_NAME_KEY} alone.
 * @param state state containing the table name property
 * @return a path consisting of just the table name
 */
public static Path getTableNameWriterFilePath(State state) {
  Preconditions.checkArgument(state.contains(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY));
  String tableName = state.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY);
  return new Path(tableName);
}
/**
 * Creates the default {@link Path} for the {@link ConfigurationKeys#WRITER_FILE_PATH} key, derived from the
 * output file path carried by a {@link WorkUnitState} or {@link WorkUnit}.
 * @param numBranches is the total number of branches for the given {@link State}.
 * @param branchId is the id for the specific branch that the {@link org.apache.gobblin.writer.DataWriter} will write to.
 * @return a {@link Path} specifying the directory where the {@link org.apache.gobblin.writer.DataWriter} will write to.
 */
public static Path getDefaultWriterFilePath(State state, int numBranches, int branchId) {
  if (state instanceof WorkUnitState) {
    WorkUnitState workUnitState = (WorkUnitState) state;
    return new Path(ForkOperatorUtils.getPathForBranch(workUnitState, workUnitState.getOutputFilePath(),
        numBranches, branchId));
  }
  if (state instanceof WorkUnit) {
    WorkUnit workUnit = (WorkUnit) state;
    return new Path(ForkOperatorUtils.getPathForBranch(workUnit, workUnit.getOutputFilePath(),
        numBranches, branchId));
  }
  throw new RuntimeException("In order to get the default value for " + ConfigurationKeys.WRITER_FILE_PATH
      + " the given state must be of type " + WorkUnitState.class.getName() + " or " + WorkUnit.class.getName());
}
/**
 * Get the value of {@link ConfigurationKeys#WRITER_FILE_NAME} for a given {@link org.apache.gobblin.writer.DataWriter},
 * constructing a default file name from the writer id (and format extension, when non-empty) if the key is not set
 * in the {@link State}.
 * @param state is the {@link State} corresponding to a specific {@link org.apache.gobblin.writer.DataWriter}.
 * @param numBranches is the total number of branches for the given {@link State}.
 * @param branchId is the id for the specific branch that the {@link org.apache.gobblin.writer.DataWriter} will write to.
 * @param writerId is the id for a specific {@link org.apache.gobblin.writer.DataWriter}.
 * @param formatExtension is the format extension for the file (e.g. ".avro").
 * @return a {@link String} representation of the file name.
 */
public static String getWriterFileName(State state, int numBranches, int branchId, String writerId,
    String formatExtension) {
  String baseName = ConfigurationKeys.DEFAULT_WRITER_FILE_BASE_NAME;
  String defaultFileName = Strings.isNullOrEmpty(formatExtension)
      ? String.format("%s.%s", baseName, writerId)
      : String.format("%s.%s.%s", baseName, writerId, formatExtension);
  String fileNameKey =
      ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_FILE_NAME, numBranches, branchId);
  return state.getProp(fileNameKey, defaultFileName);
}
/**
 * Creates a {@link CodecFactory} based on the specified codec name and deflate level. If codecName is absent, then
 * a {@link CodecFactory#deflateCodec(int)} with the default level is returned. Otherwise the codecName is converted
 * into a {@link CodecFactory} via the {@link CodecFactory#fromString(String)} method.
 *
 * @param codecName the name of the codec to use (e.g. deflate, snappy, xz, etc.).
 * @param deflateLevel must be an integer from [0-9], and is only applicable if the codecName is "deflate".
 * @return a {@link CodecFactory}.
 */
public static CodecFactory getCodecFactory(Optional<String> codecName, Optional<String> deflateLevel) {
  if (!codecName.isPresent()) {
    return CodecFactory.deflateCodec(ConfigurationKeys.DEFAULT_DEFLATE_LEVEL);
  }
  if (codecName.get().equalsIgnoreCase(DataFileConstants.DEFLATE_CODEC)) {
    // The deflate level only applies to the deflate codec; fall back to the default level when absent.
    return deflateLevel.isPresent()
        ? CodecFactory.deflateCodec(Integer.parseInt(deflateLevel.get()))
        : CodecFactory.deflateCodec(ConfigurationKeys.DEFAULT_DEFLATE_LEVEL);
  }
  return CodecFactory.fromString(codecName.get().toLowerCase());
}
/**
 * Create the given dir as well as all missing ancestor dirs. All created dirs will have the given permission.
 * This should be used instead of {@link FileSystem#mkdirs(Path, FsPermission)}, since that method only sets
 * the permission for the given dir, and not recursively for the ancestor dirs.
 *
 * @param fs FileSystem
 * @param path The dir to be created
 * @param perm The permission to be set
 * @throws IOException if failing to create dir or set permission.
 */
public static void mkdirsWithRecursivePermission(final FileSystem fs, final Path path, FsPermission perm) throws IOException {
  // Delegates with NO_RETRY_CONFIG, i.e. no wait/retry for path visibility after creation.
  mkdirsWithRecursivePermissionWithRetry(fs, path, perm, NO_RETRY_CONFIG);
}
/**
 * Like {@link #mkdirsWithRecursivePermission(FileSystem, Path, FsPermission)}, but optionally waits (per the given
 * retry config) for each newly-created dir to become visible before setting its permission — useful on
 * eventually-consistent file systems.
 *
 * @param fs FileSystem
 * @param path The dir to be created
 * @param perm The permission to be set on every newly-created dir
 * @param retrierConfig retry settings for the visibility wait; {@link #NO_RETRY_CONFIG} disables waiting
 * @throws IOException if failing to create dir or set permission
 */
public static void mkdirsWithRecursivePermissionWithRetry(final FileSystem fs, final Path path, FsPermission perm, Config retrierConfig) throws IOException {
  if (fs.exists(path)) {
    return;
  }
  // Record the target dir and every missing ancestor, so that only newly-created dirs get their
  // permission (re)set below.
  Set<Path> pathsThatDidntExistBefore = new HashSet<>();
  for (Path p = path; p != null && !fs.exists(p); p = p.getParent()) {
    pathsThatDidntExistBefore.add(p);
  }
  if (!fs.mkdirs(path, perm)) {
    throw new IOException(String.format("Unable to mkdir %s with permission %s", path, perm));
  }
  BiConsumer<FileSystem, Path> waitPolicy = getWaitPolicy(retrierConfig);
  for (Path p : pathsThatDidntExistBefore) {
    // Fix: wait on each created dir itself (previously this always waited on the leaf 'path'),
    // so the subsequent setPermission cannot race a not-yet-visible ancestor.
    waitPolicy.accept(fs, p);
    fs.setPermission(p, perm);
  }
}
/**
 * Defines the behavior for waiting until a freshly-created path is visible before proceeding.
 * With {@link #NO_RETRY_CONFIG} (compared by reference) the policy is a no-op; otherwise it polls
 * {@link FileSystem#exists(Path)} under a {@link Retryer} built from the given config, which matters
 * on eventually-consistent file systems like Amazon S3.
 */
private static BiConsumer<FileSystem, Path> getWaitPolicy(Config retrierConfig) {
  return new BiConsumer<FileSystem, Path>() {
    @SneakyThrows
    @Override
    public void accept(FileSystem fs, Path path) {
      if (retrierConfig != NO_RETRY_CONFIG) {
        // Wait until the path exists, as it may not be visible right away on an eventually
        // consistent fs like Amazon S3.
        Retryer<Void> retryer = RetryerFactory.newInstance(retrierConfig);
        try {
          retryer.call(() -> {
            if (!fs.exists(path)) {
              throw new IOException("Path " + path + " does not exist however it should. Will wait more.");
            }
            return null;
          });
        } catch (Exception e) {
          // Fix: chain the cause instead of flattening it into the message (and add the missing
          // space after the path in the message).
          throw new IOException("Path " + path + " does not exist however it should. Giving up...", e);
        }
      }
    }
  };
}
/**
 * Resolve the writer file-system URI for the given fork branch, defaulting to the local FS.
 */
public static URI getWriterFsUri(State state, int numBranches, int branchId) {
  String branchedKey =
      ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, numBranches, branchId);
  String fsUri = state.getProp(branchedKey, ConfigurationKeys.LOCAL_FS_URI);
  return URI.create(fsUri);
}
/**
 * Build the {@link FileSystem} a writer should use for the given fork branch.
 * When proxy-as-user is enabled, delegates to token- or keytab-based proxying
 * depending on the configured auth method; otherwise the file system is created
 * as the current user.
 */
public static FileSystem getWriterFS(State state, int numBranches, int branchId)
    throws IOException {
  URI uri = getWriterFsUri(state, numBranches, branchId);
  Configuration hadoopConf = getFsConfiguration(state);
  boolean shouldProxy = state.getPropAsBoolean(ConfigurationKeys.SHOULD_FS_PROXY_AS_USER,
      ConfigurationKeys.DEFAULT_SHOULD_FS_PROXY_AS_USER);
  if (shouldProxy) {
    // Initialize file system for a proxy user.
    String authMethod =
        state.getProp(ConfigurationKeys.FS_PROXY_AUTH_METHOD, ConfigurationKeys.DEFAULT_FS_PROXY_AUTH_METHOD);
    if (ConfigurationKeys.TOKEN_AUTH.equalsIgnoreCase(authMethod)) {
      return getWriterFsUsingToken(state, uri);
    }
    if (ConfigurationKeys.KERBEROS_AUTH.equalsIgnoreCase(authMethod)) {
      return getWriterFsUsingKeytab(state, uri);
    }
  }
  // Initialize file system as the current user.
  return FileSystem.get(uri, hadoopConf);
}
/** Convenience overload of {@link #getWriterFS(State, int, int)} for a single (non-forked) branch. */
public static FileSystem getWriterFs(State state) throws IOException {
  return getWriterFS(state, 1, 0);
}
/**
 * Build a proxied {@link FileSystem} authenticated via delegation tokens read from a
 * sequence file configured in the job state.
 *
 * @throws IOException if no token is found for the proxy user or the proxied FS cannot be built
 */
private static FileSystem getWriterFsUsingToken(State state, URI uri)
    throws IOException {
  String proxyUser = state.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME);
  Path tokenFile = new Path(state.getProp(ConfigurationKeys.FS_PROXY_AS_USER_TOKEN_FILE));
  try {
    List<Token<?>> tokens = ProxiedFileSystemUtils.getTokenFromSeqFile(proxyUser, tokenFile);
    if (tokens.isEmpty()) {
      throw new IOException("No token found for user " + proxyUser);
    }
    return ProxiedFileSystemCache.fromTokens()
        .userNameTokens(tokens)
        .userNameToProxyAs(proxyUser)
        .fsURI(uri)
        .conf(HadoopUtils.newConfiguration())
        .build();
  } catch (ExecutionException e) {
    throw new IOException(e);
  }
}
/**
 * Build a proxied {@link FileSystem} authenticated via a super-user keytab, proxying as the
 * configured user.
 *
 * @throws IllegalArgumentException if a required proxy/keytab property is missing
 * @throws IOException if the proxied file system cannot be built
 */
private static FileSystem getWriterFsUsingKeytab(State state, URI uri)
    throws IOException {
  // Validate required properties before opening a FileSystem so a missing property
  // does not leak the freshly-created (never-closed) instance.
  Preconditions.checkArgument(state.contains(ConfigurationKeys.FS_PROXY_AS_USER_NAME),
      "Missing required property " + ConfigurationKeys.FS_PROXY_AS_USER_NAME);
  Preconditions.checkArgument(state.contains(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS),
      "Missing required property " + ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS);
  Preconditions.checkArgument(state.contains(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION),
      "Missing required property " + ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION);
  FileSystem fs = FileSystem.newInstance(uri, new Configuration());
  try {
    String user = state.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME);
    String superUser = state.getProp(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS);
    Path keytabLocation = new Path(state.getProp(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION));
    return ProxiedFileSystemCache.fromKeytab().userNameToProxyAs(user).fsURI(uri)
        .superUserKeytabLocation(keytabLocation).superUserName(superUser).conf(HadoopUtils.newConfiguration())
        .referenceFS(fs).build();
  } catch (ExecutionException e) {
    throw new IOException(e);
  }
}
/**
 * Build the Hadoop {@link Configuration} for writers from the job {@link State},
 * scoping encrypted properties under {@code WRITER_ENCRYPTED_CONFIG_PATH}
 * (see {@code HadoopUtils.getConfFromState} for decryption semantics).
 */
public static Configuration getFsConfiguration(State state) {
  return HadoopUtils.getConfFromState(state, Optional.of(WRITER_ENCRYPTED_CONFIG_PATH));
}
}
| 4,118 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/FileUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
/**
 * General file-related utilities.
 */
public class FileUtils {

  /**
   * Writes {@code text} (plus a trailing newline) to {@code destPath} in UTF-8,
   * overwriting any existing content.
   *
   * @param text the text to persist
   * @param destPath destination file path
   * @throws IOException if the file cannot be written
   */
  public void saveToFile(final String text, final Path destPath)
      throws IOException {
    try (PrintWriter out = new PrintWriter(
        Files.newBufferedWriter(destPath, StandardCharsets.UTF_8))) {
      out.println(text);
      out.flush();
    }
  }

  /***
   * Check if child path is child of parent path.
   * @param parent Expected parent path.
   * @param child Expected child path.
   * @return If child path is child of parent path (a path is considered a child of itself).
   * @throws IOException if the canonical paths cannot be resolved
   */
  public static boolean isSubPath(File parent, File child) throws IOException {
    // Canonical forms so ".." segments and symlinks cannot fool the check.
    return isPrefixOnSeparator(parent.getCanonicalPath(), child.getCanonicalPath(), File.separatorChar);
  }

  /***
   * Check if child path is child of parent path.
   * @param parent Expected parent path.
   * @param child Expected child path.
   * @return If child path is child of parent path (a path is considered a child of itself).
   * @throws IOException
   */
  public static boolean isSubPath(org.apache.hadoop.fs.Path parent, org.apache.hadoop.fs.Path child)
      throws IOException {
    // Hadoop paths always use '/' as the separator regardless of platform.
    return isPrefixOnSeparator(parent.toString(), child.toString(), '/');
  }

  /**
   * True iff {@code childStr} equals {@code parentStr} or lies under it.
   * A plain {@code startsWith} would wrongly report "/a/bc" as a child of "/a/b",
   * so the prefix must end exactly on a path-separator boundary.
   */
  private static boolean isPrefixOnSeparator(String parentStr, String childStr, char separator) {
    if (childStr.equals(parentStr)) {
      return true;
    }
    String prefix =
        parentStr.endsWith(String.valueOf(separator)) ? parentStr : parentStr + separator;
    return childStr.startsWith(prefix);
  }
}
| 4,119 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/ApplicationLauncherUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
/**
 * A utility class for the application launcher.
 */
public class ApplicationLauncherUtils {

  private ApplicationLauncherUtils() {
    // Utility class: prevent instantiation.
  }

  /**
   * Create a new app ID of the form {@code app_<appName>_<currentTimeMillis>}.
   *
   * @param appName application name
   * @return new app ID
   */
  public static String newAppId(String appName) {
    String appIdSuffix = String.format("%s_%d", appName, System.currentTimeMillis());
    return "app_" + appIdSuffix;
  }
}
| 4,120 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/PullFileLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.configuration.ConfigurationConverter;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import com.google.common.base.Charsets;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigException;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigParseOptions;
import com.typesafe.config.ConfigSyntax;
import org.apache.gobblin.configuration.ConfigurationKeys;
import javax.annotation.Nullable;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
 * Used to load pull files from the file system.
 */
@Slf4j
@Getter
public class PullFileLoader {

  public static final String GLOBAL_PROPS_EXTENSION = ".properties";
  public static final PathFilter GLOBAL_PROPS_PATH_FILTER = new ExtensionFilter(GLOBAL_PROPS_EXTENSION);
  public static final String GLOBAL_HOCON_EXTENSION = ".configuration";
  public static final PathFilter GLOBAL_HOCON_PATH_FILTER = new ExtensionFilter(GLOBAL_HOCON_EXTENSION);
  public static final PathFilter GLOBAL_PATH_FILTER =
      new ExtensionFilter(Lists.newArrayList(GLOBAL_PROPS_EXTENSION, GLOBAL_HOCON_EXTENSION));
  public static final Set<String> DEFAULT_JAVA_PROPS_PULL_FILE_EXTENSIONS = Sets.newHashSet("pull", "job");
  public static final Set<String> DEFAULT_HOCON_PULL_FILE_EXTENSIONS = Sets.newHashSet("json", "conf");
  public static final String PROPERTY_DELIMITER_PARSING_ENABLED_KEY = "property.parsing.enablekey";
  public static final boolean DEFAULT_PROPERTY_DELIMITER_PARSING_ENABLED_KEY = false;

  // Root of the job config dir tree; global config files are only honored for pull
  // files located underneath this root.
  private final Path rootDirectory;
  private final FileSystem fs;
  private final ExtensionFilter javaPropsPullFileFilter;
  private final ExtensionFilter hoconPullFileFilter;

  /**
   * A {@link PathFilter} that accepts {@link Path}s based on a set of valid extensions.
   */
  private static class ExtensionFilter implements PathFilter {
    private final Collection<String> extensions;

    public ExtensionFilter(String extension) {
      this(Lists.newArrayList(extension));
    }

    public ExtensionFilter(Collection<String> extensions) {
      this.extensions = Lists.newArrayList();
      for (String ext : extensions) {
        // Normalize so both "pull" and ".pull" are accepted as extension specs.
        this.extensions.add(ext.startsWith(".") ? ext : "." + ext);
      }
    }

    @Override
    public boolean accept(final Path path) {
      Predicate<String> predicate = new Predicate<String>() {
        @Override
        public boolean apply(String input) {
          return path.getName().toLowerCase().endsWith(input);
        }
      };
      return Iterables.any(this.extensions, predicate);
    }
  }

  /**
   * @param rootDirectory root of the job config dir tree
   * @param fs file system the pull files live on
   * @param javaPropsPullFileExtensions extensions treated as Java properties pull files
   * @param hoconPullFileExtensions extensions treated as HOCON pull files; must not overlap with the former
   */
  public PullFileLoader(Path rootDirectory, FileSystem fs, Collection<String> javaPropsPullFileExtensions,
      Collection<String> hoconPullFileExtensions) {
    Set<String> commonExtensions = Sets.intersection(Sets.newHashSet(javaPropsPullFileExtensions), Sets.newHashSet(hoconPullFileExtensions));
    Preconditions.checkArgument(commonExtensions.isEmpty(),
        "Java props and HOCON pull file extensions intersect: " + Arrays.toString(commonExtensions.toArray()));
    this.rootDirectory = rootDirectory;
    this.fs = fs;
    this.javaPropsPullFileFilter = new ExtensionFilter(javaPropsPullFileExtensions);
    this.hoconPullFileFilter = new ExtensionFilter(hoconPullFileExtensions);
  }

  /**
   * Load a single pull file.
   * @param path The {@link Path} to the pull file to load, full path
   * @param sysProps A {@link Config} used as fallback.
   * @param loadGlobalProperties if true, will also load at most one *.properties file per directory from the
   *          {@link #rootDirectory} to the pull file {@link Path}.
   * @param resolve if true, call {@link Config#resolve()} on the config after loading it
   * @return The loaded {@link Config}.
   * @throws IOException
   */
  public Config loadPullFile(Path path, Config sysProps, boolean loadGlobalProperties, boolean resolve)
      throws IOException {
    Config fallback = loadGlobalProperties ? loadAncestorGlobalConfigs(path, sysProps) : sysProps;
    Config loadedConfig;
    if (this.javaPropsPullFileFilter.accept(path)) {
      loadedConfig = loadJavaPropsWithFallback(path, fallback);
    } else if (this.hoconPullFileFilter.accept(path)) {
      loadedConfig = loadHoconConfigAtPath(path).withFallback(fallback);
    } else {
      throw new IOException(String.format("Cannot load pull file %s due to unrecognized extension.", path));
    }
    return resolve ? loadedConfig.resolve() : loadedConfig;
  }

  /** Same as {@link #loadPullFile(Path, Config, boolean, boolean)} with {@code resolve = true}. */
  public Config loadPullFile(Path path, Config sysProps, boolean loadGlobalProperties)
      throws IOException {
    return loadPullFile(path, sysProps, loadGlobalProperties, true);
  }

  /**
   * Find and load all pull files under a base {@link Path} recursively in an order sorted by last modified date.
   * @param path base {@link Path} where pull files should be found recursively.
   * @param sysProps A {@link Config} used as fallback.
   * @param loadGlobalProperties if true, will also load at most one *.properties file per directory from the
   *          {@link #rootDirectory} to the pull file {@link Path} for each pull file.
   * @return The loaded {@link Config}s. Files that fail to parse successfully will be logged,
   *         but will not result in a Config object
   */
  public List<Config> loadPullFilesRecursively(Path path, Config sysProps, boolean loadGlobalProperties) {
    return Lists.transform(this.fetchJobFilesRecursively(path), new Function<Path, Config>() {
      @Nullable
      @Override
      public Config apply(@Nullable Path jobFile) {
        if (jobFile == null) {
          return null;
        }
        try {
          return PullFileLoader.this.loadPullFile(jobFile, sysProps, loadGlobalProperties);
        } catch (IOException ie) {
          log.error("", ie);
          return null;
        }
      }
    }).stream().filter(Objects::nonNull).collect(Collectors.toList());
    // only return valid parsed configs
  }

  /** List all pull-file {@link Path}s under {@code path} recursively, sorted by modification time (oldest first). */
  public List<Path> fetchJobFilesRecursively(Path path) {
    return getSortedPaths(fetchJobFilePathsRecursivelyHelper(path));
  }

  // Sorts the collected (timestamp, path) pairs by modification time and strips the timestamps.
  private List<Path> getSortedPaths(List<PathWithTimeStamp> pathsWithTimeStamps) {
    List<Path> sortedPaths = Lists.newArrayList();
    Collections.sort(pathsWithTimeStamps, Comparator.comparingLong(o -> o.timeStamp));
    for (PathWithTimeStamp pathWithTimeStamp : pathsWithTimeStamps) {
      sortedPaths.add(pathWithTimeStamp.path);
    }
    return sortedPaths;
  }

  // Recursively collects every path accepted by either pull-file filter, together with its
  // modification time. I/O errors are logged and yield an empty result for that subtree.
  private List<PathWithTimeStamp> fetchJobFilePathsRecursivelyHelper(Path path) {
    List<PathWithTimeStamp> paths = Lists.newArrayList();
    try {
      FileStatus[] statuses = this.fs.listStatus(path);
      if (statuses == null) {
        log.error("Path does not exist: " + path);
        return paths;
      }
      for (FileStatus status : statuses) {
        if (status.isDirectory()) {
          paths.addAll(fetchJobFilePathsRecursivelyHelper(status.getPath()));
        } else if (this.javaPropsPullFileFilter.accept(status.getPath())) {
          log.debug("modification time of {} is {}", status.getPath(), status.getModificationTime());
          paths.add(new PathWithTimeStamp(status.getModificationTime(), status.getPath()));
        } else if (this.hoconPullFileFilter.accept(status.getPath())) {
          log.debug("modification time of {} is {}", status.getPath(), status.getModificationTime());
          paths.add(new PathWithTimeStamp(status.getModificationTime(), status.getPath()));
        }
      }
      return paths;
    } catch (IOException ioe) {
      log.error("Could not load properties at path: " + path, ioe);
      return Lists.newArrayList();
    }
  }

  /**
   * Load at most one *.properties files from path and each ancestor of path up to and including {@link #rootDirectory}.
   * Higher directories will serve as fallback for lower directories, and sysProps will serve as fallback for all of them.
   * @throws IOException
   */
  private Config loadAncestorGlobalConfigs(Path path, Config sysProps)
      throws IOException {
    Config config = sysProps;
    if (!PathUtils.isAncestor(this.rootDirectory, path)) {
      log.warn(String.format("Loaded path %s is not a descendant of root path %s. Cannot load global properties.", path,
          this.rootDirectory));
    } else {
      List<Path> ancestorPaths = Lists.newArrayList();
      while (PathUtils.isAncestor(this.rootDirectory, path)) {
        ancestorPaths.add(path);
        path = path.getParent();
      }
      // Apply from the root downward so deeper directories override shallower ones.
      List<Path> reversedAncestors = Lists.reverse(ancestorPaths);
      for (Path ancestor : reversedAncestors) {
        config = findAndLoadGlobalConfigInDirectory(ancestor, config);
      }
    }
    return config;
  }

  /**
   * Find at most one *.properties file in the input {@link Path} and load it using fallback as fallback.
   * @return The {@link Config} in path with sysProps as fallback.
   * @throws IOException
   */
  private Config findAndLoadGlobalConfigInDirectory(Path path, Config fallback)
      throws IOException {
    FileStatus[] files = this.fs.listStatus(path, GLOBAL_PATH_FILTER);
    if (files == null) {
      // NOTE(review): this drops the accumulated fallback rather than returning it —
      // confirm whether returning an empty config here is intentional.
      log.warn("Could not list files at path " + path);
      return ConfigFactory.empty();
    }
    if (files.length > 1) {
      throw new IOException("Found more than one global properties file at path " + path);
    }
    if (files.length == 0) {
      return fallback;
    }
    if (GLOBAL_HOCON_PATH_FILTER.accept(files[0].getPath())) {
      return loadHoconConfigWithFallback(files[0].getPath(), fallback);
    } else if (GLOBAL_PROPS_PATH_FILTER.accept(files[0].getPath())) {
      return loadJavaPropsWithFallback(files[0].getPath(), fallback);
    } else {
      throw new IllegalStateException("Unsupported global configuration file: " + files[0].getPath());
    }
  }

  /**
   * Load a {@link Properties} compatible path using fallback as fallback.
   * @return The {@link Config} in path with fallback as fallback.
   * @throws IOException
   */
  private Config loadJavaPropsWithFallback(Path propertiesPath, Config fallback)
      throws IOException {
    PropertiesConfiguration propertiesConfiguration = new PropertiesConfiguration();
    try (InputStreamReader inputStreamReader = new InputStreamReader(this.fs.open(propertiesPath), Charsets.UTF_8)) {
      propertiesConfiguration.setDelimiterParsingDisabled(ConfigUtils
          .getBoolean(fallback, PROPERTY_DELIMITER_PARSING_ENABLED_KEY, DEFAULT_PROPERTY_DELIMITER_PARSING_ENABLED_KEY));
      propertiesConfiguration.load(inputStreamReader);
      Config configFromProps =
          ConfigUtils.propertiesToConfig(ConfigurationConverter.getProperties(propertiesConfiguration));
      // Record where the job config came from so downstream consumers can trace it.
      return ConfigFactory.parseMap(ImmutableMap.of(ConfigurationKeys.JOB_CONFIG_FILE_PATH_KEY,
          PathUtils.getPathWithoutSchemeAndAuthority(propertiesPath).toString())).withFallback(configFromProps)
          .withFallback(fallback);
    } catch (ConfigurationException ce) {
      log.error("Failed to load Java properties from file at {} due to {}", propertiesPath, ce.getLocalizedMessage());
      throw new IOException(ce);
    }
  }

  // Parses a HOCON pull file (without applying any fallback) and records its origin path.
  private Config loadHoconConfigAtPath(Path path)
      throws IOException {
    try (InputStream is = fs.open(path); Reader reader = new InputStreamReader(is, Charsets.UTF_8)) {
      return ConfigFactory.parseMap(ImmutableMap
          .of(ConfigurationKeys.JOB_CONFIG_FILE_PATH_KEY, PathUtils.getPathWithoutSchemeAndAuthority(path).toString()))
          .withFallback(ConfigFactory.parseReader(reader, ConfigParseOptions.defaults().setSyntax(ConfigSyntax.CONF)));
    } catch (ConfigException configException) {
      throw wrapConfigException(path, configException);
    }
  }

  /**
   * Wrap a {@link ConfigException} (which extends {@link RuntimeException} with an IOException,
   * with a helpful message if possible
   * @param path
   * @param configException
   * @return an {@link IOException} wrapping the passed in ConfigException.
   */
  private IOException wrapConfigException(Path path, ConfigException configException) {
    if (configException.origin() != null) {
      return new IOException("Failed to parse config file " + path.toString()
          + " at lineNo:" + configException.origin().lineNumber(), configException);
    } else {
      return new IOException("Failed to parse config file " + path.toString(), configException);
    }
  }

  // Parses a HOCON config file and applies the given fallback; origin path is recorded.
  private Config loadHoconConfigWithFallback(Path path, Config fallback)
      throws IOException {
    try (InputStream is = fs.open(path); Reader reader = new InputStreamReader(is, Charsets.UTF_8)) {
      return ConfigFactory.parseMap(ImmutableMap
          .of(ConfigurationKeys.JOB_CONFIG_FILE_PATH_KEY, PathUtils.getPathWithoutSchemeAndAuthority(path).toString()))
          .withFallback(ConfigFactory.parseReader(reader, ConfigParseOptions.defaults().setSyntax(ConfigSyntax.CONF))).withFallback(fallback);
    } catch (ConfigException configException) {
      throw wrapConfigException(path, configException);
    }
  }

  // Pairs a pull-file path with its last-modification time for sorting.
  private static class PathWithTimeStamp {
    long timeStamp;
    Path path;

    public PathWithTimeStamp(long timeStamp, Path path) {
      this.timeStamp = timeStamp;
      this.path = path;
    }
  }
  // NOTE: the previously-declared private ConfigWithTimeStamp class was unused (dead code) and has been removed.
}
| 4,121 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/Sleeper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.LinkedList;
import java.util.Queue;
import lombok.Getter;
/**
 * A class surrounding {@link Thread#sleep(long)} that allows mocking sleeps during testing.
 */
public class Sleeper {

  /**
   * A mock version of {@link Sleeper} that just register calls to sleep but returns immediately.
   */
  @Getter
  public static class MockSleeper extends Sleeper {
    // FIFO record of every requested sleep duration, in milliseconds.
    private Queue<Long> requestedSleeps = new LinkedList<>();

    /** Records the requested duration without actually sleeping. */
    @Override
    public void sleep(long millis) {
      this.requestedSleeps.offer(millis);
    }

    /** Clears all recorded sleep requests. */
    public void reset() {
      this.requestedSleeps.clear();
    }
  }

  /**
   * Equivalent to {@link Thread#sleep(long)}.
   */
  public void sleep(long millis) throws InterruptedException {
    Thread.sleep(millis);
  }
}
| 4,122 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/RateControlledFileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.base.Optional;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import org.apache.gobblin.util.filesystem.ThrottledFileSystem;
import org.apache.gobblin.util.limiter.Limiter;
import org.apache.gobblin.util.limiter.RateBasedLimiter;
/**
 * Subclass of {@link org.apache.hadoop.fs.FileSystem} that wraps with a {@link org.apache.gobblin.util.limiter.Limiter}
 * to control HDFS call rate.
 *
 * <p>
 * This classes uses Guava's {@link Cache} for storing {@link org.apache.hadoop.fs.FileSystem} URI to
 * {@link org.apache.gobblin.util.limiter.Limiter} mapping.
 * </p>
 *
 * <p>
 * For methods that require HDFS calls, this class will first acquire a permit using {@link org.apache.gobblin.util.limiter.Limiter},
 * to make sure HDFS call rate is allowed by the uppper limit.
 * </p>
 *
 * @deprecated use {@link org.apache.gobblin.util.filesystem.ThrottledFileSystem}
 */
@Deprecated
public class RateControlledFileSystem extends ThrottledFileSystem {

  private static final int DEFAULT_MAX_CACHE_SIZE = 100;
  // Shared across instances: one limiter per file-system URI.
  private static final Cache<String, RateBasedLimiter> FS_URI_TO_RATE_LIMITER_CACHE =
      CacheBuilder.newBuilder().maximumSize(DEFAULT_MAX_CACHE_SIZE).build();

  private final long limitPerSecond;
  private final Callable<RateBasedLimiter> callableLimiter;

  /**
   * Determines whether the file system is rate controlled, and if so, returns the allowed rate in operations per
   * second.
   * @param fs {@link FileSystem} to check for rate control.
   * @return {@link Optional#absent} if file system is not rate controlled, otherwise, the rate in operations per second.
   */
  public static Optional<Long> getRateIfRateControlled(FileSystem fs) {
    if (fs instanceof Decorator) {
      List<Object> lineage = DecoratorUtils.getDecoratorLineage(fs);
      for (Object obj : lineage) {
        if (obj instanceof RateControlledFileSystem) {
          return Optional.of(((RateControlledFileSystem) obj).limitPerSecond);
        }
      }
      return Optional.absent();
    }
    return Optional.absent();
  }

  /**
   * @param fs the underlying file system to throttle
   * @param limitPerSecond maximum number of operations per second
   */
  public RateControlledFileSystem(FileSystem fs, final long limitPerSecond) {
    super(fs, null, null);
    this.limitPerSecond = limitPerSecond;
    // Lambda replaces the previous anonymous Callable; the file already uses lambdas elsewhere.
    this.callableLimiter = () -> new RateBasedLimiter(limitPerSecond);
  }

  /** Starts the limiter for this file system's URI, creating it on first use. */
  public void startRateControl() throws ExecutionException {
    getRateLimiter().start();
  }

  /**
   * Returns the shared limiter for this file system's URI. If the cached limiter allows a lower
   * rate than this instance requests, it is replaced with a fresh one at the requested rate.
   */
  protected Limiter getRateLimiter() {
    try {
      String key = getUri().toString();
      RateBasedLimiter limiter = FS_URI_TO_RATE_LIMITER_CACHE.get(key, this.callableLimiter);
      if (limiter.getRateLimitPerSecond() < this.limitPerSecond) {
        try {
          limiter = this.callableLimiter.call();
          FS_URI_TO_RATE_LIMITER_CACHE.put(key, limiter);
        } catch (Exception exc) {
          throw new ExecutionException(exc);
        }
      }
      return limiter;
    } catch (ExecutionException ee) {
      throw new RuntimeException(ee);
    }
  }
}
| 4,123 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/TrustManagerSecureSocketFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.List;

import org.slf4j.LoggerFactory;

import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;
public class TrustManagerSecureSocketFactory extends SSLSocketFactory {
private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(TrustManagerSecureSocketFactory.class);
private SSLSocketFactory sslSocketFactory;
private String[] ciphers;
private String protocol;
public TrustManagerSecureSocketFactory() {
this(null, null);
}
public TrustManagerSecureSocketFactory(String[] ciphers, String protocol) {
try {
SSLContext ctx = SSLContext.getInstance("TLS");
this.sslSocketFactory = ctx.getSocketFactory();
this.ciphers = (null == ciphers) ? sslSocketFactory.getSupportedCipherSuites() : ciphers;
this.protocol = (null == protocol) ? ctx.getProtocol() : protocol;
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException("Issue in initializing TrustManagerSecureSocketFactory.", e);
}
}
public String[] getDefaultCipherSuites() {
return sslSocketFactory.getDefaultCipherSuites();
}
public String[] getSupportedCipherSuites() {
return sslSocketFactory.getSupportedCipherSuites();
}
public Socket createSocket(Socket s, String host, int port, boolean autoClose)
throws IOException {
return enableCipherSuites(sslSocketFactory.createSocket(s, host, port, autoClose),
new Object[]{host, port});
}
public Socket createSocket(String host, int port) throws IOException {
return enableCipherSuites(sslSocketFactory.createSocket(host, port),
new Object[]{host, port});
}
public Socket createSocket(String host, int port, InetAddress localHost, int localPort)
throws IOException {
return enableCipherSuites(sslSocketFactory.createSocket(host, port, localHost, localPort),
new Object[]{host, port});
}
public Socket createSocket(InetAddress host, int port) throws IOException {
return enableCipherSuites(sslSocketFactory.createSocket(host, port),
new Object[]{host, port});
}
public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort)
throws IOException {
return enableCipherSuites(sslSocketFactory.createSocket(address, port, localAddress, localPort),
new Object[]{address, port});
}
private Socket enableCipherSuites(Socket s, Object[] logParams) {
SSLSocket socket = (SSLSocket)s;
if (socket == null) {
LOG.warn("PROBLEM_CREATING_OUTBOUND_REQUEST_SOCKET", logParams);
return null;
}
if (protocol != null) {
String[] p = findProtocols(protocol, socket.getSupportedProtocols());
if (p != null) {
socket.setEnabledProtocols(p);
}
}
if (ciphers != null) {
socket.setEnabledCipherSuites(ciphers);
}
return socket;
}
private String[] findProtocols(String p, String[] options) {
List<String> list = new ArrayList<>();
for (String s : options) {
if (s.equals(p)) {
return new String[] {p};
} else if (s.startsWith(p)) {
list.add(s);
}
}
if (list.isEmpty()) {
return null;
}
return list.toArray(new String[0]);
}
} | 4,124 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/ExecutorsUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.ListeningScheduledExecutorService;
import org.apache.gobblin.util.executors.MDCPropagatingCallable;
import org.apache.gobblin.util.executors.MDCPropagatingRunnable;
import org.apache.gobblin.util.executors.MDCPropagatingScheduledExecutorService;
import org.slf4j.Logger;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.gobblin.util.executors.MDCPropagatingExecutorService;
/**
* A utility class to use with {@link java.util.concurrent.Executors} in cases such as when creating new thread pools.
*
* @author Yinan Li
*/
public class ExecutorsUtils {

  private static final ThreadFactory DEFAULT_THREAD_FACTORY = newThreadFactory(Optional.<Logger>absent());

  public static final long EXECUTOR_SERVICE_SHUTDOWN_TIMEOUT = 60;
  public static final TimeUnit EXECUTOR_SERVICE_SHUTDOWN_TIMEOUT_TIMEUNIT = TimeUnit.SECONDS;

  /**
   * Get a default {@link java.util.concurrent.ThreadFactory}.
   *
   * @return the default {@link java.util.concurrent.ThreadFactory}
   */
  public static ThreadFactory defaultThreadFactory() {
    return DEFAULT_THREAD_FACTORY;
  }

  /**
   * Get a new {@link java.util.concurrent.ThreadFactory} that uses a {@link LoggingUncaughtExceptionHandler}
   * to handle uncaught exceptions.
   *
   * @param logger an {@link com.google.common.base.Optional} wrapping the {@link org.slf4j.Logger} that the
   *               {@link LoggingUncaughtExceptionHandler} uses to log uncaught exceptions thrown in threads
   * @return a new {@link java.util.concurrent.ThreadFactory}
   */
  public static ThreadFactory newThreadFactory(Optional<Logger> logger) {
    return newThreadFactory(logger, Optional.<String>absent());
  }

  /**
   * Get a new {@link java.util.concurrent.ThreadFactory} that uses a {@link LoggingUncaughtExceptionHandler}
   * to handle uncaught exceptions and the given thread name format.
   *
   * @param logger an {@link com.google.common.base.Optional} wrapping the {@link org.slf4j.Logger} that the
   *               {@link LoggingUncaughtExceptionHandler} uses to log uncaught exceptions thrown in threads
   * @param nameFormat an {@link com.google.common.base.Optional} wrapping a thread naming format
   * @return a new {@link java.util.concurrent.ThreadFactory}
   */
  public static ThreadFactory newThreadFactory(Optional<Logger> logger, Optional<String> nameFormat) {
    return newThreadFactory(new ThreadFactoryBuilder(), logger, nameFormat);
  }

  /**
   * Get a new {@link ThreadFactory} that uses a {@link LoggingUncaughtExceptionHandler}
   * to handle uncaught exceptions, uses the given thread name format, and produces daemon threads.
   *
   * @param logger an {@link Optional} wrapping the {@link Logger} that the
   *               {@link LoggingUncaughtExceptionHandler} uses to log uncaught exceptions thrown in threads
   * @param nameFormat an {@link Optional} wrapping a thread naming format
   * @return a new {@link ThreadFactory}
   */
  public static ThreadFactory newDaemonThreadFactory(Optional<Logger> logger, Optional<String> nameFormat) {
    return newThreadFactory(new ThreadFactoryBuilder().setDaemon(true), logger, nameFormat);
  }

  /**
   * Get a new {@link ThreadFactory} that uses a {@link LoggingUncaughtExceptionHandler}
   * to handle uncaught exceptions.
   * Tasks running within such threads will have the same access control and class loader settings as the
   * thread that invokes this method.
   *
   * @param logger an {@link Optional} wrapping the {@link Logger} that the
   *               {@link LoggingUncaughtExceptionHandler} uses to log uncaught exceptions thrown in threads
   * @return a new {@link ThreadFactory}
   */
  public static ThreadFactory newPrivilegedThreadFactory(Optional<Logger> logger) {
    return newThreadFactory(new ThreadFactoryBuilder().setThreadFactory(Executors.privilegedThreadFactory()), logger,
        Optional.<String>absent());
  }

  // Common builder plumbing: applies the optional name format and always installs the
  // logging uncaught-exception handler.
  private static ThreadFactory newThreadFactory(ThreadFactoryBuilder builder, Optional<Logger> logger,
      Optional<String> nameFormat) {
    if (nameFormat.isPresent()) {
      builder.setNameFormat(nameFormat.get());
    }
    return builder.setUncaughtExceptionHandler(new LoggingUncaughtExceptionHandler(logger)).build();
  }

  /**
   * Creates an {@link ListeningExecutorService} whose {@code submit}
   * and {@code execute} methods propagate the MDC information across
   * thread boundaries.
   * @param executorService the {@link ExecutorService} to wrap
   * @return a new instance of {@link ListeningExecutorService}
   */
  public static ListeningExecutorService loggingDecorator(ExecutorService executorService) {
    return new MDCPropagatingExecutorService(executorService);
  }

  /**
   * Creates an {@link ListeningScheduledExecutorService} whose
   * {@code submit}, {@code execute}, {@code schedule},
   * {@code scheduleAtFixedRate}, and {@code scheduleWithFixedDelay}
   * methods propagate the MDC information across thread boundaries.
   * @param scheduledExecutorService the {@link ScheduledExecutorService} to wrap
   * @return a new instance of {@link ListeningScheduledExecutorService}
   */
  public static ListeningScheduledExecutorService loggingDecorator(ScheduledExecutorService scheduledExecutorService) {
    return new MDCPropagatingScheduledExecutorService(scheduledExecutorService);
  }

  /**
   * Creates an {@link Runnable} which propagates the MDC
   * information across thread boundaries.
   * @param runnable the {@link Runnable} to wrap
   * @return a new instance of {@link Runnable}
   */
  public static Runnable loggingDecorator(Runnable runnable) {
    // Idempotent: do not double-wrap an already-propagating runnable.
    if (runnable instanceof MDCPropagatingRunnable) {
      return runnable;
    }
    return new MDCPropagatingRunnable(runnable);
  }

  /**
   * Creates an {@link Callable<T>} which propagates the MDC
   * information across thread boundaries.
   * @param callable the {@link Callable<T>} to wrap
   * @return a new instance of {@link Callable<T>}
   */
  public static <T> Callable<T> loggingDecorator(Callable<T> callable) {
    // Idempotent: do not double-wrap an already-propagating callable.
    if (callable instanceof MDCPropagatingCallable) {
      return callable;
    }
    return new MDCPropagatingCallable<T>(callable);
  }

  /**
   * Shutdown an {@link ExecutorService} gradually, first disabling new task submissions and later cancelling
   * existing tasks.
   *
   * <p>
   *   The implementation is based on the implementation of Guava's MoreExecutors.shutdownAndAwaitTermination,
   *   which is available since version 17.0. We cannot use Guava version 17.0 or after directly, however, as
   *   it cannot be used with Hadoop 2.6.0 or after due to the issue reported in HADOOP-10961.
   * </p>
   *
   * @param executorService the {@link ExecutorService} to shutdown
   * @param logger an {@link Optional} wrapping the {@link Logger} that is used to log metadata of the executorService
   *               if it cannot shutdown all its threads
   * @param timeout the maximum time to wait for the {@code ExecutorService} to terminate
   * @param unit the time unit of the timeout argument
   */
  public static void shutdownExecutorService(ExecutorService executorService, Optional<Logger> logger, long timeout,
      TimeUnit unit) {
    Preconditions.checkNotNull(unit);
    // Disable new tasks from being submitted
    executorService.shutdown();
    if (logger.isPresent()) {
      logger.get().info("Attempting to shutdown ExecutorService: " + executorService);
    }

    try {
      long halfTimeoutNanos = TimeUnit.NANOSECONDS.convert(timeout, unit) / 2;
      // Wait for half the duration of the timeout for existing tasks to terminate
      if (!executorService.awaitTermination(halfTimeoutNanos, TimeUnit.NANOSECONDS)) {
        // Cancel currently executing tasks
        executorService.shutdownNow();
        if (logger.isPresent()) {
          logger.get().info("Shutdown un-successful, attempting shutdownNow of ExecutorService: " + executorService);
        }
        // Wait the other half of the timeout for tasks to respond to being cancelled
        if (!executorService.awaitTermination(halfTimeoutNanos, TimeUnit.NANOSECONDS) && logger.isPresent()) {
          logger.get().error("Could not shutdown all threads in ExecutorService: " + executorService);
        }
      } else if (logger.isPresent()) {
        logger.get().info("Successfully shutdown ExecutorService: " + executorService);
      }
    } catch (InterruptedException ie) {
      // Preserve interrupt status
      Thread.currentThread().interrupt();
      // (Re-)Cancel if current thread also interrupted
      executorService.shutdownNow();
      if (logger.isPresent()) {
        logger.get().info("Attempting to shutdownNow ExecutorService: " + executorService);
      }
    }
  }

  /**
   * Shutdown an {@link ExecutorService} gradually, first disabling new task submissions and
   * later cancelling existing tasks.
   *
   * <p>
   *   This method calls {@link #shutdownExecutorService(ExecutorService, Optional, long, TimeUnit)}
   *   with default timeout time {@link #EXECUTOR_SERVICE_SHUTDOWN_TIMEOUT} and time unit
   *   {@link #EXECUTOR_SERVICE_SHUTDOWN_TIMEOUT_TIMEUNIT}.
   * </p>
   *
   * @param executorService the {@link ExecutorService} to shutdown
   * @param logger an {@link Optional} wrapping a {@link Logger} to be used during shutdown
   */
  public static void shutdownExecutorService(ExecutorService executorService, Optional<Logger> logger) {
    shutdownExecutorService(executorService, logger, EXECUTOR_SERVICE_SHUTDOWN_TIMEOUT,
        EXECUTOR_SERVICE_SHUTDOWN_TIMEOUT_TIMEUNIT);
  }

  /**
   * A utility method to parallelize loops. Applies the {@link Function} to every element in the {@link List} in
   * parallel by spawning threads. A list containing the result obtained by applying the function is returned. The
   * method is a blocking call and will wait for all the elements in the list to be processed or timeoutInSecs which
   * ever is earlier.
   * <p>
   * <b>NOTE: The method is an all or none implementation. Meaning, if any of the thread fails, the method will throw an
   * {@link ExecutionException} even if other threads completed successfully</b>
   * </p>
   *
   * <ul>
   * <li>Uses a Fixed thread pool of size threadCount.
   * <li>Uses {@link #shutdownExecutorService(ExecutorService, Optional, long, TimeUnit)} to shutdown the executor
   * service
   * <li>All threads are daemon threads
   * </ul>
   *
   * @param list input list on which the function is applied in parallel
   * @param function to be applied on every element of the list
   * @param threadCount to be used to process the list
   * @param timeoutInSecs to wait for all the threads to complete
   * @param logger an {@link Optional} wrapping a {@link Logger} to be used during shutdown
   *
   * @return a list containing the result obtained by applying the function on each element of the input list in the
   *         same order
   *
   * @throws IllegalArgumentException if input list or function is null
   * @throws ExecutionException <ul>
   *           <li>if any computation threw an exception
   *           <li>if any computation was cancelled
   *           <li>if any thread was interrupted while waiting
   *           <ul>
   */
  public static <F, T> List<T> parallelize(final List<F> list, final Function<F, T> function, int threadCount,
      int timeoutInSecs, Optional<Logger> logger) throws ExecutionException {
    Preconditions.checkArgument(list != null, "Input list can not be null");
    Preconditions.checkArgument(function != null, "Function can not be null");

    final List<T> results = Lists.newArrayListWithCapacity(list.size());
    List<Future<T>> futures = Lists.newArrayListWithCapacity(list.size());

    ExecutorService executorService =
        MoreExecutors.getExitingExecutorService(
            (ThreadPoolExecutor) Executors.newFixedThreadPool(threadCount, ExecutorsUtils.newThreadFactory(logger)), 2,
            TimeUnit.MINUTES);

    for (final F l : list) {
      futures.add(executorService.submit(new Callable<T>() {
        @Override
        public T call() throws Exception {
          return function.apply(l);
        }
      }));
    }

    ExecutorsUtils.shutdownExecutorService(executorService, logger, timeoutInSecs, TimeUnit.SECONDS);

    for (Future<T> future : futures) {
      try {
        results.add(future.get());
      } catch (InterruptedException e) {
        // Fix: restore the interrupt status before translating the exception, consistent
        // with shutdownExecutorService above, so callers up the stack can still observe
        // the interruption.
        Thread.currentThread().interrupt();
        throw new ExecutionException("Thread interrupted", e);
      }
    }

    return results;
  }
}
| 4,125 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/AutoCloseableLock.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.concurrent.locks.Lock;
/**
* An auto-closeable {@link Lock} that can be used with try-with-resources.
* The lock is locked in the constructor and unlocked in {@link #close()}.
*
* <p>Usage:
* <pre> {@code
* try (AutoCloseableLock lock = new AutoCloseableLock(innerLock)) {
* ... do stuff
* }
* }
* </pre>
* </p>
*/
public class AutoCloseableLock implements AutoCloseable {
private final Lock lock;
public AutoCloseableLock(Lock lock) {
this.lock = lock;
this.lock.lock();
}
@Override
public void close() {
this.lock.unlock();
}
}
| 4,126 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/EmptyIterable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.Iterator;
import com.google.common.collect.ImmutableSet;
/**
* A type of {@link java.lang.Iterable}s corresponding to empty {@link java.util.Collection}s.
*
* @author Yinan Li
*
* @param <T> record type
*/
public class EmptyIterable<T> implements Iterable<T> {

  /** Always yields an iterator over zero elements. */
  @Override
  public Iterator<T> iterator() {
    ImmutableSet<T> nothing = ImmutableSet.of();
    return nothing.iterator();
  }
}
| 4,127 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/TemplateUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.Properties;
import org.apache.gobblin.configuration.ConfigurationKeys;
public class TemplateUtils {

  /**
   * Creates a complete property set by merging a job template's properties with the
   * user-customized properties, after stripping the bookkeeping keys from each side:
   * the required-attributes list from the template and the template path from the
   * user-customized properties.
   *
   * @param template the template properties
   * @param userCustomized the user-provided properties that override the template
   * @return the combined properties, with user-customized values taking precedence
   *         per {@link PropertiesUtils#combineProperties}
   */
  public static Properties mergeTemplateWithUserCustomizedFile(Properties template, Properties userCustomized) {
    Properties cleanedTemplate = new Properties();
    cleanedTemplate.putAll(template);
    // Hashtable.remove is a no-op for absent keys, so no containsKey guard is needed.
    cleanedTemplate.remove(ConfigurationKeys.REQUIRED_ATRRIBUTES_LIST);

    Properties cleanedUserCustomized = new Properties();
    cleanedUserCustomized.putAll(userCustomized);
    cleanedUserCustomized.remove(ConfigurationKeys.JOB_TEMPLATE_PATH);

    return PropertiesUtils.combineProperties(cleanedTemplate, cleanedUserCustomized);
  }
}
| 4,128 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/ExponentialBackoff.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import lombok.Builder;
/**
* Utility class for exponential backoff.
*
* Usage:
* ExponentialBackoff exponentialBackoff = ExponentialBackoff.builder().build();
* exponentialBackoff.awaitNextRetry();
*/
public class ExponentialBackoff {
  // Multiplier applied to the delay after each retry.
  private final double alpha;
  // Maximum cumulative time (millis) spent sleeping across all retries.
  private final long maxWait;
  // Maximum number of retries before NoMoreRetriesException is thrown.
  private final int maxRetries;
  // Cap (millis) on any single delay.
  private final long maxDelay;

  // Number of retries attempted so far.
  private int retryNumber = 0;
  // Cumulative time (millis) slept so far.
  private long totalWait = 0;
  // Delay (millis) that the next call to awaitNextRetry will sleep.
  private long nextDelay;

  /**
   * @param alpha the multiplier for each backoff iteration. (def: 2)
   * @param maxRetries maximum number of retries allowed. (def: infinite)
   * @param maxWait maximum wait allowed in millis. (def: infinite)
   * @param maxDelay maximum delay allowed in millis. (def: infinite)
   * @param initialDelay initial delay in millis. (def: 20)
   */
  @Builder
  private ExponentialBackoff(Double alpha, Integer maxRetries, Long maxWait, Long maxDelay, Long initialDelay) {
    // Boxed parameters are nullable so the lombok builder can express "use the default".
    this.alpha = alpha == null ? 2 : alpha;
    this.maxRetries = maxRetries == null ? Integer.MAX_VALUE : maxRetries;
    this.maxWait = maxWait == null ? Long.MAX_VALUE : maxWait;
    this.maxDelay = maxDelay == null ? Long.MAX_VALUE : maxDelay;
    this.nextDelay = initialDelay == null ? 20 : initialDelay;
  }

  /**
   * Block until next retry can be executed.
   *
   * This method throws an exception if the max number of retries has been reached. For an alternative see
   * {@link #awaitNextRetryIfAvailable()}.
   *
   * @throws NoMoreRetriesException If maximum number of retries has been reached.
   */
  public void awaitNextRetry() throws InterruptedException, NoMoreRetriesException {
    // Budget checks happen before the sleep, so a call that exceeds the budget
    // fails fast without waiting.
    this.retryNumber++;
    if (this.retryNumber > this.maxRetries) {
      throw new NoMoreRetriesException("Reached maximum number of retries: " + this.maxRetries);
    } else if (this.totalWait > this.maxWait) {
      throw new NoMoreRetriesException("Reached maximum time to wait: " + this.maxWait);
    }
    Thread.sleep(this.nextDelay);
    this.totalWait += this.nextDelay;
    // Grow geometrically (+1 guarantees progress even when the product rounds to 0),
    // capped at maxDelay.
    this.nextDelay = Math.min((long) (this.alpha * this.nextDelay) + 1, this.maxDelay);
  }

  /**
   * Block until next retry can be executed unless max retries has been reached.
   *
   * This method uses the return value to specify if a retry is allowed, which the caller should respect. For an alternative see
   * {@link #awaitNextRetry()}.
   *
   * @return true if the next execution can be run, false if the max number of retries has been reached.
   */
  public boolean awaitNextRetryIfAvailable() throws InterruptedException {
    try {
      awaitNextRetry();
      return true;
    } catch (NoMoreRetriesException exc) {
      // Exhausted retries are reported via the return value instead of an exception.
      return false;
    }
  }

  /**
   * Thrown if no more retries are available for {@link ExponentialBackoff}.
   */
  public static class NoMoreRetriesException extends Exception {
    public NoMoreRetriesException(String message) {
      super(message);
    }
  }

  /**
   * Evaluate a condition until true with exponential backoff.
   * @param callable Condition.
   * @return true if the condition returned true.
   * @throws ExecutionException if the condition throws an exception.
   */
  @Builder(builderMethodName = "awaitCondition", buildMethodName = "await")
  private static boolean evaluateConditionUntilTrue(Callable<Boolean> callable, Double alpha, Integer maxRetries,
      Long maxWait, Long maxDelay, Long initialDelay) throws ExecutionException, InterruptedException {
    ExponentialBackoff exponentialBackoff = new ExponentialBackoff(alpha, maxRetries, maxWait, maxDelay, initialDelay);
    while (true) {
      try {
        if (callable.call()) {
          return true;
        }
      } catch (Throwable t) {
        // Any failure of the condition itself aborts the wait loop.
        throw new ExecutionException(t);
      }
      if (!exponentialBackoff.awaitNextRetryIfAvailable()) {
        // Retry budget exhausted without the condition turning true.
        return false;
      }
    }
  }
}
| 4,129 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/ImmutableProperties.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import lombok.experimental.Delegate;
/**
* Immutable wrapper for {@link Properties}.
*/
public class ImmutableProperties extends Properties {
  // All Map operations are served by this unmodifiable view via lombok's @Delegate;
  // the inherited Hashtable storage of the Properties superclass stays empty, and
  // mutators on the delegated view throw UnsupportedOperationException.
  @Delegate
  private final Map<Object, Object> props;

  public ImmutableProperties(Properties props) {
    this.props = Collections.unmodifiableMap(props);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    // super.equals compares the (empty) inherited Hashtable contents; the real
    // comparison is on the wrapped map below.
    if (!super.equals(o)) {
      return false;
    }

    ImmutableProperties that = (ImmutableProperties) o;

    return props != null ? props.equals(that.props) : that.props == null;
  }

  @Override
  public int hashCode() {
    int result = super.hashCode();
    result = 31 * result + (props != null ? props.hashCode() : 0);
    return result;
  }

  /**
   * Need to override this method otherwise it will call super.get(key).
   */
  @Override
  public String getProperty(String key) {
    // Mirrors Properties.getProperty but reads from the delegated map; falls back
    // to the inherited defaults table when the key is absent or non-String.
    Object oval = this.get(key);
    String sval = (oval instanceof String) ? (String) oval : null;
    return ((sval == null) && (defaults != null)) ? defaults.getProperty(key) : sval;
  }
}
| 4,130 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/Either.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import com.google.common.base.Optional;
/**
* Represents a value of one of two possible types. Similar to Scala's Either.
* Each instance of {@link Either} is of type {@link Left} or {@link Right} depending of the class of its value.
*
* <p>
* This class is useful when a class has two different states, each of which depends on a variable of a different
* type (for example a String name or Numeric id). While the normal pattern is to have two variables, only one of
* which is set, or two Optionals, only one of which is present, that pattern does not protect from having none
* or both variables set by programming errors. Instead, a single instance of Either can be used, which guarantees
* exactly one type will be set.
* </p>
*
* @param <S> first possible type for the value.
* @param <T> second possible type for the value.
*/
public abstract class Either<S, T> {
  protected final Optional<S> s;
  protected final Optional<T> t;

  // Exactly one of the two optionals is present, enforced by the Left/Right
  // constructors; kept private so no other subclasses can exist.
  private Either(Optional<S> s, Optional<T> t) {
    this.s = s;
    this.t = t;
  }

  /**
   * An instance of {@link Either} holding a value of the first type {@link S}.
   */
  public static class Left<S, T> extends Either<S, T> {
    public Left(S s) {
      super(Optional.fromNullable(s), Optional.<T>absent());
    }

    /**
     * @return value of type {@link S}.
     */
    public S getLeft() {
      return this.s.orNull();
    }
  }

  /**
   * An instance of {@link Either} holding a value of the second type {@link T}.
   */
  public static class Right<S, T> extends Either<S, T> {
    public Right(T t) {
      super(Optional.<S>absent(), Optional.fromNullable(t));
    }

    /**
     * @return value of type {@link T}.
     */
    public T getRight() {
      return this.t.orNull();
    }
  }

  /**
   * Create an instance of {@link Either} with value of type {@link S}.
   * @param left value of this instance.
   * @return an instance of {@link Left}.
   */
  public static <S, T> Either<S, T> left(S left) {
    return new Left<>(left);
  }

  /**
   * Create an instance of {@link Either} with value of type {@link T}.
   * @param right value of this instance.
   * @return an instance of {@link Right}.
   */
  public static <S, T> Either<S, T> right(T right) {
    return new Right<>(right);
  }

  /**
   * Get the value of this {@link Either} instance, returned as class {@link Object}. To get a strongly typed return,
   * check the specific type of {@link Either} and call {@link Left#getLeft} or {@link Right#getRight}.
   * @return value as an instance of {@link Object}.
   */
  public Object get() {
    return (this instanceof Left) ? this.s.orNull() : this.t.orNull();
  }
}
| 4,131 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/DownloadUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.net.URL;
import java.nio.file.Files;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Map;
import java.util.logging.Logger;
import com.google.common.collect.Maps;
import com.google.common.io.Resources;
import groovy.grape.Grape;
import groovy.lang.GroovyClassLoader;
import lombok.NonNull;
/**
* Utility class for downloads using grape
*/
public class DownloadUtils {

  private DownloadUtils() {
    // Utility class's constructor do nothing
  }

  public static final String IVY_SETTINGS_FILE_NAME = "ivysettings.xml";
  // This will be useful when there's already `ivysettings.xml` existed in the classpath.
  private static final String CLIENT_IVY_SETTINGS_FILE_NAME = "client_ivysettings.xml";
  private static final Logger logger = Logger.getLogger(DownloadUtils.class.getName());

  /**
   * Download jar through {@link Grape} given an org, module and version
   * It is assumed that an ivy settings file exists on the classpath
   */
  public static URI[] downloadJar(String org, String module, String version, boolean transitive) throws IOException {
    Map<String, Object> artifactMap = Maps.newHashMap();
    artifactMap.put("org", org);
    artifactMap.put("module", module);
    artifactMap.put("version", version);
    artifactMap.put("transitive", transitive);
    return downloadJar(artifactMap);
  }

  /**
   * Resolves the artifact described by {@code artifactMap} via Grape, using the ivy
   * settings file found on the classpath and a dedicated {@link GroovyClassLoader}.
   */
  public static URI[] downloadJar(Map<String, Object> artifactMap) throws IOException {
    System.setProperty("grape.config", getIvySettingsFile().getAbsolutePath());
    Map<String, Object> args = Maps.newHashMap();
    args.put("classLoader", AccessController.doPrivileged(new PrivilegedAction<GroovyClassLoader>() {
      @Override
      public GroovyClassLoader run() {
        return new GroovyClassLoader();
      }
    }));
    return Grape.resolve(args, artifactMap);
  }

  /**
   * Locates the ivy settings resource on the classpath, preferring the customized
   * client settings file over the default one.
   *
   * @throws IOException if neither settings file is found on the classpath
   */
  @NonNull
  private static URL getSettingsUrl() throws IOException {
    URL clientSettingsUrl = Thread.currentThread().getContextClassLoader().getResource(CLIENT_IVY_SETTINGS_FILE_NAME);
    if (clientSettingsUrl == null) {
      URL settingsUrl = Thread.currentThread().getContextClassLoader().getResource(IVY_SETTINGS_FILE_NAME);
      if (settingsUrl == null) {
        // Fix: the message previously read "...ivysettings.xmland client_ivysettings.xml from..."
        // due to missing spaces around "and".
        throw new IOException("Failed to find " + IVY_SETTINGS_FILE_NAME + " and "
            + CLIENT_IVY_SETTINGS_FILE_NAME + " from class path");
      } else {
        logger.info("Fallback to ivysettings.xml in the classpath");
        return settingsUrl;
      }
    } else {
      logger.info("Using customized client_ivysettings.xml file");
      return clientSettingsUrl;
    }
  }

  /**
   * Get ivy settings file from classpath
   */
  public static File getIvySettingsFile() throws IOException {
    URL settingsUrl = getSettingsUrl();

    // Check if settingsUrl is file on classpath
    File ivySettingsFile = new File(settingsUrl.getFile());
    if (ivySettingsFile.exists()) {
      // can access settingsUrl as a file
      return ivySettingsFile;
    }

    // Create temporary Ivy settings file and copy the classpath resource into it.
    ivySettingsFile = Files.createTempFile("ivy.settings", ".xml").toFile();
    ivySettingsFile.deleteOnExit();

    try (OutputStream os = new BufferedOutputStream(new FileOutputStream(ivySettingsFile))) {
      Resources.copy(settingsUrl, os);
    }

    return ivySettingsFile;
  }
}
| 4,132 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/LoggingUncaughtExceptionHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
/**
* A type of {@link java.lang.Thread.UncaughtExceptionHandler} that logs uncaught exceptions.
*
* @author Yinan Li
*/
public class LoggingUncaughtExceptionHandler implements Thread.UncaughtExceptionHandler {

  private final Logger logger;

  /**
   * @param logger the logger to report uncaught exceptions to; when absent, a logger
   *               scoped to this class is used instead
   */
  public LoggingUncaughtExceptionHandler(Optional<Logger> logger) {
    this.logger = logger.isPresent()
        ? logger.get()
        : LoggerFactory.getLogger(LoggingUncaughtExceptionHandler.class);
  }

  /** Logs the uncaught exception at ERROR level, including its full stack trace. */
  @Override
  public void uncaughtException(Thread t, Throwable e) {
    this.logger.error(String.format("Thread %s threw an uncaught exception: %s", t, e), e);
  }
}
| 4,133 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/ParallelRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import lombok.Data;
import lombok.Getter;
import lombok.Setter;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.Lock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Striped;
import org.apache.gobblin.configuration.State;
/**
* A class that is responsible for running certain methods in parallel. Methods in this class returns immediately and
* are run in a fixed-size thread pool.
*
* <p>
* This class is intended to be used in the following pattern. This example uses the serialize() method.
*
* <pre> {@code
* Closer closer = Closer.create();
* try {
* // Do stuff
* ParallelRunner runner = closer.register(new ParallelRunner(threads, fs));
* runner.serialize(state1, outputFilePath1);
* // Submit more serialization tasks
* runner.serialize(stateN, outputFilePathN);
* // Do stuff
* } catch (Throwable e) {
* throw closer.rethrow(e);
* } finally {
* closer.close();
* }}
* </pre>
*
* Note that calling {@link #close()} will wait for all submitted tasks to complete and then stop the
* {@link ParallelRunner} by shutting down the {@link ExecutorService}.
* </p>
*
* @author Yinan Li
*/
public class ParallelRunner implements Closeable {

  private static final Logger LOGGER = LoggerFactory.getLogger(ParallelRunner.class);

  public static final String PARALLEL_RUNNER_THREADS_KEY = "parallel.runner.threads";
  public static final int DEFAULT_PARALLEL_RUNNER_THREADS = 10;

  private final ExecutorService executor;

  /**
   * Setting of fs is allowed to support reusing the {@link ParallelRunner} with different {@link FileSystem}s
   * after all tasks have completed execution.
   */
  @Getter
  @Setter
  private FileSystem fs;

  private final List<NamedFuture> futures = Lists.newArrayList();

  // Per-path striped locks: concurrent delete/rename/move tasks on the same source path are
  // serialized, while tasks on distinct paths can still run in parallel.
  private final Striped<Lock> locks = Striped.lazyWeakLock(Integer.MAX_VALUE);

  private final FailPolicy failPolicy;

  public ParallelRunner(int threads, FileSystem fs) {
    this(threads, fs, FailPolicy.FAIL_ONE_FAIL_ALL);
  }

  public ParallelRunner(int threads, FileSystem fs, FailPolicy failPolicy) {
    this.executor = ExecutorsUtils.loggingDecorator(Executors.newFixedThreadPool(threads,
        ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("ParallelRunner"))));
    this.fs = fs;
    this.failPolicy = failPolicy;
  }

  /**
   * Policies indicating how {@link ParallelRunner} should handle failure of tasks.
   */
  public enum FailPolicy {
    /** If a task fails, a warning will be logged, but the {@link ParallelRunner} will still succeed.*/
    ISOLATE_FAILURES,
    /** If a task fails, all tasks will be tried, but {@link ParallelRunner#close} will throw the Exception.*/
    FAIL_ONE_FAIL_ALL
  }

  /**
   * A future with a name / message for reporting.
   */
  @Data
  public static class NamedFuture {
    private final Future<?> future;
    private final String name;
  }

  /**
   * Serialize a {@link State} object into a file.
   *
   * <p>
   * This method submits a task to serialize the {@link State} object and returns immediately
   * after the task is submitted.
   * </p>
   *
   * @param state the {@link State} object to be serialized
   * @param outputFilePath the file to write the serialized {@link State} object to
   * @param <T> the {@link State} object type
   */
  public <T extends State> void serializeToFile(final T state, final Path outputFilePath) {
    // Use a Callable with a Void return type to allow exceptions to be thrown
    this.futures.add(new NamedFuture(this.executor.submit(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        SerializationUtils.serializeState(ParallelRunner.this.fs, outputFilePath, state);
        return null;
      }
    }), "Serialize state to " + outputFilePath));
  }

  /**
   * Deserialize a {@link State} object from a file.
   *
   * <p>
   * This method submits a task to deserialize the {@link State} object and returns immediately
   * after the task is submitted.
   * </p>
   *
   * @param state an empty {@link State} object to which the deserialized content will be populated
   * @param inputFilePath the input file to read from
   * @param <T> the {@link State} object type
   */
  public <T extends State> void deserializeFromFile(final T state, final Path inputFilePath) {
    this.futures.add(new NamedFuture(this.executor.submit(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        SerializationUtils.deserializeState(ParallelRunner.this.fs, inputFilePath, state);
        return null;
      }
    }), "Deserialize state from " + inputFilePath));
  }

  /**
   * Deserialize a list of {@link State} objects from a Hadoop {@link SequenceFile}.
   *
   * <p>
   * This method submits a task to deserialize the {@link State} objects and returns immediately
   * after the task is submitted.
   * </p>
   *
   * @param keyClass the {@link Writable} key class of the {@link SequenceFile}
   * @param stateClass the {@link Class} object of the {@link State} class
   * @param inputFilePath the input {@link SequenceFile} to read from
   * @param states a {@link Collection} object to store the deserialized {@link State} objects
   * @param deleteAfter a flag telling whether to delete the {@link SequenceFile} afterwards
   * @param <T> the {@link State} object type
   */
  public <T extends State> void deserializeFromSequenceFile(final Class<? extends Writable> keyClass,
      final Class<T> stateClass, final Path inputFilePath, final Collection<T> states, final boolean deleteAfter) {
    this.futures.add(new NamedFuture(this.executor.submit(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        Configuration conf = new Configuration(ParallelRunner.this.fs.getConf());
        WritableShimSerialization.addToHadoopConfiguration(conf);
        try (@SuppressWarnings("deprecation") SequenceFile.Reader reader = new SequenceFile.Reader(
            ParallelRunner.this.fs, inputFilePath, conf)) {
          Writable key = keyClass.newInstance();
          T state = stateClass.newInstance();
          while (reader.next(key)) {
            state = (T) reader.getCurrentValue(state);
            states.add(state);
            // Allocate a fresh instance so the object already added to `states` is not mutated
            // by the next read.
            state = stateClass.newInstance();
          }
          if (deleteAfter) {
            HadoopUtils.deletePath(ParallelRunner.this.fs, inputFilePath, false);
          }
        }
        return null;
      }
    }), "Deserialize state from file " + inputFilePath));
  }

  /**
   * Delete a {@link Path}.
   *
   * <p>
   * This method submits a task to delete a {@link Path} and returns immediately
   * after the task is submitted.
   * </p>
   *
   * @param path path to be deleted.
   * @param recursive whether to delete the path recursively
   */
  public void deletePath(final Path path, final boolean recursive) {
    this.futures.add(new NamedFuture(this.executor.submit(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        Lock lock = ParallelRunner.this.locks.get(path.toString());
        lock.lock();
        try {
          HadoopUtils.deletePath(ParallelRunner.this.fs, path, recursive);
          return null;
        } finally {
          lock.unlock();
        }
      }
    }), "Delete path " + path));
  }

  /**
   * Rename a {@link Path}.
   *
   * <p>
   * This method submits a task to rename a {@link Path} and returns immediately
   * after the task is submitted.
   * </p>
   *
   * @param src path to be renamed
   * @param dst new path after rename
   * @param group an optional group name for the destination path
   */
  public void renamePath(final Path src, final Path dst, final Optional<String> group) {
    this.futures.add(new NamedFuture(this.executor.submit(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        Lock lock = ParallelRunner.this.locks.get(src.toString());
        lock.lock();
        try {
          if (ParallelRunner.this.fs.exists(src)) {
            HadoopUtils.renamePath(ParallelRunner.this.fs, src, dst);
            if (group.isPresent()) {
              HadoopUtils.setGroup(ParallelRunner.this.fs, dst, group.get());
            }
          }
          return null;
        } catch (FileAlreadyExistsException e) {
          // A pre-existing destination is treated as benign: log and move on.
          LOGGER.warn(String.format("Failed to rename %s to %s: dst already exists", src, dst), e);
          return null;
        } finally {
          lock.unlock();
        }
      }
    }), "Rename " + src + " to " + dst));
  }

  /**
   * Move a {@link Path}.
   *
   * <p>
   * This method submits a task to move a {@link Path} and returns immediately
   * after the task is submitted.
   * </p>
   *
   * @param src path to be moved
   * @param dstFs the destination {@link FileSystem}
   * @param dst the destination path
   * @param group an optional group name for the destination path
   */
  public void movePath(final Path src, final FileSystem dstFs, final Path dst, final Optional<String> group) {
    movePath(src, dstFs, dst, false, group);
  }

  /**
   * Move a {@link Path}.
   *
   * <p>
   * This method submits a task to move a {@link Path} and returns immediately
   * after the task is submitted.
   * </p>
   *
   * @param src path to be moved
   * @param dstFs the destination {@link FileSystem}
   * @param dst the destination path
   * @param overwrite true to overwrite the destination
   * @param group an optional group name for the destination path
   */
  public void movePath(final Path src, final FileSystem dstFs, final Path dst, final boolean overwrite,
      final Optional<String> group) {
    this.futures.add(new NamedFuture(this.executor.submit(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        Lock lock = ParallelRunner.this.locks.get(src.toString());
        lock.lock();
        try {
          if (ParallelRunner.this.fs.exists(src)) {
            HadoopUtils.movePath(ParallelRunner.this.fs, src, dstFs, dst, overwrite, dstFs.getConf());
            if (group.isPresent()) {
              HadoopUtils.setGroup(dstFs, dst, group.get());
            }
          }
          return null;
        } catch (FileAlreadyExistsException e) {
          // A pre-existing destination is treated as benign: log and move on.
          LOGGER.warn(String.format("Failed to move %s to %s: dst already exists", src, dst), e);
          return null;
        } finally {
          lock.unlock();
        }
      }
    }), "Move " + src + " to " + dst));
  }

  /**
   * Submit a callable to the thread pool
   *
   * <p>
   * This method submits a task and returns immediately
   * </p>
   *
   * @param callable the callable to submit
   * @param name for the future
   */
  public void submitCallable(Callable<Void> callable, String name) {
    this.futures.add(new NamedFuture(this.executor.submit(callable), name));
  }

  /**
   * Wait for all submitted tasks to complete. The {@link ParallelRunner} can be reused after this call.
   *
   * <p>
   * Note the timeout is applied per pending future, not to the batch as a whole.
   * </p>
   *
   * @param timeoutInMills maximum time in milliseconds to wait for each pending task
   * @throws IOException if any task failed and the fail policy is {@link FailPolicy#FAIL_ONE_FAIL_ALL}
   */
  public void waitForTasks(long timeoutInMills) throws IOException {
    // Wait for all submitted tasks to complete
    boolean wasInterrupted = false;
    IOException exception = null;
    for (NamedFuture future : this.futures) {
      try {
        if (wasInterrupted) {
          future.getFuture().cancel(true);
        } else {
          future.getFuture().get(timeoutInMills, TimeUnit.MILLISECONDS);
        }
      } catch (InterruptedException ie) {
        LOGGER.warn("Task was interrupted: " + future.getName());
        wasInterrupted = true;
        if (exception == null) {
          exception = new IOException(ie);
        }
      } catch (ExecutionException ee) {
        LOGGER.warn("Task failed: " + future.getName(), ee.getCause());
        if (exception == null) {
          exception = new IOException(ee.getCause());
        }
      } catch (TimeoutException te) {
        LOGGER.warn("Tasks not fully finished before Parallel runner waiting until timeout due to:", te);
        if (exception == null) {
          // Wrap the TimeoutException itself. Unlike ExecutionException, a TimeoutException from
          // Future.get() carries no cause, so wrapping te.getCause() would produce an IOException
          // with a null cause and lose the timeout context entirely.
          exception = new IOException(te);
        }
      }
    }
    if (wasInterrupted) {
      // Restore the interrupt status swallowed by catching InterruptedException above.
      Thread.currentThread().interrupt();
    }
    if (exception != null && this.failPolicy == FailPolicy.FAIL_ONE_FAIL_ALL) {
      throw exception;
    }
    // clear so that more tasks can be submitted to this ParallelRunner
    this.futures.clear();
  }

  /**
   * Wait until default timeout(infinite long, if not specified) for all tasks under this parallel runner.
   */
  public void waitForTasks() throws IOException {
    this.waitForTasks(Long.MAX_VALUE);
  }

  @Override
  public void close() throws IOException {
    try {
      waitForTasks();
    } finally {
      ExecutorsUtils.shutdownExecutorService(this.executor, Optional.of(LOGGER));
    }
  }
}
| 4,134 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/CLIPasswordEncryptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.Path;
import org.jasypt.util.text.BasicTextEncryptor;
import org.jasypt.util.text.StrongTextEncryptor;
import org.jasypt.util.text.TextEncryptor;
import com.google.common.base.Optional;
import org.apache.gobblin.password.PasswordManager;
/**
* A command line tool for encrypting password.
* Usage: -h print usage, -p plain password, -m master password, -f master password file, -s use strong encryptor.
*
* @author Ziyang Liu
*/
public class CLIPasswordEncryptor {

  private static final char HELP_OPTION = 'h';
  private static final char PLAIN_PWD_OPTION = 'p';
  private static final char MASTER_PWD_OPTION = 'm';
  private static final char STRONG_ENCRYPTOR_OPTION = 's';
  private static final char MASTER_PWD_FILE_OPTION = 'f';
  private static final char ENCRYPTED_PWD_OPTION = 'e';

  // Encrypted passwords are wrapped as ENC(<ciphertext>); group 1 captures the ciphertext.
  private static final Pattern ENCRYPTED_PATTERN = Pattern.compile("ENC\\(([^)]+)\\)");
  private static final Options CLI_OPTIONS = new Options();

  public static void main(String[] args) throws ParseException {
    CommandLine cmdLine = parseArgs(args);
    if (shouldPrintUsageAndExit(cmdLine)) {
      printUsage();
      return;
    }
    TextEncryptor encryptor = getEncryptor(cmdLine, getMasterPassword(cmdLine));

    if (cmdLine.hasOption(ENCRYPTED_PWD_OPTION)) {
      // Decrypt mode: the input must be of the form ENC(...).
      Matcher encMatcher = ENCRYPTED_PATTERN.matcher(cmdLine.getOptionValue(ENCRYPTED_PWD_OPTION));
      if (!encMatcher.find()) {
        throw new RuntimeException("Input encrypted password does not match pattern \"ENC(...)\"");
      }
      System.out.println(encryptor.decrypt(encMatcher.group(1)));
      return;
    }

    if (cmdLine.hasOption(PLAIN_PWD_OPTION)) {
      // Encrypt mode: wrap the ciphertext so it can be recognized for later decryption.
      System.out.println("ENC(" + encryptor.encrypt(cmdLine.getOptionValue(PLAIN_PWD_OPTION)) + ")");
      return;
    }

    printUsage();
    throw new RuntimeException(String.format("Must provide -%s or -%s option.", PLAIN_PWD_OPTION, ENCRYPTED_PWD_OPTION));
  }

  /** Builds a jasypt encryptor keyed on the master password; -s selects the strong variant. */
  private static TextEncryptor getEncryptor(CommandLine cl, String masterPassword) {
    if (cl.hasOption(STRONG_ENCRYPTOR_OPTION)) {
      StrongTextEncryptor strongEncryptor = new StrongTextEncryptor();
      strongEncryptor.setPassword(masterPassword);
      return strongEncryptor;
    }
    BasicTextEncryptor basicEncryptor = new BasicTextEncryptor();
    basicEncryptor.setPassword(masterPassword);
    return basicEncryptor;
  }

  /** Resolves the master password, preferring -m over -f when both are supplied. */
  private static String getMasterPassword(CommandLine cl) {
    if (cl.hasOption(MASTER_PWD_OPTION)) {
      if (cl.hasOption(MASTER_PWD_FILE_OPTION)) {
        System.out.println(String.format("both -%s and -%s are provided. Using -%s", MASTER_PWD_OPTION,
            MASTER_PWD_FILE_OPTION, MASTER_PWD_OPTION));
      }
      return cl.getOptionValue(MASTER_PWD_OPTION);
    }
    Path masterPwdFile = new Path(cl.getOptionValue(MASTER_PWD_FILE_OPTION));
    Optional<String> masterPwd = PasswordManager.getMasterPassword(masterPwdFile);
    if (!masterPwd.isPresent()) {
      throw new RuntimeException("Failed to get master password from " + masterPwdFile);
    }
    return masterPwd.get();
  }

  private static CommandLine parseArgs(String[] args) throws ParseException {
    initOptions(CLI_OPTIONS);
    return new DefaultParser().parse(CLI_OPTIONS, args);
  }

  /** Registers all supported CLI options on the given {@link Options} instance. */
  private static Options initOptions(Options options) {
    options.addOption(new Option(StringUtils.EMPTY + HELP_OPTION, "print this message"));
    options.addOption(Option.builder(StringUtils.EMPTY + PLAIN_PWD_OPTION)
        .argName("plain password")
        .hasArg()
        .desc("plain password to be encrypted")
        .build());
    options.addOption(Option.builder(StringUtils.EMPTY + MASTER_PWD_OPTION)
        .argName("master password")
        .hasArg()
        .desc("master password used to encrypt the plain password")
        .build());
    options.addOption(Option.builder(StringUtils.EMPTY + MASTER_PWD_FILE_OPTION)
        .argName("master password file")
        .hasArg()
        .desc("file that contains the master password used to encrypt the plain password")
        .build());
    options.addOption(new Option(StringUtils.EMPTY + STRONG_ENCRYPTOR_OPTION, "use strong encryptor"));
    options.addOption(Option.builder(StringUtils.EMPTY + ENCRYPTED_PWD_OPTION)
        .argName("decrypt the input")
        .hasArg()
        .build());
    return options;
  }

  /** Usage is printed when -h is given or when no master password source is provided. */
  private static boolean shouldPrintUsageAndExit(CommandLine cl) {
    return cl.hasOption(HELP_OPTION) || !masterpasswordProvided(cl);
  }

  private static boolean masterpasswordProvided(CommandLine cl) {
    return cl.hasOption(MASTER_PWD_OPTION) || cl.hasOption(MASTER_PWD_FILE_OPTION);
  }

  private static void printUsage() {
    new HelpFormatter().printHelp(" ", CLI_OPTIONS);
  }
}
| 4,135 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/PropertiesUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.IOException;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableMap;
/**
* A utility class for {@link Properties} objects.
*/
public class PropertiesUtils {

  private static final Splitter LIST_SPLITTER = Splitter.on(",").trimResults().omitEmptyStrings();

  /**
   * Combine a variable number of {@link Properties} into a single {@link Properties}.
   * Keys present in later arguments override those set by earlier ones.
   */
  public static Properties combineProperties(Properties... properties) {
    Properties combinedProperties = new Properties();
    for (Properties props : properties) {
      combinedProperties.putAll(props);
    }
    return combinedProperties;
  }

  /**
   * Converts a {@link Properties} object to a {@link Map} where each key is a {@link String}.
   */
  public static Map<String, ?> propsToStringKeyMap(Properties properties) {
    ImmutableMap.Builder<String, Object> mapBuilder = ImmutableMap.builder();
    for (Map.Entry<Object, Object> entry : properties.entrySet()) {
      mapBuilder.put(entry.getKey().toString(), entry.getValue());
    }
    return mapBuilder.build();
  }

  /** Returns the property as a boolean, falling back to {@code defaultValue} when unset. */
  public static boolean getPropAsBoolean(Properties properties, String key, String defaultValue) {
    // parseBoolean avoids the needless Boolean boxing of Boolean.valueOf.
    return Boolean.parseBoolean(properties.getProperty(key, defaultValue));
  }

  /** Returns the property as an int, falling back to {@code defaultValue} when unset. */
  public static int getPropAsInt(Properties properties, String key, int defaultValue) {
    return Integer.parseInt(properties.getProperty(key, Integer.toString(defaultValue)));
  }

  /** @throws {@link NullPointerException} when `key` not in `properties` */
  public static int getRequiredPropAsInt(Properties properties, String key) {
    return Integer.parseInt(getRequiredPropRaw(properties, key, Optional.of("an integer")));
  }

  /** @throws {@link NullPointerException} when `key` not in `properties` */
  public static String getRequiredProp(Properties properties, String key) {
    return getRequiredPropRaw(properties, key, Optional.absent());
  }

  /**
   * Returns the property value, failing with a descriptive message when the key is absent.
   *
   * @param desc an optional description of the expected value, included in the error message
   * @throws {@link NullPointerException} when `key` not in `properties`
   */
  public static String getRequiredPropRaw(Properties properties, String key, Optional<String> desc) {
    String value = properties.getProperty(key);
    // Bug fix: the lambda previously concatenated the Optional itself (`desc`) instead of its
    // unwrapped value `s`, yielding messages like "(to Optional.of(an integer))".
    Preconditions.checkNotNull(value, "'" + key + "' must be set" + desc.transform(s -> " (to " + s + ")").or(""));
    return value;
  }

  /** Returns the property as a long, falling back to {@code defaultValue} when unset. */
  public static long getPropAsLong(Properties properties, String key, long defaultValue) {
    return Long.parseLong(properties.getProperty(key, Long.toString(defaultValue)));
  }

  /**
   * Get the value of a comma separated property as a {@link List} of strings.
   *
   * @param key property key
   * @return value associated with the key as a {@link List} of strings
   */
  public static List<String> getPropAsList(Properties properties, String key) {
    return LIST_SPLITTER.splitToList(properties.getProperty(key));
  }

  /**
   * Extract all the values whose keys start with a <code>prefix</code>
   * @param properties the given {@link Properties} instance
   * @param prefix of keys to be extracted
   * @return a list of values in the properties
   */
  public static List<String> getValuesAsList(Properties properties, Optional<String> prefix) {
    if (prefix.isPresent()) {
      properties = extractPropertiesWithPrefix(properties, prefix);
    }
    Properties finalProperties = properties;
    return properties.keySet().stream().map(key -> finalProperties.getProperty(key.toString())).collect(Collectors.toList());
  }

  /**
   * Get the value of a property as a list of strings, using the given default value if the property is not set.
   *
   * @param key property key
   * @param def default value
   * @return value (the default value if the property is not set) associated with the key as a list of strings
   */
  public static List<String> getPropAsList(Properties properties, String key, String def) {
    return LIST_SPLITTER.splitToList(properties.getProperty(key, def));
  }

  /**
   * Extract all the keys that start with a <code>prefix</code> in {@link Properties} to a new {@link Properties}
   * instance.
   *
   * @param properties the given {@link Properties} instance
   * @param prefix of keys to be extracted; an absent prefix matches every key
   * @return a {@link Properties} instance
   */
  public static Properties extractPropertiesWithPrefix(Properties properties, Optional<String> prefix) {
    Preconditions.checkNotNull(properties);
    Preconditions.checkNotNull(prefix);
    Properties extractedProperties = new Properties();
    for (Map.Entry<Object, Object> entry : properties.entrySet()) {
      if (StringUtils.startsWith(entry.getKey().toString(), prefix.or(StringUtils.EMPTY))) {
        extractedProperties.put(entry.getKey().toString(), entry.getValue());
      }
    }
    return extractedProperties;
  }

  /**
   * Extract all the keys that start with a <code>prefix</code> in {@link Properties} to a new {@link Properties}
   * instance. It removes the prefix from the properties.
   *
   * @param properties the given {@link Properties} instance
   * @param prefix of keys to be extracted
   * @return a {@link Properties} instance
   */
  public static Properties extractPropertiesWithPrefixAfterRemovingPrefix(Properties properties, String prefix) {
    Preconditions.checkNotNull(properties);
    Preconditions.checkNotNull(prefix);
    Properties extractedProperties = new Properties();
    for (Map.Entry<Object, Object> entry : properties.entrySet()) {
      if (StringUtils.startsWith(entry.getKey().toString(), prefix)) {
        extractedProperties.put(entry.getKey().toString().substring(prefix.length()), entry.getValue());
      }
    }
    return extractedProperties;
  }

  /** Serializes {@code properties} into the standard java.util.Properties text format. */
  public static String serialize(Properties properties) throws IOException {
    StringWriter outputWriter = new StringWriter();
    properties.store(outputWriter, "");
    String rst = outputWriter.toString();
    outputWriter.close();
    return rst;
  }

  /** Parses a string previously produced by {@link #serialize(Properties)}. */
  public static Properties deserialize(String serialized) throws IOException {
    StringReader reader = new StringReader(serialized);
    Properties properties = new Properties();
    properties.load(reader);
    reader.close();
    return properties;
  }

  /** Renders the properties as one {@code "key" : "value"} pair per line, for logging. */
  public static String prettyPrintProperties(Properties properties) {
    return properties.entrySet().stream()
        .map(entry -> "\"" + entry.getKey() + "\"" + " : " + "\"" + entry.getValue() + "\"")
        .collect(Collectors.joining(",\n"));
  }
}
| 4,136 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/HiveJdbcConnector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLClassLoader;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Enumeration;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import org.apache.gobblin.password.PasswordManager;
/**
* A class for managing a Hive JDBC connection.
*
* @author Ziyang Liu
*/
public class HiveJdbcConnector implements Closeable {
public static final String HIVESERVER_VERSION = "hiveserver.version";
public static final String DEFAULT_HIVESERVER_VERSION = "2";
public static final String HIVESERVER_CONNECTION_STRING = "hiveserver.connection.string";
public static final String HIVESERVER_URL = "hiveserver.url";
public static final String HIVESERVER_USER = "hiveserver.user";
public static final String HIVESERVER_PASSWORD = "hiveserver.password";
public static final String HIVESITE_DIR = "hivesite.dir";
public static final String HIVE_EXECUTION_SIMULATE = "hive.execution.simulate";
private static final String HIVE_JDBC_DRIVER_NAME = "org.apache.hadoop.hive.jdbc.HiveDriver";
private static final String HIVE2_JDBC_DRIVER_NAME = "org.apache.hive.jdbc.HiveDriver";
private static final String HIVE_EMBEDDED_CONNECTION_STRING = "jdbc:hive://";
private static final String HIVE2_EMBEDDED_CONNECTION_STRING = "jdbc:hive2://";
private static final String HIVE_CONFIG_KEY_PREFIX = "hive.";
private static final int MAX_OUTPUT_STMT_LENGTH = 1000;
private static final Logger LOG = LoggerFactory.getLogger(HiveJdbcConnector.class);
// Connection to the Hive DB
private Connection conn;
private int hiveServerVersion;
private boolean isSimulate;
// Creates a connector with simulate mode disabled; a JDBC connection must still be
// established via one of the withHive*Connection methods before use.
private HiveJdbcConnector() {
this(false);
}
// @param isSimulate flag read from HIVE_EXECUTION_SIMULATE; this class only records it
// (callers are expected to consult it before executing statements).
private HiveJdbcConnector(boolean isSimulate) {
this.isSimulate = isSimulate;
}
/**
* Create a new {@link HiveJdbcConnector} using the specified Hive server version.
* @param hiveServerVersion is the Hive server version to use
* @return a HiveJdbcConnector with the specified hiveServerVersion
* @throws SQLException
*/
@SuppressWarnings("resource")
public static HiveJdbcConnector newEmbeddedConnector(int hiveServerVersion) throws SQLException {
  // Register the matching driver first, then open the embedded connection.
  HiveJdbcConnector connector = new HiveJdbcConnector();
  return connector.withHiveServerVersion(hiveServerVersion).withHiveEmbeddedConnection();
}
/**
* Create a new {@link HiveJdbcConnector} based on the configs in a {@link Properties} object
* @param compactRunProps contains the configuration keys to construct the {@link HiveJdbcConnector}
* @throws SQLException if there is a problem setting up the JDBC connection
* @return
*/
public static HiveJdbcConnector newConnectorWithProps(Properties compactRunProps) throws SQLException {
  boolean simulate = Boolean.valueOf(compactRunProps.getProperty(HIVE_EXECUTION_SIMULATE));
  HiveJdbcConnector connector = new HiveJdbcConnector(simulate);

  // Select the Hive server version (defaults to HiveServer2) and register its driver.
  int version = Integer.parseInt(compactRunProps.getProperty(HIVESERVER_VERSION, DEFAULT_HIVESERVER_VERSION));
  connector.withHiveServerVersion(version);

  // Make hive-site.xml visible on the classpath when a directory is configured.
  if (compactRunProps.containsKey(HIVESITE_DIR)) {
    HiveJdbcConnector.addHiveSiteDirToClasspath(compactRunProps.getProperty(HIVESITE_DIR));
  }

  // Establish the JDBC connection, preferring an explicit connection string, then a
  // URL/user/password triple, and finally an embedded connection.
  if (compactRunProps.containsKey(HIVESERVER_CONNECTION_STRING)) {
    connector.withHiveConnectionFromUrl(compactRunProps.getProperty(HIVESERVER_CONNECTION_STRING));
  } else if (compactRunProps.containsKey(HIVESERVER_URL)) {
    connector.withHiveConnectionFromUrlUserPassword(compactRunProps.getProperty(HIVESERVER_URL),
        compactRunProps.getProperty(HIVESERVER_USER),
        PasswordManager.getInstance(compactRunProps).readPassword(compactRunProps.getProperty(HIVESERVER_PASSWORD)));
  } else {
    connector.withHiveEmbeddedConnection();
  }

  // Push any hive.* properties onto the new connection.
  connector.setHiveProperties(compactRunProps);
  return connector;
}
/**
* Set the {@link HiveJdbcConnector#hiveServerVersion}. The hiveServerVersion must be either 1 or 2.
* @param hiveServerVersion
* @return
*/
private HiveJdbcConnector withHiveServerVersion(int hiveServerVersion) {
  try {
    // Load (and thereby register) the JDBC driver matching the requested server version.
    switch (hiveServerVersion) {
      case 1:
        Class.forName(HIVE_JDBC_DRIVER_NAME);
        break;
      case 2:
        Class.forName(HIVE2_JDBC_DRIVER_NAME);
        break;
      default:
        throw new RuntimeException(hiveServerVersion + " is not a valid HiveServer version.");
    }
  } catch (ClassNotFoundException e) {
    throw new RuntimeException("Cannot find a suitable driver of HiveServer version " + hiveServerVersion + ".", e);
  }
  this.hiveServerVersion = hiveServerVersion;
  return this;
}
  /**
   * Set the {@link HiveJdbcConnector#conn} based on the URL given to the method.
   * No user/password is supplied; authentication, if any, must be encoded in the URL itself.
   * @param hiveServerUrl is the JDBC URL to connect to
   * @throws SQLException if there is a problem connecting to the URL
   * @return this connector, for chaining
   */
  private HiveJdbcConnector withHiveConnectionFromUrl(String hiveServerUrl) throws SQLException {
    this.conn = DriverManager.getConnection(hiveServerUrl);
    return this;
  }
  /**
   * Set the {@link HiveJdbcConnector#conn} based on the URL, user, and password given to the method.
   * @param hiveServerUrl is the JDBC URL to connect to
   * @param hiveServerUser is the username to connect with
   * @param hiveServerPassword is the password to authenticate with when connecting to the URL
   * @throws SQLException if there is a problem connecting to the URL
   * @return this connector, for chaining
   */
  private HiveJdbcConnector withHiveConnectionFromUrlUserPassword(String hiveServerUrl, String hiveServerUser,
      String hiveServerPassword) throws SQLException {
    this.conn = DriverManager.getConnection(hiveServerUrl, hiveServerUser, hiveServerPassword);
    return this;
  }
/**
* Set the {@link HiveJdbcConnector#conn} based on {@link HiveJdbcConnector#hiveServerVersion}
* @throws SQLException if there is a problem connection to the URL
* @return
*/
private HiveJdbcConnector withHiveEmbeddedConnection() throws SQLException {
if (this.hiveServerVersion == 1) {
this.conn = DriverManager.getConnection(HIVE_EMBEDDED_CONNECTION_STRING);
} else {
this.conn = DriverManager.getConnection(HIVE2_EMBEDDED_CONNECTION_STRING);
}
return this;
}
/**
* Helper method to add the directory containing the hive-site.xml file to the classpath
* @param hiveSiteDir is the path to to the folder containing the hive-site.xml file
*/
private static void addHiveSiteDirToClasspath(String hiveSiteDir) {
LOG.info("Adding " + hiveSiteDir + " to CLASSPATH");
File f = new File(hiveSiteDir);
try {
URL u = f.toURI().toURL();
URLClassLoader urlClassLoader = (URLClassLoader) ClassLoader.getSystemClassLoader();
Class<URLClassLoader> urlClass = URLClassLoader.class;
Method method = urlClass.getDeclaredMethod("addURL", new Class[] { URL.class });
method.setAccessible(true);
method.invoke(urlClassLoader, new Object[] { u });
} catch (ReflectiveOperationException | IOException e) {
throw new RuntimeException("Unable to add hive.site.dir to CLASSPATH", e);
}
}
  /**
   * Helper method that executes a series of "set ?=?" queries for the Hive connection in {@link HiveJdbcConnector#conn}.
   * Only properties whose names start with {@code HIVE_CONFIG_KEY_PREFIX} are forwarded to Hive.
   *
   * NOTE(review): the example below implies the prefix is stripped before "set" is issued, but the
   * code binds the full property name (including the prefix) as parameter 1 — confirm which is
   * intended against the value of HIVE_CONFIG_KEY_PREFIX and the Hive-side property names.
   *
   * @param props specifies which set methods to run. For example, if the config contains "hive.mapred.min.split.size=100"
   * then "set mapred.min.split.size=100" will be executed.
   * @throws SQLException is thrown if there is a problem executing the "set" queries
   */
  private void setHiveProperties(Properties props) throws SQLException {
    Preconditions.checkNotNull(this.conn, "The Hive connection must be set before any queries can be run");
    // A single prepared statement is reused for every matching property.
    try (PreparedStatement preparedStatement = this.conn.prepareStatement("set ?=?")) {
      Enumeration<?> enumeration = props.propertyNames();
      while (enumeration.hasMoreElements()) {
        String propertyName = (String) enumeration.nextElement();
        if (propertyName.startsWith(HIVE_CONFIG_KEY_PREFIX)) {
          preparedStatement.setString(1, propertyName);
          preparedStatement.setString(2, props.getProperty(propertyName));
          preparedStatement.execute();
        }
      }
    }
  }
/***
* Executes the given SQL statements.
*
* @param statements SQL statements to be executed.
* @throws SQLException if any issue in executing any statement.
*/
public void executeStatements(String... statements) throws SQLException {
Preconditions.checkNotNull(this.conn, "The Hive connection must be set before any queries can be run");
for (String statement : statements) {
if (isSimulate) {
LOG.info("[SIMULATE MODE] STATEMENT NOT RUN: " + choppedStatementNoLineChange(statement));
} else {
LOG.info("RUNNING STATEMENT: " + choppedStatementNoLineChange(statement));
try (Statement stmt = this.conn.createStatement()) {
try {
stmt.execute(statement);
} catch (SQLException sqe) {
LOG.error("Failed statement: " + choppedStatementNoLineChange(statement));
throw sqe;
}
}
}
}
}
// Chopped statements with all line-changing character being removed for saving space of log.
static String choppedStatementNoLineChange(String statement) {
// \r\n needs to be the first element in the pipe.
statement = statement.replaceAll("\\r\\n|\\r|\\n", " ");
if (statement.length() <= MAX_OUTPUT_STMT_LENGTH) {
return statement;
}
return statement.substring(0, MAX_OUTPUT_STMT_LENGTH) + "...... (" + (statement.length() - MAX_OUTPUT_STMT_LENGTH)
+ " characters omitted)";
}
  /**
   * Returns the underlying JDBC {@link Connection}. May be null if no connection has been set yet.
   */
  public Connection getConnection() {
    return this.conn;
  }
@Override
public void close() throws IOException {
if (this.conn != null) {
try {
this.conn.close();
} catch (SQLException e) {
LOG.error("Failed to close JDBC connection", e);
}
}
}
}
| 4,137 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/AvroFlattener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper;
/***
* This class provides methods to flatten an Avro Schema to make it more optimal for ORC
* (Hive does not support predicate pushdown for ORC with nested fields)
*
* The behavior of Avro Schema un-nesting is listed below:
*
* 1. Record within Record (and so on recursively) are flattened into the parent Record
* Record R1 {
* fields: {[
* {
* Record R2 {
* fields: {[
* {
* Record R3 {
* fields: {[
* {
* String S2
* }
* ]}
* }, {
* String S3
* }
* }
*
* ]}
* }
* }, {
* String S1
* }
* ]}
* }
* will be flattened to:
* Record R1 {
* fields: {[
* {
* String S1
* }, {
* String S2
* }, {
* String S3
* }
* ]}
* }
*
* 2. All fields un-nested from a Record within an Option (ie. Union of the type [null, Record] or [Record, null])
* within a Record are moved to parent Record as a list of Option fields
* Record R1 {
* fields : {[
* {
* Union : [
* null,
* Record R2 {
* fields : {[
* {
* String S1
* }, {
* String S2
* }
* ]}
* }
* }
* ]}
* }
* will be flattened to:
* Record R1 {
* fields : {[
* {
* Union : [ null, String S1]
* }, {
* Union : [ null, String S2]
* }
* ]}
* }
*
* 3. Array or Map will not be un-nested, however Records within it will be un-nested as described above
*
* 4. All un-nested fields are decorated with a new property "flatten_source" which is a dot separated string
* concatenation of parent fields name, similarly un-nested fields are renamed to double-underscore string
* concatenation of parent fields name
*
* 5. Primitive Types are not un-nested
*/
public class AvroFlattener {
private static final Logger LOG = Logger.getLogger(AvroFlattener.class);
private static final String FLATTENED_NAME_JOINER = "__";
private static final String FLATTENED_SOURCE_JOINER = ".";
public static final String FLATTENED_SOURCE_KEY = "flatten_source";
private String flattenedNameJoiner;
private String flattenedSourceJoiner;
/***
* Flatten the Schema to un-nest recursive Records (to make it optimal for ORC)
* @param schema Avro Schema to flatten
* @param flattenComplexTypes Flatten complex types recursively other than Record and Option
* @return Flattened Avro Schema
*/
public Schema flatten(Schema schema, boolean flattenComplexTypes) {
Preconditions.checkNotNull(schema);
// To help make it configurable later
this.flattenedNameJoiner = FLATTENED_NAME_JOINER;
this.flattenedSourceJoiner = FLATTENED_SOURCE_JOINER;
Schema flattenedSchema = flatten(schema, false, flattenComplexTypes);
LOG.debug("Original Schema : " + schema);
LOG.debug("Flattened Schema: " + flattenedSchema);
return flattenedSchema;
}
/***
* Flatten the Schema to un-nest recursive Records (to make it optimal for ORC)
* @param schema Schema to flatten
* @param shouldPopulateLineage is set to true if the field is going to be flattened and moved up the hierarchy -
* so that lineage information can be tagged to it; which happens when there is a
* Record within a Record OR Record within Option within Record and so on,
* however not when there is a Record within Map or Array
* @param flattenComplexTypes Flatten complex types recursively other than Record and Option
* @return Flattened Avro Schema
*/
private Schema flatten(Schema schema, boolean shouldPopulateLineage, boolean flattenComplexTypes) {
Schema flattenedSchema;
// Process all Schema Types
// (Primitives are simply cloned)
switch (schema.getType()) {
case ARRAY:
// Array might be an array of recursive Records, flatten them
if (flattenComplexTypes) {
flattenedSchema = Schema.createArray(flatten(schema.getElementType(), false));
} else {
flattenedSchema = Schema.createArray(schema.getElementType());
}
break;
case BOOLEAN:
flattenedSchema = Schema.create(schema.getType());
break;
case BYTES:
flattenedSchema = Schema.create(schema.getType());
break;
case DOUBLE:
flattenedSchema = Schema.create(schema.getType());
break;
case ENUM:
flattenedSchema =
Schema.createEnum(schema.getName(), schema.getDoc(), schema.getNamespace(), schema.getEnumSymbols());
break;
case FIXED:
flattenedSchema =
Schema.createFixed(schema.getName(), schema.getDoc(), schema.getNamespace(), schema.getFixedSize());
break;
case FLOAT:
flattenedSchema = Schema.create(schema.getType());
break;
case INT:
flattenedSchema = Schema.create(schema.getType());
break;
case LONG:
flattenedSchema = Schema.create(schema.getType());
break;
case MAP:
if (flattenComplexTypes) {
flattenedSchema = Schema.createMap(flatten(schema.getValueType(), false));
} else {
flattenedSchema = Schema.createMap(schema.getValueType());
}
break;
case NULL:
flattenedSchema = Schema.create(schema.getType());
break;
case RECORD:
flattenedSchema = flattenRecord(schema, shouldPopulateLineage, flattenComplexTypes);
break;
case STRING:
flattenedSchema = Schema.create(schema.getType());
break;
case UNION:
flattenedSchema = flattenUnion(schema, shouldPopulateLineage, flattenComplexTypes);
break;
default:
String exceptionMessage = String.format("Schema flattening failed for \"%s\" ", schema);
LOG.error(exceptionMessage);
throw new AvroRuntimeException(exceptionMessage);
}
// Copy schema metadata
copyProperties(schema, flattenedSchema);
return flattenedSchema;
}
/***
* Flatten Record schema
* @param schema Record Schema to flatten
* @param shouldPopulateLineage If lineage information should be tagged in the field, this is true when we are
* un-nesting fields
* @param flattenComplexTypes Flatten complex types recursively other than Record and Option
* @return Flattened Record Schema
*/
private Schema flattenRecord(Schema schema, boolean shouldPopulateLineage, boolean flattenComplexTypes) {
Preconditions.checkNotNull(schema);
Preconditions.checkArgument(Schema.Type.RECORD.equals(schema.getType()));
Schema flattenedSchema;
List<Schema.Field> flattenedFields = new ArrayList<>();
if (schema.getFields().size() > 0) {
for (Schema.Field oldField : schema.getFields()) {
List<Schema.Field> newFields = flattenField(oldField, ImmutableList.<String>of(),
shouldPopulateLineage, flattenComplexTypes, Optional.<Schema>absent());
if (null != newFields && newFields.size() > 0) {
flattenedFields.addAll(newFields);
}
}
}
flattenedSchema = Schema.createRecord(schema.getName(), schema.getDoc(), schema.getNamespace(),
schema.isError());
flattenedSchema.setFields(flattenedFields);
return flattenedSchema;
}
/***
* Flatten Union Schema
* @param schema Union Schema to flatten
* @param shouldPopulateLineage If lineage information should be tagged in the field, this is true when we are
* un-nesting fields
* @param flattenComplexTypes Flatten complex types recursively other than Record and Option
* @return Flattened Union Schema
*/
private Schema flattenUnion(Schema schema, boolean shouldPopulateLineage, boolean flattenComplexTypes) {
Preconditions.checkNotNull(schema);
Preconditions.checkArgument(Schema.Type.UNION.equals(schema.getType()));
Schema flattenedSchema;
List<Schema> flattenedUnionMembers = new ArrayList<>();
if (null != schema.getTypes() && schema.getTypes().size() > 0) {
for (Schema oldUnionMember : schema.getTypes()) {
if (flattenComplexTypes) {
// It's member might still recursively contain records
flattenedUnionMembers.add(flatten(oldUnionMember, shouldPopulateLineage, flattenComplexTypes));
} else {
flattenedUnionMembers.add(oldUnionMember);
}
}
}
flattenedSchema = Schema.createUnion(flattenedUnionMembers);
return flattenedSchema;
}
/***
* Flatten Record field, and compute a list of flattened fields
*
* Note: Lineage represents the source path from root for the flattened field. For. eg. If the original schema is:
* {
* "type" : "record",
* "name" : "parentRecordName",
* "fields" : [ {
* "name" : "parentFieldRecord",
* "type" : {
* "type" : "record",
* "name" : "nestedRecordName",
* "fields" : [ {
* "name" : "nestedFieldString",
* "type" : "string"
* }, {
* "name" : "nestedFieldInt",
* "type" : "int"
* } ]
* }
* }]
* }
* The expected output schema is:
* {
* "type" : "record",
* "name" : "parentRecordName",
* "fields" : [ {
* "name" : "parentFieldRecord__nestedFieldString",
* "type" : "string",
* "flatten_source" : "parentFieldRecord.nestedFieldString"
* }, {
* "name" : "parentFieldRecord__nestedFieldInt",
* "type" : "int",
* "flatten_source" : "parentFieldRecord.nestedFieldInt"
* }, {
* "name" : "parentFieldInt",
* "type" : "int"
* } ]
* }
* Here, 'flatten_source' and field 'name' has also been modified to represent their origination from nested schema
* lineage helps to determine that
*
* @param f Field to flatten
* @param parentLineage Parent's lineage represented as a List of Strings
* @param shouldPopulateLineage If lineage information should be tagged in the field, this is true when we are
* un-nesting fields
* @param flattenComplexTypes Flatten complex types recursively other than Record and Option
* @param shouldWrapInOption If the field should be wrapped as an OPTION, if we un-nest fields within an OPTION
* we make all the unnested fields as OPTIONs
* @return List of flattened Record fields
*/
private List<Schema.Field> flattenField(Schema.Field f, ImmutableList<String> parentLineage,
boolean shouldPopulateLineage, boolean flattenComplexTypes, Optional<Schema> shouldWrapInOption) {
Preconditions.checkNotNull(f);
Preconditions.checkNotNull(f.schema());
Preconditions.checkNotNull(f.name());
List<Schema.Field> flattenedFields = new ArrayList<>();
ImmutableList<String> lineage = ImmutableList.<String>builder()
.addAll(parentLineage.iterator()).add(f.name()).build();
// If field.Type = RECORD, un-nest its fields and return them
if (Schema.Type.RECORD.equals(f.schema().getType())) {
if (null != f.schema().getFields() && f.schema().getFields().size() > 0) {
for (Schema.Field field : f.schema().getFields()) {
flattenedFields.addAll(flattenField(field, lineage, true, flattenComplexTypes, Optional.<Schema>absent()));
}
}
}
// If field.Type = OPTION, un-nest its fields and return them
else {
Optional<Schema> optionalRecord = isOfOptionType(f.schema());
if (optionalRecord.isPresent()) {
Schema record = optionalRecord.get();
if (record.getFields().size() > 0) {
for (Schema.Field field : record.getFields()) {
flattenedFields.addAll(flattenField(field, lineage, true, flattenComplexTypes, Optional.of(f.schema())));
}
}
}
// If field.Type = any-other, copy and return it
else {
// Compute name and source using lineage
String flattenName = f.name();
String flattenSource = StringUtils.EMPTY;
if (shouldPopulateLineage) {
flattenName = StringUtils.join(lineage, flattenedNameJoiner);
flattenSource = StringUtils.join(lineage, flattenedSourceJoiner);
}
// Copy field
Schema flattenedFieldSchema = flatten(f.schema(), shouldPopulateLineage, flattenComplexTypes);
if (shouldWrapInOption.isPresent()) {
boolean isNullFirstMember = Schema.Type.NULL.equals(shouldWrapInOption.get().getTypes().get(0).getType());
// If already Union, just copy it instead of wrapping (Union within Union is not supported)
if (Schema.Type.UNION.equals(flattenedFieldSchema.getType())) {
List<Schema> newUnionMembers = new ArrayList<>();
if (isNullFirstMember) {
newUnionMembers.add(Schema.create(Schema.Type.NULL));
}
for (Schema type : flattenedFieldSchema.getTypes()) {
if (Schema.Type.NULL.equals(type.getType())) {
continue;
}
newUnionMembers.add(type);
}
if (!isNullFirstMember) {
newUnionMembers.add(Schema.create(Schema.Type.NULL));
}
flattenedFieldSchema = Schema.createUnion(newUnionMembers);
}
// Wrap the Union, since parent Union is an option
else {
// If the field within the parent Union has a non-null default value, then null should not be the first member
if (f.hasDefaultValue() && f.defaultVal() != null) {
isNullFirstMember = false;
}
if (isNullFirstMember) {
flattenedFieldSchema =
Schema.createUnion(Arrays.asList(Schema.create(Schema.Type.NULL), flattenedFieldSchema));
} else {
flattenedFieldSchema =
Schema.createUnion(Arrays.asList(flattenedFieldSchema, Schema.create(Schema.Type.NULL)));
}
}
}
Schema.Field field = AvroCompatibilityHelper.createSchemaField(flattenName, flattenedFieldSchema, f.doc(),
AvroUtils.getCompatibleDefaultValue(f), f.order());
if (StringUtils.isNotBlank(flattenSource)) {
field.addProp(FLATTENED_SOURCE_KEY, flattenSource);
}
// Avro 1.9 compatible change - replaced deprecated public api getJsonProps with AvroCompatibilityHelper methods
AvroSchemaUtils.copyFieldProperties(f, field);
flattenedFields.add(field);
}
}
return flattenedFields;
}
/***
* Check if the Avro Schema is of type OPTION
* ie. [null, RECORD] or [RECORD, null]
* @param schema Avro Schema to check
* @return Optional Avro Record if schema is of type OPTION
*/
private static Optional<Schema> isOfOptionType(Schema schema) {
Preconditions.checkNotNull(schema);
// If not of type UNION, cant be an OPTION
if (!Schema.Type.UNION.equals(schema.getType())) {
return Optional.<Schema>absent();
}
// If has more than two members, can't be an OPTION
List<Schema> types = schema.getTypes();
if (null != types && types.size() == 2) {
Schema first = types.get(0);
Schema second = types.get(1);
// One member should be of type NULL and other of type RECORD
if (Schema.Type.NULL.equals(first.getType()) && Schema.Type.RECORD.equals(second.getType())) {
return Optional.of(second);
} else if (Schema.Type.RECORD.equals(first.getType()) && Schema.Type.NULL.equals(second.getType())) {
return Optional.of(first);
}
}
return Optional.<Schema>absent();
}
/***
* Copy properties from old Avro Schema to new Avro Schema
* @param oldSchema Old Avro Schema to copy properties from
* @param newSchema New Avro Schema to copy properties to
*/
private static void copyProperties(Schema oldSchema, Schema newSchema) {
Preconditions.checkNotNull(oldSchema);
Preconditions.checkNotNull(newSchema);
// Avro 1.9 compatible change - replaced deprecated public api getJsonProps using AvroCompatibilityHelper methods
AvroSchemaUtils.copySchemaProperties(oldSchema, newSchema);
}
}
| 4,138 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/AvroSchemaUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.avro.Schema;
/**
* Avro schema utility class to perform schema property conversion to the appropriate data types
*/
@Slf4j
public class AvroSchemaUtils {
private AvroSchemaUtils() {
}
/**
* Get schema property value as integer
* @param schema
* @param prop
* @return Integer
*/
public static Integer getValueAsInteger(final Schema schema, String prop) {
String value = AvroCompatibilityHelper.getSchemaPropAsJsonString(schema, prop,
false, false);
try {
return Integer.parseInt(value);
} catch (NumberFormatException ex) {
log.error("Exception while converting to integer ", ex.getCause());
throw new IllegalArgumentException(ex);
}
}
/***
* Copy properties to an Avro Schema field
* @param fromField Avro Schema Field to copy properties from
* @param toField Avro Schema Field to copy properties to
*/
public static void copyFieldProperties(final Schema.Field fromField, final Schema.Field toField) {
List<String> allPropNames = AvroCompatibilityHelper.getAllPropNames(fromField);
if (null != allPropNames) {
for (String propName : allPropNames) {
String propValue = AvroCompatibilityHelper.getFieldPropAsJsonString(fromField, propName,
true, false);
AvroCompatibilityHelper.setFieldPropFromJsonString(toField, propName, propValue, false);
}
}
}
/***
* Copy properties to an Avro Schema
* @param fromSchema Avro Schema to copy properties from
* @param toSchema Avro Schema to copy properties to
*/
public static void copySchemaProperties(final Schema fromSchema, final Schema toSchema) {
List<String> allPropNames = AvroCompatibilityHelper.getAllPropNames(fromSchema);
if (null != allPropNames) {
for (String propName : allPropNames) {
String propValue = AvroCompatibilityHelper.getSchemaPropAsJsonString(fromSchema, propName,
true, false);
AvroCompatibilityHelper.setSchemaPropFromJsonString(toSchema, propName, propValue, false);
}
}
}
}
| 4,139 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/ForkOperatorUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Preconditions;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
/**
 * Utility class for use with the {@link org.apache.gobblin.fork.ForkOperator} class.
 *
 * @author Yinan Li
 */
public class ForkOperatorUtils {

  // Utility class: static methods only, not instantiable.
  private ForkOperatorUtils() {
  }

  /**
   * Get a new property key from an original one with branch index (if applicable).
   *
   * @param key property key
   * @param numBranches number of branches (non-negative)
   * @param branchId branch id (non-negative)
   * @return a new property key; unchanged when there is at most one branch
   */
  public static String getPropertyNameForBranch(String key, int numBranches, int branchId) {
    Preconditions.checkArgument(numBranches >= 0, "The number of branches is expected to be non-negative");
    Preconditions.checkArgument(branchId >= 0, "The branchId is expected to be non-negative");

    return numBranches > 1 ? key + "." + branchId : key;
  }

  /**
   * Get a new property key from an original one with branch index (if applicable).
   *
   * @param key property key
   * @param branch branch index
   * @return a new property key; unchanged when branch is negative (no fork)
   */
  public static String getPropertyNameForBranch(String key, int branch) {
    // A branch index of -1 means there is no fork and branching
    return branch >= 0 ? key + "." + branch : key;
  }

  /**
   * Get a new property key from an original one based on the branch id. The method assumes the branch id specified by
   * the {@link ConfigurationKeys#FORK_BRANCH_ID_KEY} parameter in the given WorkUnitState. The fork id key specifies
   * which fork this parameter belongs to. Note this method will only provide the aforementioned functionality for
   * {@link org.apache.gobblin.converter.Converter}s. To get the same functionality in {@link org.apache.gobblin.writer.DataWriter}s use
   * the {@link org.apache.gobblin.writer.DataWriterBuilder#forBranch(int)} to construct a writer with a specific branch id.
   *
   * @param workUnitState contains the fork id key
   * @param key property key
   * @return a new property key
   */
  public static String getPropertyNameForBranch(WorkUnitState workUnitState, String key) {
    Preconditions.checkNotNull(workUnitState, "Cannot get a property from a null WorkUnit");
    Preconditions.checkNotNull(key, "Cannot get the value for a null key");

    if (!workUnitState.contains(ConfigurationKeys.FORK_BRANCH_ID_KEY)) {
      return key;
    }
    // Read the branch id once instead of twice (the original called getPropAsInt twice).
    int branchId = workUnitState.getPropAsInt(ConfigurationKeys.FORK_BRANCH_ID_KEY);
    return branchId >= 0 ? key + "." + branchId : key;
  }

  /**
   * Get a new path with the given branch name as a sub directory.
   *
   * @param numBranches number of branches (non-negative)
   * @param branchId branch id (non-negative)
   * @return a new path; unchanged when there is at most one branch
   */
  public static String getPathForBranch(State state, String path, int numBranches, int branchId) {
    Preconditions.checkNotNull(state);
    Preconditions.checkNotNull(path);
    Preconditions.checkArgument(numBranches >= 0, "The number of branches is expected to be non-negative");
    Preconditions.checkArgument(branchId >= 0, "The branch id is expected to be non-negative");

    return numBranches > 1
        ? path + Path.SEPARATOR + state.getProp(ConfigurationKeys.FORK_BRANCH_NAME_KEY + "." + branchId,
            ConfigurationKeys.DEFAULT_FORK_BRANCH_NAME + branchId)
        : path;
  }

  /**
   * Get the fork branch ID of a branch of a given task.
   *
   * @param taskId task ID
   * @param index branch index
   * @return a fork branch ID
   */
  public static String getForkId(String taskId, int index) {
    return taskId + "." + index;
  }
}
| 4,140 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/JobConfigurationUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.IOException;
import java.net.URI;
import java.util.Iterator;
import java.util.Properties;
import java.util.Map.Entry;
import org.apache.commons.configuration.ConfigurationConverter;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.configuration.State;
/**
 * A utility class for working with job configurations.
 *
 * @author Yinan Li
 */
public class JobConfigurationUtils {

  // Utility class: static methods only, not instantiable.
  private JobConfigurationUtils() {
  }

  /**
   * Get a new {@link Properties} instance by combining a given system configuration {@link Properties}
   * object (first) and a job configuration {@link Properties} object (second). Job properties win
   * on key collisions since they are put last.
   *
   * @param sysProps the given system configuration {@link Properties} object
   * @param jobProps the given job configuration {@link Properties} object
   * @return a new {@link Properties} instance
   */
  public static Properties combineSysAndJobProperties(Properties sysProps, Properties jobProps) {
    Properties combinedJobProps = new Properties();
    combinedJobProps.putAll(sysProps);
    combinedJobProps.putAll(jobProps);
    return combinedJobProps;
  }

  /**
   * Put all configuration properties in a given {@link Properties} object into a given
   * {@link Configuration} object.
   *
   * @param properties the given {@link Properties} object
   * @param configuration the given {@link Configuration} object
   */
  public static void putPropertiesIntoConfiguration(Properties properties, Configuration configuration) {
    for (String name : properties.stringPropertyNames()) {
      configuration.set(name, properties.getProperty(name));
    }
  }

  /**
   * Put all configuration properties in a given {@link Configuration} object into a given
   * {@link Properties} object.
   *
   * @param configuration the given {@link Configuration} object
   * @param properties the given {@link Properties} object
   */
  public static void putConfigurationIntoProperties(Configuration configuration, Properties properties) {
    for (Iterator<Entry<String, String>> it = configuration.iterator(); it.hasNext();) {
      Entry<String, String> entry = it.next();
      properties.put(entry.getKey(), entry.getValue());
    }
  }

  /**
   * Put all configuration properties in a given {@link State} object into a given
   * {@link Configuration} object.
   *
   * @param state the given {@link State} object
   * @param configuration the given {@link Configuration} object
   */
  public static void putStateIntoConfiguration(State state, Configuration configuration) {
    for (String key : state.getPropertyNames()) {
      String value = state.getProp(key);
      if (value != null) { // ignore `null`, to prevent `IllegalArgumentException` from `Configuration::set`
        configuration.set(key, value);
      }
    }
  }

  /**
   * Load the properties from the specified file into a {@link Properties} object.
   *
   * @param fileName the name of the file to load properties from
   * @param conf configuration object to determine the file system to be used
   * @return a new {@link Properties} instance
   */
  public static Properties fileToProperties(String fileName, Configuration conf)
      throws IOException, ConfigurationException {

    PropertiesConfiguration propsConfig = new PropertiesConfiguration();
    Path filePath = new Path(fileName);
    URI fileURI = filePath.toUri();

    // A path with neither scheme nor authority is resolved against the local file system.
    FileSystem fs = (fileURI.getScheme() == null && fileURI.getAuthority() == null)
        ? FileSystem.getLocal(conf)
        : filePath.getFileSystem(conf);

    // Close the stream once loaded; the original leaked the open FSDataInputStream.
    try (java.io.InputStream input = fs.open(filePath)) {
      propsConfig.load(input);
    }

    return ConfigurationConverter.getProperties(propsConfig);
  }

  /**
   * Load the properties from the specified file into a {@link Properties} object.
   *
   * @param fileName the name of the file to load properties from
   * @return a new {@link Properties} instance
   */
  public static Properties fileToProperties(String fileName) throws IOException, ConfigurationException {
    return fileToProperties(fileName, new Configuration());
  }
}
| 4,141 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/PathUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.IOException;
import java.net.URI;
import java.util.regex.Pattern;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Strings;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class PathUtils {

  /** Matches any glob metacharacter recognized by this class: ',', '?', '*', '[' or '{'. */
  public static final Pattern GLOB_TOKENS = Pattern.compile("[,\\?\\*\\[\\{]");

  /**
   * Appends {@code path2} to {@code path1}, keeping the scheme and authority of
   * {@code path1}. A separator is inserted before {@code path2} if it does not
   * already start with one.
   */
  public static Path mergePaths(Path path1, Path path2) {
    String path2Str = path2.toUri().getPath();
    if (!path2Str.startsWith("/")) {
      path2Str = "/" + path2Str;
    }
    return new Path(path1.toUri().getScheme(), path1.toUri().getAuthority(), path1.toUri().getPath() + path2Str);
  }

  /**
   * Relativizes {@code fullPath} against {@code pathPrefix}, ignoring scheme and
   * authority on both. Per {@link URI#relativize(URI)}, when {@code pathPrefix} is
   * not a prefix of {@code fullPath} the (scheme/authority-stripped) full path is
   * returned unchanged — {@link #isAncestor(Path, Path)} relies on this.
   */
  public static Path relativizePath(Path fullPath, Path pathPrefix) {
    return new Path(getPathWithoutSchemeAndAuthority(pathPrefix).toUri()
        .relativize(getPathWithoutSchemeAndAuthority(fullPath).toUri()));
  }

  /**
   * Checks whether possibleAncestor is an ancestor of fullPath.
   * @param possibleAncestor Possible ancestor of fullPath.
   * @param fullPath path to check.
   * @return true if possibleAncestor is an ancestor of fullPath.
   */
  public static boolean isAncestor(Path possibleAncestor, Path fullPath) {
    if (fullPath == null) {
      return false;
    }
    // If relativization fails, relativizePath returns fullPath unchanged, so
    // inequality here means possibleAncestor really is a prefix of fullPath.
    return !relativizePath(fullPath, possibleAncestor).equals(getPathWithoutSchemeAndAuthority(fullPath));
  }

  /**
   * Removes the Scheme and Authority from a Path.
   *
   * @see Path
   * @see URI
   */
  public static Path getPathWithoutSchemeAndAuthority(Path path) {
    return new Path(null, null, path.toUri().getPath());
  }

  /**
   * Returns the root path for the specified path, walking up through parents
   * recursively.
   *
   * @see Path
   */
  public static Path getRootPath(Path path) {
    if (path.isRoot()) {
      return path;
    }
    return getRootPath(path.getParent());
  }

  /**
   * Returns the root path child for the specified path.
   * Example: input: /a/b/c then it will return /a
   *
   * Returns {@code null} when the input has no parent (i.e. is itself a root).
   */
  public static Path getRootPathChild(Path path) {
    if (path.getParent() == null) {
      return null;
    }
    if (path.getParent().isRoot()) {
      return path;
    }
    return getRootPathChild(path.getParent());
  }

  /**
   * Removes the leading slash if present.
   */
  public static Path withoutLeadingSeparator(Path path) {
    return new Path(StringUtils.removeStart(path.toString(), Path.SEPARATOR));
  }

  /**
   * Finds the deepest ancestor of input that is not a glob.
   * Returns {@code null} when every component up to and including the root
   * contains a glob token.
   */
  public static Path deepestNonGlobPath(Path input) {
    Path commonRoot = input;
    while (commonRoot != null && isGlob(commonRoot)) {
      commonRoot = commonRoot.getParent();
    }
    return commonRoot;
  }

  /**
   * @return true if path contains any of the glob tokens matched by
   *         {@link #GLOB_TOKENS} (',', '?', '*', '[' or '{'); false when path is
   *         null or contains none of them.
   */
  public static boolean isGlob(Path path) {
    return (path != null) && GLOB_TOKENS.matcher(path.toString()).find();
  }

  /**
   * Removes all <code>extensions</code> from <code>path</code> if they exist.
   *
   * <pre>
   * PathUtils.removeExtension("file.txt", ".txt") = file
   * PathUtils.removeExtension("file.txt.gpg", ".txt", ".gpg") = file
   * PathUtils.removeExtension("file", ".txt") = file
   * PathUtils.removeExtension("file.txt", ".tar.gz") = file.txt
   * PathUtils.removeExtension("file.txt.gpg", ".txt") = file.gpg
   * PathUtils.removeExtension("file.txt.gpg", ".gpg") = file.txt
   * </pre>
   *
   * Note: each extension is removed wherever it occurs in the path string (via
   * {@link StringUtils#remove(String, String)}), not only at the end — see the
   * "file.gpg" example above.
   *
   * @param path in which the <code>extensions</code> need to be removed
   * @param extensions to be removed
   *
   * @return a new {@link Path} without <code>extensions</code>
   */
  public static Path removeExtension(Path path, String... extensions) {
    String pathString = path.toString();
    for (String extension : extensions) {
      pathString = StringUtils.remove(pathString, extension);
    }
    return new Path(pathString);
  }

  /**
   * Suffix all <code>extensions</code> to <code>path</code>. Null or empty
   * extensions are skipped.
   *
   * <pre>
   * PathUtils.addExtension("/tmp/data/file", ".txt") = file.txt
   * PathUtils.addExtension("/tmp/data/file.txt.gpg", ".zip") = file.txt.gpg.zip
   * PathUtils.addExtension("/tmp/data/file.txt", ".tar", ".gz") = file.txt.tar.gz
   * PathUtils.addExtension("/tmp/data/file.txt.gpg", ".tar.txt") = file.txt.gpg.tar.txt
   * </pre>
   *
   * @param path to which the <code>extensions</code> need to be added
   * @param extensions to be added
   *
   * @return a new {@link Path} with <code>extensions</code>
   */
  public static Path addExtension(Path path, String... extensions) {
    StringBuilder pathStringBuilder = new StringBuilder(path.toString());
    for (String extension : extensions) {
      if (!Strings.isNullOrEmpty(extension)) {
        pathStringBuilder.append(extension);
      }
    }
    return new Path(pathStringBuilder.toString());
  }

  /**
   * Joins the given path strings into a single {@link Path}, left to right.
   *
   * @throws IllegalArgumentException when no paths are supplied
   */
  public static Path combinePaths(String... paths) {
    if (paths.length == 0) {
      throw new IllegalArgumentException("Paths cannot be empty!");
    }
    Path path = new Path(paths[0]);
    for (int i = 1; i < paths.length; i++) {
      path = new Path(path, paths[i]);
    }
    return path;
  }

  /**
   * Is an absolute path (ie a slash relative path part)
   * AND a scheme is null AND authority is null.
   */
  public static boolean isAbsoluteAndSchemeAuthorityNull(Path path) {
    return (path.isAbsolute() &&
        path.toUri().getScheme() == null && path.toUri().getAuthority() == null);
  }

  /**
   * Deletes empty directories starting with startPath and all ancestors up to but not including limitPath.
   * @param fs {@link FileSystem} where paths are located.
   * @param limitPath only {@link Path}s that are strict descendants of this path will be deleted.
   * @param startPath first {@link Path} to delete. Afterwards empty ancestors will be deleted.
   * @throws IOException
   */
  public static void deleteEmptyParentDirectories(FileSystem fs, Path limitPath, Path startPath)
      throws IOException {
    // Recurse upward only while startPath is a strict descendant of limitPath AND is empty.
    if (PathUtils.isAncestor(limitPath, startPath) && !PathUtils.getPathWithoutSchemeAndAuthority(limitPath)
        .equals(PathUtils.getPathWithoutSchemeAndAuthority(startPath)) && fs.listStatus(startPath).length == 0) {
      if (!fs.delete(startPath, false)) {
        log.warn("Failed to delete empty directory " + startPath);
      } else {
        log.info("Deleted empty directory " + startPath);
      }
      deleteEmptyParentDirectories(fs, limitPath, startPath.getParent());
    } else {
      // NOTE(review): this message is misleading — the branch is also taken when
      // startPath is non-empty or equals limitPath, not only when it is not an
      // ancestor. Consider rewording.
      log.info(String.format("%s is not ancestor of %s, will not delete %s in this case", limitPath, startPath, startPath));
    }
  }

  /**
   * Compare two paths without scheme and authority (the prefix).
   * @param path1 first path to compare
   * @param path2 second path to compare
   * @return true when the scheme/authority-stripped paths are equal
   */
  public static boolean compareWithoutSchemeAndAuthority(Path path1, Path path2) {
    return PathUtils.getPathWithoutSchemeAndAuthority(path1).equals(getPathWithoutSchemeAndAuthority(path2));
  }
}
| 4,142 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/LdapUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import com.typesafe.config.Config;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Set;
import javax.naming.Context;
import javax.naming.NamingEnumeration;
import javax.naming.NamingException;
import javax.naming.directory.DirContext;
import javax.naming.directory.InitialDirContext;
import javax.naming.directory.SearchControls;
import javax.naming.directory.SearchResult;
import org.apache.gobblin.password.PasswordManager;
import org.apache.log4j.Logger;
/**
 * This is a utility class for accessing Active Directory.
 * Utility factory which returns an instance of {@link LdapUtils}
 */
public class LdapUtils {
  public static final String LDAP_PREFIX = "groupOwnershipService.ldap";
  public static final String LDAP_BASE_DN_KEY = LDAP_PREFIX + ".baseDn";
  public static final String LDAP_HOST_KEY = LDAP_PREFIX + ".host";
  public static final String LDAP_PORT_KEY = LDAP_PREFIX + ".port";
  public static final String LDAP_USER_KEY = LDAP_PREFIX + ".username";
  public static final String LDAP_PASSWORD_KEY = LDAP_PREFIX + ".password";
  public static final String LDAP_USE_SECURE_TRUSTMANAGER = LDAP_PREFIX + ".useSecureTrustManager";

  private static final Logger logger = Logger.getLogger(LdapUtils.class);

  private final String _ldapHost;
  private final String _ldapPort;
  private final String _ldapBaseDN;

  // Creds of headless account for searching LDAP
  private final String _ldapUser;
  private final String _ldapPassword;
  private final boolean _ldapUseSecureTrustManager;

  // LDAP filter templates and attribute names used by the searches below.
  private final String _personSearchFilter = "(&(objectcategory=Person)(samaccountname=%s))";
  private final String _groupSearchFilter = "(&(objectcategory=Group)(cn=%s))";
  private final String _memberSearchFilter = "(&(objectcategory=Person)(memberof=%s))";
  private final String _distinguishedName = "distinguishedName";
  private final String _samAccount = "sAMAccountName";
  private final String _memberOf = "memberof";

  /**
   * Builds an {@link LdapUtils} from the {@code groupOwnershipService.ldap.*} keys of the
   * supplied config. The configured password is decrypted through {@link PasswordManager}
   * before being stored.
   */
  public LdapUtils(Config config) {
    PasswordManager passwordManager = PasswordManager.getInstance(ConfigUtils.configToState(config));
    String password = passwordManager.readPassword(config.getString(LDAP_PASSWORD_KEY));
    _ldapHost = config.getString(LDAP_HOST_KEY);
    _ldapPort = config.getString(LDAP_PORT_KEY);
    _ldapUser = config.getString(LDAP_USER_KEY);
    _ldapPassword = password;
    _ldapBaseDN = config.getString(LDAP_BASE_DN_KEY);
    if(config.hasPath(LDAP_USE_SECURE_TRUSTMANAGER)) {
      _ldapUseSecureTrustManager = config.getBoolean(LDAP_USE_SECURE_TRUSTMANAGER);
    } else {
      _ldapUseSecureTrustManager = false;
    }
  }

  /**
   * Returns DirContext for making LDAP call. The caller is responsible for
   * closing the returned context.
   *
   * @param username The LDAP sAMAccountName
   * @param password The LDAP password
   * @throws NamingException
   */
  private DirContext getDirContext(String username, String password) throws NamingException {
    Hashtable<String, Object> env = new Hashtable<>();
    env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory");
    env.put(Context.PROVIDER_URL, String.format("ldaps://%s:%s", _ldapHost, _ldapPort));
    env.put(Context.SECURITY_AUTHENTICATION, "simple");
    env.put(Context.SECURITY_PROTOCOL, "ssl");
    env.put(Context.SECURITY_PRINCIPAL, username);
    env.put(Context.SECURITY_CREDENTIALS, password);
    // Select the socket factory that matches the configured trust policy.
    if (_ldapUseSecureTrustManager) {
      env.put("java.naming.ldap.factory.socket", TrustManagerSecureSocketFactory.class.getCanonicalName());
    } else {
      env.put("java.naming.ldap.factory.socket", TrustManagerSocketFactory.class.getCanonicalName());
    }
    return new InitialDirContext(env);
  }

  /**
   * Returns LDAP SearchResult for given filter and ctx, searching the whole
   * subtree under the configured base DN.
   *
   * @param searchFilter The LDAP filter
   * @param ctx The DirContext for LDAP
   * @throws NamingException
   */
  private NamingEnumeration<SearchResult> searchLDAP(String searchFilter, DirContext ctx) throws NamingException {
    String baseDN = _ldapBaseDN;
    SearchControls controls = new SearchControls();
    controls.setSearchScope(SearchControls.SUBTREE_SCOPE);
    return ctx.search(baseDN, searchFilter, controls);
  }

  /**
   * Returns String Attribute value
   *
   * @param result The LDAP SearchResult, could be either Person or Group
   * @param attribute Attribute to find from SearchResult
   * @throws NamingException
   */
  private String getAttribute(SearchResult result, String attribute) throws NamingException {
    return result.getAttributes().get(attribute).get().toString();
  }

  /**
   * Resolves the sAMAccountNames of every Person that is a direct member of the
   * named group. The LDAP connection is always released, even when a lookup fails
   * (the previous implementation leaked the {@link DirContext}).
   *
   * @param groupName the common name (cn) of the group to expand
   * @return the set of member account names
   * @throws NamingException on any LDAP failure
   * @throws IllegalStateException when bind credentials are missing
   */
  public Set<String> getGroupMembers(String groupName) throws NamingException {
    // Username and password for binding must exist
    if (_ldapUser == null || _ldapPassword == null) {
      throw new IllegalStateException("Username and password must be provided when initiating the class");
    }
    Set<String> resultSet = new HashSet<>();
    DirContext ctx = getDirContext(_ldapUser, _ldapPassword);
    try {
      logger.info("Searching for groups");
      String searchFilter = String.format(_groupSearchFilter, groupName);
      NamingEnumeration<SearchResult> groupResults = searchLDAP(searchFilter, ctx);
      SearchResult group = groupResults.next();
      String distinguishedName = getAttribute(group, _distinguishedName);
      String membersSearchFilter = String.format(_memberSearchFilter, distinguishedName);
      logger.info("Searching for members");
      NamingEnumeration<SearchResult> members = searchLDAP(membersSearchFilter, ctx);
      while (members.hasMoreElements()) {
        SearchResult member = members.next();
        resultSet.add(getAttribute(member, _samAccount));
      }
    } finally {
      // Fix: release the LDAP connection; it was previously never closed.
      ctx.close();
    }
    logger.info(String.format("Members part of group %s: %s", groupName, resultSet.toString()));
    return resultSet;
  }
}
| 4,143 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/DummyTrustManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
import javax.net.ssl.X509TrustManager;
/**
 * A default trust manager used by {@link: TrustManagerSocketFactory} that accepts
 * every certificate chain without validation.
 *
 * WARNING: this disables TLS certificate verification entirely; it must only be
 * used where that is an explicit, accepted trade-off.
 */
class DummyTrustManager implements X509TrustManager {

  /** Accepts any client certificate chain; never throws. */
  @Override
  public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {
    // Intentionally a no-op: all clients are trusted.
  }

  /** Accepts any server certificate chain; never throws. */
  @Override
  public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {
    // Intentionally a no-op: all servers are trusted.
  }

  /**
   * Returns an empty array rather than {@code null}: the {@link X509TrustManager}
   * contract requires a non-null (possibly empty) result, and returning null can
   * cause NPEs in callers that iterate over the accepted issuers.
   */
  @Override
  public X509Certificate[] getAcceptedIssuers() {
    return new X509Certificate[0];
  }
}
| 4,144 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/NoopCloseable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.Closeable;
/**
 * A {@link Closeable} whose {@link #close()} does nothing.
 *
 * Useful wherever an API demands a {@link Closeable} but there is no underlying
 * resource to release. Prefer the shared {@link #INSTANCE}, since the class is
 * stateless.
 */
public class NoopCloseable implements Closeable {

  /** Shared stateless singleton. Declared final so it cannot be reassigned. */
  public static final NoopCloseable INSTANCE = new NoopCloseable();

  /** No-op: there is no resource to release and no exception is ever thrown. */
  @Override
  public void close() {
    // Do nothing
  }
}
| 4,145 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/ClosableTimerContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import com.codahale.metrics.Timer;
import java.io.Closeable;
import java.io.IOException;
/**
 * A {@link Closeable} adapter around {@code com.codahale.metrics.Timer.Context}.
 *
 * Timer.Context implements AutoCloseable (since the io.dropwizard.metrics 3.x to
 * 4.x upgrade), and this wrapper lets callers keep treating the context as a
 * plain {@link Closeable}.
 */
public class ClosableTimerContext implements Closeable {

  /** The wrapped timer context; closing this object closes the delegate. */
  private final Timer.Context delegate;

  public ClosableTimerContext(final Timer.Context timerContext) {
    this.delegate = timerContext;
  }

  /** Stops the underlying timer context. */
  @Override
  public void close() throws IOException {
    this.delegate.close();
  }
}
| 4,146 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/HostUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.net.InetAddress;
import java.net.UnknownHostException;
/**
 * Helpers for resolving the local host name and building Kerberos-style principals.
 */
public class HostUtils {

  /**
   * Returns the canonical host name of the local machine.
   *
   * @throws RuntimeException when the local host cannot be resolved
   */
  public static String getHostName() {
    try {
      return InetAddress.getLocalHost().getCanonicalHostName();
    } catch (UnknownHostException uhe) {
      throw new RuntimeException("Error determining hostname", uhe);
    }
  }

  /**
   * Builds a principal of the form {@code name/hostname@realm}, using the local
   * canonical host name for the hostname part.
   */
  public static String getPrincipalUsingHostname(String name, String realm) {
    StringBuilder principal = new StringBuilder(name);
    principal.append('/').append(getHostName()).append('@').append(realm);
    return principal.toString();
  }
}
| 4,147 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/JvmUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import org.apache.commons.lang.StringUtils;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.util.List;
/**
 * Helpers for inspecting and formatting JVM argument strings.
 */
public class JvmUtils {
  private static final Joiner JOINER = Joiner.on(" ").skipNulls();
  private static final PortUtils PORT_UTILS = new PortUtils();

  private JvmUtils() {
  }

  /**
   * Gets the input arguments passed to the JVM.
   * @return The input arguments, space-separated and prefixed with a label.
   */
  public static String getJvmInputArguments() {
    List<String> jvmArgs = ManagementFactory.getRuntimeMXBean().getInputArguments();
    return String.format("JVM Input Arguments: %s", JOINER.join(jvmArgs));
  }

  /**
   * Formats the specified jvm arguments such that any port tokens are replaced
   * with concrete values.
   *
   * @param jvmArguments the raw jvm argument string, possibly absent
   * @return The formatted jvm arguments, or the empty string when absent.
   */
  public static String formatJvmArguments(Optional<String> jvmArguments) {
    return jvmArguments.isPresent()
        ? PORT_UTILS.replacePortTokens(jvmArguments.get())
        : StringUtils.EMPTY;
  }
}
| 4,148 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/AzkabanTags.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.hadoop.conf.Configuration;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
/**
 * Utility class for collecting metadata specific to a Azkaban runtime environment.
 */
@Slf4j
public class AzkabanTags {

  /** Maps an Azkaban configuration key to the metric-tag name it is reported under. */
  public static final ImmutableMap<String, String> PROPERTIES_TO_TAGS_MAP = new ImmutableMap.Builder<String, String>()
      .put(ConfigurationKeys.AZKABAN_PROJECT_NAME, "azkabanProjectName")
      .put(ConfigurationKeys.AZKABAN_FLOW_ID, "azkabanFlowId")
      .put(ConfigurationKeys.AZKABAN_JOB_ID, "azkabanJobId")
      .put(ConfigurationKeys.AZKABAN_EXEC_ID, "azkabanExecId")
      .put(ConfigurationKeys.AZKABAN_URL, "azkabanURL")
      .put(ConfigurationKeys.AZKABAN_FLOW_URL, "azkabanFlowURL")
      .put(ConfigurationKeys.AZKABAN_JOB_URL, "azkabanJobURL")
      .put(ConfigurationKeys.AZKABAN_JOB_EXEC_URL, "azkabanJobExecURL")
      .build();

  /**
   * Uses {@link #getAzkabanTags(Configuration)} with default Hadoop {@link Configuration}
   */
  public static Map<String, String> getAzkabanTags() {
    return getAzkabanTags(new Configuration());
  }

  /**
   * Gets all useful Azkaban runtime properties required by metrics as a {@link Map}.
   *
   * Every key of {@link #PROPERTIES_TO_TAGS_MAP} is looked up in the given
   * {@link Configuration}; non-blank values are emitted under the mapped tag name,
   * and a warning is logged for each missing/blank property.
   *
   * @param conf Hadoop Configuration that contains the properties.
   * @return a {@link Map} from tag name (value side of {@link #PROPERTIES_TO_TAGS_MAP})
   *         to the property value from {@link Configuration}
   */
  public static Map<String, String> getAzkabanTags(Configuration conf) {
    Map<String, String> tagMap = Maps.newHashMap();
    for (Map.Entry<String, String> propToTag : PROPERTIES_TO_TAGS_MAP.entrySet()) {
      String propValue = conf.get(propToTag.getKey());
      if (StringUtils.isNotBlank(propValue)) {
        tagMap.put(propToTag.getValue(), propValue);
      } else {
        log.warn(String.format("No config value found for config %s. Metrics will not have tag %s", propToTag.getKey(),
            propToTag.getValue()));
      }
    }
    return tagMap;
  }
}
| 4,149 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/TimeRangeChecker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.List;
import java.util.Map;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import org.apache.gobblin.configuration.ConfigurationKeys;
import lombok.AllArgsConstructor;
import org.joda.time.DateTime;
import org.joda.time.DateTimeConstants;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
/**
 * Utility class to check if a given time is in a pre-defined range. Particularly useful for Job-Schedulers such as
 * Azkaban that don't provide a day level scheduling granularity.
 */
public class TimeRangeChecker {

  private static final DateTimeZone DATE_TIME_ZONE = DateTimeZone.forID(ConfigurationKeys.PST_TIMEZONE_NAME);
  private static final String HOUR_MINUTE_FORMAT = "HH-mm";
  // Consistency fix: derive the formatter from HOUR_MINUTE_FORMAT instead of a
  // duplicated "HH-mm" literal, so the pattern is defined in exactly one place.
  private static final DateTimeFormatter HOUR_MINUTE_FORMATTER = DateTimeFormat.forPattern(HOUR_MINUTE_FORMAT);

  private static final Map<Integer, String> DAYS_OF_WEEK = new ImmutableMap.Builder<Integer, String>()
      .put(DateTimeConstants.MONDAY, "MONDAY")
      .put(DateTimeConstants.TUESDAY, "TUESDAY")
      .put(DateTimeConstants.WEDNESDAY, "WEDNESDAY")
      .put(DateTimeConstants.THURSDAY, "THURSDAY")
      .put(DateTimeConstants.FRIDAY, "FRIDAY")
      .put(DateTimeConstants.SATURDAY, "SATURDAY")
      .put(DateTimeConstants.SUNDAY, "SUNDAY").build();

  /**
   * Checks if a specified time is on a day that is specified the a given {@link List} of acceptable days, and that the
   * hours + minutes of the specified time fall into a range defined by startTimeStr and endTimeStr.
   *
   * @param days is a {@link List} of days, if the specified {@link DateTime} does not have a day that falls is in this
   * {@link List} then this method will return false.
   * @param startTimeStr defines the start range that the currentTime can fall into. This {@link String} should be of
   * the pattern defined by {@link #HOUR_MINUTE_FORMAT}.
   * @param endTimeStr defines the start range that the currentTime can fall into. This {@link String} should be of
   * the pattern defined by {@link #HOUR_MINUTE_FORMAT}.
   * @param currentTime is a {@link DateTime} for which this method will check if it is in the given {@link List} of
   * days and falls into the time range defined by startTimeStr and endTimeStr.
   *
   * @return true if the given time is in the defined range, false otherwise.
   */
  public static boolean isTimeInRange(List<String> days, String startTimeStr, String endTimeStr, DateTime currentTime) {
    // Day check first: bail out early if currentTime's weekday is not allowed.
    if (!Iterables.any(days, new AreDaysEqual(DAYS_OF_WEEK.get(currentTime.getDayOfWeek())))) {
      return false;
    }
    DateTime startTime = parseHourMinute(startTimeStr, "startTimeStr");
    DateTime endTime = parseHourMinute(endTimeStr, "endTimeStr");
    // Anchor both range boundaries on currentTime's date so only the time-of-day matters.
    startTime = startTime.withDate(currentTime.getYear(), currentTime.getMonthOfYear(), currentTime.getDayOfMonth());
    endTime = endTime.withDate(currentTime.getYear(), currentTime.getMonthOfYear(), currentTime.getDayOfMonth());
    Interval interval = new Interval(startTime.getMillis(), endTime.getMillis(), DATE_TIME_ZONE);
    return interval.contains(currentTime.getMillis());
  }

  /**
   * Parses an "HH-mm" time string, translating parse failures into the same
   * IllegalArgumentException message the two previously-duplicated catch blocks produced.
   *
   * @param timeStr the time string to parse
   * @param parameterName the caller-facing parameter name used in the error message
   */
  private static DateTime parseHourMinute(String timeStr, String parameterName) {
    try {
      return HOUR_MINUTE_FORMATTER.withZone(DATE_TIME_ZONE).parseDateTime(timeStr);
    } catch (IllegalArgumentException e) {
      throw new IllegalArgumentException(parameterName + " format is invalid, must be of format " + HOUR_MINUTE_FORMAT, e);
    }
  }

  /** Case-insensitive equality predicate against a fixed day name. */
  @AllArgsConstructor
  private static class AreDaysEqual implements Predicate<String> {
    private String day;

    @Override
    public boolean apply(String day) {
      return this.day.equalsIgnoreCase(day);
    }
  }
}
| 4,150 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/DatePartitionType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.LinkedHashMap;
import java.util.Map;
import org.joda.time.DateTime;
import org.joda.time.DateTimeFieldType;
import org.joda.time.chrono.ISOChronology;
/**
 * Temporal granularity types for writing ({@link org.apache.gobblin.writer.partitioner.TimeBasedWriterPartitioner}) and reading
 * ({@link org.apache.gobblin.source.DatePartitionedAvroFileSource}) date partitioned data.
 *
 * @author Lorand Bendig
 *
 */
public enum DatePartitionType {
  YEAR("yyyy", DateTimeFieldType.year()),
  MONTH("yyyy/MM", DateTimeFieldType.monthOfYear()),
  DAY("yyyy/MM/dd", DateTimeFieldType.dayOfMonth()),
  HOUR("yyyy/MM/dd/HH", DateTimeFieldType.hourOfDay()),
  MINUTE("yyyy/MM/dd/HH/mm", DateTimeFieldType.minuteOfHour());

  // Maps a joda-time pattern letter to the temporal unit it denotes.
  // NOTE: insertion order is significant — entries run from the smallest unit
  // (seconds) up to the largest (years), and getLowestIntervalUnit() returns the
  // FIRST match, i.e. the finest-grained unit found in a pattern. That ordering
  // dependency is why this must be a LinkedHashMap.
  private static final Map<String, DateTimeFieldType> lookupByPattern = new LinkedHashMap<>();
  static {
    lookupByPattern.put("s", DateTimeFieldType.secondOfMinute());
    lookupByPattern.put("m", DateTimeFieldType.minuteOfHour());
    lookupByPattern.put("h", DateTimeFieldType.hourOfDay());
    lookupByPattern.put("H", DateTimeFieldType.hourOfDay());
    lookupByPattern.put("K", DateTimeFieldType.hourOfDay());
    lookupByPattern.put("d", DateTimeFieldType.dayOfMonth());
    lookupByPattern.put("D", DateTimeFieldType.dayOfMonth());
    lookupByPattern.put("e", DateTimeFieldType.dayOfMonth());
    lookupByPattern.put("w", DateTimeFieldType.weekOfWeekyear());
    lookupByPattern.put("M", DateTimeFieldType.monthOfYear());
    lookupByPattern.put("y", DateTimeFieldType.year());
    lookupByPattern.put("Y", DateTimeFieldType.year());
  }

  // The joda field this granularity corresponds to (e.g. dayOfMonth for DAY).
  private DateTimeFieldType dateTimeField;
  // The directory-layout pattern used for this granularity (e.g. "yyyy/MM/dd").
  private String dateTimePattern;

  private DatePartitionType(String dateTimePattern, DateTimeFieldType dateTimeField) {
    this.dateTimeField = dateTimeField;
    this.dateTimePattern = dateTimePattern;
  }

  /**
   * @param pattern full partitioning pattern
   * @return a DateTimeFieldType corresponding to the smallest temporal unit in the pattern.
   *         E.g for yyyy/MM/dd {@link DateTimeFieldType#dayOfMonth()}. Returns
   *         {@code null} when the pattern contains none of the recognized letters.
   *
   * NOTE(review): this uses String.contains on the raw pattern, so a recognized
   * letter inside a quoted literal (e.g. 'daily') would also match — confirm
   * callers never pass patterns containing quoted text.
   */
  public static DateTimeFieldType getLowestIntervalUnit(String pattern) {
    DateTimeFieldType intervalUnit = null;
    for (Map.Entry<String, DateTimeFieldType> pat : lookupByPattern.entrySet()) {
      if (pattern.contains(pat.getKey())) {
        intervalUnit = pat.getValue();
        break;
      }
    }
    return intervalUnit;
  }

  /**
   * Get the number of milliseconds associated with a partition type. Eg
   * getUnitMilliseconds() of DatePartitionType.MINUTE = 60,000.
   */
  public long getUnitMilliseconds() {
    return dateTimeField.getDurationType().getField(ISOChronology.getInstance()).getUnitMillis();
  }

  public DateTimeFieldType getDateTimeFieldType() {
    return dateTimeField;
  }

  /** Extracts this granularity's field value (e.g. the day-of-month) from the given instant. */
  public int getField(DateTime dateTime) {
    return dateTime.get(this.dateTimeField);
  }

  public String getDateTimePattern() {
    return dateTimePattern;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.IOException;
import java.net.URI;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.NonNull;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.token.Token;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
/**
* A cache for storing a mapping between Hadoop users and user {@link FileSystem} objects.
*
* <p>
* This classes uses Guava's {@link Cache} for storing the user to {@link FileSystem} mapping, and creates the
* {@link FileSystem}s using the {@link ProxiedFileSystemUtils} class.
* </p>
*
* @see Cache
* @see ProxiedFileSystemUtils
*/
public class ProxiedFileSystemCache {
  // Separates the URI, user name, and optional decorator token in cache keys.
  private static final String KEY_SEPARATOR = ";";
  // Appended to the cache key when the reference FS is rate controlled, so rate-controlled and
  // plain instances for the same (URI, user) pair get distinct cache entries.
  private static final String RATE_CONTROLLED_TOKEN = "RateControlled";
  private static final int DEFAULT_MAX_CACHE_SIZE = 1000;
  // Process-wide cache from "<uri>;<user>[;RateControlled]" to the proxied FileSystem.
  // Guava evicts entries once DEFAULT_MAX_CACHE_SIZE is exceeded.
  private static final Cache<String, FileSystem> USER_NAME_TO_FILESYSTEM_CACHE =
      CacheBuilder.newBuilder().maximumSize(DEFAULT_MAX_CACHE_SIZE).build();
  /**
   * Gets a {@link FileSystem} that can perform any operations allowed by the specified userNameToProxyAs.
   *
   * @param userNameToProxyAs The name of the user the super user should proxy as
   * @param properties {@link java.util.Properties} containing initialization properties.
   * @param fsURI The {@link URI} for the {@link FileSystem} that should be created.
   * @return a {@link FileSystem} that can execute commands on behalf of the specified userNameToProxyAs
   * @throws IOException
   * @deprecated use {@link #fromProperties}
   */
  @Deprecated
  public static FileSystem getProxiedFileSystem(@NonNull final String userNameToProxyAs, Properties properties,
      URI fsURI) throws IOException {
    return getProxiedFileSystem(userNameToProxyAs, properties, fsURI, new Configuration());
  }
  /**
   * Gets a {@link FileSystem} that can perform any operations allowed by the specified userNameToProxyAs.
   *
   * @param userNameToProxyAs The name of the user the super user should proxy as
   * @param properties {@link java.util.Properties} containing initialization properties.
   * @param conf The {@link Configuration} for the {@link FileSystem} that should be created.
   * @return a {@link FileSystem} that can execute commands on behalf of the specified userNameToProxyAs
   * @throws IOException
   * @deprecated use {@link #fromProperties}
   */
  @Deprecated
  public static FileSystem getProxiedFileSystem(@NonNull final String userNameToProxyAs, Properties properties,
      Configuration conf) throws IOException {
    return getProxiedFileSystem(userNameToProxyAs, properties, FileSystem.getDefaultUri(conf), conf);
  }
  /**
   * Gets a {@link FileSystem} that can perform any operations allowed by the specified userNameToProxyAs.
   *
   * @param userNameToProxyAs The name of the user the super user should proxy as
   * @param properties {@link java.util.Properties} containing initialization properties.
   * @param fsURI The {@link URI} for the {@link FileSystem} that should be created.
   * @param configuration The {@link Configuration} for the {@link FileSystem} that should be created.
   * @return a {@link FileSystem} that can execute commands on behalf of the specified userNameToProxyAs
   * @throws IOException
   * @deprecated Use {@link #fromProperties}
   */
  @Deprecated
  public static FileSystem getProxiedFileSystem(@NonNull final String userNameToProxyAs, final Properties properties,
      final URI fsURI, final Configuration configuration) throws IOException {
    return getProxiedFileSystem(userNameToProxyAs, properties, fsURI, configuration, null);
  }
  /**
   * Gets a {@link FileSystem} that can perform any operations allowed by the specified userNameToProxyAs.
   *
   * @param userNameToProxyAs The name of the user the super user should proxy as
   * @param properties {@link java.util.Properties} containing initialization properties.
   * @param fsURI The {@link URI} for the {@link FileSystem} that should be created.
   * @param configuration The {@link Configuration} for the {@link FileSystem} that should be created.
   * @param referenceFS reference {@link FileSystem}. Used to replicate certain decorators of the reference FS:
   *          {@link RateControlledFileSystem}.
   * @return a {@link FileSystem} that can execute commands on behalf of the specified userNameToProxyAs
   * @throws IOException
   */
  // Lombok generates the public ProxiedFileSystemCache.fromProperties() builder from this
  // private method; the builder's setter names mirror the parameter names below.
  @Builder(builderClassName = "ProxiedFileSystemFromProperties", builderMethodName = "fromProperties")
  private static FileSystem getProxiedFileSystem(@NonNull String userNameToProxyAs, Properties properties, URI fsURI,
      Configuration configuration, FileSystem referenceFS) throws IOException {
    Preconditions.checkNotNull(userNameToProxyAs, "Must provide a user name to proxy as.");
    Preconditions.checkNotNull(properties, "Properties is a mandatory field for proxiedFileSystem generation.");
    // fsURI and configuration are optional; fall back to the reference FS / defaults.
    URI actualURI = resolveUri(fsURI, configuration, referenceFS);
    Configuration actualConfiguration = resolveConfiguration(configuration, referenceFS);
    try {
      // Cache miss triggers the Callable, which performs the actual (expensive) proxying.
      return USER_NAME_TO_FILESYSTEM_CACHE.get(getFileSystemKey(actualURI, userNameToProxyAs, referenceFS),
          new CreateProxiedFileSystemFromProperties(userNameToProxyAs, properties, actualURI, actualConfiguration,
              referenceFS));
    } catch (ExecutionException ee) {
      throw new IOException("Failed to get proxied file system for user " + userNameToProxyAs, ee);
    }
  }
  /**
   * Cached version of {@link ProxiedFileSystemUtils#createProxiedFileSystemUsingKeytab(State, URI, Configuration)}.
   * @deprecated use {@link #fromKeytab}.
   */
  @Deprecated
  public static FileSystem getProxiedFileSystemUsingKeytab(State state, URI fsURI, Configuration conf)
      throws ExecutionException {
    Preconditions.checkArgument(state.contains(ConfigurationKeys.FS_PROXY_AS_USER_NAME));
    Preconditions.checkArgument(state.contains(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS));
    Preconditions.checkArgument(state.contains(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION));
    return getProxiedFileSystemUsingKeytab(state.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME),
        state.getProp(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS),
        new Path(state.getProp(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION)), fsURI, conf);
  }
  /**
   * Cached version of {@link ProxiedFileSystemUtils#createProxiedFileSystemUsingKeytab(String, String, Path, URI, Configuration)}.
   * @deprecated use {@link #fromKeytab}.
   */
  @Deprecated
  public static FileSystem getProxiedFileSystemUsingKeytab(@NonNull final String userNameToProxyAs,
      final String superUserName, final Path superUserKeytabLocation, final URI fsURI, final Configuration conf)
      throws ExecutionException {
    try {
      return getProxiedFileSystemUsingKeytab(userNameToProxyAs, superUserName, superUserKeytabLocation, fsURI, conf,
          null);
    } catch (IOException ioe) {
      // Preserve this method's historical throws-clause by wrapping IOException.
      throw new ExecutionException(ioe);
    }
  }
  /**
   * Cached version of {@link ProxiedFileSystemUtils#createProxiedFileSystemUsingKeytab(String, String, Path, URI, Configuration)}.
   */
  // Exposed publicly through the Lombok-generated fromKeytab() builder.
  @Builder(builderClassName = "ProxiedFileSystemFromKeytab", builderMethodName = "fromKeytab")
  private static FileSystem getProxiedFileSystemUsingKeytab(@NonNull final String userNameToProxyAs,
      final String superUserName, final Path superUserKeytabLocation, final URI fsURI, final Configuration conf,
      FileSystem referenceFS) throws IOException, ExecutionException {
    Preconditions.checkNotNull(userNameToProxyAs, "Must provide a user name to proxy as.");
    Preconditions.checkNotNull(superUserName, "Must provide a super user name.");
    Preconditions.checkNotNull(superUserKeytabLocation, "Must provide a keytab location.");
    URI actualURI = resolveUri(fsURI, conf, referenceFS);
    Configuration actualConfiguration = resolveConfiguration(conf, referenceFS);
    return USER_NAME_TO_FILESYSTEM_CACHE.get(getFileSystemKey(actualURI, userNameToProxyAs, referenceFS),
        new CreateProxiedFileSystemFromKeytab(userNameToProxyAs, superUserName, superUserKeytabLocation, actualURI,
            actualConfiguration, referenceFS));
  }
  /**
   * Cached version of {@link ProxiedFileSystemUtils#createProxiedFileSystemUsingToken(String, Token, URI, Configuration)}.
   * @deprecated use {@link #fromToken}.
   */
  @Deprecated
  public static FileSystem getProxiedFileSystemUsingToken(@NonNull final String userNameToProxyAs,
      final Token<?> userNameToken, final URI fsURI, final Configuration conf) throws ExecutionException {
    try {
      return getProxiedFileSystemUsingToken(userNameToProxyAs, userNameToken, fsURI, conf, null);
    } catch (IOException ioe) {
      throw new ExecutionException(ioe);
    }
  }
  /**
   * Cached version of {@link ProxiedFileSystemUtils#createProxiedFileSystemUsingToken(String, Token, URI, Configuration)}.
   * Deprecated in favor of {@link #getProxiedFileSystemUsingTokens}
   */
  @Deprecated
  @Builder(builderClassName = "ProxiedFileSystemFromToken", builderMethodName = "fromToken")
  private static FileSystem getProxiedFileSystemUsingToken(@NonNull String userNameToProxyAs, Token<?> userNameToken,
      URI fsURI, Configuration conf, FileSystem referenceFS) throws IOException, ExecutionException {
    Preconditions.checkNotNull(userNameToProxyAs, "Must provide a user name to proxy as.");
    Preconditions.checkNotNull(userNameToken, "Must provide token for user to proxy.");
    URI actualURI = resolveUri(fsURI, conf, referenceFS);
    Configuration actualConfiguration = resolveConfiguration(conf, referenceFS);
    // Single-token variant delegates to the same Callable used by the multi-token path.
    return USER_NAME_TO_FILESYSTEM_CACHE.get(getFileSystemKey(actualURI, userNameToProxyAs, referenceFS),
        new CreateProxiedFileSystemFromToken(userNameToProxyAs, Collections.singletonList(userNameToken), actualURI, actualConfiguration,
            referenceFS));
  }
  /**
   * Cached creation of a proxied {@link FileSystem} carrying multiple delegation {@link Token}s.
   * Exposed publicly through the Lombok-generated {@code fromTokens()} builder.
   */
  @Builder(builderClassName = "ProxiedFileSystemFromTokens", builderMethodName = "fromTokens")
  private static FileSystem getProxiedFileSystemUsingTokens(@NonNull String userNameToProxyAs, List<Token<?>> userNameTokens,
      URI fsURI, Configuration conf, FileSystem referenceFS) throws IOException, ExecutionException {
    Preconditions.checkNotNull(userNameToProxyAs, "Must provide a user name to proxy as.");
    Preconditions.checkNotNull(userNameTokens, "Must provide token for user to proxy.");
    URI actualURI = resolveUri(fsURI, conf, referenceFS);
    Configuration actualConfiguration = resolveConfiguration(conf, referenceFS);
    return USER_NAME_TO_FILESYSTEM_CACHE.get(getFileSystemKey(actualURI, userNameToProxyAs, referenceFS),
        new CreateProxiedFileSystemFromToken(userNameToProxyAs, userNameTokens, actualURI, actualConfiguration,
            referenceFS));
  }
  /** Cache-miss loader: creates a proxied FS from {@link Properties} (see {@link ProxiedFileSystemUtils}). */
  @AllArgsConstructor
  private static class CreateProxiedFileSystemFromProperties implements Callable<FileSystem> {
    @NonNull
    private final String userNameToProxyAs;
    @NonNull
    private final Properties properties;
    @NonNull
    private final URI uri;
    @NonNull
    private final Configuration configuration;
    // Optional; when present, its decorators (rate control) are replicated onto the new FS.
    private final FileSystem referenceFS;
    @Override
    public FileSystem call() throws Exception {
      FileSystem fs = ProxiedFileSystemUtils.createProxiedFileSystem(this.userNameToProxyAs, this.properties, this.uri,
          this.configuration);
      if (this.referenceFS != null) {
        return decorateFilesystemFromReferenceFS(fs, this.referenceFS);
      }
      return fs;
    }
  }
  /** Cache-miss loader: creates a proxied FS by logging in the super user from a keytab. */
  @AllArgsConstructor
  private static class CreateProxiedFileSystemFromKeytab implements Callable<FileSystem> {
    @NonNull
    private final String userNameToProxyAs;
    @NonNull
    private final String superUser;
    @NonNull
    private final Path keytabLocation;
    @NonNull
    private final URI uri;
    @NonNull
    private final Configuration configuration;
    private final FileSystem referenceFS;
    @Override
    public FileSystem call() throws Exception {
      FileSystem fs = ProxiedFileSystemUtils.createProxiedFileSystemUsingKeytab(this.userNameToProxyAs, this.superUser,
          this.keytabLocation, this.uri, this.configuration);
      if (this.referenceFS != null) {
        return decorateFilesystemFromReferenceFS(fs, this.referenceFS);
      }
      return fs;
    }
  }
  /** Cache-miss loader: creates a proxied FS from a list of delegation tokens. */
  @AllArgsConstructor
  private static class CreateProxiedFileSystemFromToken implements Callable<FileSystem> {
    @NonNull
    private final String userNameToProxyAs;
    @NonNull
    private final List<Token<?>> userNameTokens;
    @NonNull
    private final URI uri;
    @NonNull
    private final Configuration configuration;
    private final FileSystem referenceFS;
    @Override
    public FileSystem call() throws Exception {
      FileSystem fs = ProxiedFileSystemUtils.createProxiedFileSystemUsingToken(this.userNameToProxyAs,
          this.userNameTokens, this.uri, this.configuration);
      if (this.referenceFS != null) {
        return decorateFilesystemFromReferenceFS(fs, this.referenceFS);
      }
      return fs;
    }
  }
  /**
   * Resolves the FS URI from the first available source, in priority order: the explicit
   * {@code uri}, then the reference {@link FileSystem}, then the configuration's default URI.
   */
  private static URI resolveUri(URI uri, Configuration configuration, FileSystem fileSystem) throws IOException {
    if (uri != null) {
      return uri;
    }
    if (fileSystem != null) {
      return fileSystem.getUri();
    }
    if (configuration != null) {
      return FileSystem.getDefaultUri(configuration);
    }
    throw new IOException("FileSystem URI could not be determined from available inputs.");
  }
  /**
   * Resolves the {@link Configuration} to use: the explicit one if given, otherwise the
   * reference {@link FileSystem}'s configuration.
   */
  private static Configuration resolveConfiguration(Configuration configuration, FileSystem fileSystem)
      throws IOException {
    if (configuration != null) {
      return configuration;
    }
    if (fileSystem != null) {
      return fileSystem.getConf();
    }
    throw new IOException("FileSystem configuration could not be determined from available inputs.");
  }
  /**
   * Builds the cache key {@code "<uri>;<user>"}, with {@code ";RateControlled"} appended when the
   * reference FS is rate controlled, so decorated and undecorated instances don't collide.
   */
  private static String getFileSystemKey(URI uri, String user, FileSystem referenceFS) {
    StringBuilder keyBuilder = new StringBuilder();
    keyBuilder.append(uri.toString());
    keyBuilder.append(KEY_SEPARATOR);
    keyBuilder.append(user);
    if (referenceFS != null && RateControlledFileSystem.getRateIfRateControlled(referenceFS).isPresent()) {
      keyBuilder.append(KEY_SEPARATOR);
      keyBuilder.append(RATE_CONTROLLED_TOKEN);
    }
    return keyBuilder.toString();
  }
  /**
   * If the reference FS is rate controlled and the newly created FS is not, wraps the new FS in a
   * {@link RateControlledFileSystem} with the same rate; otherwise returns the new FS unchanged.
   */
  private static FileSystem decorateFilesystemFromReferenceFS(FileSystem newFS, FileSystem referenceFS) {
    FileSystem decoratedFs = newFS;
    Optional<Long> decoratedFSRateOpt = RateControlledFileSystem.getRateIfRateControlled(decoratedFs);
    if (!decoratedFSRateOpt.isPresent()) {
      Optional<Long> referenceRateOpt = RateControlledFileSystem.getRateIfRateControlled(referenceFS);
      if (referenceRateOpt.isPresent()) {
        decoratedFs = new RateControlledFileSystem(decoratedFs, referenceRateOpt.get());
      }
    }
    return decoratedFs;
  }
}
| 4,152 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import com.google.common.base.Preconditions;
import com.google.common.io.Closer;
import lombok.AllArgsConstructor;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
/**
* Utility class for creating {@link FileSystem} objects while proxied as another user. This class requires access to a
* user with secure impersonation priveleges. The {@link FileSystem} objects returned will have full permissions to
* access any operations on behalf of the specified user.
*
* <p>
* As a user, use methods in {@link org.apache.gobblin.util.ProxiedFileSystemCache} to generate the proxied file systems.
* </p>
*
* @see <a href="http://hadoop.apache.org/docs/r1.2.1/Secure_Impersonation.html">Secure Impersonation</a>,
* <a href="https://hadoop.apache.org/docs/r1.2.1/api/org/apache/hadoop/security/UserGroupInformation.html">UserGroupInformation</a>
*
* TODO figure out the proper generic type for the {@link Token} objects.
*/
@Slf4j
public class ProxiedFileSystemUtils {
  // Property naming which AuthType to use when creating a proxied FS from Properties.
  public static final String AUTH_TYPE_KEY = "gobblin.utility.user.proxy.auth.type";
  // Property pointing at the local sequence file holding per-user delegation tokens.
  public static final String AUTH_TOKEN_PATH = "gobblin.utility.proxy.auth.token.path";
  // Two authentication types for Hadoop Security, through TOKEN or KEYTAB.
  public enum AuthType {
    TOKEN,
    KEYTAB;
  }
  /**
   * Creates a {@link FileSystem} that can perform any operations allowed by the specified userNameToProxyAs.
   *
   * @param userNameToProxyAs The name of the user the super user should proxy as
   * @param properties {@link java.util.Properties} containing initialization properties.
   * @param fsURI The {@link URI} for the {@link FileSystem} that should be created.
   * @param conf The {@link Configuration} for the {@link FileSystem} that should be created.
   * @return a {@link FileSystem} that can execute commands on behalf of the specified userNameToProxyAs
   * @throws IOException if the required properties are missing, the token is absent, or proxying fails
   */
  static FileSystem createProxiedFileSystem(@NonNull final String userNameToProxyAs, Properties properties, URI fsURI,
      Configuration conf) throws IOException {
    Preconditions.checkArgument(properties.containsKey(AUTH_TYPE_KEY));
    switch (AuthType.valueOf(properties.getProperty(AUTH_TYPE_KEY))) {
      case TOKEN:
        // Token-based auth: load the proxy user's delegation token(s) from a local sequence file.
        Preconditions.checkArgument(properties.containsKey(AUTH_TOKEN_PATH));
        Path tokenPath = new Path(properties.getProperty(AUTH_TOKEN_PATH));
        List<Token<?>> proxyTokens = getTokenFromSeqFile(userNameToProxyAs, tokenPath);
        if (proxyTokens.size() > 0) {
          try {
            return createProxiedFileSystemUsingToken(userNameToProxyAs, proxyTokens, fsURI, conf);
          } catch (InterruptedException e) {
            throw new IOException("Failed to proxy as user " + userNameToProxyAs, e);
          }
        }
        // No token found for this user: fail rather than silently falling back.
        throw new IOException("No delegation token found for proxy user " + userNameToProxyAs);
      case KEYTAB:
        // Keytab-based auth: log in the super user via Kerberos, then proxy.
        Preconditions.checkArgument(properties.containsKey(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS)
            && properties.containsKey(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION));
        String superUserName = properties.getProperty(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS);
        Path keytabPath = new Path(properties.getProperty(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION));
        try {
          return createProxiedFileSystemUsingKeytab(userNameToProxyAs, superUserName, keytabPath, fsURI, conf);
        } catch (InterruptedException e) {
          throw new IOException("Failed to proxy as user " + userNameToProxyAs, e);
        }
      default:
        throw new IOException("User proxy auth type " + properties.getProperty(AUTH_TYPE_KEY) + " not recognized.");
    }
  }
  /**
   * Creates a {@link FileSystem} that can perform any operations allowed by the specified userNameToProxyAs. This
   * method first logs in as the specified super user. If Hadoop security is enabled, then logging in entails
   * authenticating via Kerberos. So logging in requires contacting the Kerberos infrastructure. A proxy user is then
   * created on behalf of the logged in user, and a {@link FileSystem} object is created using the proxy user's UGI.
   *
   * @param userNameToProxyAs The name of the user the super user should proxy as
   * @param superUserName The name of the super user with secure impersonation priveleges
   * @param superUserKeytabLocation The location of the keytab file for the super user
   * @param fsURI The {@link URI} for the {@link FileSystem} that should be created
   * @param conf The {@link Configuration} for the {@link FileSystem} that should be created
   *
   * @return a {@link FileSystem} that can execute commands on behalf of the specified userNameToProxyAs
   */
  static FileSystem createProxiedFileSystemUsingKeytab(String userNameToProxyAs, String superUserName,
      Path superUserKeytabLocation, URI fsURI, Configuration conf) throws IOException, InterruptedException {
    return loginAndProxyAsUser(userNameToProxyAs, superUserName, superUserKeytabLocation)
        .doAs(new ProxiedFileSystem(fsURI, conf));
  }
  /**
   * Create a {@link FileSystem} that can perform any operations allowed the by the specified userNameToProxyAs. This
   * method uses the {@link #createProxiedFileSystemUsingKeytab(String, String, Path, URI, Configuration)} object to perform
   * all its work. A specific set of configuration keys are required to be set in the given {@link State} object:
   *
   * <ul>
   * <li>{@link ConfigurationKeys#FS_PROXY_AS_USER_NAME} specifies the user name to proxy as</li>
   * <li>{@link ConfigurationKeys#SUPER_USER_NAME_TO_PROXY_AS_OTHERS} specifies the name of the user with secure
   * impersonation priveleges</li>
   * <li>{@link ConfigurationKeys#SUPER_USER_KEY_TAB_LOCATION} specifies the location of the super user's keytab file</li>
   * <ul>
   *
   * @param state The {@link State} object that contains all the necessary key, value pairs for
   * {@link #createProxiedFileSystemUsingKeytab(String, String, Path, URI, Configuration)}
   * @param fsURI The {@link URI} for the {@link FileSystem} that should be created
   * @param conf The {@link Configuration} for the {@link FileSystem} that should be created
   *
   * @return a {@link FileSystem} that can execute commands on behalf of the specified userNameToProxyAs
   */
  static FileSystem createProxiedFileSystemUsingKeytab(State state, URI fsURI, Configuration conf)
      throws IOException, InterruptedException {
    Preconditions.checkArgument(state.contains(ConfigurationKeys.FS_PROXY_AS_USER_NAME));
    Preconditions.checkArgument(state.contains(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS));
    Preconditions.checkArgument(state.contains(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION));
    return createProxiedFileSystemUsingKeytab(state.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME),
        state.getProp(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS),
        new Path(state.getProp(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION)), fsURI, conf);
  }
  /**
   * Create a {@link FileSystem} that can perform any operations allowed the by the specified userNameToProxyAs. The
   * method first proxies as userNameToProxyAs, and then adds the specified {@link Token} to the given
   * {@link UserGroupInformation} object. It then uses the {@link UserGroupInformation#doAs(PrivilegedExceptionAction)}
   * method to create a {@link FileSystem}.
   *
   * @param userNameToProxyAs The name of the user the super user should proxy as
   * @param userNameTokens List of {@link Token}s to add to the proxied user's {@link UserGroupInformation}.
   * @param fsURI The {@link URI} for the {@link FileSystem} that should be created
   * @param conf The {@link Configuration} for the {@link FileSystem} that should be created
   *
   * @return a {@link FileSystem} that can execute commands on behalf of the specified userNameToProxyAs
   */
  static FileSystem createProxiedFileSystemUsingToken(@NonNull String userNameToProxyAs,
      @NonNull List<Token<?>> userNameTokens, URI fsURI, Configuration conf) throws IOException, InterruptedException {
    UserGroupInformation ugi =
        UserGroupInformation.createProxyUser(userNameToProxyAs, UserGroupInformation.getLoginUser());
    for (Token<?> userNameToken : userNameTokens) {
      ugi.addToken(userNameToken);
    }
    return ugi.doAs(new ProxiedFileSystem(fsURI, conf));
  }
  /**
   * Returns true if superUserName can proxy as userNameToProxyAs using the specified superUserKeytabLocation, false
   * otherwise.
   */
  public static boolean canProxyAs(String userNameToProxyAs, String superUserName, Path superUserKeytabLocation) {
    try {
      loginAndProxyAsUser(userNameToProxyAs, superUserName, superUserKeytabLocation);
    } catch (IOException e) {
      // Any login/proxy failure is interpreted as "cannot proxy"; the cause is not surfaced.
      return false;
    }
    return true;
  }
  /**
   * Retrives a {@link Token} from a given sequence file for a specified user. The sequence file should contain a list
   * of key, value pairs where each key corresponds to a user and each value corresponds to a {@link Token} for that
   * user.
   *
   * <p>NOTE(review): only the FIRST token whose key matches {@code userNameKey} is returned (as a
   * singleton list); any later tokens for the same user in the file are ignored.</p>
   *
   * @param userNameKey The name of the user to retrieve a {@link Token} for
   * @param tokenFilePath The path to the sequence file containing the {@link Token}s
   *
   * @return A {@link Token} for the given user name
   */
  public static List<Token<?>> getTokenFromSeqFile(String userNameKey, Path tokenFilePath) throws IOException {
    log.info("Reading tokens from sequence file " + tokenFilePath);
    try (Closer closer = Closer.create()) {
      // The token file is read from the *local* file system, not the FS being proxied.
      FileSystem localFs = FileSystem.getLocal(new Configuration());
      @SuppressWarnings("deprecation")
      SequenceFile.Reader tokenReader =
          closer.register(new SequenceFile.Reader(localFs, tokenFilePath, localFs.getConf()));
      Text key = new Text();
      Token<?> value = new Token<>();
      while (tokenReader.next(key, value)) {
        log.debug("Found token for user: " + key);
        if (key.toString().equals(userNameKey)) {
          return Collections.singletonList(value);
        }
      }
    }
    log.warn("Did not find any tokens for user " + userNameKey);
    return Collections.emptyList();
  }
  /**
   * Logs in the super user from the keytab (only if not already the current login user) and returns
   * a proxy-user UGI for {@code userNameToProxyAs}.
   */
  private static UserGroupInformation loginAndProxyAsUser(@NonNull String userNameToProxyAs,
      @NonNull String superUserName, Path superUserKeytabLocation) throws IOException {
    if (!UserGroupInformation.getLoginUser().getUserName().equals(superUserName)) {
      // Keytab is only required when we actually need to switch login users.
      Preconditions.checkNotNull(superUserKeytabLocation);
      UserGroupInformation.loginUserFromKeytab(superUserName, superUserKeytabLocation.toString());
    }
    return UserGroupInformation.createProxyUser(userNameToProxyAs, UserGroupInformation.getLoginUser());
  }
  /** Privileged action that opens a {@link FileSystem} as whatever user the enclosing UGI represents. */
  @AllArgsConstructor
  private static class ProxiedFileSystem implements PrivilegedExceptionAction<FileSystem> {
    @NonNull
    private URI fsURI;
    @NonNull
    private Configuration conf;
    @Override
    public FileSystem run() throws IOException {
      log.info("Creating a filesystem for user: " + UserGroupInformation.getCurrentUser());
      return FileSystem.get(this.fsURI, this.conf);
    }
  }
}
| 4,153 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
/**
* Utility class for collecting metadata specific to the current Hadoop cluster.
*
* @see ClustersNames
*/
public class ClusterNameTags {

  /** Tag key under which the cluster identifier is reported. */
  public static final String CLUSTER_IDENTIFIER_TAG_NAME = "clusterIdentifier";

  /** Static-only utility class; not meant to be instantiated. */
  private ClusterNameTags() {
  }

  /**
   * Uses {@link #getClusterNameTags(Configuration)} with default Hadoop {@link Configuration}.
   *
   * @return a {@link Map} of key, value pairs containing the cluster metadata
   */
  public static Map<String, String> getClusterNameTags() {
    return getClusterNameTags(new Configuration());
  }

  /**
   * Gets all useful Hadoop cluster metrics.
   *
   * @param conf a Hadoop {@link Configuration} to collect the metadata from
   *
   * @return an immutable {@link Map} of key, value pairs containing the cluster metadata; empty
   *         when no cluster name can be resolved from the configuration
   */
  public static Map<String, String> getClusterNameTags(Configuration conf) {
    ImmutableMap.Builder<String, String> tagMap = ImmutableMap.builder();
    String clusterIdentifierTag = ClustersNames.getInstance().getClusterName(conf);
    // Omit the tag entirely rather than emit an empty value.
    if (!Strings.isNullOrEmpty(clusterIdentifierTag)) {
      tagMap.put(CLUSTER_IDENTIFIER_TAG_NAME, clusterIdentifierTag);
    }
    return tagMap.build();
  }
}
| 4,154 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import javax.net.SocketFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocketFactory;
import javax.net.ssl.TrustManager;
/**
* A SSL socket wrapper used to create sockets with a {@link: javax.net.ssl.TrustManager}
*/
public class TrustManagerSocketFactory extends SSLSocketFactory {

  /**
   * Delegate factory backed by a TLS {@link SSLContext} initialized with a
   * {@link DummyTrustManager}. SECURITY NOTE: this trusts all certificates;
   * it must only be used where certificate validation is intentionally skipped.
   */
  private final SSLSocketFactory _sslSocketFactory;

  /**
   * Creates the factory, initializing a TLS context with a {@link DummyTrustManager}.
   *
   * @throws RuntimeException if the TLS context cannot be initialized. The previous
   *         implementation swallowed the exception, leaving the delegate {@code null}
   *         and causing an uninformative NPE on first use.
   */
  public TrustManagerSocketFactory() {
    try {
      SSLContext ctx = SSLContext.getInstance("TLS");
      ctx.init(null, new TrustManager[]{new DummyTrustManager()}, new SecureRandom());
      _sslSocketFactory = ctx.getSocketFactory();
    } catch (KeyManagementException | NoSuchAlgorithmException e) {
      // Fail fast with context instead of deferring to a NullPointerException later.
      throw new RuntimeException("Failed to initialize TLS context for TrustManagerSocketFactory", e);
    }
  }

  /**
   * @return a new {@link TrustManagerSocketFactory} instance (intentionally hides
   *         {@link SSLSocketFactory#getDefault()}, following the javax.net convention)
   */
  public static SocketFactory getDefault() {
    return new TrustManagerSocketFactory();
  }

  /** Delegates layered-socket creation to the trust-all factory. */
  @Override
  public Socket createSocket(Socket socket, String host, int port, boolean autoClose) throws IOException {
    return _sslSocketFactory.createSocket(socket, host, port, autoClose);
  }

  @Override
  public String[] getDefaultCipherSuites() {
    return _sslSocketFactory.getDefaultCipherSuites();
  }

  @Override
  public String[] getSupportedCipherSuites() {
    return _sslSocketFactory.getSupportedCipherSuites();
  }

  @Override
  public Socket createSocket(String host, int port) throws IOException, UnknownHostException {
    return _sslSocketFactory.createSocket(host, port);
  }

  @Override
  public Socket createSocket(InetAddress host, int port) throws IOException {
    return _sslSocketFactory.createSocket(host, port);
  }

  @Override
  public Socket createSocket(String host, int port, InetAddress localHost, int localPort)
      throws IOException, UnknownHostException {
    return _sslSocketFactory.createSocket(host, port, localHost, localPort);
  }

  @Override
  public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort)
      throws IOException {
    return _sslSocketFactory.createSocket(address, port, localAddress, localPort);
  }
}
| 4,155 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/HeapDumpForTaskUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
/**
* A utility class for generating script to move the heap dump .prof files to HDFS for hadoop tasks, when Java heap out of memory error is thrown.
*/
public class HeapDumpForTaskUtils {

  private static final Logger LOG = LoggerFactory.getLogger(HeapDumpForTaskUtils.class);
  private static final String DUMP_FOLDER = "dumps";

  /** Utility class; not meant to be instantiated. */
  private HeapDumpForTaskUtils() {
  }

  /**
   * Generate the dumpScript, which is used when OOM error is thrown during task execution.
   * The current content dumpScript puts the .prof files to the DUMP_FOLDER within the same directory of the dumpScript.
   *
   * User needs to add the following options to the task java.opts:
   *
   * -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./heapFileName.hprof -XX:OnOutOfMemoryError=./dumpScriptFileName
   *
   * @param dumpScript The path to the dumpScript, which needs to be added to the Distributed cache.
   *                   To use it, simply put the path of dumpScript to the gobblin config: job.hdfs.files.
   * @param fs File system
   * @param heapFileName the name of the .prof file.
   * @param chmod chmod for the dump script. For hdfs file, e.g, "hadoop fs -chmod 755"
   * @throws IOException if the script cannot be written (a partially-written script is deleted)
   */
  public static void generateDumpScript(Path dumpScript, FileSystem fs, String heapFileName, String chmod)
      throws IOException {
    if (fs.exists(dumpScript)) {
      LOG.info("Heap dump script already exists: " + dumpScript);
      return;
    }
    try (BufferedWriter scriptWriter =
        new BufferedWriter(new OutputStreamWriter(fs.create(dumpScript), ConfigurationKeys.DEFAULT_CHARSET_ENCODING))) {
      Path dumpDir = new Path(dumpScript.getParent(), DUMP_FOLDER);
      if (!fs.exists(dumpDir)) {
        fs.mkdirs(dumpDir);
      }
      // The script copies the local heap dump to HDFS, naming it after the task's working
      // directory (slashes replaced with underscores) so concurrent tasks do not clobber each other.
      scriptWriter.write("#!/bin/sh\n");
      scriptWriter.write("if [ -n \"$HADOOP_PREFIX\" ]; then\n");
      scriptWriter
          .write(" ${HADOOP_PREFIX}/bin/hadoop dfs -put " + heapFileName + " " + dumpDir + "/${PWD//\\//_}.hprof\n");
      scriptWriter.write("else\n");
      scriptWriter
          .write(" ${HADOOP_HOME}/bin/hadoop dfs -put " + heapFileName + " " + dumpDir + "/${PWD//\\//_}.hprof\n");
      scriptWriter.write("fi\n");
    } catch (IOException ioe) {
      LOG.error("Heap dump script is not generated successfully.");
      if (fs.exists(dumpScript)) {
        fs.delete(dumpScript, true);
      }
      throw ioe;
    }
    // Launch the chmod command via ProcessBuilder with an explicit argument list instead of
    // Runtime.exec(String). Tokenization on whitespace matches the old Runtime.exec behavior.
    new ProcessBuilder((chmod + " " + dumpScript).trim().split("\\s+")).start();
  }
}
| 4,156 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/GobblinProcessBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.IOException;
import java.util.List;
/**
 * A thin wrapper around {@link ProcessBuilder} that launches a process for a
 * given command line with its standard output inherited from the parent process.
 */
public class GobblinProcessBuilder {
  /**
   * Starts a new process executing the given command.
   *
   * @param command the command line, one argument per list element
   * @return the started {@link Process}
   * @throws IOException if the process cannot be started
   */
  public Process start(final List<String> command)
      throws IOException {
    return new ProcessBuilder(command)
        .redirectOutput(ProcessBuilder.Redirect.INHERIT)
        .start();
  }
}
| 4,157 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/ClustersNames.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import com.google.common.base.Preconditions;
import com.google.common.io.Closer;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.net.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
/**
* Allows conversion of URLs identifying a Hadoop cluster (e.g. resource manager url or
* a job tracker URL) to a human-readable name.
*
* <p>The class will automatically load a resource named {@link #URL_TO_NAME_MAP_RESOURCE_NAME} to
* get a default mapping. It expects this resource to be in the Java Properties file format. The
* name of the property is the cluster URL and the value is the human-readable name.
*
* <p><b>IMPORTANT:</b> Don't forget to escape colons ":" in the file as those may be interpreted
* as name/value separators.
*/
public class ClustersNames {

  /** Classpath resource (Java Properties format) mapping cluster URL -> human-readable name. */
  public static final String URL_TO_NAME_MAP_RESOURCE_NAME = "GobblinClustersNames.properties";
  private static final Logger LOG = LoggerFactory.getLogger(ClustersNames.class);
  // Default Hadoop configuration used by the no-arg getClusterName() overload.
  private static final Configuration HADOOP_CONFIGURATION = new Configuration();
  // Lazily-created singleton; access is synchronized inside getInstance().
  private static ClustersNames THE_INSTANCE;
  // URL (or host[:port], or host) -> cluster-name mapping, seeded from the resource file
  // and extended at runtime via addClusterMapping().
  private Properties urlToNameMap = new Properties();

  /**
   * Loads the default URL-to-name mapping from {@link #URL_TO_NAME_MAP_RESOURCE_NAME},
   * looking first next to this class and then on the system classpath. Load failures are
   * logged and tolerated, leaving the map empty.
   */
  protected ClustersNames() {
    try (Closer closer = Closer.create()) {
      InputStream propsInput = closer.register(getClass().getResourceAsStream(URL_TO_NAME_MAP_RESOURCE_NAME));
      if (null == propsInput) {
        // Fall back to the system class loader in case the resource is not packaged next to this class.
        propsInput = closer.register(ClassLoader.getSystemResourceAsStream(URL_TO_NAME_MAP_RESOURCE_NAME));
      }
      if (null != propsInput) {
        try {
          this.urlToNameMap.load(propsInput);
          LOG.info("Loaded cluster names map:" + this.urlToNameMap);
        } catch (IOException e) {
          // Best effort: a broken mapping file should not prevent startup.
          LOG.warn("Unable to load cluster names map: " + e, e);
        }
      } else {
        LOG.info("no default cluster mapping found");
      }
    } catch (IOException e) {
      LOG.warn("unable to close resource input stream for " + URL_TO_NAME_MAP_RESOURCE_NAME + ":" + e, e);
    }
  }

  /**
   * Returns human-readable name of the cluster.
   *
   * Method first checks config for exact cluster url match. If nothing is found,
   * it will also check host:port and just hostname match.
   * If it still could not find a match, hostname from the url will be returned.
   *
   * For incomplete or invalid urls, we'll return a name based on clusterUrl,
   * that will have only alphanumeric characters, dashes, underscores and dots.
   *
   * @param clusterUrl the cluster URL to resolve; may be {@code null}
   * @return the mapped name, or {@code null} when {@code clusterUrl} is {@code null}
   */
  public String getClusterName(String clusterUrl) {
    if (null == clusterUrl) {
      return null;
    }
    // Candidates are ordered from most to least specific; the first configured match wins.
    List<String> candidates = generateUrlMatchCandidates(clusterUrl);
    for (String candidate : candidates) {
      String name = this.urlToNameMap.getProperty(candidate);
      if (name != null) {
        return name;
      }
    }
    // No configured mapping: fall back to the least-specific candidate (hostname or sanitized url).
    return candidates.get(candidates.size() - 1);
  }

  /**
   * Registers a runtime mapping from a cluster URL string to a human-readable name.
   *
   * @param clusterUrl the cluster URL; must not be {@code null}
   * @param clusterName the name to map it to; must not be {@code null}
   */
  public void addClusterMapping(String clusterUrl, String clusterName) {
    Preconditions.checkNotNull(clusterUrl, "cluster URL expected");
    Preconditions.checkNotNull(clusterName, "cluster name expected");
    this.urlToNameMap.put(clusterUrl, clusterName);
  }

  /**
   * Registers a runtime mapping from a cluster {@link URL} to a human-readable name.
   *
   * @param clusterUrl the cluster URL; must not be {@code null}
   * @param clusterName the name to map it to; must not be {@code null}
   */
  public void addClusterMapping(URL clusterUrl, String clusterName) {
    Preconditions.checkNotNull(clusterUrl, "cluster URL expected");
    Preconditions.checkNotNull(clusterName, "cluster name expected");
    this.urlToNameMap.put(clusterUrl.toString(), clusterName);
  }

  /**
   * Builds the ordered list of lookup keys for a cluster identifier:
   * the raw identifier, then host:port, then host, with sanitized fallbacks
   * for unparseable identifiers.
   *
   * @see #getClusterName(String) for logic description.
   */
  private static List<String> generateUrlMatchCandidates(String clusterIdentifier) {
    ArrayList<String> candidates = new ArrayList<>();
    candidates.add(clusterIdentifier);
    try {
      URI uri = new URI(clusterIdentifier.trim());
      if (uri.getHost() != null) {
        if (uri.getPort() != -1) {
          candidates.add(uri.getHost() + ":" + uri.getPort());
        }
        // we prefer a config entry with 'host:port', but if it's missing
        // we'll consider just 'host' config entry
        candidates.add(uri.getHost());
      } else if (uri.getScheme() != null && uri.getPath() != null) {
        // we have a scheme and a path, but not the host name
        // assuming local host
        candidates.add("localhost");
      } else {
        candidates.add(getSafeFallbackName(clusterIdentifier));
      }
    } catch (URISyntaxException e) {
      // Unparseable identifier: sanitize it so the fallback name is still usable.
      candidates.add(getSafeFallbackName(clusterIdentifier));
    }
    return candidates;
  }

  /** Replaces every character other than word characters, dashes and dots with an underscore. */
  private static String getSafeFallbackName(String clusterIdentifier) {
    return clusterIdentifier.replaceAll("[^\\w-\\.]", "_");
  }

  /**
   *
   * Returns the cluster name on which the application is running. Uses default hadoop {@link Configuration} to get the
   * url of the resourceManager or jobtracker. The URL is then translated into a human readable cluster name using
   * {@link #getClusterName(String)}
   *
   * @see #getClusterName(Configuration)
   *
   */
  public String getClusterName() {
    return getClusterName(HADOOP_CONFIGURATION);
  }

  /**
   * Returns the cluster name on which the application is running. Uses Hadoop configuration passed in to get the
   * url of the resourceManager or jobtracker. The URL is then translated into a human readable cluster name using
   * {@link #getClusterName(String)}
   *
   * <p>
   * <b>MapReduce mode</b> Uses the value for "yarn.resourcemanager.address" from {@link Configuration} excluding the
   * port number.
   * </p>
   *
   * <p>
   * <b>Standalone mode (outside of hadoop)</b> Uses the Hostname of {@link InetAddress#getLocalHost()}
   * </p>
   *
   * <p>
   * Use {@link #getClusterName(String)} if you already have the cluster URL
   * </p>
   *
   * @see #getClusterName()
   * @param conf Hadoop configuration to use to get resourceManager or jobTracker URLs
   */
  public String getClusterName(Configuration conf) {
    // ResourceManager address in Hadoop2
    String clusterIdentifier = conf.get("yarn.resourcemanager.address");
    clusterIdentifier = getClusterName(clusterIdentifier);
    // If job is running outside of Hadoop (Standalone) use hostname
    // If clusterIdentifier is localhost or 0.0.0.0 use hostname
    if (clusterIdentifier == null || StringUtils.startsWithIgnoreCase(clusterIdentifier, "localhost")
        || StringUtils.startsWithIgnoreCase(clusterIdentifier, "0.0.0.0")) {
      try {
        clusterIdentifier = InetAddress.getLocalHost().getHostName();
      } catch (UnknownHostException e) {
        // Do nothing. Tag will not be generated
      }
    }
    return clusterIdentifier;
  }

  /**
   * Returns the lazily-created shared instance. Access is synchronized on the class,
   * so creation happens at most once.
   */
  public static ClustersNames getInstance() {
    synchronized (ClustersNames.class) {
      if (null == THE_INSTANCE) {
        THE_INSTANCE = new ClustersNames();
      }
      return THE_INSTANCE;
    }
  }
}
| 4,158 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filters/AndPathFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filters;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
/**
* Combines multiple {@link PathFilter}s. {@link Path} is accepted only if all filters accept it.
*/
public class AndPathFilter implements PathFilter {

  /** Filters that must all accept a path for it to be accepted. */
  private final PathFilter[] pathFilters;

  /**
   * @param pathFilters the filters to combine; a {@link Path} is accepted only if every
   *                    filter accepts it
   */
  public AndPathFilter(PathFilter... pathFilters) {
    // Defensive copy: the varargs array is caller-owned and could otherwise be mutated externally.
    this.pathFilters = pathFilters.clone();
  }

  /** @return true iff every underlying filter accepts {@code path} (short-circuits on first reject). */
  @Override
  public boolean accept(Path path) {
    for (PathFilter filter : this.pathFilters) {
      if (!filter.accept(path)) {
        return false;
      }
    }
    return true;
  }
}
| 4,159 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filters/HiddenFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filters;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
/**
* A {@link PathFilter} that filters out hidden files (those starting with '_' or '.').
*/
public class HiddenFilter implements PathFilter {

  /**
   * @return false for hidden files, i.e. those whose names begin with '_' or '.';
   *         true otherwise
   */
  @Override
  public boolean accept(Path path) {
    String fileName = path.getName();
    return !(fileName.startsWith("_") || fileName.startsWith("."));
  }
}
| 4,160 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filters/RegexPathFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filters;
import java.util.Properties;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import lombok.ToString;
/**
* Use a regex to filter {@link Path}. If {@link #include} is set to true, include {@link Path} that matches the regex.
* Otherwise, include {@link Path} that does not match the regex.
*/
@ToString
public class RegexPathFilter implements PathFilter {

  /** Property key used by {@link #RegexPathFilter(Properties)} to read the regex. */
  public static final String REGEX = "path.filter.regex";

  private final Pattern regex;
  private final boolean include;

  /**
   * Builds the filter from the {@value #REGEX} property.
   *
   * @param props properties containing {@value #REGEX}
   * @throws IllegalArgumentException if the property is missing (previously a bare NPE
   *         surfaced from inside {@link Pattern#compile(String)})
   */
  public RegexPathFilter(Properties props) {
    this(props.getProperty(REGEX));
  }

  /** Builds an inclusive filter: paths matching {@code regex} are accepted. */
  public RegexPathFilter(String regex) {
    this(regex, true);
  }

  /**
   * @param regex   the pattern applied to {@link Path#getName()}
   * @param include if true, accept matching paths; if false, accept non-matching paths
   * @throws IllegalArgumentException if {@code regex} is null
   */
  public RegexPathFilter(String regex, boolean include) {
    if (regex == null) {
      throw new IllegalArgumentException("Missing regex for RegexPathFilter (see property '" + REGEX + "')");
    }
    this.regex = Pattern.compile(regex);
    this.include = include;
  }

  @Override
  public boolean accept(Path path) {
    // Match against the file name only, not the full path.
    boolean matches = this.regex.matcher(path.getName()).matches();
    return include ? matches : !matches;
  }
}
| 4,161 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filters/TarGpgPathFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filters;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
/**
* A {@link PathFilter} to accept encrypted tarball files.
*/
public class TarGpgPathFilter implements PathFilter {

  /** File-name suffix identifying GPG-encrypted, gzipped tarballs. */
  private static final String TAR_GPG_SUFFIX = ".tar.gz.gpg";

  /** @return true iff the path's file name ends with {@value #TAR_GPG_SUFFIX}. */
  @Override
  public boolean accept(Path path) {
    return path.getName().endsWith(TAR_GPG_SUFFIX);
  }
}
| 4,162 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/eventbus/EventBusKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.eventbus;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import org.apache.gobblin.broker.iface.SharedResourceKey;
@EqualsAndHashCode
@Getter
public class EventBusKey implements SharedResourceKey {

  /** Name of the class that owns/publishes on the shared EventBus. */
  private final String sourceClassName;

  public EventBusKey(String sourceClassName) {
    this.sourceClassName = sourceClassName;
  }

  /**
   * {@inheritDoc}
   *
   * <p>The configuration key is simply the source class name this key wraps.
   */
  @Override
  public String toConfigurationKey() {
    return getSourceClassName();
  }
}
| 4,163 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/eventbus/EventBusFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.eventbus;
import java.io.IOException;
import com.google.common.eventbus.EventBus;
import org.apache.gobblin.broker.ResourceInstance;
import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.ScopeType;
import org.apache.gobblin.broker.iface.ScopedConfigView;
import org.apache.gobblin.broker.iface.SharedResourceFactory;
import org.apache.gobblin.broker.iface.SharedResourceFactoryResponse;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
/**
* A {@link SharedResourceFactory} for creating {@link EventBus} instances.
* @param <S>
*/
public class EventBusFactory<S extends ScopeType<S>> implements SharedResourceFactory<EventBus, EventBusKey, S> {

  public static final String FACTORY_NAME = "eventbus";

  /**
   * Convenience accessor that fetches (or creates) the shared {@link EventBus}
   * registered under {@code eventBusName} in the given broker.
   *
   * @throws IOException if the broker has no configuration for the requested resource
   */
  public static <S extends ScopeType<S>> EventBus get(String eventBusName, SharedResourcesBroker<S> broker)
      throws IOException {
    EventBusFactory<S> factory = new EventBusFactory<S>();
    EventBusKey key = new EventBusKey(eventBusName);
    try {
      return broker.getSharedResource(factory, key);
    } catch (NotConfiguredException nce) {
      throw new IOException(nce);
    }
  }

  @Override
  public String getName() {
    return FACTORY_NAME;
  }

  @Override
  public SharedResourceFactoryResponse<EventBus> createResource(SharedResourcesBroker<S> broker,
      ScopedConfigView<S, EventBusKey> config) {
    // Name the EventBus after the source class recorded in the key, for easier identification.
    return new ResourceInstance<>(new EventBus(config.getKey().getSourceClassName()));
  }

  @Override
  public S getAutoScope(SharedResourcesBroker<S> broker, ConfigView<S, EventBusKey> config) {
    // EventBus instances are shared as widely as possible: always attach to the root scope.
    return broker.selfScope().getType().rootScope();
  }
}
| 4,164 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/GreedyAllocator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.util.Iterator;
import java.util.concurrent.Callable;
import com.google.common.base.Function;
import com.google.common.collect.Iterators;
import org.apache.gobblin.util.iterators.InterruptibleIterator;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
/**
* A {@link RequestAllocator} that selects {@link Request}s without any order guarantees until the {@link ResourcePool}
* is full, then stops. This allocator will mostly ignore the prioritizer.
*
* <p>
* This allocator is useful when there is no prioritization or fairness required. It is generally the fastest and least
* memory intensive implementation.
* </p>
*/
@Slf4j
public class GreedyAllocator<T extends Request<T>> extends PriorityIterableBasedRequestAllocator<T> {

  /** Factory producing {@link GreedyAllocator} instances from a configuration. */
  public static class Factory implements RequestAllocator.Factory {
    @Override
    public <T extends Request<T>> RequestAllocator<T> createRequestAllocator(RequestAllocatorConfig<T> configuration) {
      return new GreedyAllocator<>(configuration);
    }
  }

  public GreedyAllocator(RequestAllocatorConfig<T> configuration) {
    super(log, configuration);
  }

  /**
   * Concatenates the requests of all requestors in encounter order, wrapped in an
   * {@link InterruptibleIterator} that stops producing as soon as the bounded
   * iterable reports it is full.
   */
  @Override
  protected Iterator<T> getJoinIterator(Iterator<? extends Requestor<T>> requestors,
      final ConcurrentBoundedPriorityIterable<T> requestIterable) {
    // Flatten each requestor into its requests.
    Function<Requestor<T>, Iterator<T>> requestsOf = new Function<Requestor<T>, Iterator<T>>() {
      @Nullable
      @Override
      public Iterator<T> apply(Requestor<T> requestor) {
        return requestor.iterator();
      }
    };
    // Interrupt condition: the resource-bounded iterable cannot take any more requests.
    Callable<Boolean> isFull = new Callable<Boolean>() {
      @Override
      public Boolean call()
          throws Exception {
        return requestIterable.isFull();
      }
    };
    return new InterruptibleIterator<>(Iterators.concat(Iterators.transform(requestors, requestsOf)), isFull);
  }
}
| 4,165 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/PriorityIterableBasedRequestAllocator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import org.slf4j.Logger;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.collect.Iterators;
import lombok.AccessLevel;
import lombok.Getter;
import org.apache.gobblin.util.Either;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.executors.IteratorExecutor;
/**
 * Base {@link RequestAllocator} that feeds requests from a join iterator (supplied by subclasses)
 * into a {@link ConcurrentBoundedPriorityIterable}, optionally using multiple threads, and exposes
 * the requests that were rejected or dropped along the way.
 */
public abstract class PriorityIterableBasedRequestAllocator<T extends Request<T>> implements RequestAllocator<T> {

  private final Logger log;

  @Getter(value = AccessLevel.PROTECTED)
  private final RequestAllocatorConfig<T> configuration;

  //These are for submitting alertable events
  // Populated after allocateRequests() runs; each list captures a category of rejection.
  @Getter
  private List<T> requestsExceedingAvailableResourcePool;
  @Getter
  private List<T> requestsRejectedWithLowPriority;
  @Getter
  private List<T> requestsRejectedDueToInsufficientEviction;
  @Getter
  private List<T> requestsDropped;

  /**
   * @param log           subclass-specific logger used for statistics and failure reporting
   * @param configuration allocation settings (prioritizer, resource estimator, thread count, etc.)
   */
  public PriorityIterableBasedRequestAllocator(Logger log, RequestAllocatorConfig<T> configuration) {
    this.log = log;
    this.configuration = configuration;
  }

  /**
   * Drains the subclass-provided join iterator into a bounded priority iterable, then returns
   * an iterator over the requests that fit in the {@link ResourcePool}.
   *
   * <p>With {@code allowedThreads <= 1} the drain happens inline; otherwise each add is submitted
   * to an {@link IteratorExecutor}. Note the iterable itself must tolerate concurrent adds
   * in the multi-threaded path.
   */
  @Override
  public AllocatedRequestsIterator<T> allocateRequests(Iterator<? extends Requestor<T>> requestors,
      ResourcePool resourcePool) {
    final ConcurrentBoundedPriorityIterable<T> iterable =
        new ConcurrentBoundedPriorityIterable<>(this.configuration.getPrioritizer(),
            this.configuration.getResourceEstimator(), this.configuration.getStoreRejectedRequestsSetting(),
            resourcePool);
    final Iterator<T> joinIterator = getJoinIterator(requestors, iterable);
    if (this.configuration.getAllowedThreads() <= 1) {
      // Single-threaded path: drain the join iterator inline.
      while (joinIterator.hasNext()) {
        iterable.add(joinIterator.next());
      }
    } else {
      // Multi-threaded path: wrap each add in a Callable and run them on the configured
      // number of threads via IteratorExecutor.
      IteratorExecutor<Void> executor =
          new IteratorExecutor<>(Iterators.transform(joinIterator, new Function<T, Callable<Void>>() {
            @Override
            public Callable<Void> apply(final T input) {
              return new Callable<Void>() {
                @Override
                public Void call()
                    throws Exception {
                  iterable.add(input);
                  return null;
                }
              };
            }
          }), this.configuration.getAllowedThreads(),
              ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("request-allocator-%d")));
      try {
        List<Either<Void, ExecutionException>> results = executor.executeAndGetResults();
        // Throw runtime failure if an exception occurs during execution to fail the job
        IteratorExecutor.logAndThrowFailures(results, log, 10);
      } catch (InterruptedException ie) {
        // On interruption, give up and return an empty allocation rather than a partial one.
        log.error("Request allocation was interrupted.");
        return new AllocatedRequestsIteratorBase<>(
            Collections.emptyIterator(), resourcePool);
      }
    }
    iterable.logStatistics(Optional.of(this.log));
    //Get all requests rejected/dropped
    getRejectedAndDroppedRequests(iterable);
    return new AllocatedRequestsIteratorBase<>(iterable.iterator(), resourcePool);
  }

  /**
   * Copies the per-category rejection/drop lists out of the iterable into this allocator's
   * fields so callers can report them after allocation completes.
   */
  public void getRejectedAndDroppedRequests(ConcurrentBoundedPriorityIterable<T> iterable) {
    requestsExceedingAvailableResourcePool = iterable.getRequestsExceedingAvailableResourcePool();
    requestsRejectedWithLowPriority = iterable.getRequestsRejectedWithLowPriority();
    requestsRejectedDueToInsufficientEviction = iterable.getRequestsRejectedDueToInsufficientEviction();
    requestsDropped = iterable.getRequestsDropped();
  }

  /**
   * Produces the iterator of requests to feed into the bounded iterable; subclasses decide
   * ordering and early-termination behavior (e.g. stop once the iterable is full).
   */
  protected abstract Iterator<T> getJoinIterator(Iterator<? extends Requestor<T>> requestors,
      ConcurrentBoundedPriorityIterable<T> requestIterable);
}
| 4,166 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/AllocatedRequestsIteratorBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.util.Iterator;
import java.util.Random;
import com.google.common.base.Function;
import lombok.Getter;
/**
 * A basic implementation of {@link AllocatedRequestsIterator}: delegates to an underlying iterator of
 * requests paired with their {@link ResourceRequirement}s, while accumulating the total resources
 * consumed by every element handed out so far.
 *
 * @param <T> the concrete {@link Request} type
 */
public class AllocatedRequestsIteratorBase<T extends Request<T>> implements AllocatedRequestsIterator<T> {

  // Source of requests, each carrying its pre-computed ResourceRequirement.
  private final Iterator<RequestWithResourceRequirement<T>> underlying;
  // Running per-dimension total of resources used by all elements already returned by next().
  private final double[] currentRequirement;

  public AllocatedRequestsIteratorBase(Iterator<RequestWithResourceRequirement<T>> underlying, ResourcePool resourcePool) {
    this.underlying = underlying;
    this.currentRequirement = new double[resourcePool.getNumDimensions()];
  }

  /** @return a snapshot of the total resources consumed by all elements returned so far. */
  @Override
  public ResourceRequirement totalResourcesUsed() {
    return new ResourceRequirement(this.currentRequirement);
  }

  @Override
  public boolean hasNext() {
    return this.underlying.hasNext();
  }

  @Override
  public T next() {
    RequestWithResourceRequirement<T> wrapped = this.underlying.next();
    // Fold this element's resource vector into the running total, writing in place.
    VectorAlgebra.addVector(this.currentRequirement, wrapped.getResourceRequirement().getResourceVector(), 1.0,
        this.currentRequirement);
    return wrapped.getT();
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }

  /**
   * Stores an element and its {@link ResourceRequirement}, plus a random id — presumably used to
   * disambiguate entries with equal priority in ordered containers (TODO confirm against
   * ConcurrentBoundedPriorityIterable.AllDifferentComparator).
   */
  @Getter
  public static class RequestWithResourceRequirement<T> {
    public static final Random RANDOM = new Random();

    private final T t;
    private final ResourceRequirement resourceRequirement;
    private final long id;

    RequestWithResourceRequirement(T t, ResourceRequirement resourceRequirement) {
      this.t = t;
      this.resourceRequirement = resourceRequirement;
      this.id = RANDOM.nextLong();
    }
  }

  /**
   * A {@link Function} used to extract the actual {@link Request} from a {@link RequestWithResourceRequirement}.
   */
  public static class TExtractor<T> implements Function<RequestWithResourceRequirement<T>, T> {
    @Override
    public T apply(RequestWithResourceRequirement<T> input) {
      return input.getT();
    }
  }
}
| 4,167 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/VectorAlgebra.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
/**
 * Basic vector operations. These operations are NOT safe (e.g. no bound checks on vectors), so they are package-private.
 */
class VectorAlgebra {

  /**
   * Computes x + c * y component-wise.
   *
   * @param reuse destination array; a new array of x's length is allocated when null
   * @return the destination array containing the result
   */
  static double[] addVector(double[] x, double[] y, double c, double[] reuse) {
    double[] result = (reuse == null) ? new double[x.length] : reuse;
    for (int idx = 0; idx < x.length; idx++) {
      result[idx] = x[idx] + c * y[idx];
    }
    return result;
  }

  /**
   * Computes c * x component-wise.
   *
   * @param reuse destination array; a new array of x's length is allocated when null
   * @return the destination array containing the result
   */
  static double[] scale(double[] x, double c, double[] reuse) {
    double[] result = (reuse == null) ? new double[x.length] : reuse;
    for (int idx = 0; idx < x.length; idx++) {
      result[idx] = c * x[idx];
    }
    return result;
  }

  /**
   * @return true if test is larger than reference in any dimension. If orequal is true, also return
   *         true if test equals reference in any dimension.
   */
  static boolean exceedsVector(double[] reference, double[] test, boolean orequal) {
    for (int idx = 0; idx < reference.length; idx++) {
      // Strict exceedance, or equality when the caller asked for >=.
      if (reference[idx] < test[idx] || (orequal && reference[idx] == test[idx])) {
        return true;
      }
    }
    return false;
  }
}
| 4,168 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/SimpleHierarchicalPrioritizer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.io.Serializable;
import java.util.Comparator;
import lombok.AllArgsConstructor;
/**
 * A {@link HierarchicalPrioritizer} built from two input {@link Comparator}s: one for {@link Requestor} and one
 * for {@link Request}. Requestor-level ordering dominates; the request-level comparator only breaks ties,
 * which preserves the {@link HierarchicalPrioritizer} contract.
 */
@AllArgsConstructor
public class SimpleHierarchicalPrioritizer<T extends Request<T>> implements HierarchicalPrioritizer<T>, Serializable {

  private final Comparator<Requestor<T>> requestorComparator;
  private final Comparator<T> requestComparator;

  @Override
  public int compareRequestors(Requestor<T> r1, Requestor<T> r2) {
    return this.requestorComparator.compare(r1, r2);
  }

  @Override
  public int compare(T o1, T o2) {
    // Compare by requestor tier first; only fall back to the request comparator on a tie.
    int byRequestor = compareRequestors(o1.getRequestor(), o2.getRequestor());
    return byRequestor == 0 ? this.requestComparator.compare(o1, o2) : byRequestor;
  }
}
| 4,169 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/RequestAllocatorConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.io.Serializable;
import java.util.Comparator;
import lombok.AllArgsConstructor;
import lombok.Getter;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
@AllArgsConstructor
@Getter
public class RequestAllocatorConfig<T extends Request<T>> {

  // Ordering among requests; by the package's priority-queue convention, smaller compares as higher priority.
  private final Comparator<T> prioritizer;
  private final ResourceEstimator<T> resourceEstimator;
  // Maximum number of threads the allocator may use.
  private final int allowedThreads;
  private Config limitedScopeConfig;
  private String storeRejectedRequestsSetting;

  /** Controls which rejected requests are retained for later reporting. */
  public enum StoreRejectedRequestsConfig {
    ALL, MIN, NONE
  }

  public static <T extends Request<T>> Builder<T> builder(ResourceEstimator<T> resourceEstimator) {
    return new Builder<>(resourceEstimator);
  }

  /** Fluent builder for {@link RequestAllocatorConfig}. */
  public static class Builder<T extends Request<T>> {
    private Comparator<T> prioritizer = new AllEqualPrioritizer<>();
    private final ResourceEstimator<T> resourceEstimator;
    private int allowedThreads = 1;
    private Config limitedScopeConfig;
    private String storeRejectedRequestsSetting = StoreRejectedRequestsConfig.MIN.name();

    public Builder(ResourceEstimator<T> resourceEstimator) {
      this.resourceEstimator = resourceEstimator;
    }

    /** Enables parallel allocation with a default cap of 20 threads. */
    public Builder<T> allowParallelization() {
      return allowParallelization(20);
    }

    public Builder<T> allowParallelization(int maxThreads) {
      this.allowedThreads = maxThreads;
      return this;
    }

    public Builder<T> withLimitedScopeConfig(Config config) {
      this.limitedScopeConfig = config;
      return this;
    }

    public Builder<T> withPrioritizer(Comparator<T> prioritizer) {
      this.prioritizer = prioritizer;
      return this;
    }

    public Builder<T> storeRejectedRequests(String storeRejectedRequestsSetting) {
      this.storeRejectedRequestsSetting = storeRejectedRequestsSetting;
      return this;
    }

    public RequestAllocatorConfig<T> build() {
      // Never expose a null Config; default to an empty one.
      Config scopeConfig = this.limitedScopeConfig == null ? ConfigFactory.empty() : this.limitedScopeConfig;
      this.limitedScopeConfig = scopeConfig;
      return new RequestAllocatorConfig<>(this.prioritizer, this.resourceEstimator, this.allowedThreads,
          scopeConfig, this.storeRejectedRequestsSetting);
    }
  }

  /** A trivial prioritizer under which all requests compare equal. */
  public static class AllEqualPrioritizer<T> implements Comparator<T>, Serializable {
    @Override
    public int compare(T o1, T o2) {
      return 0;
    }
  }
}
| 4,170 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/RequestAllocatorUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import com.typesafe.config.Config;
import org.apache.gobblin.util.ClassAliasResolver;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class RequestAllocatorUtils {

  public static final String ALLOCATOR_ALIAS_KEY = "requestAllocatorAlias";

  /**
   * Infer and construct a {@link RequestAllocator} from an input {@link Config}.
   *
   * <p>Uses the alias at {@link #ALLOCATOR_ALIAS_KEY} in the configuration's limited-scope
   * {@link Config} if present; otherwise defaults to {@link BruteForceAllocator.Factory}.</p>
   *
   * @throws RuntimeException wrapping any reflective failure while instantiating the factory
   */
  public static <T extends Request<T>> RequestAllocator<T> inferFromConfig(RequestAllocatorConfig<T> configuration) {
    try {
      String alias = configuration.getLimitedScopeConfig().hasPath(ALLOCATOR_ALIAS_KEY) ?
          configuration.getLimitedScopeConfig().getString(ALLOCATOR_ALIAS_KEY) :
          BruteForceAllocator.Factory.class.getName();
      // Class.newInstance() is deprecated and silently propagates undeclared checked exceptions;
      // invoke the no-arg constructor explicitly instead. All thrown types are still
      // ReflectiveOperationException subclasses, so the catch below is unchanged for callers.
      RequestAllocator.Factory allocatorFactory = new ClassAliasResolver<>(RequestAllocator.Factory.class).
          resolveClass(alias).getDeclaredConstructor().newInstance();
      log.info("Using allocator factory {}", allocatorFactory.getClass().getName());
      return allocatorFactory.createRequestAllocator(configuration);
    } catch (ReflectiveOperationException roe) {
      throw new RuntimeException("Failed to instantiate request allocator factory", roe);
    }
  }
}
| 4,171 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/PreOrderAllocator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import com.google.common.collect.Lists;
import org.apache.gobblin.util.iterators.InterruptibleIterator;
import lombok.extern.slf4j.Slf4j;
/**
* A {@link RequestAllocator} that operates over {@link PushDownRequestor}s, getting a pre-ordered iterator of
* {@link Request}s from each {@link Requestor} and merging them, stopping as soon as the {@link ResourcePool} is
* exhausted.
*
* <p>
* This {@link RequestAllocator} is ideal when computing the {@link ResourceRequirement} of a {@link Request} is
* expensive, as the pre-ordering allows it to not compute {@link ResourceRequirement}s for low priority
* {@link Request}s.
* </p>
*/
@Slf4j
public class PreOrderAllocator<T extends Request<T>> extends PriorityIterableBasedRequestAllocator<T> {
public static class Factory implements RequestAllocator.Factory {
@Override
public <T extends Request<T>> RequestAllocator<T> createRequestAllocator(RequestAllocatorConfig<T> configuration) {
return new PreOrderAllocator<>(configuration);
}
}
public PreOrderAllocator(RequestAllocatorConfig<T> configuration) {
super(log, configuration);
}
@Override
protected Iterator<T> getJoinIterator(Iterator<? extends Requestor<T>> requestors,
final ConcurrentBoundedPriorityIterable<T> requestIterable) {
List<Iterator<T>> iteratorList = Lists.newArrayList();
while (requestors.hasNext()) {
Requestor<T> requestor = requestors.next();
if (!(requestor instanceof PushDownRequestor)) {
throw new RuntimeException(String.format("%s can only be used with %s.", PreOrderAllocator.class, PushDownRequestor.class));
}
try {
iteratorList.add(((PushDownRequestor<T>) requestor).getRequests(getConfiguration().getPrioritizer()));
} catch (IOException ioe) {
log.error("Failed to get requests from " + requestor);
}
}
PriorityMultiIterator<T> multiIterator = new PriorityMultiIterator<>(iteratorList, getConfiguration().getPrioritizer());
return new InterruptibleIterator<>(multiIterator, new Callable<Boolean>() {
@Override
public Boolean call()
throws Exception {
return requestIterable.isFull();
}
});
}
}
| 4,172 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/HierarchicalAllocator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
 * A {@link RequestAllocator} optimized for {@link HierarchicalPrioritizer}s. Processes {@link Requestor}s in order of
 * priority. Once the {@link ResourcePool} is full, lower priority {@link Requestor}s will not even be materialized.
 * The actual per-tier allocation is delegated to an underlying {@link RequestAllocator}.
 */
@RequiredArgsConstructor
@Slf4j
public class HierarchicalAllocator<T extends Request<T>> implements RequestAllocator<T> {

  public static class Factory implements RequestAllocator.Factory {
    /**
     * Wraps the allocator inferred from the configuration in a {@link HierarchicalAllocator}.
     * The configured prioritizer must be a {@link HierarchicalPrioritizer}.
     */
    @Override
    public <T extends Request<T>> RequestAllocator<T> createRequestAllocator(RequestAllocatorConfig<T> cofiguration) {
      // NOTE(review): parameter name "cofiguration" is a typo for "configuration" (left unchanged here).
      Preconditions.checkArgument(cofiguration.getPrioritizer() instanceof HierarchicalPrioritizer,
          "Prioritizer must be a " + HierarchicalPrioritizer.class.getSimpleName());
      RequestAllocator<T> underlying = RequestAllocatorUtils.inferFromConfig(cofiguration);
      return new HierarchicalAllocator<>((HierarchicalPrioritizer<T>) cofiguration.getPrioritizer(), underlying);
    }
  }

  private final HierarchicalPrioritizer<T> prioritizer;
  private final RequestAllocator<T> underlying;

  @Override
  public AllocatedRequestsIterator<T> allocateRequests(Iterator<? extends Requestor<T>> requestors, ResourcePool resourcePool) {
    // Sort requestors by tier so that tiers can be allocated in priority order (and low tiers skipped).
    List<Requestor<T>> requestorList = Lists.newArrayList(requestors);
    Comparator<Requestor<T>> requestorComparator = new Comparator<Requestor<T>>() {
      @Override
      public int compare(Requestor<T> o1, Requestor<T> o2) {
        return prioritizer.compareRequestors(o1, o2);
      }
    };
    Collections.sort(requestorList, requestorComparator);
    return new HierarchicalIterator(resourcePool, new SingleTierIterator(requestorComparator, requestorList));
  }

  /**
   * Automatically handles allocation for each tier of {@link Requestor}s, computation of correct {@link #totalResourcesUsed()},
   * and not materializing next tier once {@link ResourcePool} is full.
   */
  private class HierarchicalIterator implements AllocatedRequestsIterator<T> {
    // Iterator over the requestors of the tier currently being allocated.
    private SingleTierIterator singleTierIterator;
    // Allocation result for the current tier; null until the first tier is materialized in hasNext().
    private AllocatedRequestsIterator<T> currentIterator;
    private ResourcePool resourcePool;
    // Resources consumed by all fully-drained tiers; the in-progress tier is tracked by currentIterator.
    private final ResourceRequirement currentRequirement;

    public HierarchicalIterator(ResourcePool resourcePool, SingleTierIterator singleTierIterator) {
      this.singleTierIterator = singleTierIterator;
      this.resourcePool = resourcePool;
      this.currentRequirement = resourcePool.getResourceRequirementBuilder().zero().build();
    }

    @Override
    public boolean hasNext() {
      // Advance tiers until one yields a request, the pool is exhausted, or no tiers remain.
      while (this.currentIterator == null || !this.currentIterator.hasNext()) {
        if (this.currentIterator != null) {
          // Fold the drained tier's usage into the running total before considering the next tier.
          this.currentRequirement.add(this.currentIterator.totalResourcesUsed());
        }
        if (this.resourcePool.exceedsSoftBound(this.currentRequirement, true)) {
          // Pool is (soft-)full: do not materialize lower-priority tiers at all.
          return false;
        }
        Optional<SingleTierIterator> tmp = this.singleTierIterator.nextTier();
        if (!tmp.isPresent()) {
          return false;
        }
        this.singleTierIterator = tmp.get();
        // Allocate the next tier against only the capacity that remains after previous tiers.
        ResourcePool contractedPool = this.resourcePool.contractPool(this.currentRequirement);
        this.currentIterator = HierarchicalAllocator.this.underlying.allocateRequests(this.singleTierIterator, contractedPool);
      }
      return true;
    }

    @Override
    public T next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      return this.currentIterator.next();
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }

    @Override
    public ResourceRequirement totalResourcesUsed() {
      // Drained tiers' usage plus the current tier's usage.
      // NOTE(review): this NPEs if called before hasNext()/next() materializes the first tier
      // (currentIterator == null) — confirm callers always iterate before querying usage.
      return ResourceRequirement.add(this.currentRequirement, this.currentIterator.totalResourcesUsed(), null);
    }
  }

  /**
   * An {@link Iterator} that only returns entries of the same priority as the first entry it returns. Assumes the input
   * {@link List} is sorted.
   */
  private class SingleTierIterator implements Iterator<Requestor<T>> {
    private final Comparator<Requestor<T>> prioritizer;
    private final List<Requestor<T>> requestors;
    // First requestor of this tier; every returned element must compare equal to it. Null if the tier is empty.
    private final Requestor<T> referenceRequestor;
    private int nextRequestorIdx;

    public SingleTierIterator(Comparator<Requestor<T>> prioritizer, List<Requestor<T>> requestors) {
      this(prioritizer, requestors, 0);
    }

    private SingleTierIterator(Comparator<Requestor<T>> prioritizer, List<Requestor<T>> requestors, int initialIndex) {
      this.prioritizer = prioritizer;
      this.requestors = requestors;
      if (this.requestors.size() > initialIndex) {
        this.referenceRequestor = requestors.get(initialIndex);
      } else {
        this.referenceRequestor = null;
      }
      this.nextRequestorIdx = initialIndex;
      log.debug("Starting a single tier iterator with reference requestor: {}", this.referenceRequestor);
    }

    @Override
    public boolean hasNext() {
      // In-bounds AND still in the same tier as the reference requestor.
      return this.requestors.size() > nextRequestorIdx &&
          this.prioritizer.compare(this.referenceRequestor, this.requestors.get(this.nextRequestorIdx)) == 0;
    }

    @Override
    public Requestor<T> next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      return this.requestors.get(this.nextRequestorIdx++);
    }

    /**
     * @return a {@link SingleTierIterator} for the next tier in the input {@link List}.
     */
    Optional<SingleTierIterator> nextTier() {
      if (this.nextRequestorIdx < this.requestors.size()) {
        return Optional.of(new SingleTierIterator(this.prioritizer, this.requestors, this.nextRequestorIdx));
      }
      return Optional.absent();
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  }
}
| 4,173 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/BruteForceAllocator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.util.Iterator;
import com.google.common.base.Function;
import com.google.common.collect.Iterators;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
/**
 * A {@link RequestAllocator} that simply adds every work unit to a {@link ConcurrentBoundedPriorityIterable}, then returns
 * the iterator.
 */
@Slf4j
public class BruteForceAllocator<T extends Request<T>> extends PriorityIterableBasedRequestAllocator<T> {

  public static class Factory implements RequestAllocator.Factory {
    @Override
    public <T extends Request<T>> RequestAllocator<T> createRequestAllocator(RequestAllocatorConfig<T> configuration) {
      return new BruteForceAllocator<>(configuration);
    }
  }

  public BruteForceAllocator(RequestAllocatorConfig<T> configuration) {
    super(log, configuration);
  }

  /**
   * Chains every requestor's request iterator end-to-end; no pre-ordering and no early termination.
   */
  @Override
  protected Iterator<T> getJoinIterator(Iterator<? extends Requestor<T>> requestors,
      ConcurrentBoundedPriorityIterable<T> requestIterable) {
    // Lazily map each requestor to its request iterator, then concatenate.
    Function<Requestor<T>, Iterator<T>> requestsOf = new Function<Requestor<T>, Iterator<T>>() {
      @Nullable
      @Override
      public Iterator<T> apply(Requestor<T> requestor) {
        return requestor.iterator();
      }
    };
    return Iterators.concat(Iterators.transform(requestors, requestsOf));
  }
}
| 4,174 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/ResourceEstimator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import com.typesafe.config.Config;
/**
 * Computes the {@link ResourceRequirement} for {@link Request}s. See {@link RequestAllocator}.
 * @param <T> the type of request whose resource usage is estimated
 */
public interface ResourceEstimator<T> {

  /** Creates {@link ResourceEstimator} instances from a {@link Config}. */
  interface Factory<T> {
    ResourceEstimator<T> create(Config config);
  }

  /**
   * @param t the request to estimate
   * @param resourcePool the pool the request would be allocated against
   * @return The {@link ResourceRequirement} for input {@link Request}.
   */
  ResourceRequirement estimateRequirement(T t, ResourcePool resourcePool);
}
| 4,175 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/HierarchicalPrioritizer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.util.Comparator;
/**
 * A {@link Comparator} for {@link Request}s that can also compare {@link Requestor}s, and which guarantees that
 * given {@link Request}s r1, r2, then r1.getRequestor > r2.getRequestor implies r1 > r2.
 */
// NOTE(review): the bound uses the raw type Request; Request<T> would be type-safe but may break implementors.
public interface HierarchicalPrioritizer<T extends Request> extends Comparator<T> {

  /** Compares two {@link Requestor}s; must be consistent with the request-level ordering per the class contract. */
  int compareRequestors(Requestor<T> r1, Requestor<T> r2);
}
| 4,176 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/AllocatedRequestsIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.util.Iterator;
/**
 * An {@link Iterator} over {@link Request} that also provides with the total resources used by all consumed entries.
 * @param <T> the concrete {@link Request} type
 */
public interface AllocatedRequestsIterator<T extends Request<T>> extends Iterator<T> {

  /**
   * @return The total resources used by the elements consumed so far from this iterator.
   */
  ResourceRequirement totalResourcesUsed();
}
| 4,177 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/PushDownRequestor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.io.IOException;
import java.util.Comparator;
import java.util.Iterator;
/**
 * A {@link Requestor} that can provide an {@link Iterator} of {@link Request}s already sorted by the input
 * prioritizer. Allows push down of certain prioritizers to more efficient layers.
 */
// NOTE(review): the bound uses the raw type Request; Request<T> would be type-safe but may break implementors.
public interface PushDownRequestor<T extends Request> extends Requestor<T> {

  /**
   * Return an {@link Iterator} of {@link Request}s already sorted by the input prioritizer.
   *
   * @param prioritizer the ordering the returned iterator must follow
   * @throws IOException if the requests cannot be obtained
   */
  Iterator<T> getRequests(Comparator<T> prioritizer) throws IOException;
}
| 4,178 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/ConcurrentBoundedPriorityIterable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.TreeSet;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.slf4j.Logger;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
/**
* A concurrent bounded priority {@link Iterable}. Given a {@link ResourcePool}, a {@link ResourceEstimator}, and a
* {@link Comparator} as a prioritizer, user can attempt to add elements to this container.
* The container stores a subset of the offered elements such that their {@link ResourceRequirement} is within the
* bounds of the {@link ResourcePool}, preferentially storing high priority elements.
* This functionality is achieved by automatic eviction of low priority items when space for a higher priority item
* is needed.
* A call to {@link #iterator()} returns an iterator over the current elements in the container in order from high
* priority to low priority. Note after calling {@link #iterator()}, no more requests can be added (will throw
* {@link RuntimeException}).
*
* Note: as with a priority queue, e1 < e2 means that e1 is higher priority than e2.
*/
@Slf4j
public class ConcurrentBoundedPriorityIterable<T> implements Iterable<AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T>> {
  // Accepted requests ordered best-to-worst. TreeSet equality is defined by allDifferentComparator,
  // which breaks priority ties by element id so equal-priority requests are never collapsed.
  private final TreeSet<AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T>> elements;
  // Dimensionality of the resource pool's vectors.
  // NOTE(review): assigned in the constructor but not referenced anywhere else in this class.
  private final int dimensions;
  @Getter
  private final Comparator<? super T> comparator;
  private final Comparator<AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T>> allDifferentComparator;
  private final ResourceEstimator<T> estimator;
  private final ResourcePool resourcePool;
  // Running vector sum of the ResourceRequirements of all currently-held elements.
  private final ResourceRequirement currentRequirement;
  private volatile boolean rejectedElement = false;
  // Once true (set by iterator()), add() throws; reopen() resets it for tests only.
  private volatile boolean closed = false;
  // These are just for statistics
  private final ResourceRequirement maxResourceRequirement;
  private int requestsOffered = 0;
  private int requestsRefused = 0;
  private int requestsEvicted = 0;
  //These are for submitting alertable events
  // One of RequestAllocatorConfig.StoreRejectedRequestsConfig names (compared case-insensitively below):
  // controls which of the four lists that follow actually accumulate rejected/evicted requests.
  private String storeRejectedRequestsSetting;
  @Getter
  private List<T> requestsExceedingAvailableResourcePool = Lists.newArrayList();
  @Getter
  private List<T> requestsRejectedWithLowPriority = Lists.newArrayList();
  @Getter
  private List<T> requestsRejectedDueToInsufficientEviction = Lists.newArrayList();
  @Getter
  private List<T> requestsDropped = Lists.newArrayList();
  // NOTE(review): the lists above are mutated under the lock in addImpl but exposed through
  // unsynchronized @Getter accessors -- callers should read them only once offering has ceased.
  // These are ResourceRequirements for temporary use to avoid instantiation costs
  private final ResourceRequirement candidateRequirement;
  private final ResourceRequirement tmpRequirement;
  private final ResourceRequirement reuse;
  /**
   * @param prioritizer total order over requests; smaller means higher priority.
   * @param resourceEstimator computes the {@link ResourceRequirement} of each offered request.
   * @param storeRejectedRequestsSetting a {@link RequestAllocatorConfig.StoreRejectedRequestsConfig} name
   *        controlling which rejected/evicted requests are retained for reporting.
   * @param pool resource budget bounding the set of retained requests.
   */
  public ConcurrentBoundedPriorityIterable(final Comparator<? super T> prioritizer,
      ResourceEstimator<T> resourceEstimator, String storeRejectedRequestsSetting, ResourcePool pool) {
    this.estimator = resourceEstimator;
    this.resourcePool = pool;
    this.dimensions = this.resourcePool.getNumDimensions();
    this.comparator = prioritizer;
    this.allDifferentComparator = new AllDifferentComparator();
    this.elements = new TreeSet<>(this.allDifferentComparator);
    this.storeRejectedRequestsSetting = storeRejectedRequestsSetting;
    // All requirement accumulators start from the same zero vector so they share the pool's dimensions.
    this.currentRequirement = this.resourcePool.getResourceRequirementBuilder().zero().build();
    this.maxResourceRequirement = new ResourceRequirement(this.currentRequirement);
    this.candidateRequirement = new ResourceRequirement(this.currentRequirement);
    this.tmpRequirement = new ResourceRequirement(this.currentRequirement);
    this.reuse = new ResourceRequirement(this.currentRequirement);
  }
  /**
   * This is the actual {@link Comparator} used in the {@link TreeSet}. Since {@link TreeSet}s use the provided
   * {@link Comparator} to determine equality, we must force elements with the same priority to be different according
   * to the {@link TreeSet}.
   */
  private class AllDifferentComparator implements Comparator<AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T>> {
    @Override
    public int compare(AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T> t1,
        AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T> t2) {
      int providedComparison = ConcurrentBoundedPriorityIterable.this.comparator.compare(t1.getT(), t2.getT());
      if (providedComparison != 0) {
        return providedComparison;
      }
      // Equal priority: fall back to the element's unique id so the TreeSet keeps both.
      return Long.compare(t1.getId(), t2.getId());
    }
  }
  /**
   * Offer an element to the container.
   * @return true if the element was added, false if there was no space and we could not evict any elements to make it fit.
   *         Note that the element may get evicted by future offers, so a return of true is not a guarantee that the
   *         element will be present at any time in the future.
   * @throws RuntimeException if {@link #iterator()} has already been called (container is closed).
   */
  public boolean add(T t) {
    if (this.closed) {
      throw new RuntimeException(
          ConcurrentBoundedPriorityIterable.class.getSimpleName() + " is no longer accepting requests!");
    }
    // Estimate outside the lock; only the state mutation in addImpl is synchronized.
    AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T> newElement =
        new AllocatedRequestsIteratorBase.RequestWithResourceRequirement<>(t,
            this.estimator.estimateRequirement(t, this.resourcePool));
    boolean addedWorkunits = addImpl(newElement);
    if (!addedWorkunits) {
      this.rejectedElement = true;
    }
    return addedWorkunits;
  }
  // Core accept/reject/evict decision. Three rejection paths:
  //   1) the request alone exceeds the pool's hard bound;
  //   2) it does not fit and is no better than the current worst element;
  //   3) evicting every strictly-worse element still would not free enough room.
  // Otherwise, evict just enough low-priority elements (worst first) and accept.
  private synchronized boolean addImpl(AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T> newElement) {
    this.maxResourceRequirement.entryWiseMax(newElement.getResourceRequirement());
    this.requestsOffered++;
    if (this.resourcePool.exceedsHardBound(newElement.getResourceRequirement(), false)) {
      // item does not fit even in empty pool
      log.warn(String.format("Request %s is larger than the available resource pool. If the pool is not expanded, "
          + "it will never be selected. Request: %s.", newElement.getT(),
          this.resourcePool.stringifyRequirement(newElement.getResourceRequirement())));
      if (!this.storeRejectedRequestsSetting
          .equalsIgnoreCase(RequestAllocatorConfig.StoreRejectedRequestsConfig.NONE.name())) {
        this.requestsExceedingAvailableResourcePool.add(newElement.getT());
      }
      this.requestsRefused++;
      return false;
    }
    ResourceRequirement candidateRequirement = ResourceRequirement
        .add(this.currentRequirement, newElement.getResourceRequirement(), this.candidateRequirement);
    if (this.resourcePool.exceedsHardBound(candidateRequirement, false)) {
      // Not enough free room: compare against the current worst (last) element.
      if (this.comparator.compare(this.elements.last().getT(), newElement.getT()) <= 0) {
        log.debug(
            "Request {} does not fit in resource pool and is lower priority than current lowest priority request. "
                + "Rejecting", newElement.getT());
        this.requestsRefused++;
        if (this.storeRejectedRequestsSetting
            .equalsIgnoreCase(RequestAllocatorConfig.StoreRejectedRequestsConfig.ALL.name())) {
          this.requestsRejectedWithLowPriority.add(newElement.getT());
        }
        return false;
      }
      // Walk elements worst-first, tentatively subtracting their requirements until the
      // new element fits; only commit the evictions once we know it does.
      List<AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T>> toDrop = Lists.newArrayList();
      this.currentRequirement.copyInto(this.tmpRequirement);
      for (AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T> dropCandidate : this.elements
          .descendingSet()) {
        if (this.comparator.compare(dropCandidate.getT(), newElement.getT()) <= 0) {
          // Remaining candidates are all at least as good as the new element: cannot evict them.
          log.debug("Cannot evict enough requests to fit request {}. " + "Rejecting", newElement.getT());
          this.requestsRefused++;
          if (this.storeRejectedRequestsSetting
              .equalsIgnoreCase(RequestAllocatorConfig.StoreRejectedRequestsConfig.ALL.name())) {
            this.requestsRejectedDueToInsufficientEviction.add(newElement.getT());
          }
          return false;
        }
        this.tmpRequirement.subtract(dropCandidate.getResourceRequirement());
        toDrop.add(dropCandidate);
        if (!this.resourcePool.exceedsHardBound(
            ResourceRequirement.add(this.tmpRequirement, newElement.getResourceRequirement(), this.reuse), false)) {
          break;
        }
      }
      // Commit the evictions computed above.
      for (AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T> drop : toDrop) {
        log.debug("Evicting request {}.", drop.getT());
        this.requestsEvicted++;
        if (this.storeRejectedRequestsSetting
            .equalsIgnoreCase(RequestAllocatorConfig.StoreRejectedRequestsConfig.ALL.name())) {
          this.requestsDropped.add(drop.getT());
        }
        this.elements.remove(drop);
        this.currentRequirement.subtract(drop.getResourceRequirement());
      }
    }
    this.elements.add(newElement);
    this.currentRequirement.add(newElement.getResourceRequirement());
    return true;
  }
  /**
   * @return Whether any calls to {@link #add} have returned false, i.e. some element has been rejected due
   *         to strict capacity issues.
   */
  public boolean hasRejectedElement() {
    return this.rejectedElement;
  }
  /**
   * @return Whether the list has reached its soft bound.
   */
  public synchronized boolean isFull() {
    return this.resourcePool.exceedsSoftBound(this.currentRequirement, true);
  }
  /**
   * Log statistics about this {@link ConcurrentBoundedPriorityIterable}.
   * @param logger logger to emit to; falls back to this class's own logger when absent.
   */
  public synchronized void logStatistics(Optional<Logger> logger) {
    Logger actualLogger = logger.or(log);
    StringBuilder messageBuilder = new StringBuilder("Statistics for ").
        append(ConcurrentBoundedPriorityIterable.class.getSimpleName()).append(": {");
    messageBuilder.append(this.resourcePool).append(", ");
    messageBuilder.append("totalResourcesUsed: ")
        .append(this.resourcePool.stringifyRequirement(this.currentRequirement)).append(", ");
    messageBuilder.append("maxRequirementPerDimension: ")
        .append(this.resourcePool.stringifyRequirement(this.maxResourceRequirement)).append(", ");
    messageBuilder.append("requestsOffered: ").append(this.requestsOffered).append(", ");
    // accepted = offered - evicted - refused, i.e. the number currently held.
    messageBuilder.append("requestsAccepted: ")
        .append(this.requestsOffered - this.requestsEvicted - this.requestsRefused).append(", ");
    messageBuilder.append("requestsRefused: ").append(this.requestsRefused).append(", ");
    messageBuilder.append("requestsEvicted: ").append(this.requestsEvicted);
    messageBuilder.append("}");
    actualLogger.info(messageBuilder.toString());
  }
  // Test-only hook to undo the close performed by iterator().
  @VisibleForTesting
  void reopen() {
    this.closed = false;
  }
  /**
   * Returns an iterator over the retained elements in priority order (best first) and
   * permanently closes the container to further {@link #add} calls.
   */
  @Override
  public Iterator<AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T>> iterator() {
    this.closed = true;
    return this.elements.iterator();
  }
}
| 4,179 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/RequestAllocator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.util.Comparator;
import java.util.Iterator;
/**
* This interface is intended to solve the problem of selecting a subset of expensive requests based on priority when
* there are limited resources to perform such requests.
*
* <p>
* We assume there are a number of {@link Requestor}s each one sending a finite stream of {@link Request}s.
* Each such request requires a certain amount of resources, and there is a finite pool of resources available.
* Additionally, some requests have higher priority than others. Our objective is to select a set of requests to satisfy
* such that their total resource usage is within the bounds of the finite resource pool, and such that, as much as
* possible, a request will not be selected if there was a request with a higher priority that was not selected.
* </p>
*
* <p>
* We model the problem as follows:
* <ol>
* <li> A request is an implementation of {@link Request}. </li>
* <li> A {@link Requestor} is a stream ({@link Iterator}) of {@link Request}s. We use a stream as opposed to a set or
* list because the {@link Requestor} is encouraged to lazily materialize requests only as needed by the request allocator. </li>
* <li> A {@link ResourcePool} is a vector of doubles representing the available resources along a sequence of dimensions
* (e.g. bytes, files to copy). </li>
* <li> A {@link ResourceRequirement} is a vector of doubles representing the resources need by a particular request.
* We assume that resource requirements are combined exclusively through vector addition. </li>
* <li> A {@link ResourceEstimator} is a class that, given a {@link Request}, computes its {@link ResourceRequirement}. </li>
* <li> A prioritizer is a {@link Comparator} which, given two {@link Request}s, determines which is higher priority
* (smaller is higher priority, following the {@link java.util.PriorityQueue} model).</li>
* </ol>
* </p>
*
* @param <T>
*/
public interface RequestAllocator<T extends Request<T>> {

  /** Factory for {@link RequestAllocator} instances. */
  interface Factory {
    /**
     * Create a {@link RequestAllocator} with the input prioritizer and {@link ResourceEstimator}.
     * @param configuration carries the prioritizer, estimator, and related settings.
     */
    <T extends Request<T>> RequestAllocator<T> createRequestAllocator(RequestAllocatorConfig<T> configuration);
  }

  /**
   * Compute the subset of accepted {@link Request}s from the input {@link Requestor}s which fit within
   * the {@link ResourcePool}.
   */
  AllocatedRequestsIterator<T> allocateRequests(Iterator<? extends Requestor<T>> requestors, ResourcePool resourcePool);
}
| 4,180 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/Requestor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.util.Iterator;
/**
* A wrapper around a {@link Iterator} of {@link Request}s used for request allocation problem. See {@link RequestAllocator}.
*/
public interface Requestor<T extends Request> extends Iterable<T> {
  // Marker interface: implementations expose their (possibly lazily materialized) stream of
  // requests through Iterable#iterator().
  // NOTE(review): the bound uses the raw type Request; `T extends Request<T>` looks intended, but
  // tightening it would be a source-incompatible change for implementors -- confirm before changing.
}
| 4,181 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/ResourceRequirement.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
 * Represents a requirement of resources in a {@link ResourcePool}: a vector of doubles with the same
 * dimensions as the pool. Instances combine exclusively through vector arithmetic. See {@link ResourcePool}.
 */
@AllArgsConstructor(access = AccessLevel.PROTECTED)
public class ResourceRequirement {

  /** Builds a {@link ResourceRequirement} whose dimensions match a particular {@link ResourcePool}. */
  public static class Builder {
    private final double[] requirement;
    private final ResourcePool pool;

    public Builder(ResourcePool pool) {
      this.pool = pool;
      // Start from the pool's per-dimension default usage.
      this.requirement = pool.getDefaultResourceUse(null);
    }

    /**
     * Set all resource requirements to 0, overriding the pool's defaults.
     */
    public Builder zero() {
      for (int idx = this.requirement.length - 1; idx >= 0; idx--) {
        this.requirement[idx] = 0;
      }
      return this;
    }

    /**
     * Specify the resource requirement along a single named dimension.
     * @throws IllegalArgumentException if the pool has no such dimension.
     */
    public Builder setRequirement(String dimension, double value) {
      Integer idx = this.pool.getDimensionIndex().get(dimension);
      if (idx == null) {
        throw new IllegalArgumentException(String.format("No dimension %s in this resource pool.", dimension));
      }
      this.requirement[idx] = value;
      return this;
    }

    public ResourceRequirement build() {
      return new ResourceRequirement(this.requirement);
    }
  }

  @Getter
  private final double[] resourceVector;

  /** Copy constructor: snapshots the other requirement's vector. */
  public ResourceRequirement(ResourceRequirement other) {
    this.resourceVector = other.resourceVector.clone();
  }

  /**
   * In-place vector addition of the other {@link ResourceRequirement} into this one.
   */
  public void add(ResourceRequirement other) {
    VectorAlgebra.addVector(this.resourceVector, other.resourceVector, 1., this.resourceVector);
  }

  /**
   * In-place vector subtraction of the other {@link ResourceRequirement} from this one.
   */
  public void subtract(ResourceRequirement other) {
    VectorAlgebra.addVector(this.resourceVector, other.resourceVector, -1., this.resourceVector);
  }

  /** In-place entry-wise maximum of this vector and the other's. */
  void entryWiseMax(ResourceRequirement other) {
    for (int idx = 0; idx < this.resourceVector.length; idx++) {
      this.resourceVector[idx] = Math.max(this.resourceVector[idx], other.resourceVector[idx]);
    }
  }

  /** Copy this requirement's vector into {@code reuse}, allocating a fresh instance when {@code reuse} is null. */
  ResourceRequirement copyInto(ResourceRequirement reuse) {
    if (reuse == null) {
      return new ResourceRequirement(this.resourceVector.clone());
    }
    System.arraycopy(this.resourceVector, 0, reuse.getResourceVector(), 0, this.resourceVector.length);
    return reuse;
  }

  /** Vector sum of {@code r1} and {@code r2}, written into {@code reuse} (allocated when null). */
  public static ResourceRequirement add(ResourceRequirement r1, ResourceRequirement r2, ResourceRequirement reuse) {
    ResourceRequirement target = (reuse == null) ? new ResourceRequirement(r1.resourceVector.clone()) : reuse;
    VectorAlgebra.addVector(r1.resourceVector, r2.resourceVector, 1., target.resourceVector);
    return target;
  }
}
| 4,182 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/PriorityMultiIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.util.Collection;
import java.util.Comparator;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.PriorityQueue;

import lombok.Data;
/**
* An {@link Iterator} that takes multiple input {@link Iterator}s each of whose elements are ordered by the input
* {@link Comparator} and iterates over the elements in all input iterators in a globally ordered way.
*
* Note: this class does not check whether the input {@link Iterator}s are ordered correctly, so it is package-private
* to prevent misuse.
*/
class PriorityMultiIterator<T> implements Iterator<T> {
private final PriorityQueue<TAndIterator> queue;
private final Comparator<TAndIterator> actualComparator;
public PriorityMultiIterator(Collection<Iterator<T>> orderedIterators, final Comparator<T> prioritizer) {
this.actualComparator = new Comparator<TAndIterator>() {
@Override
public int compare(TAndIterator o1, TAndIterator o2) {
return prioritizer.compare(o1.getT(), o2.getT());
}
};
this.queue = new PriorityQueue<>(orderedIterators.size(), this.actualComparator);
for (Iterator<T> iterator : orderedIterators) {
if (iterator.hasNext()) {
this.queue.offer(new TAndIterator(iterator.next(), iterator));
}
}
}
@Override
public boolean hasNext() {
return !this.queue.isEmpty();
}
@Override
public T next() {
TAndIterator nextTAndIterator = this.queue.poll();
if (nextTAndIterator.getIterator().hasNext()) {
this.queue.offer(new TAndIterator(nextTAndIterator.getIterator().next(), nextTAndIterator.getIterator()));
}
return nextTAndIterator.getT();
}
@Data
private class TAndIterator {
private final T t;
private final Iterator<T> iterator;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
| 4,183 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/ResourcePool.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import lombok.AccessLevel;
import lombok.Builder;
import lombok.Getter;
import lombok.Singular;
/**
* Represents a pool of available resources for a {@link RequestAllocator}. The resources pool is essentially a vector
* of doubles where each dimension represents a resource. A set of resource requests exceeds the availability of the
* pool if the vector sum of those requests is larger than the vector of resources in the pool along any dimension.
*/
@Getter(value = AccessLevel.PROTECTED)
public class ResourcePool {
  /** Multiplier applied to each soft bound to derive the default hard bound. */
  public static final double DEFAULT_DIMENSION_TOLERANCE = 1.2;
  // Maps a dimension name (e.g. "bytes") to its index in the vectors below.
  private final ImmutableMap<String, Integer> dimensionIndex;
  private final double[] softBound;
  private final double[] hardBound;
  private final double[] defaultResourceUse;
  /**
   * @param maxResources Maximum resource availability along each dimension. Each entry in this map is a dimension. Note
   *                     this is considered a soft bound (e.g. max resources may be exceeded by a tolerance).
   * @param tolerances The hard limit on resources availability along each dimension is set to maxResource * tolerance.
   *                   The default tolerance is {@link #DEFAULT_DIMENSION_TOLERANCE}. It is recommended to always have a
   *                   tolerance >1, as some {@link RequestAllocator}s will do unnecessary work if the soft and hard
   *                   bounds are too close to each other. (Tolerances below 1 are clamped to 1 below.)
   * @param defaultRequirements Specifies the default usage of the resources along each dimension when creating a
   *                            {@link ResourceRequirement}. Default is 0.
   */
  @Builder
  protected ResourcePool(@Singular Map<String, Double> maxResources, @Singular Map<String, Double> tolerances,
      @Singular Map<String, Double> defaultRequirements) {
    ImmutableMap.Builder<String, Integer> indexBuilder = ImmutableMap.builder();
    this.softBound = new double[maxResources.size()];
    int currentIndex = 0;
    // Assign each named dimension a stable index and record its soft bound.
    for (Map.Entry<String, Double> resource : maxResources.entrySet()) {
      indexBuilder.put(resource.getKey(), currentIndex);
      this.softBound[currentIndex] = resource.getValue();
      currentIndex++;
    }
    this.dimensionIndex = indexBuilder.build();
    // Hard bounds default to softBound * DEFAULT_DIMENSION_TOLERANCE ...
    this.hardBound = this.softBound.clone();
    for (int i = 0; i < this.hardBound.length; i++) {
      this.hardBound[i] *= DEFAULT_DIMENSION_TOLERANCE;
    }
    this.defaultResourceUse = new double[this.softBound.length];
    // ... then per-dimension overrides are applied, with tolerances clamped to at least 1.
    for (Map.Entry<String, Integer> idxEntry : this.dimensionIndex.entrySet()) {
      if (tolerances.containsKey(idxEntry.getKey())) {
        this.hardBound[idxEntry.getValue()] =
            this.softBound[idxEntry.getValue()] * Math.max(1.0, tolerances.get(idxEntry.getKey()));
      }
      if (defaultRequirements.containsKey(idxEntry.getKey())) {
        this.defaultResourceUse[idxEntry.getValue()] = defaultRequirements.get(idxEntry.getKey());
      }
    }
  }
  // Internal constructor used by contractPool: takes ownership of the given vectors (no copying).
  private ResourcePool(double[] softBound, double[] hardBound, double[] defaultResourceUse, ImmutableMap<String, Integer> dimensionIndex) {
    this.softBound = softBound;
    this.hardBound = hardBound;
    this.defaultResourceUse = defaultResourceUse;
    this.dimensionIndex = dimensionIndex;
  }
  // Copy constructor; shares (does not clone) the other pool's vectors.
  protected ResourcePool(ResourcePool other) {
    this.softBound = other.getSoftBound();
    this.hardBound = other.getHardBound();
    this.defaultResourceUse = other.getDefaultResourceUse();
    this.dimensionIndex = other.getDimensionIndex();
  }
  @Override
  public String toString() {
    StringBuilder builder = new StringBuilder(ResourcePool.class.getSimpleName()).append(": {");
    builder.append("softBound").append(": ").append(vectorToString(this.softBound));
    builder.append(", ");
    builder.append("hardBound").append(": ").append(vectorToString(this.hardBound));
    builder.append("}");
    return builder.toString();
  }
  /**
   * Stringify a {@link ResourceRequirement} with the appropriate dimension labels.
   */
  public String stringifyRequirement(ResourceRequirement requirement) {
    return vectorToString(requirement.getResourceVector());
  }
  // Renders a vector as "[dim1: v1, dim2: v2, ...]" using the pool's dimension names.
  private String vectorToString(double[] vector) {
    List<String> tokens = Lists.newArrayListWithCapacity(this.dimensionIndex.size());
    for (Map.Entry<String, Integer> dimension : dimensionIndex.entrySet()) {
      tokens.add(dimension.getKey() + ": " + vector[dimension.getValue()]);
    }
    return Arrays.toString(tokens.toArray());
  }
  /**
   * @return true if input {@link ResourceRequirement} exceeds the soft bound along any dimension. If the parameter
   *         orEqual is true, then matching along any dimension will also return true.
   */
  public boolean exceedsSoftBound(ResourceRequirement requirement, boolean orEqual) {
    return VectorAlgebra.exceedsVector(this.softBound, requirement.getResourceVector(), orEqual);
  }
  /**
   * @return true if input {@link ResourceRequirement} exceeds the hard bound along any dimension. If the parameter
   *         orEqual is true, then matching along any dimension will also return true.
   */
  public boolean exceedsHardBound(ResourceRequirement requirement, boolean orEqual) {
    return VectorAlgebra.exceedsVector(this.hardBound, requirement.getResourceVector(), orEqual);
  }
  /**
   * Use to create a {@link ResourceRequirement} compatible with this {@link ResourcePool}.
   */
  public ResourceRequirement.Builder getResourceRequirementBuilder() {
    return new ResourceRequirement.Builder(this);
  }
  /**
   * @return a new {@link ResourcePool} which is a copy of this {@link ResourcePool} except its resource vector has been
   *         reduced by the input {@link ResourceRequirement}.
   */
  protected ResourcePool contractPool(ResourceRequirement requirement) {
    return new ResourcePool(VectorAlgebra.addVector(this.softBound, requirement.getResourceVector(), -1., null),
        VectorAlgebra.addVector(this.hardBound, requirement.getResourceVector(), -1., null),
        this.defaultResourceUse, this.dimensionIndex);
  }
  /**
   * Get the dimensionality of the embedded resource vector.
   */
  int getNumDimensions() {
    return this.dimensionIndex.size();
  }
  // Copies the default resource usage into reuse when it is the right size; otherwise returns a fresh clone.
  double[] getDefaultResourceUse(double[] reuse) {
    if (reuse != null && this.defaultResourceUse.length == reuse.length) {
      System.arraycopy(this.defaultResourceUse, 0, reuse, 0, this.defaultResourceUse.length);
      return reuse;
    }
    return this.defaultResourceUse.clone();
  }
}
| 4,184 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/Request.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
/**
* Represents an expensive request in the request allocation problem. See {@link RequestAllocator}.
*/
public interface Request<T extends Request> {
  /** @return the {@link Requestor} associated with this request. */
  Requestor<T> getRequestor();
}
| 4,185 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/measurement/GrowthMilestoneTracker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.measurement;
import java.util.Iterator;
import java.util.Optional;
import java.util.stream.LongStream;
/** Stateful class to track growth/accumulation/"high watermark" against milestones */
public class GrowthMilestoneTracker {
  // Lazily-consumed, monotonically increasing milestone values.
  private final Iterator<Long> milestoneSequence = createMilestoneSequence();
  // Smallest milestone not yet achieved; becomes Long.MAX_VALUE once the sequence is exhausted.
  private Long nextMilestone = milestoneSequence.next();

  /** @return whether `n >=` the next monotonically increasing milestone (with no effort to handle wrap-around) */
  public final boolean isAnotherMilestone(long n) {
    return this.calcLargestNewMilestone(n).isPresent();
  }

  /** @return largest monotonically increasing milestone iff `n >=` some new one (no effort to handle wrap-around) */
  public final Optional<Long> calcLargestNewMilestone(long n) {
    if (n < this.nextMilestone) {
      return Optional.empty();
    }
    Long largestMilestoneAchieved;
    do {
      // Consume every milestone `n` has reached, remembering the largest.
      largestMilestoneAchieved = this.nextMilestone;
      this.nextMilestone = this.milestoneSequence.hasNext() ? this.milestoneSequence.next() : Long.MAX_VALUE;
    } while (n >= this.nextMilestone);
    return Optional.of(largestMilestoneAchieved);
  }

  /**
   * @return positive monotonically increasing milestones, for {@link GrowthMilestoneTracker#isAnotherMilestone(long)}
   *         to track against; if/whenever exhausted, {@link Long#MAX_VALUE} becomes stand-in thereafter
   * DEFAULT SEQ: [1, 10, 100, 1000, 10k, 15k, 20k, 25k, 30k, ..., 50k, 75k, 100k, 125k, ..., 250k, 300k, 350k, ... )
   */
  protected Iterator<Long> createMilestoneSequence() {
    // 1, 10, 100, 1000 -- four powers of ten, as documented above.
    // (Previously `limit((long) Math.log10(1000))` == limit(3) yielded only [1, 10, 100],
    // silently skipping the documented 1000 milestone.)
    LongStream initially = LongStream.iterate(1L, i -> i * 10).limit(4);
    LongStream next = LongStream.rangeClosed(2L, 9L).map(i -> i * 5000); // 10k - 45k
    LongStream then = LongStream.rangeClosed(2L, 9L).map(i -> i * 25000); // 50k - 225k
    LongStream thereafter = LongStream.iterate(250000L, i -> i + 50000); // 250k, 300k, ...
    return
        LongStream.concat(initially,
            LongStream.concat(next,
                LongStream.concat(then, thereafter))
        ).iterator();
  }
}
| 4,186 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/test/HelloWorldSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.Extract.TableType;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.ConfigUtils;
/**
 * A trivial {@link Source} that emits a configurable number of "Hello world" greetings,
 * one greeting per {@link WorkUnit}.
 */
public class HelloWorldSource implements Source<String, String> {

  /** Config namespace under which this source's settings live. */
  public static final String CONFIG_NAMESPACE = "gobblin.source.helloWorld";
  /** Number of greetings (i.e. work units) to generate. */
  public static final String NUM_HELLOS_KEY = "numHellos";
  public static final String NUM_HELLOS_FULL_KEY = CONFIG_NAMESPACE + "." + NUM_HELLOS_KEY;
  public static final int DEFAULT_NUM_HELLOS = 1;
  /** Per-work-unit id identifying which greeting to emit. */
  public static final String HELLO_ID_KEY = "helloId";
  public static final String HELLO_ID_FULL_KEY = CONFIG_NAMESPACE + "." + HELLO_ID_KEY;

  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    Config rootConfig = ConfigUtils.propertiesToConfig(state.getProperties());
    Config sourceConfig = rootConfig.hasPath(CONFIG_NAMESPACE)
        ? rootConfig.getConfig(CONFIG_NAMESPACE)
        : ConfigFactory.empty();
    int numHellos = sourceConfig.hasPath(NUM_HELLOS_KEY)
        ? sourceConfig.getInt(NUM_HELLOS_KEY)
        : DEFAULT_NUM_HELLOS;
    Extract extract = new Extract(TableType.APPEND_ONLY,
        HelloWorldSource.class.getPackage().getName(),
        HelloWorldSource.class.getSimpleName());
    // One work unit per greeting, each tagged with its 1-based hello id.
    List<WorkUnit> workUnits = new ArrayList<>(numHellos);
    for (int helloId = 1; helloId <= numHellos; helloId++) {
      WorkUnit workUnit = new WorkUnit(extract);
      workUnit.setProp(HELLO_ID_FULL_KEY, helloId);
      workUnits.add(workUnit);
    }
    return workUnits;
  }

  @Override
  public Extractor<String, String> getExtractor(WorkUnitState state) {
    return new ExtractorImpl(state.getPropAsInt(HELLO_ID_FULL_KEY));
  }

  @Override
  public void shutdown(SourceState state) {
    // Nothing to do
  }

  /** Emits exactly one greeting record, then signals end-of-stream with {@code null}. */
  public static class ExtractorImpl implements Extractor<String, String> {
    private final int _helloId;
    private int _recordsEmitted = 0;

    public ExtractorImpl(int helloId) {
      _helloId = helloId;
    }

    @Override
    public void close() throws IOException {
      // Nothing to do
    }

    @Override
    public String getSchema() throws IOException {
      return "string";
    }

    @Override
    public String readRecord(String reuse) {
      if (_recordsEmitted > 0) {
        // Single-record extractor: null terminates the stream.
        return null;
      }
      _recordsEmitted++;
      return helloMessage(_helloId);
    }

    public static String helloMessage(int helloId) {
      return "Hello world " + helloId + " !";
    }

    @Override
    public long getExpectedRecordCount() {
      return 1;
    }

    @Override
    public long getHighWatermark() {
      return 0;
    }
  }
}
| 4,187 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/test/FastSequentialSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.test;
import java.io.IOException;
import java.util.List;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.WorkUnit;
import lombok.RequiredArgsConstructor;
/**
 * A very simple {@link Source} whose extractors emit long values counting up from 0 until either
 * a user specified record count is reached or a user specified number of seconds elapses.
 */
public class FastSequentialSource implements Source<String, Long> {

  public static final String NUM_WORK_UNITS = FastSequentialSource.class.getSimpleName() + ".numWorkUnits";
  public static final String MAX_RECORDS_PER_WORK_UNIT = FastSequentialSource.class.getSimpleName() + ".maxRecordsPerWorkUnit";
  public static final String MAX_SECONDS_PER_WORK_UNIT = FastSequentialSource.class.getSimpleName() + ".maxSecondsPerWorkUnit";

  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    int numWorkUnits = state.getPropAsInt(NUM_WORK_UNITS, 1);
    List<WorkUnit> workUnits = Lists.newArrayList();
    while (workUnits.size() < numWorkUnits) {
      workUnits.add(new WorkUnit());
    }
    return workUnits;
  }

  @Override
  public Extractor<String, Long> getExtractor(WorkUnitState state) throws IOException {
    long maxRecords = state.getPropAsLong(MAX_RECORDS_PER_WORK_UNIT);
    long maxSeconds = state.getPropAsLong(MAX_SECONDS_PER_WORK_UNIT);
    return new FastSequentialExtractor(maxRecords, maxSeconds);
  }

  @Override
  public void shutdown(SourceState state) {
  }

  /**
   * Emits sequential longs starting at 0; stops (returns null) after {@code maxRecords} records
   * or once {@code maxSeconds} seconds have elapsed since the first read, whichever comes first.
   */
  public static class FastSequentialExtractor implements Extractor<String, Long> {

    private final long maxRecords;
    private final long maxSeconds;
    // Wall-clock deadline in epoch millis, fixed lazily on the first readRecord() call;
    // 0 means "not started yet".
    private volatile long endTime;
    private volatile long recordNumber;

    public FastSequentialExtractor(long maxRecords, long maxSeconds) {
      this.maxRecords = maxRecords;
      this.maxSeconds = maxSeconds;
    }

    @Override
    public String getSchema() throws IOException {
      return "schema";
    }

    @Override
    public Long readRecord(@Deprecated Long reuse) throws DataRecordException, IOException {
      if (this.endTime == 0) {
        // First record: establish the deadline.
        this.endTime = System.currentTimeMillis() + this.maxSeconds * 1000;
      }

      boolean timeExpired = System.currentTimeMillis() > this.endTime;
      boolean countReached = this.recordNumber >= this.maxRecords;
      if (timeExpired || countReached) {
        return null;
      }
      return this.recordNumber++;
    }

    @Override
    public long getExpectedRecordCount() {
      return this.maxRecords;
    }

    @Override
    public long getHighWatermark() {
      return 0;
    }

    @Override
    public void close() throws IOException {
    }
  }
}
| 4,188 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/test/StdoutWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.test;
import java.io.IOException;
import java.nio.charset.Charset;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
/**
 * A simple {@link DataWriter} implementation that prints each record's {@code toString()} to
 * standard output, tracking record and byte counts as it goes.
 *
 * @param <D> record type
 */
public class StdoutWriter<D> implements DataWriter<D> {

  // Running totals reported by recordsWritten() / bytesWritten().
  private long recordCount = 0;
  private long byteCount = 0;

  @Override
  public void close() {
    // NO-OP
  }

  @Override
  public void write(D record) throws IOException {
    if (record == null) {
      return;
    }
    String rendered = record.toString();
    System.out.println(rendered);
    ++this.recordCount;
    // Byte count is measured in the platform default charset.
    this.byteCount += rendered.getBytes(Charset.defaultCharset()).length;
  }

  @Override
  public void commit() {
    // NO-OP
  }

  @Override
  public void cleanup() {
    // NO-OP
  }

  @Override
  public long recordsWritten() {
    return this.recordCount;
  }

  @Override
  public long bytesWritten() throws IOException {
    return this.byteCount;
  }

  @Override
  public void flush() throws IOException {
    System.out.flush();
  }

  /** Builder that produces a fresh {@link StdoutWriter}. */
  public static class Builder<D> extends DataWriterBuilder<Object, D> {
    @Override
    public DataWriter<D> build() throws IOException {
      return new StdoutWriter<>();
    }
  }
}
| 4,189 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/test/StressTestingSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Charsets;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.Extract.TableType;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
 * A {@link Source} to be used for stress testing.
 *
 * <p>
 * This source uses an extractor that can be configured to spend sleep and computation time before
 * returning a record. The size of the returned record can also be configured.
 * </p>
 */
public class StressTestingSource implements Source<String, byte[]> {

  public static final String CONFIG_NAMESPACE = "stressTest";
  public static final String NUM_WORK_UNITS_KEY = CONFIG_NAMESPACE + "." + "numWorkUnits";
  public static final int DEFAULT_NUM_WORK_UNITS = 1;
  public static final String RUN_DURATION_KEY = CONFIG_NAMESPACE + "." + "runDurationSecs";
  public static final int DEFAULT_RUN_DURATION = 0;
  public static final String COMPUTE_TIME_MICRO_KEY = CONFIG_NAMESPACE + "." + "computeTimeMicro";
  public static final int DEFAULT_COMPUTE_TIME_MICRO = 0;
  public static final String SLEEP_TIME_MICRO_KEY = CONFIG_NAMESPACE + "." + "sleepTimeMicro";
  public static final int DEFAULT_SLEEP_TIME = 0;
  public static final String NUM_RECORDS_KEY = CONFIG_NAMESPACE + "." + "numRecords";
  public static final int DEFAULT_NUM_RECORDS = 1;
  public static final String MEM_ALLOC_BYTES_KEY = CONFIG_NAMESPACE + "." + "memAllocBytes";
  public static final int DEFAULT_MEM_ALLOC_BYTES = 8;
  public static final String THROW_EXCEPTION = CONFIG_NAMESPACE + "." + "throwException";
  public static final boolean DEFAULT_THROW_EXCEPTION = false;

  /** Sentinel meaning "no wall-clock end time configured". */
  private static final long INVALID_TIME = -1;

  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    int numWorkUnits = state.getPropAsInt(NUM_WORK_UNITS_KEY, DEFAULT_NUM_WORK_UNITS);

    Extract extract = new Extract(TableType.APPEND_ONLY,
        StressTestingSource.class.getPackage().getName(),
        StressTestingSource.class.getSimpleName());

    List<WorkUnit> wus = new ArrayList<>(numWorkUnits);
    for (int i = 1; i <= numWorkUnits; ++i) {
      WorkUnit wu = new WorkUnit(extract);
      wus.add(wu);
    }

    return wus;
  }

  @Override
  public Extractor<String, byte[]> getExtractor(WorkUnitState state) {
    return new ExtractorImpl(state);
  }

  @Override
  public void shutdown(SourceState state) {
    // Nothing to do
  }

  /**
   * Emits records until either the configured run duration elapses or the configured record count
   * is reached, optionally busy-spinning and/or sleeping before each record.
   */
  public static class ExtractorImpl implements Extractor<String, byte[]> {

    private int recordsEmitted = 0;
    private final long startTime;
    // Epoch millis after which no more records are emitted, or INVALID_TIME if unbounded by time.
    private final long endTime;
    // Busy-spin time per record, in nanoseconds. Declared long (was int) so that configured
    // compute times above ~2.1 seconds in micros do not overflow when converted to nanos.
    private final long computeTimeNano;
    private final int sleepTimeMicro;
    private final int numRecords;
    private final int memAllocBytes;
    private final Random random;
    private final boolean throwException;

    public ExtractorImpl(WorkUnitState state) {
      this.random = new Random();
      this.startTime = System.currentTimeMillis();

      int runDuration = state.getPropAsInt(RUN_DURATION_KEY, DEFAULT_RUN_DURATION);

      // set the end time based on the configured duration; multiply as a long so large
      // durations do not overflow int arithmetic
      if (runDuration > 0) {
        this.endTime = this.startTime + runDuration * 1000L;
      } else {
        this.endTime = INVALID_TIME;
      }

      // convert micros to nanos in long arithmetic to avoid int overflow
      this.computeTimeNano = state.getPropAsInt(COMPUTE_TIME_MICRO_KEY, DEFAULT_COMPUTE_TIME_MICRO) * 1000L;
      this.sleepTimeMicro = state.getPropAsInt(SLEEP_TIME_MICRO_KEY, DEFAULT_SLEEP_TIME);

      // num records only takes effect if the duration is not specified
      this.numRecords = this.endTime == INVALID_TIME ? state.getPropAsInt(NUM_RECORDS_KEY, DEFAULT_NUM_RECORDS) : 0;
      this.memAllocBytes = state.getPropAsInt(MEM_ALLOC_BYTES_KEY, DEFAULT_MEM_ALLOC_BYTES);
      this.throwException = state.getPropAsBoolean(THROW_EXCEPTION, DEFAULT_THROW_EXCEPTION);
    }

    @Override
    public void close() throws IOException {
      // Nothing to do
    }

    @Override
    public String getSchema() throws IOException {
      return "string";
    }

    /**
     * Read a record with configurable idle and compute time.
     *
     * @return the next record, or {@code null} once the time or record limit has been reached
     * @throws IOException at end of input if configured to throw an exception
     **/
    @Override
    public byte[] readRecord(byte[] reuse) throws DataRecordException, IOException {
      // If an end time is configured then it is used as the stopping point otherwise the record count limit is used
      if ((this.endTime != INVALID_TIME && System.currentTimeMillis() > this.endTime) ||
          (this.numRecords > 0 && this.recordsEmitted >= this.numRecords)) {
        if (this.throwException) {
          throw new IOException("This is one test exception");
        }
        return null;
      }

      // spend time computing
      if (this.computeTimeNano > 0) {
        final long startComputeNanoTime = System.nanoTime();
        final byte[] bytes = new byte[100];

        while (System.nanoTime() - startComputeNanoTime < this.computeTimeNano) {
          random.nextBytes(bytes);
        }
      }

      // sleep
      if (this.sleepTimeMicro > 0) {
        try {
          TimeUnit.MICROSECONDS.sleep(this.sleepTimeMicro);
        } catch (InterruptedException e) {
          // Restore the interrupt status instead of swallowing it, so callers can observe
          // the interruption and shut down promptly.
          Thread.currentThread().interrupt();
        }
      }

      this.recordsEmitted++;

      return newMessage(this.memAllocBytes);
    }

    @Override
    public long getExpectedRecordCount() {
      return this.numRecords;
    }

    @Override
    public long getHighWatermark() {
      return 0;
    }

    /**
     * Create a message of numBytes size; the record number's UTF-8 digits, zero-padded
     * (or truncated) to the requested length by {@link Arrays#copyOf(byte[], int)}.
     *
     * @param numBytes number of bytes to allocate for the message
     */
    private byte[] newMessage(int numBytes) {
      byte[] stringBytes = String.valueOf(this.recordsEmitted).getBytes(Charsets.UTF_8);
      return Arrays.copyOf(stringBytes, numBytes);
    }
  }
}
| 4,190 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/test/TestIOUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.test;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
/**
 * Test utils to read records from a file.
 */
public class TestIOUtils {

  // Static utility holder; not meant to be instantiated.
  private TestIOUtils() {
  }

  /**
   * Reads all records from a json file as {@link GenericRecord}s.
   *
   * @param jsonDataPath path to a file containing JSON-encoded Avro records
   * @param schemaPath path to the Avro schema file describing those records
   * @return all records decoded from the file, in order
   * @throws Exception if the schema cannot be parsed or a record cannot be decoded
   */
  public static List<GenericRecord> readAllRecords(String jsonDataPath, String schemaPath)
      throws Exception {
    List<GenericRecord> records = new ArrayList<>();
    File jsonDataFile = new File(jsonDataPath);
    File schemaFile = new File(schemaPath);

    Schema schema = new Schema.Parser().parse(schemaFile);
    GenericDatumReader<GenericRecord> datumReader = new GenericDatumReader<>(schema);

    try (InputStream is = new FileInputStream(jsonDataFile)) {
      Decoder decoder = DecoderFactory.get().jsonDecoder(schema, is);
      while (true) {
        records.add(datumReader.read(null, decoder));
      }
    } catch (EOFException eof) {
      // The json decoder signals end of input with EOFException; all records have been read.
    }
    return records;
  }
}
| 4,191 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/test/TestingSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.WorkUnit;
import lombok.Getter;
import lombok.Setter;
/**
 * A trivial implementation of {@link Source} to be used in testing. Returns whatever work units
 * were injected via the setter, and hands out extractors that are immediately exhausted.
 */
public class TestingSource implements Source<String, String> {

  @Setter @Getter protected List<WorkUnit> _workunits = new ArrayList<>();

  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    return this._workunits;
  }

  @Override
  public Extractor<String, String> getExtractor(WorkUnitState state) throws IOException {
    return new Extract();
  }

  @Override
  public void shutdown(SourceState state) {
    // Nothing to do
  }

  /** An extractor with no data: every readRecord() call returns null. */
  public static class Extract implements Extractor<String, String> {

    @Override
    public void close() throws IOException {
      // Nothing to do
    }

    @Override
    public String getSchema() throws IOException {
      return "none";
    }

    @Override
    public String readRecord(String reuse) throws DataRecordException, IOException {
      // No records to produce.
      return null;
    }

    @Override
    public long getExpectedRecordCount() {
      return 0;
    }

    @Override
    public long getHighWatermark() {
      return 0;
    }
  }
}
| 4,192 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/retry/RetryerFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.retry;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.rholder.retry.Retryer;
import com.github.rholder.retry.RetryerBuilder;
import com.github.rholder.retry.RetryListener;
import com.github.rholder.retry.StopStrategies;
import com.github.rholder.retry.WaitStrategies;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.exception.NonTransientException;
/**
 * Factory class that builds {@link Retryer} instances from a {@link Config}.
 * It's recommended to use with ConfigBuilder so that, given a State and a config key prefix,
 * a user can easily instantiate a Retryer.
 *
 * see GoogleAnalyticsUnsampledExtractor for some examples.
 *
 * @param <T> result type of the calls guarded by the retryer
 */
public class RetryerFactory<T> {
  private static final Logger LOG = LoggerFactory.getLogger(RetryerFactory.class);

  public static final String RETRY_MULTIPLIER = "multiplier";
  public static final String RETRY_INTERVAL_MS = "interval_ms";
  public static final String RETRY_TIME_OUT_MS = "time_out_ms";
  public static final String RETRY_TYPE = "retry_type";
  // value large or equal to 1
  public static final String RETRY_TIMES = "retry_times";

  // Retry anything that is not explicitly marked non-transient.
  private static final Predicate<Throwable> RETRY_EXCEPTION_PREDICATE =
      throwable -> !(throwable instanceof NonTransientException);

  // Fallback values applied when the caller's config omits a key.
  private static final Config DEFAULTS = ConfigFactory.parseMap(ImmutableMap.<String, Object>of(
      RETRY_TIME_OUT_MS, TimeUnit.MINUTES.toMillis(5L),
      RETRY_INTERVAL_MS, TimeUnit.SECONDS.toMillis(30L),
      RETRY_MULTIPLIER, TimeUnit.SECONDS.toMillis(1L),
      RETRY_TYPE, RetryType.EXPONENTIAL.name(),
      RETRY_TIMES, 2));

  public enum RetryType {
    EXPONENTIAL,
    FIXED,
    FIXED_ATTEMPT;
  }

  /**
   * Creates new instance of retryer based on the config.
   * Accepted config keys are defined in RetryerFactory as static member variables.
   * You can use State along with ConfigBuilder and a config prefix to build the config.
   *
   * @param config retry settings; any missing key falls back to the built-in defaults
   * @param optRetryListener e.g. for logging failures
   */
  public static <T> Retryer<T> newInstance(Config config, Optional<RetryListener> optRetryListener) {
    Config effective = config.withFallback(DEFAULTS);
    RetryType type = RetryType.valueOf(effective.getString(RETRY_TYPE).toUpperCase());

    RetryerBuilder<T> builder;
    if (type == RetryType.EXPONENTIAL) {
      builder = newExponentialRetryerBuilder(effective);
    } else if (type == RetryType.FIXED) {
      builder = newFixedRetryerBuilder(effective);
    } else if (type == RetryType.FIXED_ATTEMPT) {
      builder = newFixedAttemptBoundRetryerBuilder(effective);
    } else {
      throw new IllegalArgumentException(type + " is not supported");
    }

    optRetryListener.ifPresent(builder::withRetryListener);
    return builder.build();
  }

  /**
   * Creates new instance of retryer based on the config and having no {@link RetryListener}.
   */
  public static <T> Retryer<T> newInstance(Config config) {
    return newInstance(config, Optional.empty());
  }

  // Fixed wait between attempts, bounded by total elapsed time.
  private static <T> RetryerBuilder<T> newFixedRetryerBuilder(Config config) {
    long intervalMs = config.getLong(RETRY_INTERVAL_MS);
    long timeoutMs = config.getLong(RETRY_TIME_OUT_MS);
    return RetryerBuilder.<T> newBuilder()
        .retryIfException(RETRY_EXCEPTION_PREDICATE)
        .withWaitStrategy(WaitStrategies.fixedWait(intervalMs, TimeUnit.MILLISECONDS))
        .withStopStrategy(StopStrategies.stopAfterDelay(timeoutMs, TimeUnit.MILLISECONDS));
  }

  // Exponentially growing wait between attempts, bounded by total elapsed time.
  private static <T> RetryerBuilder<T> newExponentialRetryerBuilder(Config config) {
    long multiplier = config.getLong(RETRY_MULTIPLIER);
    long maxWaitMs = config.getLong(RETRY_INTERVAL_MS);
    long timeoutMs = config.getLong(RETRY_TIME_OUT_MS);
    return RetryerBuilder.<T> newBuilder()
        .retryIfException(RETRY_EXCEPTION_PREDICATE)
        .withWaitStrategy(WaitStrategies.exponentialWait(multiplier, maxWaitMs, TimeUnit.MILLISECONDS))
        .withStopStrategy(StopStrategies.stopAfterDelay(timeoutMs, TimeUnit.MILLISECONDS));
  }

  // Fixed wait between attempts, bounded by attempt count rather than elapsed time.
  private static <T> RetryerBuilder<T> newFixedAttemptBoundRetryerBuilder(Config config) {
    long intervalMs = config.getLong(RETRY_INTERVAL_MS);
    int maxAttempts = config.getInt(RETRY_TIMES);
    return RetryerBuilder.<T> newBuilder()
        .retryIfException(RETRY_EXCEPTION_PREDICATE)
        .withWaitStrategy(WaitStrategies.fixedWait(intervalMs, TimeUnit.MILLISECONDS))
        .withStopStrategy(StopStrategies.stopAfterAttempt(maxAttempts));
  }
}
| 4,193 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter/PoolBasedLimiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.Semaphore;
import com.google.common.primitives.Ints;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
/**
 * An implementation of {@link Limiter} that only allows permits to be acquired from a fixed pool.
 *
 * <p>
 * This implementation uses a {@link Semaphore} as the permit pool. {@link #acquirePermits(long)}
 * blocks until the requested permits are available and then always returns a non-null
 * {@link Closeable}; closing it releases the permits back to the pool via
 * {@link Semaphore#release(int)}, so permit refills are supported. {@link #acquirePermits(long)}
 * only accepts input arguments that can be safely cast to an integer, i.e. bounded by
 * {@link Integer#MAX_VALUE}.
 * </p>
 *
 * @author Yinan Li
 */
public class PoolBasedLimiter implements Limiter {

  /** Builds a {@link PoolBasedLimiter}; the config must supply the pool size. */
  @Alias(value = "PoolBasedLimiter")
  public static class Factory implements LimiterFactory {
    public static final String POOL_SIZE_KEY = "poolSize";

    @Override
    public Limiter buildLimiter(Config config) {
      if (!config.hasPath(POOL_SIZE_KEY)) {
        throw new IllegalArgumentException("Missing key " + POOL_SIZE_KEY);
      }
      return new PoolBasedLimiter(config.getInt(POOL_SIZE_KEY));
    }
  }

  private final Semaphore permitPool;

  public PoolBasedLimiter(int poolSize) {
    this.permitPool = new Semaphore(poolSize);
  }

  @Override
  public void start() {
    // Nothing to do
  }

  @Override
  public Closeable acquirePermits(long permits) throws InterruptedException {
    // Semaphore only deals in ints; checkedCast throws rather than silently truncating.
    int requested = Ints.checkedCast(permits);
    this.permitPool.acquire(requested);
    return new PoolPermitCloseable(this.permitPool, requested);
  }

  @Override
  public void stop() {
    // Nothing to do
  }

  /** Returns the acquired permits to the pool when closed. */
  private static class PoolPermitCloseable implements Closeable {
    private final Semaphore pool;
    private final int permits;

    public PoolPermitCloseable(Semaphore pool, int permits) {
      this.pool = pool;
      this.permits = permits;
    }

    @Override
    public void close() throws IOException {
      this.pool.release(this.permits);
    }
  }
}
| 4,194 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter/CountBasedLimiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.io.Closeable;
import com.typesafe.config.Config;
import lombok.Getter;
import org.apache.gobblin.annotation.Alias;
/**
 * An implementation of {@link Limiter} that limits the total number of permits it will ever issue.
 *
 * <p>
 * {@link #acquirePermits(long)} will return {@code null} once there are not enough permits
 * available to satisfy the request. Permit refills are not supported in this implementation.
 * </p>
 *
 * @author Yinan Li
 */
public class CountBasedLimiter extends NonRefillableLimiter {

  public static final String FACTORY_ALIAS = "CountBasedLimiter";

  /** Builds a {@link CountBasedLimiter}; the config must supply the maximum permit count. */
  @Alias(value = FACTORY_ALIAS)
  public static class Factory implements LimiterFactory {
    public static final String COUNT_KEY = "maxPermits";

    @Override
    public Limiter buildLimiter(Config config) {
      if (!config.hasPath(COUNT_KEY)) {
        throw new IllegalArgumentException("Missing key " + COUNT_KEY);
      }
      return new CountBasedLimiter(config.getLong(COUNT_KEY));
    }
  }

  /** Maximum number of permits this limiter will ever hand out. */
  @Getter
  private final long countLimit;
  // Number of permits issued so far; stays within [0, countLimit] for non-negative requests.
  private long count;

  public CountBasedLimiter(long countLimit) {
    this.countLimit = countLimit;
    this.count = 0;
  }

  @Override
  public void start() {
    // Nothing to do
  }

  @Override
  public synchronized Closeable acquirePermits(long permits)
      throws InterruptedException {
    // Check against the remaining headroom instead of computing count + permits, which could
    // overflow (and wrongly grant the request) when countLimit is close to Long.MAX_VALUE.
    if (permits <= this.countLimit - this.count) {
      this.count += permits;
      return NO_OP_CLOSEABLE;
    }
    return null;
  }

  @Override
  public void stop() {
    // Nothing to do
  }
}
| 4,195 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter/Limiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.io.Closeable;
/**
 * An interface for classes that implement some logic limiting on the occurrences of some events,
 * e.g., data record extraction using an {@link org.apache.gobblin.source.extractor.Extractor}.
 *
 * <p>
 * Typical lifecycle: {@link #start()} once, any number of {@link #acquirePermits(long)} calls,
 * then {@link #stop()}. NOTE(review): thread-safety is implementation-specific — confirm with
 * each implementation before sharing a single instance across threads.
 * </p>
 *
 * @author Yinan Li
 */
public interface Limiter {

  /**
   * Start the {@link Limiter}.
   *
   * <p>
   * Implementations may use this hook to initialize internal state; callers should invoke it
   * before the first {@link #acquirePermits(long)}.
   * </p>
   *
   * See {@link #stop()}
   */
  public void start();

  /**
   * Acquire a given number of permits.
   *
   * <p>
   * Depending on the implementation, the caller of this method may be blocked.
   * It is also up to the caller to decide how to deal with the return value.
   * </p>
   *
   * @param permits number of permits to get
   * @return a {@link Closeable} instance if the requested permits have been successfully acquired,
   * or {@code null} if otherwise; in the former case, calling {@link Closeable#close()} on
   * the returned {@link Closeable} instance will release the acquired permits.
   * @throws InterruptedException if the caller is interrupted while being blocked
   */
  public Closeable acquirePermits(long permits) throws InterruptedException;

  /**
   * Stop the {@link Limiter}.
   *
   * See {@link #start()}
   */
  public void stop();
}
| 4,196 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter/MultiLimiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.io.Closeable;
import java.io.IOException;
import java.util.Set;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Sets;
import com.google.common.io.Closer;
import lombok.Getter;
/**
* A {@link Limiter} that contains multiple underlying {@link Limiter}s and requests permits from all those limiters.
*
* <p>
* Permits are requested serially from the underlying brokers in the order they appear in the constructor. If an underlying
* limiter is unable to provide permits, permits from previous underlying limiters are closed. However, for
* {@link NonRefillableLimiter}s, the permits are lost permanently.
* </p>
*
* <p>
* Note: {@link MultiLimiter} does some optimization of the underlying limiters:
* * underlying {@link MultiLimiter}s are opened, and their underlying limiters are used instead.
* * {@link NoopLimiter}s are ignored.
* * underlying {@link Limiter}s are deduplicated to avoid requesting permits from the same limiter twice.
* Deduplication is done based on the {@link #equals(Object)} method of the underlying limiters.
* </p>
*/
public class MultiLimiter implements Limiter {

  /** Flattened, deduplicated list of the limiters permits are requested from, in order. */
  @Getter
  private final ImmutableList<Limiter> underlyingLimiters;

  public MultiLimiter(Limiter... underlyingLimiters) {
    ImmutableList.Builder<Limiter> builder = ImmutableList.builder();
    Set<Limiter> seenLimiters = Sets.newHashSet();
    for (Limiter limiter : underlyingLimiters) {
      if (limiter instanceof MultiLimiter) {
        // Flatten nested MultiLimiters so their inner limiters are deduplicated against ours.
        for (Limiter innerLimiter : ((MultiLimiter) limiter).underlyingLimiters) {
          addLimiterIfNotSeen(innerLimiter, builder, seenLimiters);
        }
      } else if (!(limiter instanceof NoopLimiter)) {
        // NoopLimiters never deny or track permits, so they are dropped entirely.
        addLimiterIfNotSeen(limiter, builder, seenLimiters);
      }
    }
    this.underlyingLimiters = builder.build();
  }

  /** Adds {@code limiter} to {@code builder} unless an equal limiter has already been added. */
  private void addLimiterIfNotSeen(Limiter limiter, ImmutableList.Builder<Limiter> builder, Set<Limiter> seenLimiters) {
    // Set#add returns false when the element was already present, so one call both checks and records.
    if (seenLimiters.add(limiter)) {
      builder.add(limiter);
    }
  }

  /** Starts all underlying limiters. */
  @Override
  public void start() {
    for (Limiter limiter : this.underlyingLimiters) {
      limiter.start();
    }
  }

  /**
   * Acquires {@code permits} permits from every underlying limiter, serially and in order.
   *
   * @param permits number of permits to request from each underlying limiter
   * @return a {@link Closeable} that releases all acquired permits on close, or {@code null} if any
   *         underlying limiter denied the request; in the latter case, permits already acquired
   *         from earlier limiters are released first (permits acquired from
   *         {@link NonRefillableLimiter}s are lost permanently).
   * @throws InterruptedException if interrupted while blocked on an underlying limiter
   */
  @Override
  public Closeable acquirePermits(long permits) throws InterruptedException {
    Closer closer = Closer.create();
    for (Limiter limiter : this.underlyingLimiters) {
      Closeable permit = limiter.acquirePermits(permits);
      if (permit == null) {
        try {
          closer.close();
        } catch (IOException ioe) {
          // Preserve the cause so a failure to release intermediate permits stays diagnosable.
          throw new RuntimeException("Could not return intermediate permits.", ioe);
        }
        return null;
      }
      closer.register(permit);
    }
    return closer;
  }

  /** Stops all underlying limiters. */
  @Override
  public void stop() {
    for (Limiter limiter : this.underlyingLimiters) {
      limiter.stop();
    }
  }
}
| 4,197 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter/DefaultLimiterFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Preconditions;
import org.apache.gobblin.configuration.State;
/**
* A default factory class for {@link Limiter}s.
*
* @author Yinan Li
*/
public class DefaultLimiterFactory {

  public static final String EXTRACT_LIMIT_TYPE_KEY = "extract.limit.type";
  public static final String EXTRACT_LIMIT_RATE_LIMIT_KEY = "extract.limit.rateLimit";
  public static final String EXTRACT_LIMIT_RATE_LIMIT_TIMEUNIT_KEY = "extract.limit.rateLimitTimeunit";
  public static final String EXTRACT_LIMIT_TIME_LIMIT_KEY = "extract.limit.timeLimit";
  public static final String EXTRACT_LIMIT_TIME_LIMIT_TIMEUNIT_KEY = "extract.limit.timeLimitTimeunit";
  public static final String EXTRACT_LIMIT_COUNT_LIMIT_KEY = "extract.limit.count.limit";
  public static final String EXTRACT_LIMIT_POOL_SIZE_KEY = "extract.limit.pool.size";

  @Deprecated
  public static final String EXTRACT_LIMIT_RATE_LIMIT_KEY_DEP = "extract.limit.rate.limit";
  @Deprecated
  public static final String EXTRACT_LIMIT_RATE_LIMIT_TIMEUNIT_KEY_DEP = "extract.limit.rate.limit.timeunit";
  @Deprecated
  public static final String EXTRACT_LIMIT_TIME_LIMIT_KEY_DEP = "extract.limit.time.limit";
  @Deprecated
  public static final String EXTRACT_LIMIT_TIME_LIMIT_TIMEUNIT_KEY_DEP = "extract.limit.time.limit.timeunit";

  // Static factory methods only; this utility class is not meant to be instantiated.
  private DefaultLimiterFactory() {
  }

  /**
   * Create a new {@link Limiter} instance of one of the types in {@link BaseLimiterType}.
   *
   * <p>
   * Note this method will always return a new {@link Limiter} instance of one of the supported types defined
   * in {@link BaseLimiterType} as long as the input configuration specifies a supported {@link BaseLimiterType}.
   * It will throw an {@link IllegalArgumentException} if none of the supported {@link BaseLimiterType}s is
   * specified or if any of the required configuration properties for the specified {@link BaseLimiterType}
   * is not present.
   * </p>
   *
   * <p>
   * This method will return a functional {@link Limiter} if the configuration is correct. If instead, a
   * {@link Limiter} is optional or the caller is fine with a {@link Limiter} that is not really limiting any
   * events, then the caller should first make sure that the {@link Limiter} should indeed be created using
   * this method, or handle the exception (if any is thrown) appropriately.
   * </p>
   *
   * @param state a {@link State} instance carrying configuration properties
   * @return a new {@link Limiter} instance
   * @throws IllegalArgumentException if the input configuration does not specify a valid supported type,
   *         or a required property for the specified type is missing
   */
  public static Limiter newLimiter(State state) {
    state = convertDeprecatedConfigs(state);

    Preconditions.checkArgument(state.contains(EXTRACT_LIMIT_TYPE_KEY),
        String.format("Missing configuration property %s for the Limiter type", EXTRACT_LIMIT_TYPE_KEY));
    BaseLimiterType type = BaseLimiterType.forName(state.getProp(EXTRACT_LIMIT_TYPE_KEY));

    switch (type) {
      case RATE_BASED:
        Preconditions.checkArgument(state.contains(EXTRACT_LIMIT_RATE_LIMIT_KEY),
            "Missing configuration property " + EXTRACT_LIMIT_RATE_LIMIT_KEY);
        int rateLimit = Integer.parseInt(state.getProp(EXTRACT_LIMIT_RATE_LIMIT_KEY));
        if (state.contains(EXTRACT_LIMIT_RATE_LIMIT_TIMEUNIT_KEY)) {
          TimeUnit rateTimeUnit = TimeUnit.valueOf(state.getProp(EXTRACT_LIMIT_RATE_LIMIT_TIMEUNIT_KEY).toUpperCase());
          return new RateBasedLimiter(rateLimit, rateTimeUnit);
        }
        return new RateBasedLimiter(rateLimit);
      case TIME_BASED:
        Preconditions.checkArgument(state.contains(EXTRACT_LIMIT_TIME_LIMIT_KEY),
            "Missing configuration property " + EXTRACT_LIMIT_TIME_LIMIT_KEY);
        long timeLimit = Long.parseLong(state.getProp(EXTRACT_LIMIT_TIME_LIMIT_KEY));
        if (state.contains(EXTRACT_LIMIT_TIME_LIMIT_TIMEUNIT_KEY)) {
          TimeUnit timeTimeUnit = TimeUnit.valueOf(state.getProp(EXTRACT_LIMIT_TIME_LIMIT_TIMEUNIT_KEY).toUpperCase());
          return new TimeBasedLimiter(timeLimit, timeTimeUnit);
        }
        return new TimeBasedLimiter(timeLimit);
      case COUNT_BASED:
        Preconditions.checkArgument(state.contains(EXTRACT_LIMIT_COUNT_LIMIT_KEY),
            "Missing configuration property " + EXTRACT_LIMIT_COUNT_LIMIT_KEY);
        long countLimit = Long.parseLong(state.getProp(EXTRACT_LIMIT_COUNT_LIMIT_KEY));
        return new CountBasedLimiter(countLimit);
      case POOL_BASED:
        Preconditions.checkArgument(state.contains(EXTRACT_LIMIT_POOL_SIZE_KEY),
            "Missing configuration property " + EXTRACT_LIMIT_POOL_SIZE_KEY);
        int poolSize = Integer.parseInt(state.getProp(EXTRACT_LIMIT_POOL_SIZE_KEY));
        return new PoolBasedLimiter(poolSize);
      default:
        throw new IllegalArgumentException("Unrecognized Limiter type: " + type.toString());
    }
  }

  /**
   * Convert deprecated keys {@value #EXTRACT_LIMIT_RATE_LIMIT_KEY_DEP}, {@value #EXTRACT_LIMIT_RATE_LIMIT_TIMEUNIT_KEY_DEP},
   * {@value #EXTRACT_LIMIT_TIME_LIMIT_KEY_DEP}, and {@value #EXTRACT_LIMIT_TIME_LIMIT_TIMEUNIT_KEY_DEP}, since they are not
   * TypeSafe compatible. The deprecated keys will be removed from @param state, and replaced with
   * {@value #EXTRACT_LIMIT_RATE_LIMIT_KEY}, {@value #EXTRACT_LIMIT_RATE_LIMIT_TIMEUNIT_KEY}, {@value #EXTRACT_LIMIT_TIME_LIMIT_KEY},
   * and {@value #EXTRACT_LIMIT_TIME_LIMIT_TIMEUNIT_KEY}, respectively.
   */
  private static State convertDeprecatedConfigs(State state) {
    renameProp(state, EXTRACT_LIMIT_RATE_LIMIT_KEY_DEP, EXTRACT_LIMIT_RATE_LIMIT_KEY);
    renameProp(state, EXTRACT_LIMIT_RATE_LIMIT_TIMEUNIT_KEY_DEP, EXTRACT_LIMIT_RATE_LIMIT_TIMEUNIT_KEY);
    renameProp(state, EXTRACT_LIMIT_TIME_LIMIT_KEY_DEP, EXTRACT_LIMIT_TIME_LIMIT_KEY);
    renameProp(state, EXTRACT_LIMIT_TIME_LIMIT_TIMEUNIT_KEY_DEP, EXTRACT_LIMIT_TIME_LIMIT_TIMEUNIT_KEY);
    return state;
  }

  /** Moves the value of {@code oldKey} in {@code state} (if present) to {@code newKey}, removing {@code oldKey}. */
  private static void renameProp(State state, String oldKey, String newKey) {
    if (state.contains(oldKey)) {
      state.setProp(newKey, state.getProp(oldKey));
      state.removeProp(oldKey);
    }
  }
}
| 4,198 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter/TimeBasedLimiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.io.Closeable;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.util.ExecutorsUtils;
/**
 * An implementation of {@link Limiter} that limits the time elapsed for some events.
 *
 * <p>
 * This implementation uses a task scheduled in a {@link ScheduledThreadPoolExecutor} that will
 * fire once after a given amount of time has elapsed. The task once fired, will flip a boolean
 * flag that tells if permits should be issued. The flag is initially set to {@code true}. Thus,
 * no permits are issued once the flag is flipped after the given amount of time has elapsed.
 * </p>
 *
 * <p>
 * {@link #acquirePermits(long)} will return {@code null} once the time limit is reached. Permit
 * refills are not supported in this implementation.
 * </p>
 *
 * @author Yinan Li
 */
public class TimeBasedLimiter extends NonRefillableLimiter {

  /** Builds {@link TimeBasedLimiter}s from a {@link Config}; registered under the alias {@code "time"}. */
  @Alias(value = "time")
  public static class Factory implements LimiterFactory {
    /** Required config key holding the time limit, interpreted in seconds. */
    public static final String MAX_SECONDS_KEY = "maxSeconds";

    @Override
    public Limiter buildLimiter(Config config) {
      if (!config.hasPath(MAX_SECONDS_KEY)) {
        throw new RuntimeException("Missing key " + MAX_SECONDS_KEY);
      }
      return new TimeBasedLimiter(config.getLong(MAX_SECONDS_KEY));
    }
  }

  private static final Logger LOGGER = LoggerFactory.getLogger(TimeBasedLimiter.class);

  // Duration after which permits stop being issued, expressed in this.timeUnit.
  private final long timeLimit;
  private final TimeUnit timeUnit;
  // Single-threaded scheduler whose only job is to flip canIssuePermit after timeLimit elapses.
  private final ScheduledThreadPoolExecutor flagFlippingExecutor;

  // A flag telling if a permit is allowed to be issued
  private volatile boolean canIssuePermit = true;

  /**
   * Constructs a {@link TimeBasedLimiter} with the given time limit in {@link TimeUnit#SECONDS}.
   *
   * @param timeLimit the time limit, in seconds
   */
  public TimeBasedLimiter(long timeLimit) {
    this(timeLimit, TimeUnit.SECONDS);
  }

  /**
   * Constructs a {@link TimeBasedLimiter} with the given time limit.
   *
   * @param timeLimit the time limit
   * @param timeUnit the {@link TimeUnit} of {@code timeLimit}
   */
  public TimeBasedLimiter(long timeLimit, TimeUnit timeUnit) {
    this.timeLimit = timeLimit;
    this.timeUnit = timeUnit;
    this.flagFlippingExecutor = new ScheduledThreadPoolExecutor(1,
        ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("TimeBasedThrottler")));
  }

  @Override
  public void start() {
    // Schedule the one-shot task that ends the permit-issuing window after timeLimit.
    this.flagFlippingExecutor.schedule(new Runnable() {
      @Override
      public void run() {
        // Flip the flag once the scheduled time is reached
        TimeBasedLimiter.this.canIssuePermit = false;
      }
    }, this.timeLimit, this.timeUnit);
  }

  /**
   * {@inheritDoc}
   *
   * <p>The requested number of permits is ignored: permits are unlimited until the time limit
   * elapses, after which none are issued.</p>
   */
  @Override
  public Closeable acquirePermits(long permits) throws InterruptedException {
    return this.canIssuePermit ? NO_OP_CLOSEABLE : null;
  }

  @Override
  public void stop() {
    // Cancels the pending flag-flip task (if any) and shuts the scheduler down.
    this.flagFlippingExecutor.shutdownNow();
  }
}
| 4,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.