index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter/NoopLimiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.io.Closeable;
import java.io.IOException;
/**
* A {@link Limiter} that satisfies all requests.
*/
public class NoopLimiter implements Limiter {

  @Override
  public void start() {
    // Nothing to start for a pass-through limiter.
  }

  /**
   * Grants every request immediately.
   *
   * @param permits number of permits requested (ignored)
   * @return a {@link Closeable} whose {@code close()} does nothing, since no permits are held
   */
  @Override
  public Closeable acquirePermits(long permits) throws InterruptedException {
    return () -> { };
  }

  @Override
  public void stop() {
    // Nothing to stop for a pass-through limiter.
  }
}
| 4,200 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter/RateBasedLimiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.io.Closeable;
import java.util.concurrent.TimeUnit;
import com.google.common.primitives.Ints;
import com.google.common.util.concurrent.RateLimiter;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import lombok.Getter;
/**
* An implementation of {@link Limiter} that limits the rate of some events. This implementation uses
* Guava's {@link RateLimiter}.
*
* <p>
 * {@link #acquirePermits(long)} is blocking and will always return a {@link java.io.Closeable} after the permits
* are successfully acquired (probably after being blocked for some amount of time). Permit refills
* are not supported in this implementation. Also {@link #acquirePermits(long)} only accepts input
* arguments that can be safely casted to an integer bounded by {@link Integer#MAX_VALUE}.
* </p>
*
* @author Yinan Li
*/
public class RateBasedLimiter extends NonRefillableLimiter {

  /** Builds a {@link RateBasedLimiter} from config; requires the {@value #QPS_KEY} key. */
  @Alias(value = "qps")
  public static class Factory implements LimiterFactory {
    public static final String QPS_KEY = "qps";

    @Override
    public Limiter buildLimiter(Config config) {
      if (!config.hasPath(QPS_KEY)) {
        throw new RuntimeException("Missing key " + QPS_KEY);
      }
      return new RateBasedLimiter(config.getDouble(QPS_KEY));
    }
  }

  private final RateLimiter rateLimiter;

  /** The configured rate, normalized to permits per second. */
  @Getter
  private final double rateLimitPerSecond;

  /**
   * Creates a limiter that allows {@code rateLimit} permits per second.
   *
   * @param rateLimit permits allowed per second
   */
  public RateBasedLimiter(double rateLimit) {
    this(rateLimit, TimeUnit.SECONDS);
  }

  /**
   * Creates a limiter that allows {@code rateLimit} permits per {@code timeUnit}.
   *
   * @param rateLimit permits allowed per {@code timeUnit}
   * @param timeUnit the time unit the rate is expressed in
   */
  public RateBasedLimiter(double rateLimit, TimeUnit timeUnit) {
    this.rateLimitPerSecond = convertRate(rateLimit, timeUnit, TimeUnit.SECONDS);
    this.rateLimiter = RateLimiter.create(this.rateLimitPerSecond);
  }

  @Override
  public void start() {
    // Nothing to do
  }

  /**
   * Blocks until the requested number of permits is available.
   *
   * @param permits number of permits to acquire; must fit in an int
   * @return a no-op {@link Closeable} (permit refills are not supported)
   * @throws IllegalArgumentException if {@code permits} exceeds {@link Integer#MAX_VALUE}
   */
  @Override
  public Closeable acquirePermits(long permits) throws InterruptedException {
    this.rateLimiter.acquire(Ints.checkedCast(permits));
    return NO_OP_CLOSEABLE;
  }

  @Override
  public void stop() {
    // Nothing to do
  }

  /**
   * Converts a rate expressed per {@code originalTimeUnit} into the equivalent rate per
   * {@code targetTimeUnit}.
   *
   * <p>Bug fix: the previous implementation divided by
   * {@code targetTimeUnit.convert(1, originalTimeUnit)}, whose integer result truncates to 0
   * whenever the original unit is finer than the target (e.g. MILLISECONDS -> SECONDS),
   * producing an infinite rate. Converting through nanosecond durations avoids the truncation
   * and yields identical results for all previously working (coarser-or-equal) units.</p>
   */
  private static double convertRate(double originalRate, TimeUnit originalTimeUnit, TimeUnit targetTimeUnit) {
    return originalRate * ((double) targetTimeUnit.toNanos(1) / (double) originalTimeUnit.toNanos(1));
  }
}
| 4,201 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter/NonRefillableLimiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.io.Closeable;
import java.io.IOException;
/**
* A type of {@link Limiter}s that do not support permit refills by returning a no-op
* {@link Closeable} in {@link #acquirePermits(long)}.
*
* @author Yinan Li
*/
public abstract class NonRefillableLimiter implements Limiter {

  // Shared handle returned to all callers: closing it releases nothing because
  // these limiters never give permits back.
  protected static final Closeable NO_OP_CLOSEABLE = () -> { };

  /**
   * Acquires permits without the possibility of returning them later.
   *
   * @param permits number of permits requested
   * @return a {@link Closeable} whose {@code close()} is a no-op
   */
  @Override
  public Closeable acquirePermits(long permits) throws InterruptedException {
    return NO_OP_CLOSEABLE;
  }
}
| 4,202 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter/BaseLimiterType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
/**
* An enumeration of types of {@link Limiter}s supported out-of-the-box.
*
* @author Yinan Li
*/
public enum BaseLimiterType {
  /** For {@link RateBasedLimiter}. */
  RATE_BASED("rate"),
  /** For {@link TimeBasedLimiter}. */
  TIME_BASED("time"),
  /** For {@link CountBasedLimiter}. */
  COUNT_BASED("count"),
  /** For {@link PoolBasedLimiter}. */
  POOL_BASED("pool");

  // Short external name used in configuration and lookups.
  private final String name;

  BaseLimiterType(String name) {
    this.name = name;
  }

  @Override
  public String toString() {
    return this.name;
  }

  /**
   * Get a {@link BaseLimiterType} for the given name (case-insensitive).
   *
   * @param name the given name
   * @return a {@link BaseLimiterType} for the given name
   * @throws IllegalArgumentException if no type matches the name
   */
  public static BaseLimiterType forName(String name) {
    for (BaseLimiterType candidate : values()) {
      if (name.equalsIgnoreCase(candidate.name)) {
        return candidate;
      }
    }
    throw new IllegalArgumentException("No Limiter implementation available for name: " + name);
  }
}
| 4,203 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter/NotEnoughPermitsException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.io.IOException;
/**
* Indicates there were not enough permits in the {@link Limiter} to finish the copy.
*/
public class NotEnoughPermitsException extends IOException {

  // Single place that builds the exception message so both constructors stay consistent.
  private static String formatMessage(String operation) {
    return "Not enough permits to perform " + operation;
  }

  /**
   * @param operation the operation that could not be performed
   */
  public NotEnoughPermitsException(String operation) {
    super(formatMessage(operation));
  }

  /**
   * @param operation the operation that could not be performed
   * @param t the underlying cause
   */
  public NotEnoughPermitsException(String operation, Throwable t) {
    super(formatMessage(operation), t);
  }
}
| 4,204 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter/LimiterFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import com.typesafe.config.Config;
/**
* Interface for factories that build {@link Limiter}s.
*/
public interface LimiterFactory {
  /**
   * Builds a {@link Limiter} from the given {@link Config}.
   *
   * @param config configuration for the limiter; the required keys are implementation-specific
   *               (e.g. {@code RateBasedLimiter.Factory} requires a "qps" key)
   * @return a new {@link Limiter} instance
   */
  Limiter buildLimiter(Config config);
}
| 4,205 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter/broker/SharedLimiterKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter.broker;
import org.apache.gobblin.broker.iface.SharedResourceKey;
import lombok.EqualsAndHashCode;
import lombok.Getter;
/**
* A {@link SharedResourceKey} for use with {@link SharedLimiterFactory}. The resourceLimitedPath should identify the resource
* that will be limited, for example the uri of an external service for which calls should be throttled.
*/
@Getter
@EqualsAndHashCode
public class SharedLimiterKey implements SharedResourceKey {

  /** Whether a globally coordinated limiter is required, used only if configured, or skipped. */
  public enum GlobalLimiterPolicy {
    USE_GLOBAL,
    USE_GLOBAL_IF_CONFIGURED,
    LOCAL_ONLY
  }

  /**
   * A "/" separated path representing the resource limited. For example:
   * - /filesystem/myHDFSCluster
   * - /databases/mysql/myInstance
   */
  private final String resourceLimitedPath;
  private final GlobalLimiterPolicy globalLimiterPolicy;

  public SharedLimiterKey(String resourceLimitedPath) {
    this(resourceLimitedPath, GlobalLimiterPolicy.USE_GLOBAL_IF_CONFIGURED);
  }

  public SharedLimiterKey(String resourceLimitedPath, GlobalLimiterPolicy globalLimiterPolicy) {
    // Normalize so that the stored path always carries exactly one leading "/".
    this.resourceLimitedPath =
        resourceLimitedPath.startsWith("/") ? resourceLimitedPath : "/" + resourceLimitedPath;
    this.globalLimiterPolicy = globalLimiterPolicy;
  }

  @Override
  public String toString() {
    return toConfigurationKey();
  }

  @Override
  public String toConfigurationKey() {
    // Drop the leading "/" and turn the remaining path separators into config-key dots.
    return this.resourceLimitedPath.substring(1).replace('/', '.');
  }
}
| 4,206 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/limiter/broker/SharedLimiterFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter.broker;
import java.util.Collection;
import com.typesafe.config.Config;
import org.apache.gobblin.broker.ResourceInstance;
import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.NoSuchScopeException;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.ScopeType;
import org.apache.gobblin.broker.iface.ScopedConfigView;
import org.apache.gobblin.broker.iface.SharedResourceFactory;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.broker.iface.SharedResourceFactoryResponse;
import org.apache.gobblin.broker.ResourceCoordinate;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.limiter.Limiter;
import org.apache.gobblin.util.limiter.LimiterFactory;
import org.apache.gobblin.util.limiter.MultiLimiter;
import org.apache.gobblin.util.limiter.NoopLimiter;
import lombok.extern.slf4j.Slf4j;
/**
* A {@link SharedResourceFactory} to create shared {@link Limiter}s using a {@link SharedResourcesBroker}.
*
* <p>
* The factory creates a {@link MultiLimiter} combining a {@link Limiter} at the indicated scope with the
* {@link Limiter} at all immediate parent scopes (obtained from the broker itself). The factory reads the property
* {@link #LIMITER_CLASS_KEY} from the input {@link ConfigView} to determine the class of {@link Limiter} to create
* at the indicated scope. If the key is not found, a {@link NoopLimiter} is generated instead (to combine with
* parent {@link Limiter}s).
* </p>
*/
@Slf4j
public class SharedLimiterFactory<S extends ScopeType<S>> implements SharedResourceFactory<Limiter, SharedLimiterKey, S> {
  public static final String NAME = "limiter";
  // Config key naming the LimiterFactory class (or alias) to instantiate at the requested scope.
  public static final String LIMITER_CLASS_KEY = "class";
  public static final String FAIL_IF_NO_GLOBAL_LIMITER_KEY = "failIfNoGlobalLimiter";
  /**
   * Skip use of global limiter. In general, this should not be used, but it is provided to easily disable global limiters
   * in case of issues with the coordination server.
   */
  public static final String SKIP_GLOBAL_LIMITER_KEY = "skipGlobalLimiter";
  // NOTE(review): the value "faiOnUnknownResourceId" is missing an 'l' ("fail"). Left unchanged
  // because existing deployments may already set the misspelled key; fixing it would silently
  // drop their configuration.
  public static final String FAIL_ON_UNKNOWN_RESOURCE_ID = "faiOnUnknownResourceId";
  // Shared resolver for limiter-class aliases (e.g. "qps" -> RateBasedLimiter.Factory).
  private static final ClassAliasResolver<LimiterFactory> RESOLVER = new ClassAliasResolver<>(LimiterFactory.class);

  @Override
  public String getName() {
    return NAME;
  }

  /**
   * Creates the {@link Limiter} for the requested scope, or redirects resource creation.
   *
   * <p>The method may return a {@link ResourceCoordinate} instead of an actual limiter, which
   * tells the broker to retry creation with a modified key (different
   * {@link SharedLimiterKey.GlobalLimiterPolicy}) or with a different factory (the reflectively
   * loaded RestliLimiterFactory for globally coordinated limiting). When it does build a limiter
   * locally, the result is a {@link MultiLimiter} chaining this scope's limiter with the limiters
   * of all parent scopes obtained from the broker.</p>
   *
   * @throws NotConfiguredException if {@link #FAIL_ON_UNKNOWN_RESOURCE_ID} is set and no limiter
   *         class is configured for this resource
   */
  @Override
  public SharedResourceFactoryResponse<Limiter>
  createResource(SharedResourcesBroker<S> broker, ScopedConfigView<S, SharedLimiterKey> configView)
      throws NotConfiguredException{
    Config config = configView.getConfig();
    SharedLimiterKey.GlobalLimiterPolicy globalLimiterPolicy = configView.getKey().getGlobalLimiterPolicy();
    if (ConfigUtils.getBoolean(config, SKIP_GLOBAL_LIMITER_KEY, false)) {
      if (globalLimiterPolicy != SharedLimiterKey.GlobalLimiterPolicy.LOCAL_ONLY) {
        log.info("Skip global limiter is set. Switching to local only limiter.");
        // Redirect: re-enter creation with the policy demoted to LOCAL_ONLY.
        SharedLimiterKey modifiedKey = new SharedLimiterKey(configView.getKey().getResourceLimitedPath(),
            SharedLimiterKey.GlobalLimiterPolicy.LOCAL_ONLY);
        return new ResourceCoordinate<>(this, modifiedKey, (S) configView.getScope());
      }
    } else if (config.hasPath(FAIL_IF_NO_GLOBAL_LIMITER_KEY) && config.getBoolean(FAIL_IF_NO_GLOBAL_LIMITER_KEY) &&
        globalLimiterPolicy != SharedLimiterKey.GlobalLimiterPolicy.USE_GLOBAL) {
      // if user has specified FAIL_IF_NO_GLOBAL_LIMITER_KEY, promote the policy from USE_GLOBAL_IF_CONFIGURED to USE_GLOBAL
      // e.g. fail if no GLOBAL configuration is present
      SharedLimiterKey modifiedKey = new SharedLimiterKey(configView.getKey().getResourceLimitedPath(),
          SharedLimiterKey.GlobalLimiterPolicy.USE_GLOBAL);
      return new ResourceCoordinate<>(this, modifiedKey, (S) configView.getScope());
    }
    Limiter limiter;
    if (!configView.getScope().isLocal() && !globalLimiterPolicy.equals(SharedLimiterKey.GlobalLimiterPolicy.LOCAL_ONLY)) {
      try {
        log.info("Looking for Restli Limiter factory.");
        // Loaded reflectively so this module has no compile-time dependency on the Restli module.
        Class<?> klazz = Class.forName("org.apache.gobblin.util.limiter.RestliLimiterFactory");
        return new ResourceCoordinate<>((SharedResourceFactory<Limiter, SharedLimiterKey, S>) klazz.newInstance(),
            configView.getKey(), (S) configView.getScope());
      } catch (ReflectiveOperationException roe) {
        // Factory not on the classpath: only fatal when the policy strictly requires a global limiter.
        if (globalLimiterPolicy.equals(SharedLimiterKey.GlobalLimiterPolicy.USE_GLOBAL)) {
          throw new RuntimeException("There is no Global limiter factory in the classpath.");
        }
      }
    }
    if (config.hasPath(LIMITER_CLASS_KEY)) {
      try {
        LimiterFactory factory = RESOLVER.resolveClass(config.getString(LIMITER_CLASS_KEY)).newInstance();
        limiter = factory.buildLimiter(config);
      } catch (ReflectiveOperationException roe) {
        throw new RuntimeException(roe);
      }
    } else {
      if (config.hasPath(FAIL_ON_UNKNOWN_RESOURCE_ID) && config.getBoolean(FAIL_ON_UNKNOWN_RESOURCE_ID)) {
        throw new NotConfiguredException();
      }
      // No class configured: fall back to a pass-through limiter so parent-scope limiters still apply.
      limiter = new NoopLimiter();
    }
    ScopeType<S> scope = configView.getScope();
    Collection<S> parentScopes = scope.parentScopes();
    if (parentScopes != null) {
      try {
        // Chain this scope's limiter with each immediate parent scope's shared limiter.
        for (S parentScope : parentScopes) {
          limiter = new MultiLimiter(limiter,
              broker.getSharedResourceAtScope(this, configView.getKey(), parentScope));
        }
      } catch (NoSuchScopeException nsse) {
        throw new RuntimeException("Could not get higher scope limiter. This is an error in code.", nsse);
      }
    }
    return new ResourceInstance<>(limiter);
  }

  /**
   * @return brokers self scope.
   */
  @Override
  public S getAutoScope(SharedResourcesBroker<S> broker, ConfigView<S, SharedLimiterKey> config) {
    return broker.selfScope().getType();
  }
}
| 4,207 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/callbacks/CallbackResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.callbacks;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import com.google.common.base.Preconditions;
import lombok.AllArgsConstructor;
import lombok.Data;
/**
* A helper class to simplify the access the result of a callback (if any)
*/
@Data
@AllArgsConstructor
public class CallbackResult<R> {
  // The callback's return value; null when the callback failed or was canceled.
  private final R result;
  // The failure cause; null when the callback succeeded or was canceled.
  private final Throwable error;
  // Whether the callback was canceled before completing.
  private final boolean canceled;

  /** @return true if the callback completed with an error */
  public boolean hasFailed() {
    return null != this.error;
  }

  /** @return true if the callback completed normally (neither failed nor canceled) */
  public boolean isSuccessful() {
    return !hasFailed() && !isCanceled();
  }

  public static <R> CallbackResult<R> createCancelled() {
    return new CallbackResult<R>(null, null, true);
  }

  public static <R> CallbackResult<R> createFailed(Throwable t) {
    return new CallbackResult<R>(null, t, false);
  }

  public static <R> CallbackResult<R> createSuccessful(R result) {
    return new CallbackResult<R>(result, null, false);
  }

  /**
   * Converts a completed {@link Future} into a {@link CallbackResult}.
   *
   * <p>Fix: the previous implementation caught {@code Exception} broadly, which also swallowed
   * {@link InterruptedException} (despite the declared {@code throws}) and then called
   * {@code e.getCause()} — null for an interruption — yielding a result that incorrectly reported
   * {@code isSuccessful() == true}. Interruption now propagates to the caller, and a missing
   * cause on an {@link ExecutionException} falls back to the exception itself so a failure is
   * never silently converted into a success.</p>
   *
   * @param execFuture the future to inspect; must not be null
   * @return a canceled, failed, or successful result mirroring the future's outcome
   * @throws InterruptedException if the current thread was interrupted while waiting
   */
  public static <R> CallbackResult<R> createFromFuture(Future<R> execFuture)
      throws InterruptedException {
    Preconditions.checkNotNull(execFuture);
    if (execFuture.isCancelled()) {
      return createCancelled();
    }
    try {
      return createSuccessful(execFuture.get());
    } catch (CancellationException ce) {
      // Canceled between the isCancelled() check above and get().
      return createCancelled();
    } catch (ExecutionException ee) {
      Throwable cause = ee.getCause() != null ? ee.getCause() : ee;
      return createFailed(cause);
    }
  }
}
| 4,208 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/callbacks/CallbacksDispatcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.callbacks;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
import java.util.WeakHashMap;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.util.ExecutorsUtils;
import lombok.AllArgsConstructor;
import lombok.Data;
/**
* A helper to dispatch callbacks to a set of listeners. The CallbacksDispatcher is responsible for
 * managing the list of listeners which implement a common interface L. Invocation happens through
 * the {@link #execCallbacks(com.google.common.base.Function)} method.
 *
 * @param <L> the listener type; it is strongly advised that the class implements toString() to
 * provide useful logging
*/
public class CallbacksDispatcher<L> implements Closeable {
  private final Logger _log;
  // Strongly-referenced listeners, added/removed explicitly via add/removeListener.
  private final List<L> _listeners = new ArrayList<>();
  // Weakly-referenced listeners; entries vanish automatically once the listener is GCed.
  // Void is a placeholder value type -- only the (weak) keys matter.
  private final WeakHashMap<L, Void> _autoListeners = new WeakHashMap<>();
  private final ExecutorService _execService;

  /**
   * Constructor
   * @param execService optional executor services for the callbacks; if none is specified, a
   * single-thread executor will be used
   * @param log optional logger; if none is specified, a default one will be created
   */
  public CallbacksDispatcher(Optional<ExecutorService> execService, Optional<Logger> log) {
    Preconditions.checkNotNull(execService);
    Preconditions.checkNotNull(log);
    _log = log.isPresent() ? log.get() : LoggerFactory.getLogger(getClass());
    _execService = execService.isPresent() ? execService.get() : getDefaultExecutor(_log);
  }

  /** Creates a single-thread executor whose threads are named after the given logger. */
  public static ExecutorService getDefaultExecutor(Logger log) {
    return Executors.newSingleThreadExecutor(
        ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of(log.getName() + "-%d")));
  }

  public CallbacksDispatcher() {
    this(Optional.<ExecutorService>absent(), Optional.<Logger>absent());
  }

  public CallbacksDispatcher(Logger log) {
    this(Optional.<ExecutorService>absent(), Optional.of(log));
  }

  public CallbacksDispatcher(ExecutorService execService, Logger log) {
    this(Optional.of(execService), Optional.of(log));
  }

  /** Shuts down the callback executor, waiting up to 5 seconds for in-flight callbacks. */
  @Override
  public void close()
      throws IOException {
    ExecutorsUtils.shutdownExecutorService(_execService, Optional.of(_log), 5, TimeUnit.SECONDS);
  }

  /**
   * Returns a snapshot of all current listeners: the explicitly added ones followed by any
   * still-live weak listeners.
   */
  public synchronized List<L> getListeners() {
    // Clone to protect against adding/removing listeners while running callbacks
    ArrayList<L> res = new ArrayList<>(_listeners);
    // Scan any auto listeners
    for (Map.Entry<L, Void> entry: _autoListeners.entrySet()) {
      res.add(entry.getKey());
    }
    return res;
  }

  public synchronized void addListener(L listener) {
    Preconditions.checkNotNull(listener);
    _log.debug("Adding listener:" + listener);
    _listeners.add(listener);
  }

  /**
   * Only weak references are stored for weak listeners. They will be removed from the dispatcher
   * automatically, once the listener objects are GCed. Note that weak listeners cannot be removed
   * explicitly. */
  public synchronized void addWeakListener(L listener) {
    Preconditions.checkNotNull(listener);
    _log.info("Adding a weak listener " + listener);
    _autoListeners.put(listener, null);
  }

  public synchronized void removeListener(L listener) {
    Preconditions.checkNotNull(listener);
    _log.info("Removing listener:" + listener);
    _listeners.remove(listener);
  }

  public Logger getLog() {
    return _log;
  }

  /**
   * Runs the callback against a single listener only.
   *
   * @throws InterruptedException if interrupted while waiting for the callback to complete
   */
  public <R> CallbackResults<L, R> execCallbacks(Function<? super L, R> callback, L listener)
      throws InterruptedException {
    Preconditions.checkNotNull(listener);
    List<L> listenerList = new ArrayList<>(1);
    listenerList.add(listener);
    return execCallbacks(callback, listenerList);
  }

  /**
   * Runs the callback against all currently registered listeners.
   *
   * @throws InterruptedException if interrupted while waiting for the callbacks to complete
   */
  public <R> CallbackResults<L, R> execCallbacks(Function<? super L, R> callback)
      throws InterruptedException {
    Preconditions.checkNotNull(callback);
    List<L> listeners = getListeners();
    return execCallbacks(callback, listeners);
  }

  // Submits one CallbackCallable per listener, blocks until all complete, and partitions the
  // outcomes into successes/failures/cancellations. Failures are isolated per listener: one
  // listener throwing does not prevent the others from running.
  private <R> CallbackResults<L, R> execCallbacks(Function<? super L, R> callback,
                                                  List<L> listeners)
      throws InterruptedException {
    CallbackResults<L, R> res = new CallbackResults<L, R>();
    if (0 == listeners.size()) {
      return res;
    }
    List<Callable<R>> callbacks = new ArrayList<>(listeners.size());
    for (L listener: listeners) {
      callbacks.add(new CallbackCallable<>(callback, listener));
    }
    // invokeAll blocks until every submitted callback has completed (or been cancelled).
    List<Future<R>> futures = _execService.invokeAll(callbacks);
    for (int i = 0; i < listeners.size(); ++i) {
      CallbackResult<R> cr = CallbackResult.createFromFuture(futures.get(i));
      L listener = listeners.get(i);
      if (cr.isCanceled()) {
        _log.warn("Callback cancelled: " + callbacks.get(i) + " on " + listener);
        res.cancellations.put(listener, cr);
      }
      else if (cr.hasFailed()) {
        _log.error("Callback error: " + callbacks.get(i) + " on " + listener + ":" + cr.getError(), cr.getError());
        res.failures.put(listener, cr);
      }
      else {
        res.successes.put(listener, cr);
      }
    }
    return res;
  }

  /** Binds a callback function to one listener so it can be submitted to the executor. */
  @AllArgsConstructor
  public class CallbackCallable<R> implements Callable<R> {
    final Function<? super L, R> _callback;
    final L _listener;

    @Override public R call() throws Exception {
      _log.info("Calling " + _callback + " on " + _listener);
      return _callback.apply(_listener);
    }
  }

  /**
   * Callback outcomes partitioned by listener. The maps use identity semantics
   * ({@link IdentityHashMap}), so distinct listener instances that compare equal are kept apart.
   */
  @Data
  public static class CallbackResults<L, R> {
    private final Map<L, CallbackResult<R>> successes = new IdentityHashMap<>();
    private final Map<L, CallbackResult<R>> failures = new IdentityHashMap<>();
    private final Map<L, CallbackResult<R>> cancellations = new IdentityHashMap<>();
  }
}
| 4,209 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/callbacks/Callback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.callbacks;
import com.google.common.base.Function;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
* A helper class to wrap a function and provide a name for logging purposes
*/
@Getter
@AllArgsConstructor
public abstract class Callback<L, R> implements Function<L, R> {
private final String name;
@Override public String toString() {
return this.name;
}
}
| 4,210 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/callbacks/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package provides some simple instrumentation to handling callback execution.
*
* <dl>
* <dt>Listeners</dt>
* <dd>Listeners are the object to which the callbacks are sent. This package does not impose
* too many restrictions on what listeners should look like. For a given
* {@link org.apache.gobblin.util.callbacks.CallbacksDispatcher}, they should all implement the same
* interface. </dd>
* <dt>Callbacks</dt>
* <dd>Callbacks are represented as {@link com.google.common.base.Function}<L, R> instances which
* take one L parameter, the listener to be applied on, and can return a result of type R. If no
* meaningful result is returned, R should be Void. There is a helper class
 * {@link org.apache.gobblin.util.callbacks.Callback} which allows assigning a meaningful string to the
* callback. Typically, this is the name of the callback and short description of any bound
* arguments.
*
* <p>Note that callback instances as defined above can't take any arguments to be passed to the
* listeners. They should be viewed as a binding of the actual callback method call to a specific
* set of arguments.
*
* <p>For example, if we want to define a callback <code>void onNewJob(JobSpec newJob)</code> to
* be sent to JobCatalogListener interface, we need:
* <ul>
* <li>Define the <code>NewJobCallback implements Callback<JobCatalogListener, Void> </code>
* <li>In the constructor, the above class should take and save a parameter for the JobSpec.
* <li>The apply method, should look something like <code>input.onNewJob(this.newJob)</code>
* </ul>
* </dd>
* <dt>Callbacks Dispatcher</dt>
* <dd> The {@link org.apache.gobblin.util.callbacks.CallbacksDispatcher}<L> is responsible for:
* <ul>
* <li>Maintaining a list of listeners of type L.
* <li>Dispatch callbacks invoked through {@link org.apache.gobblin.util.callbacks.CallbacksDispatcher#execCallbacks(com.google.common.base.Function)}
* to each of the above listeners.
* <li>Provide parallelism of the callbacks if necessary.
* <li>Guarantee isolation of failures in callbacks.
* <li>Provide logging for debugging purposes.
* <li>Classify callback results in {@link org.apache.gobblin.util.callbacks.CallbacksDispatcher.CallbackResults}
* for easier upstream processing.
* </ul>
* </dd>
*
* </dl>
*/
package org.apache.gobblin.util.callbacks;
| 4,211 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/dataset/DatasetUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.dataset;
import java.util.Map;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.base.Strings;
import com.google.common.collect.Iterables;
import com.google.common.collect.Maps;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.StateUtils;
import org.apache.gobblin.source.workunit.WorkUnit;
public class DatasetUtils {

  private static final Logger LOG = LoggerFactory.getLogger(DatasetUtils.class);

  /** Key that identifies the dataset inside each entry of {@link #DATASET_SPECIFIC_PROPS}. */
  public static final String DATASET = "dataset";

  /**
   * A configuration key that allows a user to specify config parameters on a dataset specific level. The value of this
   * config should be a JSON array. Each entry should be a {@link JsonObject} and should contain a
   * {@link com.google.gson.JsonPrimitive} that identifies the dataset. All configs in each dataset entry will
   * be added to the {@link WorkUnit}s for that dataset.
   *
   * <p>
   * An example value could be: "[{"dataset" : "myDataset1", "writer.partition.columns" : "header.memberId"},
   * {"dataset" : "myDataset2", "writer.partition.columns" : "auditHeader.time"}]".
   * </p>
   *
   * <p>
   * The "dataset" field also allows regular expressions. For example, one can specify key, value
   * "dataset" : "myDataset.*". In this case all datasets whose name matches the pattern "myDataset.*" will have
   * all the specified config properties added to their {@link WorkUnit}s. If a dataset matches multiple
   * "dataset"s then the properties from all the matching {@link JsonObject}s will be added to its {@link WorkUnit}s.
   * </p>
   */
  public static final String DATASET_SPECIFIC_PROPS = DATASET + ".specific.props";

  /**
   * Legacy key with the same semantics as {@link #DATASET_SPECIFIC_PROPS}; kept for backward compatibility.
   */
  private static final String KAFKA_TOPIC_SPECIFIC_STATE = "kafka.topic.specific.state";

  /** Utility class; not meant to be instantiated. */
  private DatasetUtils() {}

  /**
   * Given a {@link Iterable} of dataset identifiers (e.g., name, URN, etc.), return a {@link Map} that links each
   * dataset with the extra configuration information specified in the state via {@link #DATASET_SPECIFIC_PROPS}.
   *
   * @param datasets dataset identifiers to match against the configured dataset patterns
   * @param state state that may carry {@link #DATASET_SPECIFIC_PROPS} (or the legacy Kafka key)
   * @return map from dataset identifier to its merged dataset-specific {@link State}; an empty map if neither
   *         config key is set
   */
  public static Map<String, State> getDatasetSpecificProps(Iterable<String> datasets, State state) {
    if (!Strings.isNullOrEmpty(state.getProp(DATASET_SPECIFIC_PROPS))
        || !Strings.isNullOrEmpty(state.getProp(KAFKA_TOPIC_SPECIFIC_STATE))) {

      Map<String, State> datasetSpecificConfigMap = Maps.newHashMap();

      // Prefer the new key; fall back to the legacy Kafka-specific key.
      JsonArray array = !Strings.isNullOrEmpty(state.getProp(DATASET_SPECIFIC_PROPS))
          ? state.getPropAsJsonArray(DATASET_SPECIFIC_PROPS) : state.getPropAsJsonArray(KAFKA_TOPIC_SPECIFIC_STATE);

      // Iterate over the entire JsonArray specified by the config key
      for (JsonElement datasetElement : array) {

        // Check that each entry in the JsonArray is a JsonObject
        Preconditions.checkArgument(datasetElement.isJsonObject(),
            "The value for property " + DATASET_SPECIFIC_PROPS + " is malformed");

        JsonObject object = datasetElement.getAsJsonObject();

        // Only process JsonObjects that have a dataset identifier
        if (object.has(DATASET)) {
          JsonElement datasetNameElement = object.get(DATASET);
          Preconditions.checkArgument(datasetNameElement.isJsonPrimitive(), "The value for property "
              + DATASET_SPECIFIC_PROPS + " is malformed, the " + DATASET + " field must be a string");

          // Iterate through each dataset that matches the value of the JsonObjects DATASET field
          for (String dataset : Iterables.filter(datasets, new DatasetPredicate(datasetNameElement.getAsString()))) {

            // If an entry already exists for a dataset, add it to the current state, else create a new state
            if (datasetSpecificConfigMap.containsKey(dataset)) {
              datasetSpecificConfigMap.get(dataset).addAll(StateUtils.jsonObjectToState(object, DATASET));
            } else {
              datasetSpecificConfigMap.put(dataset, StateUtils.jsonObjectToState(object, DATASET));
            }
          }
        } else {
          // Fixed log message grammar ("as it is does not" -> "as it does not").
          LOG.warn("Skipping JsonElement " + datasetElement + " as it does not contain a field with key " + DATASET);
        }
      }
      return datasetSpecificConfigMap;
    }
    return Maps.newHashMap();
  }

  /**
   * Implementation of {@link Predicate} that takes in a dataset regex via its constructor. It returns true in the
   * {@link #apply(String)} method only if the dataset regex matches the specified dataset identifier.
   */
  private static class DatasetPredicate implements Predicate<String> {

    private final Pattern datasetPattern;

    private DatasetPredicate(String datasetRegex) {
      // Dataset matching is intentionally case-insensitive.
      this.datasetPattern = Pattern.compile(datasetRegex, Pattern.CASE_INSENSITIVE);
    }

    @Override
    public boolean apply(String input) {
      return this.datasetPattern.matcher(input).matches();
    }
  }
}
| 4,212 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/io/StreamCopier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
import javax.annotation.concurrent.NotThreadSafe;
import com.codahale.metrics.Meter;
import org.apache.gobblin.util.limiter.Limiter;
/**
 * A class that copies an {@link InputStream} to an {@link OutputStream} in a configurable way.
 *
 * <p>Supports an optional byte cap ({@code maxBytes}), a configurable buffer size, an optional
 * {@link Meter} reporting copy throughput, and optionally closing both channels when the copy
 * finishes. An instance may be used for exactly one copy.
 */
@NotThreadSafe
public class StreamCopier {

  private static final int KB = 1024;
  public static final int DEFAULT_BUFFER_SIZE = 32 * KB;

  private final ReadableByteChannel inputChannel;
  private final WritableByteChannel outputChannel;
  /** Maximum number of bytes to copy, or null for no limit. */
  private final Long maxBytes;

  private int bufferSize = DEFAULT_BUFFER_SIZE;
  private Meter copySpeedMeter;
  private boolean closeChannelsOnComplete = false;
  private volatile boolean copied = false;

  public StreamCopier(InputStream inputStream, OutputStream outputStream) {
    this(inputStream, outputStream, null);
  }

  public StreamCopier(InputStream inputStream, OutputStream outputStream, Long maxBytes) {
    this(Channels.newChannel(inputStream), Channels.newChannel(outputStream), maxBytes);
  }

  public StreamCopier(ReadableByteChannel inputChannel, WritableByteChannel outputChannel) {
    this(inputChannel, outputChannel, null);
  }

  public StreamCopier(ReadableByteChannel inputChannel, WritableByteChannel outputChannel, Long maxBytes) {
    this.inputChannel = inputChannel;
    this.outputChannel = outputChannel;
    this.maxBytes = maxBytes;
  }

  /**
   * Set the size in bytes of the buffer used to copy.
   */
  public StreamCopier withBufferSize(int bufferSize) {
    this.bufferSize = bufferSize;
    return this;
  }

  /**
   * Set a {@link Meter} where copy speed will be reported.
   */
  public StreamCopier withCopySpeedMeter(Meter copySpeedMeter) {
    this.copySpeedMeter = copySpeedMeter;
    return this;
  }

  /**
   * Close the input and output {@link java.nio.channels.Channel}s after copy, whether the copy was successful or not.
   */
  public StreamCopier closeChannelsOnComplete() {
    this.closeChannelsOnComplete = true;
    return this;
  }

  /**
   * Execute the copy of bytes from the input to the output stream. If maxBytes is specified, limits the number of
   * bytes copied to maxBytes.
   * Note: this method should only be called once. Further calls will throw a {@link IllegalStateException}.
   * @return Number of bytes copied.
   * @throws IOException if reading, writing, or (when enabled) closing a channel fails
   */
  public synchronized long copy() throws IOException {
    if (this.copied) {
      throw new IllegalStateException(String.format("%s already copied.", StreamCopier.class.getName()));
    }
    this.copied = true;

    try {
      long numBytes = 0;
      long totalBytes = 0;
      final ByteBuffer buffer = ByteBuffer.allocateDirect(this.bufferSize);

      // Only keep copying if we've read less than maxBytes (if maxBytes exists)
      while ((this.maxBytes == null || this.maxBytes > totalBytes) &&
          (numBytes = fillBufferFromInputChannel(buffer)) != -1) {
        totalBytes += numBytes;
        // flip the buffer to be written
        buffer.flip();
        // If we've read more than maxBytes, discard enough bytes to only write maxBytes.
        if (this.maxBytes != null && totalBytes > this.maxBytes) {
          buffer.limit(buffer.limit() - (int) (totalBytes - this.maxBytes));
          totalBytes = this.maxBytes;
        }
        this.outputChannel.write(buffer);
        // Preserve any bytes the output channel did not accept for the next iteration.
        buffer.compact();
        if (this.copySpeedMeter != null) {
          this.copySpeedMeter.mark(numBytes);
        }
      }
      // Done reading; flip once more and drain whatever is still buffered.
      buffer.flip();
      while (buffer.hasRemaining()) {
        this.outputChannel.write(buffer);
      }

      return totalBytes;
    } finally {
      if (this.closeChannelsOnComplete) {
        this.inputChannel.close();
        this.outputChannel.close();
      }
    }
  }

  /** Reads from the input channel into {@code buffer}; returns bytes read, or -1 at end of stream. */
  private long fillBufferFromInputChannel(ByteBuffer buffer) throws IOException {
    return this.inputChannel.read(buffer);
  }

  /**
   * Indicates there were not enough permits in the {@link Limiter} to finish the copy.
   */
  public static class NotEnoughPermitsException extends IOException {
    private NotEnoughPermitsException() {
      super("Not enough permits to perform stream copy.");
    }
  }
}
| 4,213 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/io/StreamUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.util.zip.GZIPInputStream;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream;
import org.apache.commons.compress.utils.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;
import org.apache.gobblin.configuration.ConfigurationKeys;
/**
 * Utility class of input/output stream helpers.
 */
public class StreamUtils {

  /**
   * Convert an instance of {@link InputStream} to a {@link FSDataInputStream} that is {@link Seekable} and
   * {@link PositionedReadable}.
   *
   * @see SeekableFSInputStream
   *
   */
  public static FSDataInputStream convertStream(InputStream in) throws IOException {
    return new FSDataInputStream(new SeekableFSInputStream(in));
  }

  /**
   * Copies an {@link InputStream} to and {@link OutputStream} using {@link Channels}.
   *
   * <p>
   * <b>Note:</b> The method does not close the {@link InputStream} and {@link OutputStream}. However, the
   * {@link ReadableByteChannel} and {@link WritableByteChannel}s are closed
   * </p>
   *
   * @return Total bytes copied
   */
  public static long copy(InputStream is, OutputStream os) throws IOException {
    return new StreamCopier(is, os).copy();
  }

  /**
   * Copies a {@link ReadableByteChannel} to a {@link WritableByteChannel}.
   * <p>
   * <b>Note:</b> The {@link ReadableByteChannel} and {@link WritableByteChannel}s are NOT closed by the method
   * </p>
   *
   * @return Total bytes copied
   */
  public static long copy(ReadableByteChannel inputChannel, WritableByteChannel outputChannel) throws IOException {
    return new StreamCopier(inputChannel, outputChannel).copy();
  }

  /**
   * Creates a tar gzip file using a given {@link Path} as input and a given {@link Path} as a destination. If the given
   * input is a file then only that file will be added to tarball. If it is a directory then the entire directory will
   * be recursively put into the tarball.
   *
   * @param fs the {@link FileSystem} the input exists, and the the output should be written to.
   * @param sourcePath the {@link Path} of the input files, this can either be a file or a directory.
   * @param destPath the {@link Path} that tarball should be written to.
   */
  public static void tar(FileSystem fs, Path sourcePath, Path destPath) throws IOException {
    tar(fs, fs, sourcePath, destPath);
  }

  /**
   * Similiar to {@link #tar(FileSystem, Path, Path)} except the source and destination {@link FileSystem} can be different.
   *
   * @see #tar(FileSystem, Path, Path)
   */
  public static void tar(FileSystem sourceFs, FileSystem destFs, Path sourcePath, Path destPath) throws IOException {
    try (FSDataOutputStream fsDataOutputStream = destFs.create(destPath);
        TarArchiveOutputStream tarArchiveOutputStream = new TarArchiveOutputStream(
            new GzipCompressorOutputStream(fsDataOutputStream), ConfigurationKeys.DEFAULT_CHARSET_ENCODING.name())) {

      FileStatus fileStatus = sourceFs.getFileStatus(sourcePath);
      if (sourceFs.isDirectory(sourcePath)) {
        // Recursively add the directory tree rooted at sourcePath.
        dirToTarArchiveOutputStreamRecursive(fileStatus, sourceFs, Optional.<Path> absent(), tarArchiveOutputStream);
      } else {
        // Single file: archive it under its own name at the tarball root.
        try (FSDataInputStream fsDataInputStream = sourceFs.open(sourcePath)) {
          fileToTarArchiveOutputStream(fileStatus, fsDataInputStream, new Path(sourcePath.getName()),
              tarArchiveOutputStream);
        }
      }
    }
  }

  /**
   * Helper method for {@link #tar(FileSystem, FileSystem, Path, Path)} that recursively adds a directory to a given
   * {@link TarArchiveOutputStream}.
   */
  private static void dirToTarArchiveOutputStreamRecursive(FileStatus dirFileStatus, FileSystem fs,
      Optional<Path> destDir, TarArchiveOutputStream tarArchiveOutputStream) throws IOException {

    Preconditions.checkState(fs.isDirectory(dirFileStatus.getPath()));

    Path dir = destDir.isPresent() ? new Path(destDir.get(), dirFileStatus.getPath().getName())
        : new Path(dirFileStatus.getPath().getName());
    dirToTarArchiveOutputStream(dir, tarArchiveOutputStream);

    for (FileStatus childFileStatus : fs.listStatus(dirFileStatus.getPath())) {
      Path childFile = new Path(dir, childFileStatus.getPath().getName());

      if (fs.isDirectory(childFileStatus.getPath())) {
        dirToTarArchiveOutputStreamRecursive(childFileStatus, fs, Optional.of(childFile), tarArchiveOutputStream);
      } else {
        try (FSDataInputStream fsDataInputStream = fs.open(childFileStatus.getPath())) {
          fileToTarArchiveOutputStream(childFileStatus, fsDataInputStream, childFile, tarArchiveOutputStream);
        }
      }
    }
  }

  /**
   * Helper method for {@link #tar(FileSystem, FileSystem, Path, Path)} that adds a directory entry to a given
   * {@link TarArchiveOutputStream}.
   */
  private static void dirToTarArchiveOutputStream(Path destDir, TarArchiveOutputStream tarArchiveOutputStream)
      throws IOException {
    TarArchiveEntry tarArchiveEntry = new TarArchiveEntry(formatPathToDir(destDir));
    tarArchiveEntry.setModTime(System.currentTimeMillis());
    tarArchiveOutputStream.putArchiveEntry(tarArchiveEntry);
    tarArchiveOutputStream.closeArchiveEntry();
  }

  /**
   * Helper method for {@link #tar(FileSystem, FileSystem, Path, Path)} that adds a file entry to a given
   * {@link TarArchiveOutputStream} and copies the contents of the file to the new entry.
   */
  private static void fileToTarArchiveOutputStream(FileStatus fileStatus, FSDataInputStream fsDataInputStream,
      Path destFile, TarArchiveOutputStream tarArchiveOutputStream) throws IOException {
    TarArchiveEntry tarArchiveEntry = new TarArchiveEntry(formatPathToFile(destFile));
    tarArchiveEntry.setSize(fileStatus.getLen());
    tarArchiveEntry.setModTime(System.currentTimeMillis());
    tarArchiveOutputStream.putArchiveEntry(tarArchiveEntry);

    try {
      IOUtils.copy(fsDataInputStream, tarArchiveOutputStream);
    } finally {
      // Always close the entry, even if the copy fails, so the archive stays structurally valid.
      tarArchiveOutputStream.closeArchiveEntry();
    }
  }

  /**
   * Convert a {@link Path} to a {@link String} and make sure it is properly formatted to be recognized as a directory
   * by {@link TarArchiveEntry}.
   */
  private static String formatPathToDir(Path path) {
    return path.toString().endsWith(Path.SEPARATOR) ? path.toString() : path.toString() + Path.SEPARATOR;
  }

  /**
   * Convert a {@link Path} to a {@link String} and make sure it is properly formatted to be recognized as a file
   * by {@link TarArchiveEntry}.
   */
  private static String formatPathToFile(Path path) {
    return StringUtils.removeEnd(path.toString(), Path.SEPARATOR);
  }

  /*
   * Determines if a byte array is compressed. The java.util.zip GZip
   * implementation does not expose the GZip header so it is difficult to determine
   * if a string is compressed.
   * Copied from Helix GZipCompressionUtil
   * @param bytes an array of bytes
   * @return true if the array is compressed or false otherwise
   */
  public static boolean isCompressed(byte[] bytes) {
    if ((bytes == null) || (bytes.length < 2)) {
      return false;
    } else {
      // Check the two-byte GZip magic header (0x1f 0x8b).
      return ((bytes[0] == (byte) (GZIPInputStream.GZIP_MAGIC)) &&
          (bytes[1] == (byte) (GZIPInputStream.GZIP_MAGIC >> 8)));
    }
  }

  /**
   * Reads the full contents of a ByteBuffer and writes them to an OutputStream. The ByteBuffer is
   * consumed by this operation; eg in.remaining() will be 0 after it completes successfully.
   * @param in ByteBuffer to write into the OutputStream
   * @param out Destination stream
   * @throws IOException If there is an error writing into the OutputStream
   */
  public static void byteBufferToOutputStream(ByteBuffer in, OutputStream out)
      throws IOException {
    final int BUF_SIZE = 8192;

    if (in.hasArray()) {
      out.write(in.array(), in.arrayOffset() + in.position(), in.remaining());
      // Fix: advance the position so the documented post-condition (in.remaining() == 0) also
      // holds for array-backed buffers; previously only the non-array branch consumed the buffer.
      in.position(in.limit());
    } else {
      final byte[] b = new byte[Math.min(in.remaining(), BUF_SIZE)];
      while (in.remaining() > 0) {
        int bytesToRead = Math.min(in.remaining(), BUF_SIZE);
        in.get(b, 0, bytesToRead);
        out.write(b, 0, bytesToRead);
      }
    }
  }
}
| 4,214 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/io/MeteredOutputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import com.codahale.metrics.Meter;
import com.google.common.base.Optional;
import lombok.Builder;
import lombok.extern.slf4j.Slf4j;
/**
 * A {@link FilterOutputStream} that counts the bytes written to the underlying {@link OutputStream}.
 */
@Slf4j
public class MeteredOutputStream extends FilterOutputStream implements MeteredStream {

  /**
   * Find the lowest {@link MeteredOutputStream} in a chain of {@link FilterOutputStream}s.
   *
   * @param os the outermost stream of the chain to search
   * @return the innermost {@link MeteredOutputStream} found, or absent if the chain has none
   */
  public static Optional<MeteredOutputStream> findWrappedMeteredOutputStream(OutputStream os) {
    if (os instanceof FilterOutputStream) {
      try {
        Optional<MeteredOutputStream> meteredOutputStream =
            findWrappedMeteredOutputStream(FilterStreamUnpacker.unpackFilterOutputStream((FilterOutputStream) os));
        if (meteredOutputStream.isPresent()) {
          return meteredOutputStream;
        }
      } catch (IllegalAccessException iae) {
        // Fixed message: this class deals with output streams (the old text said "input stream").
        log.warn("Cannot unpack output stream due to SecurityManager.", iae);
        // Do nothing, we can't unpack the FilterOutputStream due to security restrictions
      }
    }
    if (os instanceof MeteredOutputStream) {
      return Optional.of((MeteredOutputStream) os);
    }
    return Optional.absent();
  }

  // Package-private so related classes in this package can reach the decorator directly.
  BatchedMeterDecorator meter;

  /**
   * Builds a {@link MeteredOutputStream}.
   * @param out The {@link OutputStream} to measure.
   * @param meter A {@link Meter} to use for measuring the {@link OutputStream}. If null, a new {@link Meter} will be created.
   * @param updateFrequency For performance, {@link MeteredOutputStream} will batch {@link Meter} updates to this many bytes.
   */
  @Builder
  public MeteredOutputStream(OutputStream out, Meter meter, int updateFrequency) {
    super(out);
    // A non-positive updateFrequency falls back to batching every 1000 bytes.
    this.meter = new BatchedMeterDecorator(meter == null ? new Meter() : meter, updateFrequency > 0 ? updateFrequency : 1000);
  }

  @Override
  public void write(int b) throws IOException {
    this.meter.mark();
    this.out.write(b);
  }

  @Override
  public void write(byte[] b, int off, int len) throws IOException {
    this.meter.mark(len);
    this.out.write(b, off, len);
  }

  @Override
  public Meter getBytesProcessedMeter() {
    return this.meter.getUnderlyingMeter();
  }
}
| 4,215 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/io/ThrottledInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import java.io.Closeable;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.gobblin.util.limiter.Limiter;
import javax.annotation.concurrent.NotThreadSafe;
/**
 * A throttled {@link InputStream}.
 *
 * <p>Before every read, permits are acquired from a {@link Limiter} for the bytes counted by a
 * companion {@link MeteredInputStream} since the previous read, blocking until they are granted.
 */
@NotThreadSafe
public class ThrottledInputStream extends FilterInputStream {

  private final Limiter limiter;
  private final MeteredInputStream meter;
  /** Meter count already accounted for, i.e. bytes for which permits were acquired. */
  private long prevCount;

  /**
   * Builds a {@link ThrottledInputStream}.
   *
   * It is recommended to use a {@link StreamThrottler} for creation of {@link ThrottledInputStream}s.
   *
   * @param in {@link InputStream} to throttle.
   * @param limiter {@link Limiter} to use for throttling.
   * @param meter {@link MeteredInputStream} used to measure the {@link InputStream}. Note the {@link MeteredInputStream}
   *              MUST be in the {@link FilterInputStream} chain of {@link #in}.
   */
  public ThrottledInputStream(InputStream in, Limiter limiter, MeteredInputStream meter) {
    super(in);
    this.limiter = limiter;
    this.meter = meter;
    // In case the meter was already used
    this.prevCount = this.meter.getBytesProcessedMeter().getCount();
  }

  @Override
  public int read() throws IOException {
    blockUntilPermitsAvailable();
    return this.in.read();
  }

  @Override
  public int read(byte[] b) throws IOException {
    blockUntilPermitsAvailable();
    return this.in.read(b);
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    blockUntilPermitsAvailable();
    return this.in.read(b, off, len);
  }

  @Override
  public synchronized void reset() throws IOException {
    super.reset();
    // Re-sync with the meter so bytes re-read after reset are charged from the current count.
    this.prevCount = this.meter.getBytesProcessedMeter().getCount();
  }

  /**
   * Acquires permits for the bytes read since the previous call (as measured by the wrapped
   * {@link MeteredInputStream}), blocking until the {@link Limiter} grants them.
   *
   * @throws RuntimeException if permits cannot be acquired or the thread is interrupted while waiting
   */
  private void blockUntilPermitsAvailable() {
    try {
      long currentCount = this.meter.getBytesProcessedMeter().getCount();
      long permitsNeeded = currentCount - this.prevCount;
      this.prevCount = currentCount;
      if (permitsNeeded == 0L) {
        return;
      }
      Closeable permit = this.limiter.acquirePermits(permitsNeeded);
      if (permit == null) {
        throw new RuntimeException("Could not acquire permits.");
      }
    } catch (InterruptedException ie) {
      // Restore the interrupt flag so callers up the stack can still observe the interruption.
      Thread.currentThread().interrupt();
      throw new RuntimeException(ie);
    }
  }
}
| 4,216 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/io/AdditionalCloseableInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.fs.FSDataInputStream;
/**
 * An extension of {@link FSDataInputStream} that takes additional {@link Closeable} objects,
 * which will all be closed when {@link InputStream} is closed.
 */
public class AdditionalCloseableInputStream extends FSDataInputStream {
  private final Closeable[] closeables;

  public AdditionalCloseableInputStream(InputStream in, Closeable... closeables)
      throws IOException {
    super(in);
    this.closeables = closeables;
  }

  /**
   * Closes the underlying stream and then every additional {@link Closeable}. Every resource is
   * attempted even if an earlier close fails; the first failure is rethrown with subsequent
   * failures attached as suppressed exceptions. (Previously a failure in {@code super.close()}
   * leaked all the additional closeables.)
   */
  @Override
  public void close()
      throws IOException {
    IOException firstFailure = null;
    try {
      super.close();
    } catch (IOException ioe) {
      firstFailure = ioe;
    }
    for (Closeable closeable : this.closeables) {
      try {
        closeable.close();
      } catch (IOException ioe) {
        if (firstFailure == null) {
          firstFailure = ioe;
        } else {
          firstFailure.addSuppressed(ioe);
        }
      }
    }
    if (firstFailure != null) {
      throw firstFailure;
    }
  }
}
| 4,217 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/io/EmptyInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import java.io.IOException;
import java.io.InputStream;
/**
 * An {@link InputStream} with empty content.
 */
public class EmptyInputStream extends InputStream {
  // Shared singleton; the stream is stateless, so reuse is safe.
  public static final InputStream instance = new EmptyInputStream();

  private EmptyInputStream() {}

  /**
   * An empty stream is always at end-of-file.
   *
   * @return -1, per the {@link InputStream#read()} end-of-stream contract. (Previously this
   *         returned 0, which made consumers see an unbounded stream of zero-valued bytes
   *         instead of empty content.)
   */
  @Override
  public int read()
      throws IOException {
    return -1;
  }
}
| 4,218 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/io/SeekableFSInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;
/**
 * Class that wraps an {@link InputStream} to support {@link Seekable} and {@link PositionedReadable}
 */
public class SeekableFSInputStream extends FSInputStream {
  private final InputStream in;
  // Number of bytes consumed from the wrapped stream so far.
  private long pos;

  public SeekableFSInputStream(InputStream in) {
    this.in = in;
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    int val = this.in.read(b, off, len);
    if (val > 0) {
      this.pos += val;
    }
    return val;
  }

  @Override
  public long getPos() throws IOException {
    return this.pos;
  }

  /**
   * Seeks by skipping bytes on the wrapped stream.
   *
   * <p>NOTE(review): only forward seeks work here — a backwards seek passes a negative value to
   * {@link InputStream#skip(long)}, which is typically a no-op; {@code skip} may also skip fewer
   * bytes than requested. Confirm callers only seek forward.
   */
  @Override
  public void seek(long pos) throws IOException {
    this.pos += this.in.skip(pos - this.pos);
  }

  @Override
  public boolean seekToNewSource(long arg0) throws IOException {
    return false;
  }

  /**
   * Reads a single byte.
   *
   * @return the byte read (0-255), or -1 at end of stream
   */
  @Override
  public int read() throws IOException {
    int val = this.in.read();
    if (val != -1) {
      // Fix: advance the position by exactly one byte. The previous code added the byte's
      // *value* to the position (and did not advance at all for zero-valued bytes),
      // corrupting getPos().
      this.pos++;
    }
    return val;
  }

  @Override
  public void close() throws IOException {
    super.close();
    in.close();
  }
}
| 4,219 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/io/StreamCopierSharedLimiterKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import java.net.URI;
import org.apache.hadoop.fs.LocalFileSystem;
import com.google.common.base.Joiner;
import org.apache.gobblin.util.ClustersNames;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
import lombok.Getter;
/**
 * A subclass of {@link SharedLimiterKey} with more detailed information on the source and target of
 * a throttled {@link StreamCopier}.
 */
@Getter
public class StreamCopierSharedLimiterKey extends SharedLimiterKey {

  public static final String STREAM_COPIER_KEY_PREFIX = "streamCopierBandwidth";

  private final URI sourceURI;
  private final URI targetURI;

  public StreamCopierSharedLimiterKey(URI sourceURI, URI targetURI) {
    super(getLimitedResourceId(sourceURI, targetURI));
    this.sourceURI = sourceURI;
    this.targetURI = targetURI;
  }

  /** Builds the limited-resource id as {@code streamCopierBandwidth/<sourceFs>/<targetFs>}. */
  private static String getLimitedResourceId(URI sourceURI, URI targetURI) {
    String sourceId = getFSIdentifier(sourceURI);
    String targetId = getFSIdentifier(targetURI);
    return Joiner.on("/").join(STREAM_COPIER_KEY_PREFIX, sourceId, targetId);
  }

  /** Maps a file system URI to a cluster identifier, treating the local file system as "localhost". */
  private static String getFSIdentifier(URI uri) {
    boolean isLocalFs = new LocalFileSystem().getScheme().equals(uri.getScheme());
    return isLocalFs ? "localhost" : ClustersNames.getInstance().getClusterName(uri.toString());
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass() || !super.equals(o)) {
      return false;
    }
    StreamCopierSharedLimiterKey other = (StreamCopierSharedLimiterKey) o;
    return this.sourceURI.equals(other.sourceURI) && this.targetURI.equals(other.targetURI);
  }

  @Override
  public int hashCode() {
    int hash = super.hashCode();
    hash = 31 * hash + this.sourceURI.hashCode();
    hash = 31 * hash + this.targetURI.hashCode();
    return hash;
  }
}
| 4,220 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/io/MeteredStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import com.codahale.metrics.Meter;
/**
 * A {@link java.io.InputStream} or {@link java.io.OutputStream} that measures bytes processed.
 * Implemented by stream wrappers that expose their byte-count {@link Meter} to callers.
 */
public interface MeteredStream {

  /**
   * @return The {@link Meter} measuring the bytes processed.
   */
  Meter getBytesProcessedMeter();
}
| 4,221 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/io/StreamThrottler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import java.io.InputStream;
import java.net.URI;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.broker.EmptyKey;
import org.apache.gobblin.broker.ResourceInstance;
import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.ScopeType;
import org.apache.gobblin.broker.iface.ScopedConfigView;
import org.apache.gobblin.broker.iface.SharedResourceFactory;
import org.apache.gobblin.broker.iface.SharedResourceFactoryResponse;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.util.limiter.Limiter;
import org.apache.gobblin.util.limiter.MultiLimiter;
import org.apache.gobblin.util.limiter.NoopLimiter;
import org.apache.gobblin.util.limiter.broker.SharedLimiterFactory;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.extern.slf4j.Slf4j;
/**
 * A class used to throttle {@link InputStream}s.
 * @param <S> the {@link ScopeType} of the {@link SharedResourcesBroker} used to obtain {@link Limiter}s.
 */
@Slf4j
@AllArgsConstructor
public class StreamThrottler<S extends ScopeType<S>> {

  /**
   * A {@link SharedResourceFactory} that creates {@link StreamThrottler}.
   */
  public static class Factory<S extends ScopeType<S>> implements SharedResourceFactory<StreamThrottler<S>, EmptyKey, S> {
    public static final String NAME = "streamThrottler";

    @Override
    public String getName() {
      return NAME;
    }

    @Override
    public SharedResourceFactoryResponse<StreamThrottler<S>> createResource(SharedResourcesBroker<S> broker,
        ScopedConfigView<S, EmptyKey> config) throws NotConfiguredException {
      StreamThrottler<S> throttler = new StreamThrottler<>(broker);
      return new ResourceInstance<>(throttler);
    }

    @Override
    public S getAutoScope(SharedResourcesBroker<S> broker, ConfigView<S, EmptyKey> config) {
      return broker.selfScope().getType();
    }
  }

  private final SharedResourcesBroker<S> broker;

  /**
   * Throttles an {@link InputStream} if throttling is configured.
   * @param inputStream {@link InputStream} to throttle.
   * @param sourceURI used for selecting the throttling policy.
   * @param targetURI used for selecting the throttling policy.
   */
  @Builder(builderMethodName = "throttleInputStream", builderClassName = "InputStreamThrottler")
  private ThrottledInputStream doThrottleInputStream(InputStream inputStream, URI sourceURI, URI targetURI) {
    Preconditions.checkNotNull(inputStream, "InputStream cannot be null.");

    // Start with a pass-through limiter; layer a brokered limiter on top only when both
    // endpoints are known (the key is derived from the source/target pair).
    Limiter limiter = new NoopLimiter();
    if (sourceURI == null || targetURI == null) {
      log.info("Not throttling input stream because source or target URIs are not defined.");
    } else {
      StreamCopierSharedLimiterKey key = new StreamCopierSharedLimiterKey(sourceURI, targetURI);
      try {
        Limiter brokeredLimiter = this.broker.getSharedResource(new SharedLimiterFactory<S>(), key);
        limiter = new MultiLimiter(limiter, brokeredLimiter);
      } catch (NotConfiguredException nce) {
        log.warn("Could not create a Limiter for key " + key, nce);
      }
    }

    // Reuse a MeteredInputStream already present in the filter chain; otherwise wrap the input.
    Optional<MeteredInputStream> meteredStream = MeteredInputStream.findWrappedMeteredInputStream(inputStream);
    if (!meteredStream.isPresent()) {
      MeteredInputStream wrapped = MeteredInputStream.builder().in(inputStream).build();
      meteredStream = Optional.of(wrapped);
      inputStream = wrapped;
    }
    return new ThrottledInputStream(inputStream, limiter, meteredStream.get());
  }
}
| 4,222 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/io/MeteredInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import com.codahale.metrics.Meter;
import com.google.common.base.Optional;
import lombok.Builder;
import lombok.extern.slf4j.Slf4j;
/**
 * A {@link FilterInputStream} that counts the bytes read from the underlying {@link InputStream}.
 */
@Slf4j
public class MeteredInputStream extends FilterInputStream implements MeteredStream {

  /**
   * Find the lowest {@link MeteredInputStream} in a chain of {@link FilterInputStream}s.
   * @return the innermost {@link MeteredInputStream}, or absent if none exists (or the chain
   *         cannot be unpacked due to reflection restrictions).
   */
  public static Optional<MeteredInputStream> findWrappedMeteredInputStream(InputStream is) {
    if (is instanceof FilterInputStream) {
      try {
        Optional<MeteredInputStream> meteredInputStream =
            findWrappedMeteredInputStream(FilterStreamUnpacker.unpackFilterInputStream((FilterInputStream) is));
        if (meteredInputStream.isPresent()) {
          return meteredInputStream;
        }
      } catch (IllegalAccessException iae) {
        log.warn("Cannot unpack input stream due to SecurityManager.", iae);
        // Do nothing, we can't unpack the FilterInputStream due to security restrictions
      }
    }
    if (is instanceof MeteredInputStream) {
      return Optional.of((MeteredInputStream) is);
    }
    return Optional.absent();
  }

  // Batches meter updates to reduce per-byte metering overhead.
  private BatchedMeterDecorator meter;

  /**
   * Builds a {@link MeteredInputStream}.
   * @param in The {@link InputStream} to measure.
   * @param meter A {@link Meter} to use for measuring the {@link InputStream}. If null, a new {@link Meter} will be created.
   * @param updateFrequency For performance, {@link MeteredInputStream} will batch {@link Meter} updates to this many bytes.
   *                        Non-positive values fall back to a default of 1000.
   */
  @Builder
  private MeteredInputStream(InputStream in, Meter meter, int updateFrequency) {
    super(in);
    this.meter = new BatchedMeterDecorator(meter == null ? new Meter() : meter, updateFrequency > 0 ? updateFrequency : 1000);
  }

  @Override
  public int read() throws IOException {
    int bte = super.read();
    if (bte >= 0) {
      this.meter.mark();
    }
    return bte;
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    int readBytes = super.read(b, off, len);
    // Fix: only mark when bytes were actually read. Previously the -1 returned at end of
    // stream was passed straight to mark(), silently decrementing the batched byte count.
    if (readBytes > 0) {
      this.meter.mark(readBytes);
    }
    return readBytes;
  }

  @Override
  public Meter getBytesProcessedMeter() {
    return this.meter.getUnderlyingMeter();
  }
}
| 4,223 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/io/FilterStreamUnpacker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import java.io.FilterInputStream;
import java.io.FilterOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Field;
/**
 * Contains utilities for getting underlying streams to a filter stream.
 */
public class FilterStreamUnpacker {

  /** Utility class: static methods only, not instantiable. */
  private FilterStreamUnpacker() {
  }

  /**
   * Finds the underlying {@link InputStream} to a {@link FilterInputStream}. Note this is not always possible due
   * to security restrictions of the JVM.
   * @throws IllegalAccessException If security policies of the JVM prevent unpacking of the {@link FilterInputStream}.
   */
  public static InputStream unpackFilterInputStream(FilterInputStream is) throws IllegalAccessException {
    try {
      Field field = FilterInputStream.class.getDeclaredField("in");
      field.setAccessible(true);
      return (InputStream) field.get(is);
    } catch (NoSuchFieldException nsfe) {
      // FilterInputStream.in is part of the public JDK source; absence indicates a broken runtime.
      throw new RuntimeException(nsfe);
    }
  }

  /**
   * Finds the underlying {@link OutputStream} to a {@link FilterOutputStream}. Note this is not always possible due
   * to security restrictions of the JVM.
   * @throws IllegalAccessException If security policies of the JVM prevent unpacking of the {@link FilterOutputStream}.
   */
  public static OutputStream unpackFilterOutputStream(FilterOutputStream os) throws IllegalAccessException {
    try {
      Field field = FilterOutputStream.class.getDeclaredField("out");
      field.setAccessible(true);
      return (OutputStream) field.get(os);
    } catch (NoSuchFieldException nsfe) {
      // FilterOutputStream.out is part of the public JDK source; absence indicates a broken runtime.
      throw new RuntimeException(nsfe);
    }
  }
}
| 4,224 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/io/CloseableHttpConn.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import java.io.Closeable;
import java.io.IOException;
import java.net.HttpURLConnection;
import lombok.AllArgsConstructor;
/**
 * Wraps a {@link HttpURLConnection} into a {@link Closeable} object.
 */
@AllArgsConstructor
public class CloseableHttpConn implements Closeable {

  private final HttpURLConnection connection;

  /** Disconnects the wrapped connection; a no-op when no connection was supplied. */
  @Override
  public void close()
      throws IOException {
    if (this.connection == null) {
      return;
    }
    this.connection.disconnect();
  }
}
| 4,225 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/io/BatchedMeterDecorator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import com.codahale.metrics.Meter;
import org.apache.gobblin.util.Decorator;
import javax.annotation.concurrent.NotThreadSafe;
/**
 * A decorator to a {@link Meter} that batches updates for performance.
 */
@NotThreadSafe
public class BatchedMeterDecorator implements Decorator {

  private final Meter underlying;
  // Flush the accumulated count into the underlying meter once it exceeds this many marks.
  private final int updateFrequency;
  // Marks accumulated since the last flush. Widened from int to long so large mark(long)
  // values can no longer be silently truncated/overflowed by the compound assignment.
  private long count;

  public BatchedMeterDecorator(Meter underlying, int updateFrequency) {
    this.underlying = underlying;
    this.updateFrequency = updateFrequency;
    this.count = 0;
  }

  /** Records one event, flushing to the underlying {@link Meter} when the batch is full. */
  public void mark() {
    this.count++;
    if (this.count > this.updateFrequency) {
      updateUnderlying();
    }
  }

  /** Records {@code n} events; non-positive values are ignored (marking negative bytes is meaningless). */
  public void mark(long n) {
    if (n <= 0) {
      return;
    }
    this.count += n;
    if (this.count > this.updateFrequency) {
      updateUnderlying();
    }
  }

  // Flush the batched count into the underlying meter and reset the batch.
  private void updateUnderlying() {
    this.underlying.mark(this.count);
    this.count = 0;
  }

  @Override
  public Object getDecoratedObject() {
    return this.underlying;
  }

  /**
   * @return the decorated {@link Meter}. Its reading may lag by up to one un-flushed batch.
   */
  public Meter getUnderlyingMeter() {
    return this.underlying;
  }
}
| 4,226 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/binpacking/FieldWeighter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.binpacking;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.WorkUnitWeighter;
import lombok.AllArgsConstructor;
/**
 * A {@link WorkUnitWeighter} implementation that parses the weight from a field in the work unit.
 */
@AllArgsConstructor
public class FieldWeighter implements WorkUnitWeighter {

  // Name of the work-unit property holding the weight.
  private final String field;

  /** @return the weight stored in the configured property of the given work unit. */
  @Override
  public long weight(WorkUnit workUnit) {
    long parsedWeight = workUnit.getPropAsLong(this.field);
    return parsedWeight;
  }
}
| 4,227 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/binpacking/WorstFitDecreasingBinPacking.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.binpacking;
import java.io.Serializable;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.ExecutionException;
import com.google.common.base.Predicate;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.MinMaxPriorityQueue;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.WorkUnitBinPacker;
import org.apache.gobblin.source.workunit.WorkUnitWeighter;
import javax.annotation.Nullable;
import javax.annotation.OverridingMethodsMustInvokeSuper;
import lombok.AllArgsConstructor;
/**
 * Implements a bin packing algorithm similar to worst-fit decreasing bin packing. Given a input list of {@link WorkUnit}s,
 * a {@link WorkUnitWeighter} function, and a maximum weight per {@link MultiWorkUnit}, packs the input work units into
 * {@link MultiWorkUnit}s.
 *
 * <p>
 * The algorithm is as follows:
 * * Sort work units decreasing by weight.
 * * Compute the minimum number of {@link MultiWorkUnit}s needed, and create them.
 * * For each work unit, find the {@link MultiWorkUnit} with the largest space available, if it fits, add it there,
 * otherwise, create a new {@link MultiWorkUnit} and add the work unit there.
 * </p>
 */
@AllArgsConstructor
public class WorstFitDecreasingBinPacking implements WorkUnitBinPacker {

  // Property stored on each MultiWorkUnit tracking the total weight of the work units packed into it.
  public static final String TOTAL_MULTI_WORK_UNIT_WEIGHT = "binpacking.multiWorkUnit.totalWeight";

  // Capacity of each bin. Non-positive disables packing entirely (input returned unchanged).
  private final long maxWeightPerUnit;

  @Override
  @OverridingMethodsMustInvokeSuper
  public List<WorkUnit> pack(List<WorkUnit> workUnitsIn, WorkUnitWeighter weighter) {
    if (this.maxWeightPerUnit <= 0) { // just return the input
      return workUnitsIn;
    }
    // Defensive copy: the list is sorted in place below.
    List<WorkUnit> workUnits = Lists.newArrayList(workUnitsIn);
    long smallUnitSize = 0; // total size of work units smaller than maxWeightPerUnit
    int largeUnits = 0; // number of work units larger than maxWeightPerUnit
    for (WorkUnit workUnit : workUnits) {
      long weight = weighter.weight(workUnit);
      if (weight <= this.maxWeightPerUnit) {
        smallUnitSize += weight;
      } else {
        largeUnits++;
      }
    }
    // Lower-bound estimate of bins needed: one per oversized unit plus
    // ceil(smallUnitSize / maxWeightPerUnit), capped by the number of work units.
    int estimateByWeight = largeUnits + (int) ((smallUnitSize - 1) / this.maxWeightPerUnit) + 1;
    int estimatedMultiWorkUnits = Math.min(estimateByWeight, workUnits.size());
    // Priority queue ordered by current bin weight: peek() yields the lightest bin,
    // i.e. the one with the most remaining capacity (the "worst fit").
    MinMaxPriorityQueue<MultiWorkUnit> pQueue = MinMaxPriorityQueue.orderedBy(new MultiWorkUnitComparator()).create();
    for (int i = 0; i < estimatedMultiWorkUnits; i++) {
      pQueue.add(MultiWorkUnit.createEmpty());
    }
    // Heaviest first: the "decreasing" part of worst-fit decreasing.
    Collections.sort(workUnits, Collections.reverseOrder(new WeightComparator(weighter)));
    for (WorkUnit workUnit : workUnits) {
      MultiWorkUnit lightestMultiWorkUnit = pQueue.peek();
      // Zero/negative-weight work units are counted as weight 1 so they still occupy capacity.
      long weight = Math.max(1, weighter.weight(workUnit));
      long multiWorkUnitWeight = getMultiWorkUnitWeight(lightestMultiWorkUnit);
      if (multiWorkUnitWeight == 0 || (weight + multiWorkUnitWeight <= this.maxWeightPerUnit
          && weight + multiWorkUnitWeight > multiWorkUnitWeight)) { // check for overflow
        // if it fits, add it to lightest work unit
        addToMultiWorkUnit(lightestMultiWorkUnit, workUnit, weight);
        // Re-insert so the queue re-orders the bin under its new weight.
        pQueue.poll();
        pQueue.add(lightestMultiWorkUnit);
      } else {
        // if doesn't fit in lightest multi work unit, create a new work unit for it
        MultiWorkUnit newMultiWorkUnit = MultiWorkUnit.createEmpty();
        addToMultiWorkUnit(newMultiWorkUnit, workUnit, weight);
        pQueue.add(newMultiWorkUnit);
      }
    }
    // Drop bins that remained empty (the initial estimate may overshoot).
    return Lists.<WorkUnit> newArrayList(Iterables.filter(pQueue, new Predicate<MultiWorkUnit>() {
      @Override
      @edu.umd.cs.findbugs.annotations.SuppressWarnings(
          value="NP_PARAMETER_MUST_BE_NONNULL_BUT_MARKED_AS_NULLABLE",
          justification="Allowing nullable values")
      public boolean apply(@Nullable MultiWorkUnit input) {
        return getMultiWorkUnitWeight(input) > 0;
      }
    }));
  }

  /**
   * Compares {@link WorkUnit}s by weight, caching weights in a soft-valued cache because
   * sorting invokes the comparator (and hence the weigher) repeatedly per work unit.
   */
  private static class WeightComparator implements Comparator<WorkUnit> {
    private final WorkUnitWeighter weighter;
    private final LoadingCache<WorkUnit, Long> weightCache;
    public WeightComparator(final WorkUnitWeighter weighter) {
      this.weighter = weighter;
      this.weightCache = CacheBuilder.newBuilder().softValues().build(new CacheLoader<WorkUnit, Long>() {
        @Override
        public Long load(WorkUnit key) throws Exception {
          return WeightComparator.this.weighter.weight(key);
        }
      });
    }
    @Override
    public int compare(WorkUnit o1, WorkUnit o2) {
      try {
        return Long.compare(this.weightCache.get(o1), this.weightCache.get(o2));
      } catch (ExecutionException ee) {
        // The weigher threw while loading; a partial sort order would be meaningless.
        throw new RuntimeException(ee);
      }
    }
  }

  // Adds the work unit to the bin and increments the bin's recorded total weight.
  private static void addToMultiWorkUnit(MultiWorkUnit multiWorkUnit, WorkUnit workUnit, long weight) {
    multiWorkUnit.addWorkUnit(workUnit);
    setMultiWorkUnitWeight(multiWorkUnit, getMultiWorkUnitWeight(multiWorkUnit) + weight);
  }

  /** Orders {@link MultiWorkUnit}s by their recorded total weight. */
  private static class MultiWorkUnitComparator implements Comparator<MultiWorkUnit>, Serializable {
    private static final long serialVersionUID = 1L;
    @Override
    public int compare(MultiWorkUnit o1, MultiWorkUnit o2) {
      return Long.compare(getMultiWorkUnitWeight(o1), getMultiWorkUnitWeight(o2));
    }
  }

  // Reads the bin's total weight property; a bin without the property is treated as empty (0).
  private static long getMultiWorkUnitWeight(MultiWorkUnit multiWorkUnit) {
    return multiWorkUnit.contains(TOTAL_MULTI_WORK_UNIT_WEIGHT)
        ? multiWorkUnit.getPropAsLong(TOTAL_MULTI_WORK_UNIT_WEIGHT) : 0;
  }

  private static void setMultiWorkUnitWeight(MultiWorkUnit multiWorkUnit, long weight) {
    multiWorkUnit.setProp(TOTAL_MULTI_WORK_UNIT_WEIGHT, Long.toString(weight));
  }
}
| 4,228 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/FileStatusEntry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import com.google.common.base.Optional;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
 * A {@link FileStatus}-derived entry that caches the status of a path and detects changes to it
 * via {@link #refresh(Path)} (existence, modification time, directory-ness, length). Entries can
 * form a tree through the parent/children links — presumably for recursive directory monitoring;
 * confirm against callers.
 */
public class FileStatusEntry extends FileStatus {
  static final FileStatusEntry[] EMPTY_ENTRIES = new FileStatusEntry[0];
  private final FileStatusEntry parent;
  private FileStatusEntry[] children;
  // Whether the path existed the last time refresh() was called.
  private boolean exists;
  private final FileSystem fs;
  // Cached status; absent when the path has disappeared. NOTE(review): public and mutable —
  // several accessors below call .get() on it and throw IllegalStateException when absent.
  public Optional<FileStatus> _fileStatus;

  /**
   * Creates a root entry for the given path, resolving its {@link FileSystem} from a fresh
   * {@link Configuration}.
   * @throws IOException if the path's status cannot be fetched.
   */
  public FileStatusEntry(final Path path)
      throws IOException {
    this(null, path, path.getFileSystem(new Configuration()));
  }

  private FileStatusEntry(final FileStatusEntry parent, final Path path, FileSystem fs)
      throws IOException {
    if (path == null) {
      throw new IllegalArgumentException("Path is missing");
    }
    this.parent = parent;
    this.fs = fs;
    this._fileStatus = Optional.fromNullable(this.fs.getFileStatus(path));
  }

  /**
   * Re-fetches the status of {@code path} and reports whether it changed since the last call.
   * @return true if the path appeared, disappeared, or changed modification time, type, or length.
   * @throws IOException on file system errors other than the path not being found.
   */
  public boolean refresh(final Path path)
      throws IOException {
    if (_fileStatus.isPresent()) {
      Optional<FileStatus> oldStatus = this._fileStatus;
      try {
        this._fileStatus = Optional.of(this.fs.getFileStatus(path));
        // Always true on this branch since Optional.of() never yields absent.
        this.exists = this._fileStatus.isPresent();
        return (oldStatus.isPresent() != this._fileStatus.isPresent()
            || oldStatus.get().getModificationTime() != this._fileStatus.get().getModificationTime()
            || oldStatus.get().isDirectory() != this._fileStatus.get().isDirectory()
            || oldStatus.get().getLen() != this._fileStatus.get().getLen());
      } catch (FileNotFoundException e) {
        // The path was deleted since the last refresh: a change.
        _fileStatus = Optional.absent();
        this.exists = false;
        return true;
      }
    } else {
      // Previously absent: check whether the path has (re)appeared.
      // NOTE(review): builds a fresh Configuration/FileSystem for the existence check rather
      // than reusing this.fs — presumably unintentional; confirm before changing.
      if (path.getFileSystem(new Configuration()).exists(path)) {
        _fileStatus = Optional.of(this.fs.getFileStatus(path));
        return true;
      } else {
        return false;
      }
    }
  }

  /**
   * Create a new child instance.
   * <p>
   * Custom implementations should override this method to return
   * a new instance of the appropriate type.
   *
   * @param path The child file
   * @return a new child instance
   */
  public FileStatusEntry newChildInstance(final Path path)
      throws IOException {
    return new FileStatusEntry(this, path, this.fs);
  }

  /**
   * Return the parent entry.
   *
   * @return the parent entry
   */
  public FileStatusEntry getParent() {
    return parent;
  }

  /**
   * Return the level
   *
   * @return the level (0 for a root entry, parent's level + 1 otherwise)
   */
  public int getLevel() {
    return parent == null ? 0 : parent.getLevel() + 1;
  }

  /**
   * Return the directory's files.
   *
   * @return This directory's files or an empty
   * array if the file is not a directory or the
   * directory is empty
   */
  public FileStatusEntry[] getChildren() {
    return children != null ? children : EMPTY_ENTRIES;
  }

  /**
   * Set the directory's files.
   *
   * @param children This directory's files, may be null
   */
  public void setChildren(final FileStatusEntry[] children) {
    this.children = children;
  }

  /**
   * Indicate whether the file existed the last time it
   * was checked.
   *
   * @return whether the file existed
   */
  public boolean isExists() {
    return exists;
  }

  /**
   * Return the path from the instance FileStatus variable
   * NOTE(review): throws IllegalStateException when the cached status is absent (deleted path).
   * @return
   */
  public Path getPath() {
    return _fileStatus.get().getPath();
  }

  /**
   * Return whether the path is a directory from the instance FileStatus variable.
   * NOTE(review): throws IllegalStateException when the cached status is absent (deleted path).
   * @return
   */
  public boolean isDirectory() {
    return _fileStatus.get().isDirectory();
  }

  /** Compare if this object is equal to another object
   * @param o the object to be compared.
   * @return true if two file status has the same path name; false if not.
   */
  // NOTE(review): delegates to FileStatus.equals on the cached statuses; throws
  // IllegalStateException if either side's cached status is absent.
  @Override
  public boolean equals(Object o) {
    if (o == null || this.getClass() != o.getClass()) {
      return false;
    }
    FileStatusEntry other = (FileStatusEntry) o;
    return this._fileStatus.get().equals(other._fileStatus.get());
  }

  /**
   * Returns a hash code value for the object, which is defined as
   * the hash code of the path name.
   *
   * @return a hash code value for the path name.
   */
  @Override
  public int hashCode() {
    return getPath().hashCode();
  }
}
| 4,229 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/ThrottledFileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;
import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.ScopeType;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.limiter.Limiter;
import org.apache.gobblin.util.limiter.NotEnoughPermitsException;
import org.apache.gobblin.util.limiter.broker.SharedLimiterFactory;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
/**
* A {@link FileSystemInstrumentation} for throttling calls to the underlying {@link FileSystem} using the input
* {@link Limiter}.
*/
public class ThrottledFileSystem extends FileSystemInstrumentation {
  /**
   * Factory for {@link ThrottledFileSystem}. Wraps a {@link FileSystem} so that each operation
   * acquires permits from a broker-provided {@link Limiter} keyed by the file system URI.
   */
  public static class Factory<S extends ScopeType<S>> extends FileSystemInstrumentationFactory<S> {
    // Optional config key naming the service on whose behalf permits are acquired.
    private static final String SERVICE_NAME_CONF_KEY = "gobblin.broker.filesystem.limiterServiceName";

    @Override
    public FileSystem instrumentFileSystem(FileSystem fs, SharedResourcesBroker<S> broker,
        ConfigView<S, FileSystemKey> config) {
      try {
        // Defaults to the empty string when no service name is configured.
        String serviceName = ConfigUtils.getString(config.getConfig(), SERVICE_NAME_CONF_KEY, "");
        Limiter limiter = broker.getSharedResource(new SharedLimiterFactory<S>(), new FileSystemLimiterKey(config.getKey().getUri()));
        return new ThrottledFileSystem(fs, limiter, serviceName);
      } catch (NotConfiguredException nce) {
        // A limiter must be configured for this file system URI; surface misconfiguration early.
        throw new RuntimeException(nce);
      }
    }
  }
/**
* Listing operations will use 1 permit per this many listed elements.
*/
public static final int LISTING_FILES_PER_PERMIT = 100;
private final Limiter limiter;
private final String serviceName;
public ThrottledFileSystem(FileSystem fs, Limiter limiter, String serviceName) {
super(fs);
this.limiter = limiter;
this.serviceName = serviceName;
}
@Override
public boolean delete(Path path) throws IOException {
return this.delete(path, true);
}
@Override
public boolean delete(Path path, boolean recursive) throws IOException {
this.acquirePermit("delete " + path);
return super.delete(path, recursive);
}
@Override
public boolean exists(Path path) throws IOException {
this.acquirePermit("exists " + path);
return super.exists(path);
}
@Override
public FileStatus getFileStatus(Path path) throws IOException {
this.acquirePermit("getFileStatus " + path);
return super.getFileStatus(path);
}
@Override
public FileStatus[] globStatus(Path pathPattern) throws IOException {
FileStatus[] statuses = super.globStatus(pathPattern);
if (statuses == null) {
acquirePermit("globStatus " + pathPattern);
} else {
acquirePermits(statuses.length / LISTING_FILES_PER_PERMIT + 1, "globStatus " + pathPattern);
}
return statuses;
}
@Override
public FileStatus[] listStatus(Path path) throws IOException {
FileStatus[] statuses = super.listStatus(path);
if (statuses == null) {
acquirePermit("listStatus " + path);
} else {
acquirePermits(statuses.length / LISTING_FILES_PER_PERMIT + 1, "listStatus " + path);
}
return statuses;
}
@Override
public FileStatus[] listStatus(Path path, PathFilter filter) throws IOException {
CountingPathFilterDecorator decoratedFilter = new CountingPathFilterDecorator(filter);
FileStatus[] statuses = super.listStatus(path, decoratedFilter);
if (statuses == null) {
acquirePermit("listStatus " + path);
} else {
acquirePermits(decoratedFilter.getPathsProcessed().get() / LISTING_FILES_PER_PERMIT + 1, "listStatus " + path);
}
return statuses;
}
@Override
public boolean mkdirs(Path path, FsPermission permission) throws IOException {
this.acquirePermit("mkdirs " + path);
return super.mkdirs(path, permission);
}
@Override
public boolean rename(Path path0, Path path1) throws IOException {
this.acquirePermit("rename " + path0);
return HadoopUtils.renamePathHandleLocalFSRace(this.underlyingFs, path0, path1);
}
@Override
public FSDataOutputStream append(Path path, int bufferSize, Progressable progress) throws IOException {
this.acquirePermit("append " + path);
return super.append(path, bufferSize, progress);
}
@Override
public FSDataOutputStream create(Path path, FsPermission permission, boolean overwrite, int bufferSize,
short replication, long blockSize, Progressable progress) throws IOException {
this.acquirePermit("create " + path);
return super.create(path, permission, overwrite, bufferSize, replication, blockSize, progress);
}
@Override
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
this.acquirePermit("open " + path);
return super.open(path, bufferSize);
}
private void acquirePermit(String op) throws IOException {
acquirePermits(1, op);
}
private void acquirePermits(int permits, String op) throws IOException {
try {
Closeable closeable = getRateLimiter().acquirePermits(permits);
if (closeable == null) {
throw new NotEnoughPermitsException(op);
}
} catch (InterruptedException e) {
throw new NotEnoughPermitsException(op, e);
}
}
protected Limiter getRateLimiter() {
return this.limiter;
}
public String getServiceName() {
return this.serviceName;
}
@Override
public void close() throws IOException {
getRateLimiter().stop();
super.close();
}
@RequiredArgsConstructor
private static class CountingPathFilterDecorator implements PathFilter {
private final PathFilter underlying;
@Getter
private final AtomicInteger pathsProcessed = new AtomicInteger();
@Override
public boolean accept(Path path) {
this.pathsProcessed.incrementAndGet();
return this.underlying.accept(path);
}
}
}
| 4,230 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/FileSystemInstrumentation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
/**
* A base class for {@link FileSystem} decorators used for {@link FileSystemInstrumentationFactory}s.
*/
/**
 * A base class for {@link FileSystem} decorators used for {@link FileSystemInstrumentationFactory}s.
 *
 * <p>Guarantees that {@link #onClose()} runs exactly once-ish: either when {@link #close()} is
 * called, or — if the instance was never closed — from a JVM shutdown hook, so instrumentation
 * data is not lost on abrupt termination.
 */
public class FileSystemInstrumentation extends FileSystemDecorator {

  // Volatile: read by the shutdown-hook thread without holding this instance's monitor.
  protected volatile boolean closed = false;

  // Retained so the hook can be de-registered on normal close; otherwise every instance would
  // leave a registered hook (and a strong reference to itself) behind for the JVM's lifetime.
  private final Thread shutdownHook;

  public FileSystemInstrumentation(FileSystem underlying) {
    super(underlying.getScheme(), underlying.getScheme());
    this.underlyingFs = underlying;
    this.shutdownHook = new Thread() {
      @Override
      public void run() {
        if (!FileSystemInstrumentation.this.closed) {
          onClose();
        }
      }
    };
    Runtime.getRuntime().addShutdownHook(this.shutdownHook);
  }

  @Override
  public synchronized void close() throws IOException {
    if (!this.closed) {
      onClose();
      this.closed = true;
      try {
        // De-register the hook so closed instances do not accumulate until JVM exit.
        Runtime.getRuntime().removeShutdownHook(this.shutdownHook);
      } catch (IllegalStateException ignored) {
        // The JVM is already shutting down; the hook will run and observe closed == true.
      }
    }
    super.close();
  }

  /**
   * A method called when the {@link FileSystem} is being closed or when the JVM is shutting down.
   * Useful for writing out information about the instrumentation.
   */
  protected void onClose() {
  }
}
| 4,231 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/PathAlterationObserver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Map;
import org.apache.commons.io.IOCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import com.google.common.collect.Maps;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class PathAlterationObserver {
private final Map<PathAlterationListener, PathAlterationListener> listeners = Maps.newConcurrentMap();
private final FileStatusEntry rootEntry;
private final PathFilter pathFilter;
private final Comparator<Path> comparator;
private final FileSystem fs;
private final Path[] EMPTY_PATH_ARRAY = new Path[0];
private boolean changeApplied = false;
/**
* Final processing.
*/
public void destroy() {
}
/**
* Construct an observer for the specified directory.
*
* @param directoryName the name of the directory to observe
*/
public PathAlterationObserver(final String directoryName)
throws IOException {
this(new Path(directoryName));
}
/**
* Construct an observer for the specified directory and file filter.
*
* @param directoryName the name of the directory to observe
* @param pathFilter The file filter or null if none
*/
public PathAlterationObserver(final String directoryName, final PathFilter pathFilter)
throws IOException {
this(new Path(directoryName), pathFilter);
}
/**
* Construct an observer for the specified directory.
*
* @param directory the directory to observe
*/
public PathAlterationObserver(final Path directory)
throws IOException {
this(directory, null);
}
/**
* Construct an observer for the specified directory and file filter.
*
* @param directory the directory to observe
* @param pathFilter The file filter or null if none
*/
public PathAlterationObserver(final Path directory, final PathFilter pathFilter)
throws IOException {
this(new FileStatusEntry(directory), pathFilter);
}
/**
* The comparison between path is always case-sensitive in this general file system context.
*/
public PathAlterationObserver(final FileStatusEntry rootEntry, final PathFilter pathFilter)
throws IOException {
if (rootEntry == null) {
throw new IllegalArgumentException("Root entry is missing");
}
if (rootEntry.getPath() == null) {
throw new IllegalArgumentException("Root directory is missing");
}
this.rootEntry = rootEntry;
this.pathFilter = pathFilter;
this.fs = rootEntry.getPath().getFileSystem(new Configuration());
// By default, the comparsion is case sensitive.
this.comparator = new Comparator<Path>() {
@Override
public int compare(Path o1, Path o2) {
return IOCase.SENSITIVE.checkCompareTo(o1.toUri().toString(), o2.toUri().toString());
}
};
}
/**
* Add a file system listener.
*
* @param listener The file system listener
*/
public void addListener(final PathAlterationListener listener) {
if (listener != null) {
this.listeners.put(listener, new ExceptionCatchingPathAlterationListenerDecorator(listener));
}
}
/**
* Remove a file system listener.
*
* @param listener The file system listener
*/
public void removeListener(final PathAlterationListener listener) {
if (listener != null) {
this.listeners.remove(listener);
}
}
/**
* Returns the set of registered file system listeners.
*
* @return The file system listeners
*/
public Iterable<PathAlterationListener> getListeners() {
return listeners.keySet();
}
/**
* Initialize the observer.
* @throws IOException if an error occurs
*/
public void initialize() throws IOException {
rootEntry.refresh(rootEntry.getPath());
final FileStatusEntry[] children = doListPathsEntry(rootEntry.getPath(), rootEntry);
rootEntry.setChildren(children);
}
/**
* Check whether the file and its children have been created, modified or deleted.
*/
public synchronized void checkAndNotify()
throws IOException {
// If any files or directories are modified this flag will be set to true
this.changeApplied = false;
/* fire onStart() */
for (final PathAlterationListener listener : listeners.values()) {
listener.onStart(this);
}
/* fire directory/file events */
final Path rootPath = rootEntry.getPath();
if (fs.exists(rootPath)) {
// Current existed.
checkAndNotify(rootEntry, rootEntry.getChildren(), listPaths(rootPath));
} else if (rootEntry.isExists()) {
// Existed before and not existed now.
checkAndNotify(rootEntry, rootEntry.getChildren(), EMPTY_PATH_ARRAY);
} else {
// Didn't exist and still doesn't
}
if (this.changeApplied) {
for (final PathAlterationListener listener : listeners.values()) {
// Fire onCheckDetectedChange to notify when one check contains any number of changes
listener.onCheckDetectedChange();
}
}
/* fire onStop() */
for (final PathAlterationListener listener : listeners.values()) {
listener.onStop(this);
}
}
/**
* Compare two file lists for files which have been created, modified or deleted.
*
* @param parent The parent entry
* @param previous The original list of paths
* @param currentPaths The current list of paths
*/
private synchronized void checkAndNotify(final FileStatusEntry parent, final FileStatusEntry[] previous, final Path[] currentPaths)
throws IOException {
int c = 0;
final FileStatusEntry[] current =
currentPaths.length > 0 ? new FileStatusEntry[currentPaths.length] : FileStatusEntry.EMPTY_ENTRIES;
for (final FileStatusEntry previousEntry : previous) {
while (c < currentPaths.length && comparator.compare(previousEntry.getPath(), currentPaths[c]) > 0) {
current[c] = createPathEntry(parent, currentPaths[c]);
doCreate(current[c]);
c++;
}
if (c < currentPaths.length && comparator.compare(previousEntry.getPath(), currentPaths[c]) == 0) {
doMatch(previousEntry, currentPaths[c]);
checkAndNotify(previousEntry, previousEntry.getChildren(), listPaths(currentPaths[c]));
current[c] = previousEntry;
c++;
} else {
checkAndNotify(previousEntry, previousEntry.getChildren(), EMPTY_PATH_ARRAY);
doDelete(previousEntry);
}
}
for (; c < currentPaths.length; c++) {
current[c] = createPathEntry(parent, currentPaths[c]);
doCreate(current[c]);
}
parent.setChildren(current);
}
/**
* Create a new FileStatusEntry for the specified file.
*
* @param parent The parent file entry
* @param childPath The file to create an entry for
* @return A new file entry
*/
private FileStatusEntry createPathEntry(final FileStatusEntry parent, final Path childPath)
throws IOException {
final FileStatusEntry entry = parent.newChildInstance(childPath);
entry.refresh(childPath);
final FileStatusEntry[] children = doListPathsEntry(childPath, entry);
entry.setChildren(children);
return entry;
}
/**
* List the path in the format of FileStatusEntry array
* @param path The path to list files for
* @param entry the parent entry
* @return The child files
*/
private FileStatusEntry[] doListPathsEntry(Path path, FileStatusEntry entry)
throws IOException {
final Path[] paths = listPaths(path);
final FileStatusEntry[] children =
paths.length > 0 ? new FileStatusEntry[paths.length] : FileStatusEntry.EMPTY_ENTRIES;
for (int i = 0; i < paths.length; i++) {
children[i] = createPathEntry(entry, paths[i]);
}
return children;
}
/**
* Fire directory/file created events to the registered listeners.
*
* @param entry The file entry
*/
protected synchronized void doCreate(final FileStatusEntry entry) {
this.changeApplied = true;
for (final PathAlterationListener listener : listeners.values()) {
if (entry.isDirectory()) {
listener.onDirectoryCreate(entry.getPath());
} else {
listener.onFileCreate(entry.getPath());
}
}
final FileStatusEntry[] children = entry.getChildren();
for (final FileStatusEntry aChildren : children) {
doCreate(aChildren);
}
}
/**
* Fire directory/file change events to the registered listeners.
*
* @param entry The previous file system entry
* @param path The current file
*/
private synchronized void doMatch(final FileStatusEntry entry, final Path path)
throws IOException {
if (entry.refresh(path)) {
this.changeApplied = true;
for (final PathAlterationListener listener : listeners.values()) {
if (entry.isDirectory()) {
listener.onDirectoryChange(path);
} else {
listener.onFileChange(path);
}
}
}
}
/**
* Fire directory/file delete events to the registered listeners.
*
* @param entry The file entry
*/
private synchronized void doDelete(final FileStatusEntry entry) {
this.changeApplied = true;
for (final PathAlterationListener listener : listeners.values()) {
if (entry.isDirectory()) {
listener.onDirectoryDelete(entry.getPath());
} else {
listener.onFileDelete(entry.getPath());
}
}
}
/**
* List the contents of a directory denoted by Path
*
* @param path The path(File Object in general file system) to list the contents of
* @return the directory contents or a zero length array if
* the empty or the file is not a directory
*/
private Path[] listPaths(final Path path)
throws IOException {
Path[] children = null;
ArrayList<Path> tmpChildrenPath = new ArrayList<>();
if (fs.isDirectory(path)) {
// Get children's path list.
FileStatus[] chiledrenFileStatus = pathFilter == null ? fs.listStatus(path) : fs.listStatus(path, pathFilter);
for (FileStatus childFileStatus : chiledrenFileStatus) {
tmpChildrenPath.add(childFileStatus.getPath());
}
children = tmpChildrenPath.toArray(new Path[tmpChildrenPath.size()]);
}
if (children == null) {
children = EMPTY_PATH_ARRAY;
}
if (comparator != null && children.length > 1) {
Arrays.sort(children, comparator);
}
return children;
}
}
| 4,232 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/FileSystemDecorator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
import org.apache.gobblin.util.Decorator;
import static org.apache.gobblin.util.filesystem.InstrumentedFileSystemUtils.*;
/**
* This is a decorator for {@link FileSystem} that allows optionally changing scheme.
*
* Note subclasses must set the underlying {@link FileSystem} at {@link #underlyingFs} as necessary.
*/
class FileSystemDecorator extends FileSystem implements Decorator {
  // Scheme this decorator presents to its callers (e.g. a virtual/replacement scheme).
  protected String replacementScheme;
  // Scheme the wrapped file system actually understands.
  protected String underlyingScheme;
  protected Configuration conf;
  // The wrapped file system; concrete subclasses must assign this before use (see initialize()).
  protected FileSystem underlyingFs;
  /**
   * @param replacementScheme scheme exposed to callers of this decorator
   * @param underlyingScheme scheme of the wrapped {@link FileSystem}
   */
  FileSystemDecorator(String replacementScheme, String underlyingScheme) {
    this.replacementScheme = replacementScheme;
    this.underlyingScheme = underlyingScheme;
  }
  /** Returns the wrapped {@link FileSystem}. */
  @Override
  public Object getDecoratedObject() {
    return this.underlyingFs;
  }
  @Override
  public String getScheme() {
    return this.replacementScheme;
  }
  /**
   * Initializes the wrapped file system with {@code uri} translated to the underlying scheme.
   * @throws IllegalStateException if the subclass has not assigned {@link #underlyingFs} yet
   */
  @Override
  public void initialize(URI uri, Configuration conf)
      throws IOException {
    if (this.underlyingFs == null) {
      throw new IllegalStateException("Underlying fs has not been defined.");
    }
    this.underlyingFs.initialize(replaceScheme(uri, this.replacementScheme, this.underlyingScheme), conf);
  }
  // Keeps a local copy of the configuration and forwards it once the wrapped FS exists.
  public void setConf(Configuration conf) {
    this.conf = conf;
    if (this.underlyingFs != null) {
      this.underlyingFs.setConf(conf);
    }
  }
  /** URI of the wrapped file system, re-expressed with the replacement scheme. */
  @Override
  public URI getUri() {
    return replaceScheme(this.underlyingFs.getUri(), this.underlyingScheme, this.replacementScheme);
  }
  public FileStatus getFileLinkStatus(Path f) throws java.io.IOException {
    return replaceScheme(this.underlyingFs.getFileLinkStatus(replaceScheme(f, this.replacementScheme, this.underlyingScheme)),
        this.underlyingScheme, this.replacementScheme);
  }
public AclStatus getAclStatus(Path path) throws IOException {
return this.underlyingFs.getAclStatus(path);
}
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
this.underlyingFs.setAcl(path, aclSpec);
}
public void modifyAclEntries(Path path, List<AclEntry> aclSpec) throws IOException {
this.underlyingFs.modifyAclEntries(path, aclSpec);
}
public FsStatus getStatus() throws java.io.IOException {
return this.underlyingFs.getStatus();
}
public FSDataOutputStream append(Path f) throws java.io.IOException {
return this.underlyingFs.append(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public short getReplication(Path src) throws java.io.IOException {
return this.underlyingFs.getReplication(replaceScheme(src, this.replacementScheme, this.underlyingScheme));
}
public void close() throws java.io.IOException {
this.underlyingFs.close();
}
public void setWriteChecksum(boolean writeChecksum) {
this.underlyingFs.setWriteChecksum(writeChecksum);
}
public FileChecksum getFileChecksum(Path f) throws java.io.IOException {
return this.underlyingFs.getFileChecksum(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public boolean isDirectory(Path f) throws java.io.IOException {
return this.underlyingFs.isDirectory(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public void createSymlink(Path target, Path link, boolean createParent)
throws IOException {
this.underlyingFs.createSymlink(replaceScheme(target, this.replacementScheme, this.underlyingScheme),
replaceScheme(link, this.replacementScheme, this.underlyingScheme), createParent);
}
public Path createSnapshot(Path path, String snapshotName) throws java.io.IOException {
return replaceScheme(this.underlyingFs.createSnapshot(replaceScheme(path, this.replacementScheme, this.underlyingScheme), snapshotName),
this.underlyingScheme, this.replacementScheme);
}
public FSDataOutputStream create(Path f, FsPermission permission, EnumSet<CreateFlag> flags, int bufferSize,
short replication, long blockSize, Progressable progress, Options.ChecksumOpt checksumOpt)
throws java.io.IOException {
return this.underlyingFs.create(replaceScheme(f, this.replacementScheme, this.underlyingScheme),
permission, flags, bufferSize, replication, blockSize, progress, checksumOpt);
}
public Path resolvePath(Path p) throws java.io.IOException {
return replaceScheme(this.underlyingFs.resolvePath(replaceScheme(p, this.replacementScheme, this.underlyingScheme)), this.underlyingScheme, this.replacementScheme);
}
public FileStatus[] listStatus(Path f) throws java.io.FileNotFoundException, java.io.IOException {
return replaceScheme(
this.underlyingFs.listStatus(replaceScheme(f, this.replacementScheme, this.underlyingScheme)),
this.underlyingScheme, this.replacementScheme);
}
public long getUsed() throws java.io.IOException {
return this.underlyingFs.getUsed();
}
public Configuration getConf() {
return this.underlyingFs.getConf();
}
public FSDataOutputStream create(Path f, Progressable progress) throws java.io.IOException {
return this.underlyingFs.create(replaceScheme(f, this.replacementScheme, this.underlyingScheme), progress);
}
public boolean isFile(Path f) throws java.io.IOException {
return this.underlyingFs.isFile(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public Path getWorkingDirectory() {
return replaceScheme(this.underlyingFs.getWorkingDirectory(), this.underlyingScheme, this.replacementScheme);
}
public FsServerDefaults getServerDefaults() throws java.io.IOException {
return this.underlyingFs.getServerDefaults();
}
public void copyToLocalFile(boolean delSrc, Path src, Path dst, boolean useRawLocalFileSystem)
throws java.io.IOException {
this.underlyingFs.copyToLocalFile(delSrc, replaceScheme(src, this.replacementScheme, this.underlyingScheme),
replaceScheme(dst, this.replacementScheme, this.underlyingScheme), useRawLocalFileSystem);
}
public FileStatus[] globStatus(Path pathPattern) throws IOException {
return replaceScheme(
this.underlyingFs.globStatus(replaceScheme(pathPattern, this.replacementScheme, this.underlyingScheme)),
this.underlyingScheme, this.replacementScheme);
}
public void setWorkingDirectory(Path new_dir) {
this.underlyingFs.setWorkingDirectory(replaceScheme(new_dir, this.replacementScheme, this.underlyingScheme));
}
public FileStatus[] listStatus(Path f, PathFilter filter) throws IOException {
return replaceScheme(
this.underlyingFs.listStatus(replaceScheme(f, this.replacementScheme, this.underlyingScheme), filter),
this.underlyingScheme, this.replacementScheme);
}
public String getName() {
return this.underlyingFs.getName();
}
public boolean createNewFile(Path f) throws java.io.IOException {
return this.underlyingFs.createNewFile(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public FileStatus[] listStatus(Path[] files) throws IOException {
return replaceScheme(
this.underlyingFs.listStatus(replaceScheme(files, this.replacementScheme, this.underlyingScheme)),
this.underlyingScheme, this.replacementScheme);
}
public boolean delete(Path f, boolean recursive) throws java.io.IOException {
return this.underlyingFs.delete(replaceScheme(f, this.replacementScheme, this.underlyingScheme), recursive);
}
public Path getLinkTarget(Path f) throws java.io.IOException {
return replaceScheme(this.underlyingFs.getLinkTarget(replaceScheme(f, this.replacementScheme, this.underlyingScheme)),
this.underlyingScheme, this.replacementScheme);
}
public void copyToLocalFile(boolean delSrc, Path src, Path dst) throws java.io.IOException {
this.underlyingFs.copyToLocalFile(delSrc, replaceScheme(src, this.replacementScheme, this.underlyingScheme),
replaceScheme(dst, this.replacementScheme, this.underlyingScheme));
}
public short getDefaultReplication() {
return this.underlyingFs.getDefaultReplication();
}
public Token<?> getDelegationToken(String renewer) throws java.io.IOException {
return this.underlyingFs.getDelegationToken(renewer);
}
public FsServerDefaults getServerDefaults(Path p) throws java.io.IOException {
return this.underlyingFs.getServerDefaults(replaceScheme(p, this.replacementScheme, this.underlyingScheme));
}
public FSDataOutputStream create(Path f, short replication) throws java.io.IOException {
return this.underlyingFs.create(replaceScheme(f, this.replacementScheme, this.underlyingScheme), replication);
}
public boolean mkdirs(Path f, FsPermission permission) throws java.io.IOException {
return this.underlyingFs.mkdirs(replaceScheme(f, this.replacementScheme, this.underlyingScheme), permission);
}
public BlockLocation[] getFileBlockLocations(Path p, long start, long len) throws java.io.IOException {
return this.underlyingFs.getFileBlockLocations(replaceScheme(p, this.replacementScheme, this.underlyingScheme),
start, len);
}
public void concat(Path trg, Path[] psrcs) throws java.io.IOException {
this.underlyingFs.concat(replaceScheme(trg, this.replacementScheme, this.underlyingScheme),
replaceScheme(psrcs, this.replacementScheme, this.underlyingScheme));
}
public Path getHomeDirectory() {
return replaceScheme(this.underlyingFs.getHomeDirectory(), this.underlyingScheme, this.replacementScheme);
}
public FileStatus getFileStatus(Path f) throws java.io.IOException {
return replaceScheme(this.underlyingFs.getFileStatus(replaceScheme(f, this.replacementScheme, this.underlyingScheme)),
this.underlyingScheme, this.replacementScheme);
}
public boolean supportsSymlinks() {
return this.underlyingFs.supportsSymlinks();
}
public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f)
throws java.io.FileNotFoundException, java.io.IOException {
return this.underlyingFs.listLocatedStatus(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public RemoteIterator<Path> listCorruptFileBlocks(Path path) throws java.io.IOException {
return this.underlyingFs.listCorruptFileBlocks(replaceScheme(path, this.replacementScheme, this.underlyingScheme));
}
public FSDataOutputStream create(Path f, short replication, Progressable progress) throws java.io.IOException {
return this.underlyingFs.create(replaceScheme(f, this.replacementScheme, this.underlyingScheme), replication, progress);
}
public boolean setReplication(Path src, short replication) throws java.io.IOException {
return this.underlyingFs.setReplication(replaceScheme(src, this.replacementScheme, this.underlyingScheme), replication);
}
public Path makeQualified(Path path) {
return replaceScheme(this.underlyingFs.makeQualified(replaceScheme(path, this.replacementScheme, this.underlyingScheme)), this.underlyingScheme, this.replacementScheme);
}
public FSDataOutputStream createNonRecursive(Path f, boolean overwrite, int bufferSize, short replication,
long blockSize, Progressable progress) throws java.io.IOException {
return this.underlyingFs.createNonRecursive(replaceScheme(f, this.replacementScheme, this.underlyingScheme),
overwrite, bufferSize, replication, blockSize, progress);
}
public long getBlockSize(Path f) throws java.io.IOException {
return this.underlyingFs.getBlockSize(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public short getDefaultReplication(Path path) {
return this.underlyingFs.getDefaultReplication(replaceScheme(path, this.replacementScheme, this.underlyingScheme));
}
public Token<?>[] addDelegationTokens(String renewer, Credentials credentials) throws java.io.IOException {
return this.underlyingFs.addDelegationTokens(renewer, credentials);
}
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws java.io.IOException {
return this.underlyingFs.create(replaceScheme(f, this.replacementScheme, this.underlyingScheme),
overwrite, bufferSize, replication, blockSize, progress);
}
public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws java.io.IOException {
return this.underlyingFs.append(replaceScheme(f, this.replacementScheme, this.underlyingScheme), bufferSize, progress);
}
public void copyToLocalFile(Path src, Path dst) throws java.io.IOException {
this.underlyingFs.copyToLocalFile(replaceScheme(src, this.replacementScheme, this.underlyingScheme),
replaceScheme(dst, this.replacementScheme, this.underlyingScheme));
}
public FSDataInputStream open(Path f, int bufferSize) throws java.io.IOException {
return this.underlyingFs.open(replaceScheme(f, this.replacementScheme, this.underlyingScheme), bufferSize);
}
public FileStatus[] listStatus(Path[] files, PathFilter filter) throws IOException {
return replaceScheme(
this.underlyingFs.listStatus(replaceScheme(files, this.replacementScheme, this.underlyingScheme), filter),
this.underlyingScheme, this.replacementScheme);
}
public long getLength(Path f) throws java.io.IOException {
return this.underlyingFs.getLength(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public void moveToLocalFile(Path src, Path dst) throws java.io.IOException {
this.underlyingFs.moveToLocalFile(replaceScheme(src, this.replacementScheme, this.underlyingScheme),
replaceScheme(dst, this.replacementScheme, this.underlyingScheme));
}
public FsStatus getStatus(Path p) throws java.io.IOException {
return this.underlyingFs.getStatus(replaceScheme(p, this.replacementScheme, this.underlyingScheme));
}
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, boolean overwrite, int bufferSize,
short replication, long blockSize, Progressable progress) throws java.io.IOException {
return this.underlyingFs.createNonRecursive(replaceScheme(f, this.replacementScheme, this.underlyingScheme),
permission, overwrite, bufferSize, replication, blockSize, progress);
}
  /** Delegates directly to the underlying file system; no paths are involved, so no scheme translation is needed. */
  public String getCanonicalServiceName() {
    return this.underlyingFs.getCanonicalServiceName();
  }
public boolean cancelDeleteOnExit(Path f) {
return this.underlyingFs.cancelDeleteOnExit(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path[] srcs, Path dst) throws java.io.IOException {
this.underlyingFs.copyFromLocalFile(delSrc, overwrite, replaceScheme(srcs, this.replacementScheme, this.underlyingScheme),
replaceScheme(dst, this.replacementScheme, this.underlyingScheme));
}
public void setPermission(Path p, FsPermission permission) throws java.io.IOException {
this.underlyingFs.setPermission(replaceScheme(p, this.replacementScheme, this.underlyingScheme), permission);
}
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws java.io.IOException {
return replaceScheme(this.underlyingFs.startLocalOutput(replaceScheme(fsOutputFile, this.replacementScheme, this.underlyingScheme),
replaceScheme(tmpLocalFile, this.replacementScheme, this.underlyingScheme)), this.underlyingScheme, this.replacementScheme);
}
public void copyFromLocalFile(Path src, Path dst) throws java.io.IOException {
this.underlyingFs.copyFromLocalFile(replaceScheme(src, this.replacementScheme, this.underlyingScheme),
replaceScheme(dst, this.replacementScheme, this.underlyingScheme));
}
public boolean delete(Path f) throws java.io.IOException {
return this.underlyingFs.delete(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
  /** Delegates directly to the underlying file system; the returned file systems are not scheme-translated. */
  public FileSystem[] getChildFileSystems() {
    return this.underlyingFs.getChildFileSystems();
  }
public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws java.io.IOException {
this.underlyingFs.copyFromLocalFile(delSrc, replaceScheme(src, this.replacementScheme, this.underlyingScheme),
replaceScheme(dst, this.replacementScheme, this.underlyingScheme));
}
public void setTimes(Path p, long mtime, long atime) throws java.io.IOException {
this.underlyingFs.setTimes(replaceScheme(p, this.replacementScheme, this.underlyingScheme), mtime, atime);
}
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) throws java.io.IOException {
return this.underlyingFs.getFileBlockLocations(replaceScheme(file, this.replacementScheme, this.underlyingScheme),
start, len);
}
public ContentSummary getContentSummary(Path f) throws java.io.IOException {
return this.underlyingFs.getContentSummary(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) throws java.io.IOException {
this.underlyingFs.renameSnapshot(replaceScheme(path, this.replacementScheme, this.underlyingScheme),
snapshotOldName, snapshotNewName);
}
public void moveFromLocalFile(Path[] srcs, Path dst) throws java.io.IOException {
this.underlyingFs.moveFromLocalFile(replaceScheme(srcs, this.replacementScheme, this.underlyingScheme),
replaceScheme(dst, this.replacementScheme, this.underlyingScheme));
}
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, EnumSet<CreateFlag> flags,
int bufferSize, short replication, long blockSize, Progressable progress) throws java.io.IOException {
return this.underlyingFs.createNonRecursive(replaceScheme(f, this.replacementScheme, this.underlyingScheme),
permission, flags, bufferSize, replication, blockSize, progress);
}
public boolean rename(Path src, Path dst) throws java.io.IOException {
return this.underlyingFs.rename(replaceScheme(src, this.replacementScheme, this.underlyingScheme),
replaceScheme(dst, this.replacementScheme, this.underlyingScheme));
}
public FSDataOutputStream create(Path f) throws java.io.IOException {
return this.underlyingFs.create(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public boolean mkdirs(Path f) throws java.io.IOException {
return this.underlyingFs.mkdirs(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize, short replication, long blockSize)
throws java.io.IOException {
return this.underlyingFs.create(replaceScheme(f, this.replacementScheme, this.underlyingScheme),
overwrite, bufferSize, replication, blockSize);
}
public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws java.io.IOException {
this.underlyingFs.completeLocalOutput(replaceScheme(fsOutputFile, this.replacementScheme, this.underlyingScheme),
replaceScheme(tmpLocalFile, this.replacementScheme, this.underlyingScheme));
}
public FSDataInputStream open(Path f) throws java.io.IOException {
return this.underlyingFs.open(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize, Progressable progress)
throws java.io.IOException {
return this.underlyingFs.create(replaceScheme(f, this.replacementScheme, this.underlyingScheme),
overwrite, bufferSize, progress);
}
  /** Delegates directly to the underlying file system; no paths are involved, so no scheme translation is needed. */
  public void setVerifyChecksum(boolean verifyChecksum) {
    this.underlyingFs.setVerifyChecksum(verifyChecksum);
  }
public FSDataOutputStream create(Path f, FsPermission permission, EnumSet<CreateFlag> flags, int bufferSize,
short replication, long blockSize, Progressable progress) throws java.io.IOException {
return this.underlyingFs.create(replaceScheme(f, this.replacementScheme, this.underlyingScheme),
permission, flags, bufferSize, replication, blockSize, progress);
}
  /** Delegates directly to the underlying file system; no paths are involved, so no scheme translation is needed. */
  public long getDefaultBlockSize() {
    return this.underlyingFs.getDefaultBlockSize();
  }
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws java.io.IOException {
this.underlyingFs.copyFromLocalFile(delSrc, overwrite, replaceScheme(src, this.replacementScheme, this.underlyingScheme),
replaceScheme(dst, this.replacementScheme, this.underlyingScheme));
}
public FSDataOutputStream create(Path f, boolean overwrite) throws java.io.IOException {
return this.underlyingFs.create(replaceScheme(f, this.replacementScheme, this.underlyingScheme), overwrite);
}
public long getDefaultBlockSize(Path f) {
return this.underlyingFs.getDefaultBlockSize(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public boolean exists(Path f) throws java.io.IOException {
return this.underlyingFs.exists(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public void deleteSnapshot(Path path, String snapshotName) throws java.io.IOException {
this.underlyingFs.deleteSnapshot(replaceScheme(path, this.replacementScheme, this.underlyingScheme), snapshotName);
}
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize) throws java.io.IOException {
return this.underlyingFs.create(replaceScheme(f, this.replacementScheme, this.underlyingScheme), overwrite, bufferSize);
}
public void setOwner(Path p, String username, String groupname) throws java.io.IOException {
this.underlyingFs.setOwner(replaceScheme(p, this.replacementScheme, this.underlyingScheme), username, groupname);
}
public RemoteIterator<LocatedFileStatus> listFiles(Path f, boolean recursive) throws java.io.IOException {
return this.underlyingFs.listFiles(replaceScheme(f, this.replacementScheme, this.underlyingScheme), recursive);
}
public FSDataOutputStream append(Path f, int bufferSize) throws java.io.IOException {
return this.underlyingFs.append(replaceScheme(f, this.replacementScheme, this.underlyingScheme), bufferSize);
}
public boolean deleteOnExit(Path f) throws java.io.IOException {
return this.underlyingFs.deleteOnExit(replaceScheme(f, this.replacementScheme, this.underlyingScheme));
}
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize,
short replication, long blockSize, Progressable progress) throws java.io.IOException {
return this.underlyingFs.create(replaceScheme(f, this.replacementScheme, this.underlyingScheme),
permission, overwrite, bufferSize, replication, blockSize, progress);
}
public void moveFromLocalFile(Path src, Path dst) throws java.io.IOException {
this.underlyingFs.moveFromLocalFile(replaceScheme(src, this.replacementScheme, this.underlyingScheme),
replaceScheme(dst, this.replacementScheme, this.underlyingScheme));
}
public FileStatus[] globStatus(Path pathPattern, PathFilter filter) throws java.io.IOException {
return replaceScheme(
this.underlyingFs.globStatus(replaceScheme(pathPattern, this.replacementScheme, this.underlyingScheme), filter),
this.underlyingScheme, this.replacementScheme);
}
}
| 4,233 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/DataFileVersionStrategy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import com.typesafe.config.Config;
import java.io.IOException;
import java.io.Serializable;
import java.util.Set;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
* An interface to set and get "versions" to data files.
*
* The version is a rough signature to the data contents. It allows data preserving functionality (like copy) to replicate
* the version independently of metadata with other semantics like file modification time.
*
* Examples where this might be useful is data syncing between two locations. Relying on modification times to detect
* data changes may lead to a feedback loop of copying: data gets created at location A at time 0,
* at time 1 data is copied to location B, sync mechanism might incorrectly believe that since mod time of location B
* is higher, it should be synced back to location A, etc.
*
* Required properties:
* - REPLICABLE: Two calls to `getVersion` on a file that has clearly not been modified must return the same version.
* - MONOTONOUS: The default version of a file is an increasing function of modification time.
* - CONSERVATIVE: If file f had its version last set to v, but the versioning implementation determines the file MIGHT
* have been modified and it chooses to return a version, it will return a value strictly larger than v.
*
* A common pattern to achieve monotonicity and conservativeness will be to invalidate the version of a data file
* if it is detected that the file was modified without updating the version (e.g. by a process which is unaware of versioning).
*
* @param <T> the type for the version objects. Must be comparable and serializable.
*/
public interface DataFileVersionStrategy<T extends Comparable<T> & Serializable> {
  /**
   * Characteristics a {@link DataFileVersionStrategy} may have.
   */
  enum Characteristic {
    /** The default version for a data file is the modtime of the file. Versions can in general be compared against modtimes. */
    COMPATIBLE_WITH_MODTIME,
    /** Version can be explicitly set. If false, {@code set*} methods will always return false. */
    SETTABLE,
    /** If a file has been modified and a {@code set*} method was not called, {@code getVersion} will throw an error. */
    STRICT
  }
  String DATA_FILE_VERSION_STRATEGY_KEY = "org.apache.gobblin.dataFileVersionStrategy";
  String DEFAULT_DATA_FILE_VERSION_STRATEGY = "modtime";
  /**
   * Instantiate a {@link DataFileVersionStrategy} according to the input configuration.
   *
   * @param fs the file system versions will be read from / written to
   * @param config configuration; {@value #DATA_FILE_VERSION_STRATEGY_KEY} selects the strategy alias
   * @throws IOException if the configured strategy alias cannot be resolved or its factory cannot be instantiated
   */
  static DataFileVersionStrategy instantiateDataFileVersionStrategy(FileSystem fs, Config config) throws IOException {
    String versionStrategy = ConfigUtils.getString(config, DATA_FILE_VERSION_STRATEGY_KEY, DEFAULT_DATA_FILE_VERSION_STRATEGY);
    ClassAliasResolver resolver = new ClassAliasResolver(DataFileVersionFactory.class);
    try {
      Class<? extends DataFileVersionFactory> klazz = resolver.resolveClass(versionStrategy);
      // Class.newInstance() is deprecated (it rethrows checked constructor exceptions undeclared);
      // invoking the no-arg constructor explicitly surfaces failures as ReflectiveOperationException.
      return klazz.getDeclaredConstructor().newInstance().createDataFileVersionStrategy(fs, config);
    } catch (ReflectiveOperationException roe) {
      throw new IOException(roe);
    }
  }
  /**
   * A Factory for {@link DataFileVersionStrategy}s.
   */
  interface DataFileVersionFactory<T extends Comparable<T> & Serializable> {
    /**
     * Build a {@link DataFileVersionStrategy} with the input configuration.
     */
    DataFileVersionStrategy<T> createDataFileVersionStrategy(FileSystem fs, Config config);
  }
  /**
   * Get the version of a path.
   */
  T getVersion(Path path) throws IOException;
  /**
   * Set the version of a path to a specific version (generally replicated from another path).
   *
   * @return false if the version is not settable.
   * @throws IOException if the version is settable but could not be set successfully.
   */
  boolean setVersion(Path path, T version) throws IOException;
  /**
   * Set the version of a path to a value automatically set by the versioning system. Note this call must respect the
   * monotonicity requirement.
   *
   * @return false if the version is not settable.
   * @throws IOException if the version is settable but could not be set successfully.
   */
  boolean setDefaultVersion(Path path) throws IOException;
  /**
   * @return The list of optional characteristics this {@link DataFileVersionStrategy} satisfies.
   */
  Set<Characteristic> applicableCharacteristics();
  /**
   * @return whether this implementation has the specified characteristic.
   */
  default boolean hasCharacteristic(Characteristic characteristic) {
    return applicableCharacteristics().contains(characteristic);
  }
}
| 4,234 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/FileSystemInstrumentationFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import org.apache.hadoop.fs.FileSystem;
import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.ScopeType;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
/**
* A factory that instruments {@link FileSystem}. Instrumentations are usually decorators of the underlying
* {@link FileSystem} that add some additional features to it. Implementations can extend {@link FileSystemInstrumentation}
* for convenience.
*/
public class FileSystemInstrumentationFactory<S extends ScopeType<S>> {
  /**
   * Return an instrumented version of the input {@link FileSystem}. Generally, this will return a decorator for the
   * input {@link FileSystem}. If the instrumentation will be a no-op (due to, for example, configuration), it is
   * recommended to return the input {@link FileSystem} directly for performance.
   */
  public FileSystem instrumentFileSystem(FileSystem fs, SharedResourcesBroker<S> broker, ConfigView<S, FileSystemKey> config) {
    // Base implementation is a no-op: subclasses discovered by FileSystemFactory (via ServiceLoader)
    // override this to decorate the file system.
    return fs;
  }
}
| 4,235 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/PathAlterationListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import org.apache.commons.io.monitor.FileAlterationListener;
import org.apache.hadoop.fs.Path;
/**
* A listener that receives events of general file system modifications.
* A generalized version Of FileAlterationListener interface using Path as the parameter for each method
* @see FileAlterationListener
*/
public interface PathAlterationListener {
  /** Invoked at the beginning of a poll, before any change events are fired. */
  void onStart(final PathAlterationObserver observer);
  /** Invoked when a new file is detected under a monitored path. */
  void onFileCreate(final Path path);
  /** Invoked when an existing file is detected as changed. */
  void onFileChange(final Path path);
  /** Invoked at the end of a poll, after all change events have been fired. */
  void onStop(final PathAlterationObserver observer);
  /** Invoked when a new directory is detected under a monitored path. */
  void onDirectoryCreate(final Path directory);
  /** Invoked when an existing directory is detected as changed. */
  void onDirectoryChange(final Path directory);
  /** Invoked when a previously observed directory is no longer present. */
  void onDirectoryDelete(final Path directory);
  /** Invoked when a previously observed file is no longer present. */
  void onFileDelete(final Path path);
  /**
   * Is invoked after any file or directory is modified, after processing the other change events
   * Is only invoked once per poll, which is found in checkAndNotify() from {@link PathAlterationObserver}
   */
  void onCheckDetectedChange();
}
| 4,236 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/InstrumentedFileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import static org.apache.gobblin.util.filesystem.InstrumentedFileSystemUtils.*;
/**
* A base {@link org.apache.hadoop.fs.FileSystem} that uses {@link FileSystemFactory} to create an underlying
* {@link org.apache.hadoop.fs.FileSystem} with available instrumentations. (see also {@link FileSystemInstrumentation}).
*/
public class InstrumentedFileSystem extends FileSystemDecorator {

  public InstrumentedFileSystem(String scheme, FileSystem underlyingFileSystem) {
    // Expose this wrapper under its own scheme while delegating to the wrapped file system's scheme.
    super(scheme, underlyingFileSystem.getScheme());
    this.underlyingFs = underlyingFileSystem;
  }

  /**
   * Initializes the wrapper: rewrites the instrumented URI back to the underlying scheme, points the
   * {@code fs.<underlyingScheme>.impl} key at the wrapped implementation class, and obtains the underlying
   * file system through {@link FileSystemFactory} using the implicit broker.
   */
  @Override
  public void initialize(URI uri, Configuration conf)
      throws IOException {
    this.replacementScheme = uri.getScheme();
    Configuration underlyingConf = new Configuration(conf);
    underlyingConf.set("fs." + this.underlyingScheme + ".impl", this.underlyingFs.getClass().getName());
    URI underlyingUri = replaceScheme(uri, this.replacementScheme, this.underlyingScheme);
    this.underlyingFs = FileSystemFactory.get(underlyingUri, underlyingConf, SharedResourcesBrokerFactory.getImplicitBroker());
  }
}
| 4,237 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/FileSystemKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Objects;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

import com.google.common.base.Strings;

import org.apache.gobblin.broker.iface.SharedResourceKey;

import lombok.Getter;
@Getter
public class FileSystemKey implements SharedResourceKey {
  // The resolved, canonical URI of the file system; this alone identifies the key.
  private final URI uri;
  private final Configuration configuration;

  public FileSystemKey(URI uri, Configuration configuration) {
    this.configuration = configuration;
    this.uri = resolveURI(uri, configuration);
  }

  /**
   * Normalize the input URI to a canonical "scheme://authority/" form (falling back to the default
   * file system URI when scheme/authority are absent) so equivalent references yield equal keys.
   */
  private URI resolveURI(URI uri, Configuration configuration) {
    String scheme = uri.getScheme();
    String authority = uri.getAuthority();
    if (scheme == null && authority == null) { // use default FS
      return FileSystem.getDefaultUri(configuration);
    }
    if (scheme != null && authority == null) { // no authority
      URI defaultUri = FileSystem.getDefaultUri(configuration);
      if (scheme.equals(defaultUri.getScheme()) // if scheme matches default
          && defaultUri.getAuthority() != null) { // & default has authority
        return defaultUri; // return default
      }
    }
    try {
      // Canonicalize: keep scheme and authority, drop path/query/fragment.
      return new URI(scheme, Strings.nullToEmpty(authority), "/", null, null);
    } catch (URISyntaxException use) {
      // This should never happen: scheme and authority come from an already-valid URI.
      throw new RuntimeException(use);
    }
  }

  @Override
  public String toConfigurationKey() {
    return this.uri.getScheme() + (this.uri.getHost() == null ? "" : ("." + this.uri.getHost()));
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    FileSystemKey that = (FileSystemKey) o;
    // Keys are identified by the resolved URI only; the Configuration is intentionally excluded.
    return Objects.equals(this.uri, that.uri);
  }

  @Override
  public int hashCode() {
    // Consistent with equals(): hash only the resolved URI.
    return Objects.hashCode(this.uri);
  }
}
| 4,238 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/ModTimeDataFileVersionStrategy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import java.io.IOException;
import java.util.Set;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.ImmutableSet;
import com.typesafe.config.Config;
import lombok.Data;
import org.apache.gobblin.annotation.Alias;
/**
* An implementation of {@link DataFileVersionStrategy} that uses modtime as the file version.
*
* This is the default implementation and does data comparisons purely based on modification time.
*/
@Data
public class ModTimeDataFileVersionStrategy implements DataFileVersionStrategy<Long> {

  /** Factory registered under the alias {@code modtime}; builds the strategy for a given file system. */
  @Alias(value = "modtime")
  public static class Factory implements DataFileVersionStrategy.DataFileVersionFactory<Long> {
    @Override
    public DataFileVersionStrategy<Long> createDataFileVersionStrategy(FileSystem fs, Config config) {
      return new ModTimeDataFileVersionStrategy(fs);
    }
  }

  private final FileSystem fs;

  /** The version of a file is its modification time as reported by the file system. */
  @Override
  public Long getVersion(Path path) throws IOException {
    return Long.valueOf(this.fs.getFileStatus(path).getModificationTime());
  }

  /** Versions are derived from modtime and cannot be set explicitly. */
  @Override
  public boolean setVersion(Path path, Long version) {
    return false;
  }

  /** Versions are derived from modtime and cannot be set explicitly. */
  @Override
  public boolean setDefaultVersion(Path path) {
    return false;
  }

  @Override
  public Set<Characteristic> applicableCharacteristics() {
    return ImmutableSet.of(Characteristic.COMPATIBLE_WITH_MODTIME);
  }
}
| 4,239 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/InstrumentedWebHDFSFileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
/**
* A {@link InstrumentedFileSystem} for the webhdfs scheme.
*
* Usage:
* FileSystem.get("instrumented-webhdfs://...")
*/
public class InstrumentedWebHDFSFileSystem extends InstrumentedFileSystem {
  // URI scheme under which this instrumented wrapper is addressed.
  public static final String SCHEME = "instrumented-webhdfs";
  public InstrumentedWebHDFSFileSystem() {
    // Wrap a plain WebHdfsFileSystem; the actual underlying FS is re-resolved in initialize().
    super(SCHEME, new WebHdfsFileSystem());
  }
}
| 4,240 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/FileSystemSupplier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
/**
* An interface of supplier to get FileSystem
*/
public interface FileSystemSupplier {
  /**
   * Supplies a {@link FileSystem} instance.
   *
   * @return the new FileSystem for using
   * @throws IOException if the file system cannot be obtained
   */
  // Note: the redundant "public" modifier was dropped; interface members are implicitly public.
  FileSystem getFileSystem() throws IOException;
}
| 4,241 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/FileSystemFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import java.io.IOException;
import java.net.URI;
import java.util.ServiceLoader;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.gobblin.broker.ResourceInstance;
import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.ScopeType;
import org.apache.gobblin.broker.iface.ScopedConfigView;
import org.apache.gobblin.broker.iface.SharedResourceFactory;
import org.apache.gobblin.broker.iface.SharedResourceFactoryResponse;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import lombok.extern.slf4j.Slf4j;
/**
* A {@link SharedResourceFactory} for creating {@link FileSystem}s.
*
* The factory creates a {@link FileSystem} with the correct scheme and applies any {@link FileSystemInstrumentation}
* found in the classpath.
*/
@Slf4j
public class FileSystemFactory<S extends ScopeType<S>> implements SharedResourceFactory<FileSystem, FileSystemKey, S> {
public static final String FACTORY_NAME = "filesystem";
/**
* Equivalent to {@link FileSystem#get(Configuration)}, but uses the input {@link SharedResourcesBroker} to configure
* add-ons to the {@link FileSystem} (e.g. throttling, instrumentation).
*/
public static <S extends ScopeType<S>> FileSystem get(Configuration configuration, SharedResourcesBroker<S> broker)
throws IOException {
return get(FileSystem.getDefaultUri(configuration), configuration, broker);
}
/**
* Equivalent to {@link FileSystem#get(URI, Configuration)}, but uses the input {@link SharedResourcesBroker} to configure
* add-ons to the {@link FileSystem} (e.g. throttling, instrumentation).
*/
public static <S extends ScopeType<S>> FileSystem get(URI uri, Configuration configuration, SharedResourcesBroker<S> broker)
throws IOException {
try {
return broker.getSharedResource(new FileSystemFactory<S>(), new FileSystemKey(uri, configuration));
} catch (NotConfiguredException nce) {
throw new IOException(nce);
}
}
@Override
public String getName() {
return FACTORY_NAME;
}
@Override
public SharedResourceFactoryResponse<FileSystem> createResource(SharedResourcesBroker<S> broker,
ScopedConfigView<S, FileSystemKey> config) throws NotConfiguredException {
try {
FileSystemKey key = config.getKey();
URI uri = key.getUri();
Configuration hadoopConf = key.getConfiguration();
log.info("Creating instrumented FileSystem for uri " + uri);
Class<? extends FileSystem> fsClass = FileSystem.getFileSystemClass(uri.getScheme(), hadoopConf);
if (InstrumentedFileSystem.class.isAssignableFrom(fsClass)) {
InstrumentedFileSystem tmpfs = (InstrumentedFileSystem) fsClass.newInstance();
hadoopConf = new Configuration(hadoopConf);
String schemeKey = "fs." + uri.getScheme() + ".impl";
hadoopConf.set(schemeKey, tmpfs.underlyingFs.getClass().getName());
}
FileSystem fs = FileSystem.newInstance(uri, hadoopConf);
ServiceLoader<FileSystemInstrumentationFactory> loader = ServiceLoader.load(FileSystemInstrumentationFactory.class);
for (FileSystemInstrumentationFactory instrumentationFactory : loader) {
fs = instrumentationFactory.instrumentFileSystem(fs, broker, config);
}
return new ResourceInstance<>(fs);
} catch (IOException | ReflectiveOperationException ioe) {
throw new RuntimeException(ioe);
}
}
/** Scopes the resource at the root scope of the broker's scope type. */
@Override
public S getAutoScope(SharedResourcesBroker<S> broker, ConfigView<S, FileSystemKey> config) {
return broker.selfScope().getType().rootScope();
}
}
| 4,242 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/FileSystemLimiterKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import java.net.URI;
import com.google.common.base.Strings;
import org.apache.gobblin.util.ClustersNames;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
/**
* {@link SharedLimiterKey} used for NameNode throttling.
*/
public class FileSystemLimiterKey extends SharedLimiterKey {
  public static final String RESOURCE_LIMITED_PREFIX = "filesystem";

  private final URI uri;
  // Optional qualifier appended to the limited-resource path; may be null.
  public final String serviceName;

  public FileSystemLimiterKey(URI uri) {
    this(uri, null);
  }

  public FileSystemLimiterKey(URI uri, String serviceName) {
    super(buildResourceId(uri, serviceName));
    this.uri = uri;
    this.serviceName = serviceName;
  }

  /** Builds the limited-resource identifier: {@code filesystem/<cluster>[/<serviceName>]}. */
  private static String buildResourceId(URI uri, String serviceName) {
    String base = RESOURCE_LIMITED_PREFIX + "/" + getFSIdentifier(uri);
    return Strings.isNullOrEmpty(serviceName) ? base : base + "/" + serviceName;
  }

  /** Maps the file system URI to its cluster name. */
  private static String getFSIdentifier(URI uri) {
    return ClustersNames.getInstance().getClusterName(uri.toString());
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || !getClass().equals(o.getClass()) || !super.equals(o)) {
      return false;
    }
    return this.uri.equals(((FileSystemLimiterKey) o).uri);
  }

  @Override
  public int hashCode() {
    // same value as the conventional accumulate-31 form over (super, uri)
    return 31 * super.hashCode() + this.uri.hashCode();
  }
}
| 4,243 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/InstrumentedLocalFileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import org.apache.hadoop.fs.LocalFileSystem;
/**
* A {@link InstrumentedFileSystem} for the file scheme.
*
* Usage:
* FileSystem.get("instrumented-file://...")
*/
public class InstrumentedLocalFileSystem extends InstrumentedFileSystem {
// URI scheme under which this instrumented wrapper is registered with Hadoop.
public static final String SCHEME = "instrumented-file";
/** Wraps a fresh {@link LocalFileSystem} so local file system calls can be instrumented. */
public InstrumentedLocalFileSystem() {
super(SCHEME, new LocalFileSystem());
}
}
| 4,244 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/PathAlterationObserverScheduler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import org.apache.gobblin.util.ExecutorsUtils;
/**
* A runnable that spawns a monitoring thread triggering any
* registered {@link PathAlterationObserver} at a specified interval.
*
* Based on {@link org.apache.commons.io.monitor.FileAlterationMonitor}, implemented monitoring
* thread to periodically check the monitored file in thread pool.
*/
/**
 * A runnable that spawns a monitoring thread triggering any
 * registered {@link PathAlterationObserver} at a specified interval.
 *
 * Based on {@link org.apache.commons.io.monitor.FileAlterationMonitor}; a single-threaded scheduled
 * executor periodically checks the monitored paths.
 */
public final class PathAlterationObserverScheduler implements Runnable {
  private static final Logger LOGGER = LoggerFactory.getLogger(PathAlterationObserverScheduler.class);

  /** Delay in milliseconds before the first scheduled check. */
  private static final int INITIAL_DELAY_MILLIS = 0;

  /** Interval between checks, in milliseconds; non-positive disables scheduling in {@link #start()}. */
  private final long interval;
  // volatile: written under `synchronized` in start()/stop(), but read unsynchronized in run()
  private volatile boolean running = false;
  private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1,
      ExecutorsUtils.newDaemonThreadFactory(Optional.of(LOGGER), Optional.of("newDaemonThreadFactory")));
  private ScheduledFuture<?> executionResult;
  private final List<PathAlterationObserver> observers = new CopyOnWriteArrayList<>();

  public PathAlterationObserverScheduler() {
    this(3000);
  }

  /**
   * @param interval time in milliseconds between checks of the registered observers
   */
  public PathAlterationObserverScheduler(final long interval) {
    this.interval = interval;
  }

  /**
   * Add a file system observer to this monitor.
   *
   * @param observer The file system observer to add; ignored when {@code null}
   */
  public void addObserver(final PathAlterationObserver observer) {
    if (observer != null) {
      observers.add(observer);
    }
  }

  /**
   * Remove all occurrences of a file system observer from this monitor.
   *
   * @param observer The file system observer to remove; ignored when {@code null}
   */
  public void removeObserver(final PathAlterationObserver observer) {
    if (observer != null) {
      while (observers.remove(observer)) {
      }
    }
  }

  /**
   * Returns the set of {@link PathAlterationObserver} registered with this monitor.
   *
   * @return The registered {@link PathAlterationObserver}s
   */
  public Iterable<PathAlterationObserver> getObservers() {
    return observers;
  }

  /**
   * Start monitoring: initializes every registered observer and, when the interval is positive,
   * schedules this runnable with a fixed delay.
   *
   * @throws IOException if an error occurs initializing an observer
   * @throws IllegalStateException if the monitor is already running
   */
  public synchronized void start() throws IOException {
    if (running) {
      throw new IllegalStateException("Monitor is already running");
    }
    for (final PathAlterationObserver observer : observers) {
      observer.initialize();
    }
    if (interval > 0) {
      running = true;
      this.executionResult = executor.scheduleWithFixedDelay(this, INITIAL_DELAY_MILLIS, interval, TimeUnit.MILLISECONDS);
    } else {
      LOGGER.info("Not starting due to non-positive scheduling interval:" + interval);
    }
  }

  /**
   * Stop monitoring, waiting up to the configured check interval for the executor to terminate.
   *
   * @throws IOException if an error occurs destroying an observer
   * @throws InterruptedException if interrupted while awaiting executor termination
   */
  public synchronized void stop()
      throws IOException, InterruptedException {
    stop(interval);
  }

  /**
   * Stop monitoring.
   *
   * @param stopInterval time in milliseconds to wait for the executor to terminate
   * @throws IOException if an error occurs destroying an observer
   * @throws InterruptedException if interrupted while awaiting executor termination
   * @since 2.1
   */
  public synchronized void stop(final long stopInterval)
      throws IOException, InterruptedException {
    if (!running) {
      LOGGER.warn("Already stopped");
      return;
    }
    running = false;
    for (final PathAlterationObserver observer : observers) {
      observer.destroy();
    }
    executionResult.cancel(true);
    executor.shutdown();
    if (!executor.awaitTermination(stopInterval, TimeUnit.MILLISECONDS)) {
      throw new RuntimeException("Did not shutdown in the timeout period");
    }
  }

  @Override
  public void run() {
    if (!running) {
      return;
    }
    for (final PathAlterationObserver observer : observers) {
      try {
        observer.checkAndNotify();
      } catch (IOException ioe) {
        // one failing observer must not prevent the others from being checked
        LOGGER.error("Path alteration detector error.", ioe);
      }
    }
  }

  /**
   * Create a {@link PathAlterationObserver} for the given root directory (and any nested
   * subdirectories), attach the listener to it, and register it with this scheduler.
   *
   * @param listener a {@link org.apache.gobblin.util.filesystem.PathAlterationListener} notified of changes
   * @param observerOptional optional pre-built observer (supplied by tests); when absent, a new
   *        observer rooted at {@code rootDirPath} is created
   * @param rootDirPath root directory to monitor
   * @throws IOException if the observer cannot be created
   */
  public void addPathAlterationObserver(PathAlterationListener listener,
      Optional<PathAlterationObserver> observerOptional, Path rootDirPath)
      throws IOException {
    PathAlterationObserver observer = observerOptional.or(new PathAlterationObserver(rootDirPath));
    observer.addListener(listener);
    addObserver(observer);
  }
}
| 4,245 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/PathAlterationListenerAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import org.apache.hadoop.fs.Path;
public class PathAlterationListenerAdaptor implements PathAlterationListener {
public void onStart(final PathAlterationObserver observer) {
}
public void onFileCreate(final Path path) {
}
public void onFileChange(final Path path) {
}
public void onStop(final PathAlterationObserver observer) {
}
public void onDirectoryCreate(final Path directory) {
}
public void onDirectoryChange(final Path directory) {
}
public void onDirectoryDelete(final Path directory) {
}
public void onFileDelete(final Path path) {
}
public void onCheckDetectedChange() {
}
}
| 4,246 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/ExceptionCatchingPathAlterationListenerDecorator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import java.util.Arrays;
import java.util.concurrent.Callable;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.util.Decorator;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
* A decorator for {@link PathAlterationListener} that catches and logs any exception thrown by the underlying listener,
* preventing it from failing the application.
*/
@AllArgsConstructor
@Slf4j
public class ExceptionCatchingPathAlterationListenerDecorator implements PathAlterationListener, Decorator {

  private final PathAlterationListener underlying;

  @Override
  public Object getDecoratedObject() {
    return this.underlying;
  }

  @Override
  public void onStart(PathAlterationObserver observer) {
    logSwallowedThrowable(() -> {
      this.underlying.onStart(observer);
      return null;
    });
  }

  @Override
  public void onFileCreate(Path path) {
    logSwallowedThrowable(() -> {
      this.underlying.onFileCreate(path);
      return null;
    });
  }

  @Override
  public void onFileChange(Path path) {
    logSwallowedThrowable(() -> {
      this.underlying.onFileChange(path);
      return null;
    });
  }

  @Override
  public void onStop(PathAlterationObserver observer) {
    logSwallowedThrowable(() -> {
      this.underlying.onStop(observer);
      return null;
    });
  }

  @Override
  public void onDirectoryCreate(Path directory) {
    logSwallowedThrowable(() -> {
      this.underlying.onDirectoryCreate(directory);
      return null;
    });
  }

  @Override
  public void onDirectoryChange(Path directory) {
    logSwallowedThrowable(() -> {
      this.underlying.onDirectoryChange(directory);
      return null;
    });
  }

  @Override
  public void onDirectoryDelete(Path directory) {
    logSwallowedThrowable(() -> {
      this.underlying.onDirectoryDelete(directory);
      return null;
    });
  }

  @Override
  public void onFileDelete(Path path) {
    logSwallowedThrowable(() -> {
      this.underlying.onFileDelete(path);
      return null;
    });
  }

  @Override
  public void onCheckDetectedChange() {
    logSwallowedThrowable(() -> {
      this.underlying.onCheckDetectedChange();
      return null;
    });
  }

  /**
   * Runs {@code c}, logging (rather than rethrowing) anything it throws.
   */
  protected void logSwallowedThrowable(Callable<Void> c) {
    try {
      c.call();
    } catch (Throwable exc) {
      // The top stack frame may be absent (e.g. JVM fast-throw optimization); fall back gracefully
      // instead of letting Optional.get() throw NoSuchElementException inside the very catch that
      // is supposed to swallow listener failures.
      String methodName = Arrays.stream(exc.getStackTrace())
          .findFirst()
          .map(StackTraceElement::getMethodName)
          .orElse("(unknown method)");
      log.error(methodName + " failed due to exception:", exc);
    }
  }
}
| 4,247 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/InstrumentedHDFSFileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
/**
* A {@link InstrumentedFileSystem} for the hdfs scheme.
*
* Usage:
* FileSystem.get("instrumented-hdfs://...")
*/
public class InstrumentedHDFSFileSystem extends InstrumentedFileSystem {
// URI scheme under which this instrumented wrapper is registered with Hadoop.
public static final String SCHEME = "instrumented-hdfs";
/** Wraps a fresh {@link DistributedFileSystem} so HDFS calls can be instrumented. */
public InstrumentedHDFSFileSystem() {
super(SCHEME, new DistributedFileSystem());
}
}
| 4,248 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/filesystem/InstrumentedFileSystemUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.filesystem;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
/**
* Common methods for Instrumented {@link org.apache.hadoop.fs.FileSystem}s.
*/
/**
 * Common methods for Instrumented {@link org.apache.hadoop.fs.FileSystem}s: rewriting the scheme of
 * URIs, {@link Path}s, and {@link FileStatus}es.
 */
public class InstrumentedFileSystemUtils {

  private InstrumentedFileSystemUtils() {
    // static utility class: prevent instantiation
  }

  /**
   * Replace the scheme of the input {@link URI} if it matches the string to replace.
   */
  public static URI replaceScheme(URI uri, String replace, String replacement) {
    if (replace != null && replace.equals(replacement)) {
      return uri; // nothing to do when old and new scheme are identical
    }
    try {
      if (replace != null && replace.equals(uri.getScheme())) {
        return new URI(replacement, uri.getUserInfo(), uri.getHost(), uri.getPort(), uri.getPath(), uri.getQuery(),
            uri.getFragment());
      } else {
        return uri;
      }
    } catch (URISyntaxException use) {
      // keep the cause so callers can diagnose the malformed URI
      throw new RuntimeException("Failed to replace scheme.", use);
    }
  }

  /**
   * Replace the scheme of the input {@link Path} if it matches the string to replace.
   */
  public static Path replaceScheme(Path path, String replace, String replacement) {
    return new Path(replaceScheme(path.toUri(), replace, replacement));
  }

  /**
   * Replace the scheme of each {@link Path} if it matches the string to replace.
   */
  public static Path[] replaceScheme(Path[] paths, String replace, String replacement) {
    if (replace != null && replace.equals(replacement)) {
      return paths;
    }
    Path[] output = new Path[paths.length];
    for (int i = 0; i < paths.length; i++) {
      output[i] = replaceScheme(paths[i], replace, replacement);
    }
    return output;
  }

  /**
   * Replace the scheme of each {@link FileStatus} if it matches the string to replace.
   */
  public static FileStatus[] replaceScheme(FileStatus[] paths, String replace, String replacement) {
    if (replace != null && replace.equals(replacement)) {
      return paths;
    }
    FileStatus[] output = new FileStatus[paths.length];
    for (int i = 0; i < paths.length; i++) {
      output[i] = replaceScheme(paths[i], replace, replacement);
    }
    return output;
  }

  /**
   * Replace the scheme of the input {@link FileStatus} if it matches the string to replace.
   */
  public static FileStatus replaceScheme(FileStatus st, String replace, String replacement) {
    if (replace != null && replace.equals(replacement)) {
      return st;
    }
    try {
      // isDirectory() replaces the deprecated isDir()
      return new FileStatus(st.getLen(), st.isDirectory(), st.getReplication(), st.getBlockSize(), st.getModificationTime(),
          st.getAccessTime(), st.getPermission(), st.getOwner(), st.getGroup(), st.isSymlink() ? st.getSymlink() : null,
          replaceScheme(st.getPath(), replace, replacement));
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
}
| 4,249 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/function/CheckedExceptionFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.function;
import java.io.IOException;
import java.util.function.Function;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
/**
* Alternative to {@link Function} that handles wrapping (or tunneling) a single checked {@link Exception} derived class.
* Inspired by: https://dzone.com/articles/how-to-handle-checked-exception-in-lambda-expressi
*/
@FunctionalInterface
public interface CheckedExceptionFunction<T, R, E extends Exception> {
/**
 * Wrapper to tunnel {@link IOException} as an unchecked exception that would later be unwrapped via
 * {@link WrappedIOException#rethrowWrapped()}. If no expectation of unwrapping, this wrapper may simply add
 * unnecessary obfuscation: instead use {@link CheckedExceptionFunction#wrapToUnchecked(CheckedExceptionFunction)}
 *
 * BUMMER: specific {@link IOException} hard-coded because: "generic class may not extend 'java.lang.Throwable'"
 */
@RequiredArgsConstructor
public static class WrappedIOException extends RuntimeException {
@Getter
private final IOException wrappedException;
/** CAUTION: if this be your intent, DO NOT FORGET! Being unchecked, the compiler WILL NOT remind you. */
public void rethrowWrapped() throws IOException {
throw wrappedException;
}
}
/** Applies this function to {@code arg}, possibly throwing the checked exception {@code E}. */
R apply(T arg) throws E;
/** @return {@link Function} proxy that catches any instance of {@link Exception} and rethrows it wrapped as {@link RuntimeException} */
static <T, R, E extends Exception> Function<T, R> wrapToUnchecked(CheckedExceptionFunction<T, R, E> f) {
return a -> {
try {
return f.apply(a);
} catch (RuntimeException re) {
throw re; // no double wrapping
} catch (Exception e) {
throw new RuntimeException(e);
}
};
}
/**
 * @return {@link Function} proxy that catches any instance of {@link IOException}, and rethrows it wrapped as {@link WrappedIOException},
 * for easy unwrapping via {@link WrappedIOException#rethrowWrapped()}
 */
static <T, R, E extends IOException> Function<T, R> wrapToTunneled(CheckedExceptionFunction<T, R, E> f) {
return a -> {
try {
return f.apply(a);
} catch (RuntimeException re) {
throw re; // no double wrapping
} catch (IOException ioe) {
throw new WrappedIOException(ioe);
}
};
}
}
| 4,250 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/function/CheckedExceptionPredicate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.function;
import java.io.IOException;
import java.util.function.Function;
import java.util.function.Predicate;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
/**
* Alternative to {@link Predicate} that handles wrapping (or tunneling) a single checked {@link Exception} derived class.
* Based on and extremely similar to {@link CheckedExceptionFunction}.<br><br>
*
* At first glance, it appears these 2 classes could be generalized and combined without the uncomfortable amount of duplication.
* But this is not possible to do cleanly because:
* <ul>
* <li> {@link Predicate} and {@link Function} are separate types with no inheritance hierarchy relationship</li>
* <li>
* {@link CheckedExceptionPredicate#wrapToUnchecked(CheckedExceptionPredicate)} returns a {@link Predicate}
* but {@link CheckedExceptionFunction#wrapToUnchecked(CheckedExceptionFunction)} returns a {@link Function}. And
* since Java does not support higher level generics / type classes (i.e. type parameters for types that are
* themselves parameterized)
* </li>
* </ul>
*/
@FunctionalInterface
public interface CheckedExceptionPredicate<T, E extends Exception> {
  /**
   * Wrapper to tunnel {@link IOException} as an unchecked exception for later unwrapping via
   * {@link WrappedIOException#rethrowWrapped()}. When no unwrapping is intended, prefer
   * {@link CheckedExceptionPredicate#wrapToUnchecked(CheckedExceptionPredicate)} to avoid needless obfuscation.
   *
   * BUMMER: specific {@link IOException} hard-coded because: "generic class may not extend {@link java.lang.Throwable}"
   */
  @RequiredArgsConstructor
  class WrappedIOException extends RuntimeException {
    @Getter
    private final IOException wrappedException;

    /** CAUTION: if this be your intent, DO NOT FORGET! Being unchecked, the compiler WILL NOT remind you. */
    public void rethrowWrapped() throws IOException {
      throw wrappedException;
    }
  }

  /** Evaluates this predicate on {@code arg}, possibly throwing the checked exception {@code E}. */
  boolean test(T arg) throws E;

  /** @return {@link Predicate} proxy that catches any instance of {@link Exception} and rethrows it wrapped as {@link RuntimeException} */
  static <T, E extends Exception> Predicate<T> wrapToUnchecked(CheckedExceptionPredicate<T, E> pred) {
    return input -> {
      try {
        return pred.test(input);
      } catch (RuntimeException unchecked) {
        // already unchecked: rethrow as-is rather than double-wrap
        throw unchecked;
      } catch (Exception checked) {
        throw new RuntimeException(checked);
      }
    };
  }

  /**
   * @return {@link Predicate} proxy that catches any instance of {@link IOException}, and rethrows it wrapped as
   *         {@link WrappedIOException}, for easy unwrapping via {@link WrappedIOException#rethrowWrapped()}
   */
  static <T, E extends IOException> Predicate<T> wrapToTunneled(CheckedExceptionPredicate<T, E> pred) {
    return input -> {
      try {
        return pred.test(input);
      } catch (RuntimeException unchecked) {
        // already unchecked: rethrow as-is rather than double-wrap
        throw unchecked;
      } catch (IOException tunneled) {
        throw new WrappedIOException(tunneled);
      }
    };
  }
}
| 4,251 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/jdbc/DataSourceProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.jdbc;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import javax.sql.DataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.name.Named;
import com.zaxxer.hikari.HikariDataSource;
import org.apache.gobblin.password.PasswordManager;
/**
* A provider class for {@link javax.sql.DataSource}s.
*/
public class DataSourceProvider implements Provider<DataSource> {
private static final Logger LOG = LoggerFactory.getLogger(DataSourceProvider.class);
// Monotonic counter used to give each Hikari pool a unique, identifiable name.
private static final AtomicInteger POOL_NUM = new AtomicInteger(0);
// Configuration property keys recognized by this provider.
public static final String GOBBLIN_UTIL_JDBC_PREFIX = "gobblin.util.jdbc.";
public static final String CONN_DRIVER = GOBBLIN_UTIL_JDBC_PREFIX + "conn.driver";
public static final String CONN_URL = GOBBLIN_UTIL_JDBC_PREFIX + "conn.url";
public static final String USERNAME = GOBBLIN_UTIL_JDBC_PREFIX + "username";
public static final String PASSWORD = GOBBLIN_UTIL_JDBC_PREFIX + "password";
public static final String SKIP_VALIDATION_QUERY = GOBBLIN_UTIL_JDBC_PREFIX + "skip.validation.query";
public static final String MAX_ACTIVE_CONNS = GOBBLIN_UTIL_JDBC_PREFIX + "max.active.connections";
public static final String DEFAULT_CONN_DRIVER = "com.mysql.cj.jdbc.Driver";
protected final HikariDataSource dataSource;
/**
 * Builds a Hikari connection pool from the injected properties: driver class, JDBC URL, optional
 * credentials (password resolved via {@link PasswordManager}), optional max pool size, and a
 * connection-validation query unless explicitly skipped via {@link #SKIP_VALIDATION_QUERY}.
 */
@Inject
public DataSourceProvider(@Named("dataSourceProperties") Properties properties) {
this.dataSource = new HikariDataSource();
this.dataSource.setPoolName("HikariPool-" + POOL_NUM.incrementAndGet() + "-" + getClass().getSimpleName());
this.dataSource.setDriverClassName(properties.getProperty(CONN_DRIVER, DEFAULT_CONN_DRIVER));
// the validation query should work beyond mysql; still, to bypass for any reason, heed directive
if (!Boolean.parseBoolean(properties.getProperty(SKIP_VALIDATION_QUERY, "false"))) {
// MySQL server can timeout a connection so need to validate connections before use
final String validationQuery = MysqlDataSourceUtils.QUERY_CONNECTION_IS_VALID_AND_NOT_READONLY;
LOG.info("setting `DataSource` validation query: '" + validationQuery + "'");
// TODO: revisit following verification of successful connection pool migration:
// If your driver supports JDBC4 we strongly recommend not setting this property. This is for "legacy" drivers
// that do not support the JDBC4 Connection.isValid() API; see:
// https://github.com/brettwooldridge/HikariCP#gear-configuration-knobs-baby
this.dataSource.setConnectionTestQuery(validationQuery);
this.dataSource.setIdleTimeout(Duration.ofSeconds(60).toMillis());
}
this.dataSource.setJdbcUrl(properties.getProperty(CONN_URL));
// TODO: revisit following verification of successful connection pool migration:
// whereas `o.a.commons.dbcp.BasicDataSource` defaults min idle conns to 0, hikari defaults to 10.
// perhaps non-zero would have desirable runtime perf, but anything >0 currently fails unit tests (even 1!);
// (so experimenting with a higher number would first require adjusting tests)
this.dataSource.setMinimumIdle(0);
// credentials are applied only when BOTH username and password are configured
if (properties.containsKey(USERNAME) && properties.containsKey(PASSWORD)) {
this.dataSource.setUsername(properties.getProperty(USERNAME));
this.dataSource
.setPassword(PasswordManager.getInstance(properties).readPassword(properties.getProperty(PASSWORD)));
}
if (properties.containsKey(MAX_ACTIVE_CONNS)) {
this.dataSource.setMaximumPoolSize(Integer.parseInt(properties.getProperty(MAX_ACTIVE_CONNS)));
}
}
/**
 * No-arg constructor creating an unconfigured pool WITHOUT a validation query; logs the calling
 * stack trace since this usage is discouraged.
 */
public DataSourceProvider() {
LOG.warn("Creating {} without setting validation query.\n Stacktrace of current thread {}",
this.getClass().getSimpleName(),
Arrays.toString(Thread.currentThread().getStackTrace()).replace(", ", "\n at "));
this.dataSource = new HikariDataSource();
}
/** @return the configured {@link DataSource}; the same pooled instance is returned on every call */
@Override
public DataSource get() {
return this.dataSource;
}
}
| 4,252 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/jdbc/DataSourceModule.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.jdbc;
import java.util.Properties;
import javax.sql.DataSource;
import com.google.inject.AbstractModule;
import com.google.inject.name.Names;
/**
 * A Guice module defining the dependencies used by the jdbc data source.
 *
 * <p>Binds the supplied {@link Properties} under the name {@code dataSourceProperties}
 * and wires {@link DataSource} to {@link DataSourceProvider}.
 */
public class DataSourceModule extends AbstractModule {
  private final Properties properties;

  /**
   * @param properties data source configuration; defensively copied so later mutations by
   *     the caller do not affect this module
   */
  public DataSourceModule(Properties properties) {
    this.properties = new Properties();
    this.properties.putAll(properties);
    // putAll only copies entries stored directly in the Hashtable and silently drops
    // values reachable only through the Properties defaults chain. Copy those as well so
    // getProperty on the bound copy behaves the same as on the caller's instance.
    for (String name : properties.stringPropertyNames()) {
      if (!this.properties.containsKey(name)) {
        this.properties.setProperty(name, properties.getProperty(name));
      }
    }
  }

  @Override
  protected void configure() {
    bind(Properties.class).annotatedWith(Names.named("dataSourceProperties")).toInstance(this.properties);
    bind(DataSource.class).toProvider(DataSourceProvider.class);
  }
}
| 4,253 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/jdbc/DataSourceBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.jdbc;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import java.util.Properties;
import javax.sql.DataSource;
import lombok.ToString;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
/**
 * Fluent builder that assembles the {@link Properties} understood by
 * {@link DataSourceProvider} and returns the resulting jdbc {@link DataSource}.
 *
 * <p>Required settings: {@code url}, {@code driver}, {@code userName}, {@code passWord}.
 * Optional: {@code maxActiveConnections}, {@code cryptoKeyLocation},
 * {@code useStrongEncryption}, and a {@link State} whose properties seed the provider
 * configuration.
 */
@ToString(exclude = "passWord")
public class DataSourceBuilder {
  private static final Logger LOG = LoggerFactory.getLogger(DataSourceBuilder.class);

  private String url;
  private String driver;
  private String userName;
  private String passWord;
  private Integer maxActiveConnections;
  private String cryptoKeyLocation;
  private Boolean useStrongEncryption;
  private State state;

  public static DataSourceBuilder builder() {
    return new DataSourceBuilder();
  }

  public DataSourceBuilder url(String url) {
    this.url = url;
    return this;
  }

  public DataSourceBuilder driver(String driver) {
    this.driver = driver;
    return this;
  }

  public DataSourceBuilder userName(String userName) {
    this.userName = userName;
    return this;
  }

  public DataSourceBuilder passWord(String passWord) {
    this.passWord = passWord;
    return this;
  }

  public DataSourceBuilder maxActiveConnections(int maxActiveConnections) {
    this.maxActiveConnections = maxActiveConnections;
    return this;
  }

  public DataSourceBuilder cryptoKeyLocation(String cryptoKeyLocation) {
    this.cryptoKeyLocation = cryptoKeyLocation;
    return this;
  }

  public DataSourceBuilder useStrongEncryption(boolean useStrongEncryption) {
    this.useStrongEncryption = useStrongEncryption;
    return this;
  }

  public DataSourceBuilder state(State state) {
    this.state = state;
    return this;
  }

  /**
   * Builds the {@link DataSource} from the accumulated settings.
   *
   * @return a data source configured from this builder (seeded by the optional {@link State})
   * @throws IllegalArgumentException if a required setting is missing or invalid
   */
  public DataSource build() {
    validate();
    Properties properties = new Properties();
    if (this.state != null) {
      // Copy instead of aliasing state.getProperties(): the previous implementation wrote
      // the connection settings (including the password) straight into the caller's State.
      properties.putAll(this.state.getProperties());
    }
    properties.setProperty(DataSourceProvider.CONN_URL, this.url);
    properties.setProperty(DataSourceProvider.USERNAME, this.userName);
    properties.setProperty(DataSourceProvider.PASSWORD, this.passWord);
    properties.setProperty(DataSourceProvider.CONN_DRIVER, this.driver);
    if (!StringUtils.isEmpty(this.cryptoKeyLocation)) {
      properties.setProperty(ConfigurationKeys.ENCRYPT_KEY_LOC, this.cryptoKeyLocation);
    }
    if (this.maxActiveConnections != null) {
      properties.setProperty(DataSourceProvider.MAX_ACTIVE_CONNS, this.maxActiveConnections.toString());
    }
    if (this.useStrongEncryption != null) {
      properties.setProperty(ConfigurationKeys.ENCRYPT_USE_STRONG_ENCRYPTOR, this.useStrongEncryption.toString());
    }
    if (LOG.isDebugEnabled()) {
      // Mask the password so credentials never land in the debug log (mirrors the
      // @ToString(exclude = "passWord") intent above).
      Properties loggable = new Properties();
      loggable.putAll(properties);
      loggable.setProperty(DataSourceProvider.PASSWORD, "*****");
      LOG.debug("Building DataSource with properties " + loggable);
    }
    return new DataSourceProvider(properties).get();
  }

  private void validate() {
    validateNotEmpty(this.url, "url");
    validateNotEmpty(this.driver, "driver");
    // userName was previously unchecked, so a missing user name surfaced as an opaque
    // NullPointerException from Properties.setProperty in build().
    validateNotEmpty(this.userName, "userName");
    validateNotEmpty(this.passWord, "passWord");
    Preconditions.checkArgument(this.maxActiveConnections == null || this.maxActiveConnections > 0,
        "maxActiveConnections should be a positive integer.");
  }

  private static void validateNotEmpty(String s, String name) {
    Preconditions.checkArgument(!StringUtils.isEmpty(s), name + " should not be empty.");
  }
}
| 4,254 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/jdbc/MysqlDataSourceUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.jdbc;
/**
 * Utilities for validating MySQL connections held in a connection pool.
 */
public final class MysqlDataSourceUtils {
  /**
   * Validation query that checks both that the MySQL connection is alive AND that the
   * instance is writable.
   *
   * <p>When {@code @@read_only = 0} (writable) the query returns 1; otherwise it falls
   * into the multi-row subquery on {@code information_schema.tables}, which fails, so the
   * pool treats the connection as invalid and evicts it. This matters after a database
   * failover, when the current replica may have become read-only.
   *
   * See https://stackoverflow.com/questions/39552146/evicting-connections-to-a-read-only-node-in-a-cluster-from-the-connection-pool
   */
  public static final String QUERY_CONNECTION_IS_VALID_AND_NOT_READONLY =
      "select case when @@read_only = 0 then 1 else (select table_name from information_schema.tables) end as `1`";

  // Utility class: no instances.
  private MysqlDataSourceUtils() {
  }
}
| 4,255 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/recordcount/LateFileRecordCountProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.recordcount;
import java.io.IOException;
import java.util.Random;
import java.util.regex.Pattern;
import org.apache.commons.io.FilenameUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.util.RecordCountProvider;
import lombok.AllArgsConstructor;
/**
 * A {@link RecordCountProvider} wrapper that understands the {@code .late<N>} name
 * component appended to files whose original name already exists in the output dir.
 */
@AllArgsConstructor
public class LateFileRecordCountProvider extends RecordCountProvider {
  private static final String SEPARATOR = ".";
  private static final String LATE_COMPONENT = ".late";
  private static final String EMPTY_STRING = "";
  // Shared generator: java.util.Random is thread-safe, and reusing a single instance
  // avoids allocating and re-seeding a new generator on every name collision (the
  // previous implementation did `new Random()` per call).
  private static final Random RANDOM = new Random();

  // Delegate that understands the file-name format without the ".late<N>" component.
  private RecordCountProvider recordCountProviderWithoutSuffix;

  /**
   * Construct filename for a late file. If the file does not exist in the output dir, retain
   * the original name. Otherwise, append a LATE_COMPONENT{RandomInteger} to the original file
   * name, recursing until an unused name is found.
   * For example, if file "part1.123.avro" exists in dir "/a/b/", the returned path will be
   * "/a/b/part1.123.late12345.avro".
   */
  public Path constructLateFilePath(String originalFilename, FileSystem fs, Path outputDir) throws IOException {
    if (!fs.exists(new Path(outputDir, originalFilename))) {
      return new Path(outputDir, originalFilename);
    }
    return constructLateFilePath(FilenameUtils.getBaseName(originalFilename) + LATE_COMPONENT
        + RANDOM.nextInt(Integer.MAX_VALUE) + SEPARATOR + FilenameUtils.getExtension(originalFilename), fs,
        outputDir);
  }

  /**
   * Remove the late components in the path added by {@link LateFileRecordCountProvider}.
   */
  public static Path restoreFilePath(Path path) {
    return new Path(path.getName().replaceAll(Pattern.quote(LATE_COMPONENT) + "[\\d]*", EMPTY_STRING));
  }

  /**
   * Get record count from a given filename (possibly carrying LATE_COMPONENT{RandomInteger}),
   * delegating to the wrapped provider after stripping any late component.
   */
  @Override
  public long getRecordCount(Path path) {
    return this.recordCountProviderWithoutSuffix.getRecordCount(restoreFilePath(path));
  }
}
| 4,256 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/recordcount/IngestionRecordCountProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.recordcount;
import java.util.regex.Pattern;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Preconditions;
import com.google.common.io.Files;
import org.apache.gobblin.util.RecordCountProvider;
/**
 * Implementation of {@link RecordCountProvider}, which provides record count from file path.
 * The file path should follow the pattern: {Filename}.{RecordCount}.{Extension}.
 * For example, given a file path: "/a/b/c/file.123.avro", the record count will be 123.
 */
public class IngestionRecordCountProvider extends RecordCountProvider {
  private static final String SEPARATOR = ".";

  /**
   * Construct a new file path by appending record count to the filename of the given file path,
   * separated by SEPARATOR; return the original path unchanged if a record count is already
   * present.
   * For example, given path: "/a/b/c/file.avro" and record count: 123,
   * the new path returned will be: "/a/b/c/file.123.avro";
   * given path: "/a/b/c/file.123.avro" and record count: 123, returns "/a/b/c/file.123.avro".
   */
  public static String constructFilePath(String oldFilePath, long recordCounts) {
    if (containsRecordCount(oldFilePath)) {
      // The javadoc has always promised idempotence here, but the previous implementation
      // appended a second count (producing e.g. "file.123.123.avro").
      return oldFilePath;
    }
    return new Path(new Path(oldFilePath).getParent(), Files.getNameWithoutExtension(oldFilePath) + SEPARATOR
        + recordCounts + SEPARATOR + Files.getFileExtension(oldFilePath)).toString();
  }

  /**
   * @param filepath format /a/b/c/file.123.avro
   * @return true if a numeric record count appears just before the extension
   */
  public static boolean containsRecordCount(String filepath) {
    String[] components = filepath.split(Pattern.quote(SEPARATOR));
    return components.length >= 3 && StringUtils.isNumeric(components[components.length - 2]);
  }

  /**
   * The record count should be the last component before the filename extension.
   */
  @Override
  public long getRecordCount(Path filepath) {
    String[] components = filepath.getName().split(Pattern.quote(SEPARATOR));
    Preconditions.checkArgument(containsRecordCount(filepath.getName()),
        String.format("Filename %s does not follow the pattern: FILENAME.RECORDCOUNT.EXTENSION", filepath));
    return Long.parseLong(components[components.length - 2]);
  }
}
| 4,257 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/recordcount/CompactionRecordCountProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.recordcount;
import java.util.Random;
import java.util.regex.Pattern;
import org.apache.gobblin.util.RecordCountProvider;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Preconditions;
/**
 * Implementation of {@link RecordCountProvider}, which provides record count from file path.
 * The file name should follow the pattern:
 * {Prefix}{RecordCount}.{SystemCurrentTimeInMills}.{RandomInteger}{SUFFIX},
 * where the prefix is either {@link #M_OUTPUT_FILE_PREFIX} or {@link #MR_OUTPUT_FILE_PREFIX}.
 * For example, given "/a/b/c/part-m-123.1444437036.12345.avro", the record count is 123.
 */
public class CompactionRecordCountProvider extends RecordCountProvider {
  public static final String MR_OUTPUT_FILE_PREFIX = "part-r-";
  public static final String M_OUTPUT_FILE_PREFIX = "part-m-";
  private static final String SEPARATOR = ".";
  private static final String DEFAULT_SUFFIX = ".avro";
  private static final Random RANDOM = new Random();

  /**
   * Construct the file name as {filenamePrefix}{recordCount}.{SystemCurrentTimeInMills}.{RandomInteger}{SUFFIX}.
   * @deprecated discouraged since default behavior is not obvious from API itself.
   */
  @Deprecated
  public static String constructFileName(String filenamePrefix, long recordCount) {
    return constructFileName(filenamePrefix, DEFAULT_SUFFIX, recordCount);
  }

  /**
   * Construct the file name as {filenamePrefix}{recordCount}.{SystemCurrentTimeInMills}.{RandomInteger}{extension}.
   */
  public static String constructFileName(String filenamePrefix, String extension, long recordCount) {
    Preconditions.checkArgument(
        filenamePrefix.equals(M_OUTPUT_FILE_PREFIX) || filenamePrefix.equals(MR_OUTPUT_FILE_PREFIX),
        String.format("%s is not a supported prefix, which should be %s, or %s.", filenamePrefix, M_OUTPUT_FILE_PREFIX,
            MR_OUTPUT_FILE_PREFIX));
    return filenamePrefix + recordCount + SEPARATOR + System.currentTimeMillis() + SEPARATOR
        + RANDOM.nextInt(Integer.MAX_VALUE) + extension;
  }

  /**
   * Get the record count through the filename: the digits between the map/reduce prefix and
   * the first separator.
   */
  @Override
  public long getRecordCount(Path filepath) {
    String filename = filepath.getName();
    Preconditions.checkArgument(filename.startsWith(M_OUTPUT_FILE_PREFIX) || filename.startsWith(MR_OUTPUT_FILE_PREFIX),
        String.format("%s is not a supported filename, which should start with %s, or %s.", filename,
            M_OUTPUT_FILE_PREFIX, MR_OUTPUT_FILE_PREFIX));
    String prefix = filename.startsWith(M_OUTPUT_FILE_PREFIX) ? M_OUTPUT_FILE_PREFIX : MR_OUTPUT_FILE_PREFIX;
    String firstComponent = filename.split(Pattern.quote(SEPARATOR))[0];
    return Long.parseLong(firstComponent.substring(prefix.length()));
  }

  /**
   * This method currently supports converting the given {@link Path} from
   * {@link IngestionRecordCountProvider}; the converted {@link Path} starts with
   * {@link #M_OUTPUT_FILE_PREFIX}.
   */
  public Path convertPath(Path path, String extension, RecordCountProvider src) {
    if (this.getClass().equals(src.getClass())) {
      return path;
    }
    if (src.getClass().equals(IngestionRecordCountProvider.class)) {
      return new Path(path.getParent(), constructFileName(M_OUTPUT_FILE_PREFIX, extension, src.getRecordCount(path)));
    }
    throw getNotImplementedException(src);
  }
}
| 4,258 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/json/JsonUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.json;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.map.ObjectMapper;
/**
 * JSON related utilities
 */
public class JsonUtils {
  // Both Jackson objects are shared process-wide; each is thread-safe.
  private static final JsonFactory jacksonFactory = new JsonFactory();
  private static final ObjectMapper jacksonObjectMapper = new ObjectMapper();

  // Utility class: prevent instantiation (previously the implicit public constructor
  // allowed pointless `new JsonUtils()` calls).
  private JsonUtils() {
  }

  /**
   * Get a Jackson JsonFactory configured with standard settings. The JsonFactory is thread-safe.
   */
  public static JsonFactory getDefaultJacksonFactory() {
    return jacksonFactory;
  }

  /**
   * Get a Jackson ObjectMapper configured with standard settings. The ObjectMapper is thread-safe.
   */
  public static ObjectMapper getDefaultObjectMapper() {
    return jacksonObjectMapper;
  }
}
| 4,259 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/logs/Log4jConfigurationHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.logs;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.Map.Entry;
import java.util.Properties;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;
import com.google.common.io.Closer;
/**
 * A helper class for programmatically configuring log4j.
 *
 * @author Yinan Li
 */
public class Log4jConfigurationHelper {
  private static final Logger LOG = Logger.getLogger(Log4jConfigurationHelper.class);

  public static final String LOG_LEVEL_OVERRIDE_MAP = "log.levelOverride.map";

  /**
   * Update the log4j configuration.
   *
   * @param targetClass the target class used to get the original log4j configuration file as a resource
   * @param log4jPath the custom log4j configuration properties file path
   * @param log4jFileName the custom log4j configuration properties file name
   * @throws IOException if there's something wrong with updating the log4j configuration
   */
  public static void updateLog4jConfiguration(Class<?> targetClass, String log4jPath, String log4jFileName)
      throws IOException {
    // try-with-resources replaces the manual Closer bookkeeping and guarantees both
    // streams are closed even if loading either Properties object fails.
    try (InputStream fileInputStream = new FileInputStream(log4jPath);
        InputStream inputStream = targetClass.getResourceAsStream("/" + log4jFileName)) {
      if (inputStream == null) {
        // getResourceAsStream returns null for a missing resource; fail with a clear
        // message instead of an NPE from Properties.load.
        throw new IOException("Failed to find resource /" + log4jFileName + " for class " + targetClass.getName());
      }
      Properties customProperties = new Properties();
      customProperties.load(fileInputStream);
      Properties originalProperties = new Properties();
      originalProperties.load(inputStream);
      // Custom settings win over the packaged defaults.
      for (Entry<Object, Object> entry : customProperties.entrySet()) {
        originalProperties.setProperty(entry.getKey().toString(), entry.getValue().toString());
      }
      LogManager.resetConfiguration();
      PropertyConfigurator.configure(originalProperties);
    }
  }

  /**
   * Change the log level of any class/package using this config, e.g.
   * log.levelOverride.map=org.apache.gobblin=debug,org.apache.kafka=trace
   */
  public static void setLogLevel(Collection<String> logClassesAndLevelString) {
    for (String logClassAndLevelString : logClassesAndLevelString) {
      String[] logClassAndLevel = logClassAndLevelString.split("=");
      if (logClassAndLevel.length != 2) {
        // Previously this only warned and then still indexed into the array, which threw
        // ArrayIndexOutOfBoundsException for an entry without an '='. Skip bad entries.
        LOG.warn("Invalid value for config " + LOG_LEVEL_OVERRIDE_MAP);
        continue;
      }
      Logger.getLogger(logClassAndLevel[0]).setLevel(Level.toLevel(logClassAndLevel[1]));
    }
  }
}
| 4,260 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/logs/LogCopier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.logs;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.io.Closer;
import com.google.common.io.Files;
import com.google.common.util.concurrent.AbstractScheduledService;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import lombok.Getter;
import lombok.Setter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.util.DatasetFilterUtils;
import org.apache.gobblin.util.FileListUtils;
import org.apache.gobblin.util.filesystem.FileSystemSupplier;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A utility service that periodically reads log files in a source log file directory for changes
* since the last reads and appends the changes to destination log files with the same names as
* the source log files in a destination log directory. The source and destination log files
* can be on different {@link FileSystem}s.
*
* <p>
* This class extends the {@link AbstractScheduledService} so it can be used with a
* {@link com.google.common.util.concurrent.ServiceManager} that manages the lifecycle of
* a {@link LogCopier}.
* </p>
*
* <p>
* This class is intended to be used in the following pattern:
*
* <pre>
* {@code
* LogCopier.Builder logCopierBuilder = LogCopier.newBuilder();
* LogCopier logCopier = logCopierBuilder
* .useSrcFileSystem(FileSystem.getLocal(new Configuration()))
* .useDestFileSystem(FileSystem.get(URI.create(destFsUri), new Configuration()))
* .readFrom(new Path(srcLogDir))
* .writeTo(new Path(destLogDir))
* .useSourceLogFileMonitorInterval(60)
* .useTimeUnit(TimeUnit.SECONDS)
* .build();
*
* ServiceManager serviceManager = new ServiceManager(Lists.newArrayList(logCopier));
* serviceManager.startAsync();
*
* // ...
* serviceManager.stopAsync().awaitStopped(60, TimeUnit.SECONDS);
* }
* </pre>
*
* Checkout the Javadoc of {@link LogCopier.Builder} to see the available options for customization.
* </p>
*
* @author Yinan Li
*/
public class LogCopier extends AbstractScheduledService {
private static final Logger LOGGER = LoggerFactory.getLogger(LogCopier.class);

// Defaults applied by Builder when the caller does not override them.
private static final long DEFAULT_SOURCE_LOG_FILE_MONITOR_INTERVAL = 120;
private static final TimeUnit DEFAULT_TIME_UNIT = TimeUnit.SECONDS;
private static final int DEFAULT_LINES_WRITTEN_BEFORE_FLUSH = 100;
private static final int DEFAULT_NUM_COPY_THREADS = 10;

// Not final: both can be refreshed from their suppliers when the needToUpdate* flags are set.
private FileSystem srcFs;
private FileSystem destFs;
private final List<Path> srcLogDirs;
private final Path destLogDir;

// How often (and in what unit) checkSrcLogFiles() runs; see scheduler().
private final long sourceLogFileMonitorInterval;
private final TimeUnit timeUnit;

// Optional factories used to (re)create the file systems; may be null when the
// FileSystem instances were supplied directly to the Builder.
private final FileSystemSupplier destFsSupplier;
private final FileSystemSupplier srcFsSupplier;

// Empty set means "accept every extension" (see shouldIncludeLogFile).
private final Set<String> logFileExtensions;

private final int numCopyThreads;
// Base name (without extension) of the actively-written log file; only copied at shutdown.
private final String currentLogFileName;
private final Optional<List<Pattern>> includingRegexPatterns;
private final Optional<List<Pattern>> excludingRegexPatterns;
// If present, prepended (with a '.') to every destination file name.
private final Optional<String> logFileNamePrefix;
private final int linesWrittenBeforeFlush;
// Pool running one LogCopyTask per new log file.
private final ExecutorService executorService;

// When set, the corresponding FileSystem is closed and re-obtained from its supplier at
// the end of the next checkSrcLogFiles() pass.
@Setter
private boolean needToUpdateDestFs;
@Setter
private boolean needToUpdateSrcFs;

// Names of source files already copied; pruned as source files disappear.
@Getter
private final Set<String> copiedFileNames = Sets.newConcurrentHashSet();
// Flipped to true during shutDown() so the current log file gets a final copy.
private boolean shouldCopyCurrentLogFile;
/**
 * Wires the copier from a {@link Builder}.
 *
 * <p>Each {@link FileSystem} may come either from a {@link FileSystemSupplier} (which
 * takes precedence when set, enabling later refresh via the needToUpdate* flags) or
 * directly from the builder; one of the two must be non-null on each side.
 *
 * @throws IOException if a supplier fails to create its {@link FileSystem}
 */
private LogCopier(Builder builder) throws IOException {
  this.destFsSupplier = builder.destFsSupplier;
  this.srcFsSupplier = builder.srcFsSupplier;
  // Supplier wins over a directly-provided FileSystem when both are set.
  this.srcFs = this.srcFsSupplier != null ? this.srcFsSupplier.getFileSystem() : builder.srcFs;
  Preconditions.checkArgument(this.srcFs != null, "srcFs or srcFsSupplier has not been set");
  this.destFs = this.destFsSupplier != null ? this.destFsSupplier.getFileSystem() : builder.destFs;
  Preconditions.checkArgument(this.destFs != null, "destFs or destFsSupplier has not been set");
  // NOTE: srcFs/destFs must be initialized above before the paths are qualified here.
  this.srcLogDirs = builder.srcLogDirs.stream().map(d -> this.srcFs.makeQualified(d)).collect(Collectors.toList());
  this.destLogDir = this.destFs.makeQualified(builder.destLogDir);
  this.sourceLogFileMonitorInterval = builder.sourceLogFileMonitorInterval;
  this.timeUnit = builder.timeUnit;
  this.logFileExtensions = builder.logFileExtensions;
  this.currentLogFileName = builder.currentLogFileName;
  // The current log file is only copied once shutDown() flips this flag.
  this.shouldCopyCurrentLogFile = false;
  this.needToUpdateDestFs = false;
  this.needToUpdateSrcFs = false;
  this.includingRegexPatterns = Optional.fromNullable(builder.includingRegexPatterns);
  this.excludingRegexPatterns = Optional.fromNullable(builder.excludingRegexPatterns);
  this.logFileNamePrefix = Optional.fromNullable(builder.logFileNamePrefix);
  this.linesWrittenBeforeFlush = builder.linesWrittenBeforeFlush;
  this.numCopyThreads = builder.numCopyThreads;
  this.executorService = Executors.newFixedThreadPool(numCopyThreads);
}
/**
 * Runs one final copy pass (including the current log file) and releases resources.
 */
@Override
protected void shutDown() throws Exception {
  try {
    // We need to copy the current log file as part of the shutdown sequence.
    shouldCopyCurrentLogFile = true;
    runOneIteration();
    // Close the FileSystem objects, since these were created with auto close disabled.
    LOGGER.debug("Closing FileSystem objects...");
    this.destFs.close();
    this.srcFs.close();
  } finally {
    // Previously the copy-task thread pool was never shut down, leaking its threads
    // past service shutdown.
    this.executorService.shutdown();
    super.shutDown();
  }
}
/** One scheduler tick: scan the source dirs and copy any new/eligible log files. */
@Override
protected void runOneIteration() throws IOException {
  checkSrcLogFiles();
}
/** Fixed-rate schedule: first run immediately, then every sourceLogFileMonitorInterval timeUnit. */
@Override
protected Scheduler scheduler() {
  return Scheduler.newFixedRateSchedule(0, this.sourceLogFileMonitorInterval, this.timeUnit);
}
/**
 * Decides whether a source log file should be copied on this pass.
 *
 * <p>The "current" (actively-written) log file is only eligible once
 * {@code shouldCopyCurrentLogFile} is set during shutdown; already-copied files are
 * skipped; otherwise the file's extension must be in the configured set, where an empty
 * set accepts every extension.
 */
private boolean shouldIncludeLogFile(FileStatus logFile) {
  String fileName = logFile.getPath().getName();
  if (this.currentLogFileName.equals(Files.getNameWithoutExtension(fileName))) {
    return this.shouldCopyCurrentLogFile;
  }
  if (this.copiedFileNames.contains(fileName)) {
    return false;
  }
  return this.logFileExtensions.isEmpty() || this.logFileExtensions.contains(Files.getFileExtension(fileName));
}
/**
 * Prune the set of copied files by removing entries whose source files have been deleted.
 * This keeps {@code copiedFileNames} from growing unboundedly, which matters when log
 * rotation with a bounded number of backups is enabled on the source dirs.
 *
 * @param srcLogFileNames names of all files currently present under the source log dirs
 */
@VisibleForTesting
void pruneCopiedFileNames(Set<String> srcLogFileNames) {
  // Collection#removeIf replaces the previous manual Iterator loop; it is safe on the
  // concurrent set produced by Sets.newConcurrentHashSet.
  this.copiedFileNames.removeIf(fileName -> !srcLogFileNames.contains(fileName));
}
/**
 * Perform a check on new source log files and submit copy tasks for new log files.
 *
 * <p>After all copy tasks complete, refreshes the source/destination {@link FileSystem}
 * instances if requested via the needToUpdate* flags, and prunes bookkeeping for files
 * that have disappeared from the source.
 */
@VisibleForTesting
void checkSrcLogFiles() throws IOException {
  Set<String> srcLogFileNames = new HashSet<>();
  Set<Path> newLogFiles = new HashSet<>();
  for (Path logDirPath : this.srcLogDirs) {
    // Iterate only the listing of the current directory. The previous implementation
    // accumulated all listings into one list and re-scanned the entire list for every
    // directory, doing quadratic work (the result sets merely masked the duplicates).
    for (FileStatus srcLogFile : FileListUtils.listFilesRecursively(this.srcFs, logDirPath)) {
      if (shouldIncludeLogFile(srcLogFile)) {
        newLogFiles.add(srcLogFile.getPath());
      }
      srcLogFileNames.add(srcLogFile.getPath().getName());
    }
  }
  if (newLogFiles.isEmpty()) {
    LOGGER.warn("No log file found under directories " + this.srcLogDirs);
    return;
  }
  // Schedule a copy task for each new log file.
  List<Future<?>> futures = new ArrayList<>();
  for (final Path srcLogFile : newLogFiles) {
    String destLogFileName =
        this.logFileNamePrefix.isPresent() ? this.logFileNamePrefix.get() + "." + srcLogFile.getName()
            : srcLogFile.getName();
    final Path destLogFile = new Path(this.destLogDir, destLogFileName);
    futures.add(this.executorService.submit(new LogCopyTask(srcLogFile, destLogFile)));
  }
  // Wait for copy tasks to finish.
  for (Future<?> future : futures) {
    try {
      future.get();
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers can observe the interruption. Passing the
      // exception as the last argument (instead of filling a "{}" placeholder with it)
      // lets SLF4J log the full stack trace.
      Thread.currentThread().interrupt();
      LOGGER.error("LogCopyTask was interrupted", e);
    } catch (ExecutionException e) {
      LOGGER.error("Failed LogCopyTask", e);
    }
  }
  if (needToUpdateDestFs) {
    if (destFsSupplier == null) {
      throw new IOException("Try to update dest fileSystem but destFsSupplier has not been set");
    }
    this.destFs.close();
    this.destFs = destFsSupplier.getFileSystem();
    LOGGER.info("Dest fs updated" + destFs.toString());
    needToUpdateDestFs = false;
  }
  if (needToUpdateSrcFs) {
    if (srcFsSupplier == null) {
      throw new IOException("Try to update source fileSystem but srcFsSupplier has not been set");
    }
    this.srcFs.close();
    this.srcFs = srcFsSupplier.getFileSystem();
    LOGGER.info("Src fs updated" + srcFs.toString());
    needToUpdateSrcFs = false;
  }
  pruneCopiedFileNames(srcLogFileNames);
}
  /**
   * Get a new {@link LogCopier.Builder} instance for building a {@link LogCopier}.
   * Each invocation returns a fresh, independent builder.
   *
   * @return a new {@link LogCopier.Builder} instance
   */
  public static Builder newBuilder() {
    return new Builder();
  }
/**
* A builder class for {@link LogCopier}.
*/
public static class Builder {
private static final Splitter COMMA_SPLITTER = Splitter.on(',').omitEmptyStrings().trimResults();
private FileSystem srcFs = null;
private List<Path> srcLogDirs;
private FileSystem destFs = null;
private Path destLogDir;
private FileSystemSupplier destFsSupplier = null;
private FileSystemSupplier srcFsSupplier = null;
private long sourceLogFileMonitorInterval = DEFAULT_SOURCE_LOG_FILE_MONITOR_INTERVAL;
private int numCopyThreads = DEFAULT_NUM_COPY_THREADS;
private TimeUnit timeUnit = DEFAULT_TIME_UNIT;
private Set<String> logFileExtensions;
private String currentLogFileName;
private List<Pattern> includingRegexPatterns;
private List<Pattern> excludingRegexPatterns;
private String logFileNamePrefix;
private int linesWrittenBeforeFlush = DEFAULT_LINES_WRITTEN_BEFORE_FLUSH;
/**
* Set the interval between two checks for the source log file monitor.
*
* @param sourceLogFileMonitorInterval the interval between two checks for the source log file monitor
* @return this {@link LogCopier.Builder} instance
*/
public Builder useSourceLogFileMonitorInterval(long sourceLogFileMonitorInterval) {
Preconditions.checkArgument(sourceLogFileMonitorInterval > 0,
"Source log file monitor interval must be positive");
this.sourceLogFileMonitorInterval = sourceLogFileMonitorInterval;
return this;
}
/**
* Set the {@link TimeUnit} used for the source log file monitor interval.
*
* @param timeUnit the {@link TimeUnit} used for the log file monitor interval
* @return this {@link LogCopier.Builder} instance
*/
public Builder useTimeUnit(TimeUnit timeUnit) {
Preconditions.checkNotNull(timeUnit);
this.timeUnit = timeUnit;
return this;
}
/**
* Set the {@link FileSystemSupplier} used for generating new Dest FileSystem later when token been updated.
*
* @param supplier the {@link FileSystemSupplier} used for generating new Dest FileSystem
* @return this {@link LogCopier.Builder} instance
*/
public Builder useDestFsSupplier(FileSystemSupplier supplier) {
Preconditions.checkNotNull(supplier);
this.destFsSupplier = supplier;
return this;
}
/**
* Set the {@link FileSystemSupplier} used for generating new source FileSystem later when token been updated.
*
* @param supplier the {@link FileSystemSupplier} used for generating new source FileSystem
* @return this {@link LogCopier.Builder} instance
*/
public Builder useSrcFsSupplier(FileSystemSupplier supplier) {
Preconditions.checkNotNull(supplier);
this.srcFsSupplier = supplier;
return this;
}
/**
* Set the set of acceptable log file extensions.
*
* @param logFileExtensions the set of acceptable log file extensions
* @return this {@link LogCopier.Builder} instance
*/
public Builder acceptsLogFileExtensions(Set<String> logFileExtensions) {
Preconditions.checkNotNull(logFileExtensions);
this.logFileExtensions = ImmutableSet.copyOf(logFileExtensions);
return this;
}
/**
* Set the regex patterns used to filter logs that should be copied.
*
* @param regexList a comma-separated list of regex patterns
* @return this {@link LogCopier.Builder} instance
*/
public Builder useIncludingRegexPatterns(String regexList) {
Preconditions.checkNotNull(regexList);
this.includingRegexPatterns = DatasetFilterUtils.getPatternsFromStrings(COMMA_SPLITTER.splitToList(regexList));
return this;
}
/**
* Set the regex patterns used to filter logs that should not be copied.
*
* @param regexList a comma-separated list of regex patterns
* @return this {@link LogCopier.Builder} instance
*/
public Builder useExcludingRegexPatterns(String regexList) {
Preconditions.checkNotNull(regexList);
this.excludingRegexPatterns = DatasetFilterUtils.getPatternsFromStrings(COMMA_SPLITTER.splitToList(regexList));
return this;
}
/**
* Set the source {@link FileSystem} for reading the source log file.
*
* @param srcFs the source {@link FileSystem} for reading the source log file
* @return this {@link LogCopier.Builder} instance
*/
public Builder useSrcFileSystem(FileSystem srcFs) {
Preconditions.checkNotNull(srcFs);
this.srcFs = srcFs;
return this;
}
/**
* Set the destination {@link FileSystem} for writing the destination log file.
*
* @param destFs the destination {@link FileSystem} for writing the destination log file
* @return this {@link LogCopier.Builder} instance
*/
public Builder useDestFileSystem(FileSystem destFs) {
Preconditions.checkNotNull(destFs);
this.destFs = destFs;
return this;
}
/**
* Set the path of the source log file directory to read from.
*
* @param srcLogDir the path of the source log file directory to read from
* @return this {@link LogCopier.Builder} instance
*/
public Builder readFrom(Path srcLogDir) {
Preconditions.checkNotNull(srcLogDir);
this.srcLogDirs = ImmutableList.of(srcLogDir);
return this;
}
/**
* Set the paths of the source log file directories to read from.
*
* @param srcLogDirs the paths of the source log file directories to read from
* @return this {@link LogCopier.Builder} instance
*/
public Builder readFrom(List<Path> srcLogDirs) {
Preconditions.checkNotNull(srcLogDirs);
this.srcLogDirs = srcLogDirs;
return this;
}
/**
* Set the path of the destination log file directory to write to.
*
* @param destLogDir the path of the destination log file directory to write to
* @return this {@link LogCopier.Builder} instance
*/
public Builder writeTo(Path destLogDir) {
Preconditions.checkNotNull(destLogDir);
this.destLogDir = destLogDir;
return this;
}
/**
* Set the log file name prefix at the destination.
*
* @param logFileNamePrefix the log file name prefix at the destination
* @return this {@link LogCopier.Builder} instance
*/
public Builder useLogFileNamePrefix(String logFileNamePrefix) {
Preconditions.checkArgument(!Strings.isNullOrEmpty(logFileNamePrefix),
"Invalid log file name prefix: " + logFileNamePrefix);
this.logFileNamePrefix = logFileNamePrefix;
return this;
}
/**
* Set the number of lines written before they are flushed to disk.
*
* @param linesWrittenBeforeFlush the number of lines written before they are flushed to disk
* @return this {@link LogCopier.Builder} instance
*/
public Builder useLinesWrittenBeforeFlush(int linesWrittenBeforeFlush) {
Preconditions.checkArgument(linesWrittenBeforeFlush > 0,
"The value specifying the lines to write before flush must be positive");
this.linesWrittenBeforeFlush = linesWrittenBeforeFlush;
return this;
}
/**
* Set the current log file name
*/
public Builder useCurrentLogFileName(String currentLogFileName) {
this.currentLogFileName = currentLogFileName;
return this;
}
/**
* Set the number of threads to use for copying container log files to dest FS.
* @param numCopyThreads
*/
public Builder useNumCopyThreads(int numCopyThreads) {
this.numCopyThreads = numCopyThreads;
return this;
}
/**
* Build a new {@link LogCopier} instance.
*
* @return a new {@link LogCopier} instance
*/
public LogCopier build() throws IOException {
return new LogCopier(this);
}
}
private class LogCopyTask implements Callable<Void> {
private final Path srcLogFile;
private final Path destLogFile;
public LogCopyTask(Path srcLogFile, Path destLogFile) {
this.srcLogFile = srcLogFile;
this.destLogFile = destLogFile;
}
@Override
public Void call() {
try {
copyChangesOfLogFile(LogCopier.this.srcFs.makeQualified(this.srcLogFile),
LogCopier.this.destFs.makeQualified(this.destLogFile));
} catch (IOException ioe) {
LOGGER.error(String.format("Failed while copying logs from %s to %s", this.srcLogFile, this.destLogFile), ioe);
}
return null;
}
/**
* Copy log files that have been rolled over.
*/
private void copyChangesOfLogFile(Path srcFile, Path destFile) throws IOException {
LOGGER.info("Copying changes from {} to {}", srcFile.toString(), destFile.toString());
if (!LogCopier.this.srcFs.exists(srcFile)) {
LOGGER.warn("Source log file not found: " + srcFile);
return;
}
FSDataInputStream fsDataInputStream = null;
try (Closer closer = Closer.create()) {
fsDataInputStream = closer.register(LogCopier.this.srcFs.open(srcFile));
BufferedReader srcLogFileReader = closer.register(
new BufferedReader(new InputStreamReader(fsDataInputStream, ConfigurationKeys.DEFAULT_CHARSET_ENCODING)));
FSDataOutputStream outputStream = LogCopier.this.destFs.create(destFile);
BufferedWriter destLogFileWriter = closer.register(
new BufferedWriter(new OutputStreamWriter(outputStream, ConfigurationKeys.DEFAULT_CHARSET_ENCODING)));
String line;
int linesProcessed = 0;
while (!Thread.currentThread().isInterrupted() && (line = srcLogFileReader.readLine()) != null) {
if (!shouldCopyLine(line)) {
continue;
}
destLogFileWriter.write(line);
destLogFileWriter.newLine();
linesProcessed++;
if (linesProcessed % LogCopier.this.linesWrittenBeforeFlush == 0) {
destLogFileWriter.flush();
}
}
//Add the copied file to the list of files already copied to the destination.
LogCopier.this.copiedFileNames.add(srcFile.getName());
}
}
/**
* Check if a log line should be copied.
*
* <p>
* A line should be copied if and only if all of the following conditions satisfy:
*
* <ul>
* <li>
* It doesn't match any of the excluding regex patterns. If there's no excluding regex patterns,
* this condition is considered satisfied.
* </li>
* <li>
* It matches at least one of the including regex patterns. If there's no including regex patterns,
* this condition is considered satisfied.
* </li>
* </ul>
* </p>
*/
private boolean shouldCopyLine(String line) {
boolean including =
!LogCopier.this.includingRegexPatterns.isPresent() || DatasetFilterUtils.stringInPatterns(line,
LogCopier.this.includingRegexPatterns.get());
boolean excluding = LogCopier.this.excludingRegexPatterns.isPresent() && DatasetFilterUtils.stringInPatterns(line,
LogCopier.this.excludingRegexPatterns.get());
return !excluding && including;
}
}
}
| 4,261 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/executors/ScalingThreadPoolExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.executors;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
 * A {@link java.util.concurrent.ThreadPoolExecutor} whose pool grows from a minimum up to a
 * maximum number of threads as work is submitted.
 */
public class ScalingThreadPoolExecutor extends ThreadPoolExecutor {

  /**
   * Creates a {@link ScalingThreadPoolExecutor} using the default thread factory.
   *
   * @param min Core thread pool size.
   * @param max Max number of threads allowed.
   * @param keepAliveTime Keep alive time for unused threads in milliseconds.
   * @return A {@link ScalingThreadPoolExecutor}.
   */
  public static ScalingThreadPoolExecutor newScalingThreadPool(int min, int max, long keepAliveTime) {
    return newScalingThreadPool(min, max, keepAliveTime, Executors.defaultThreadFactory());
  }

  /**
   * Creates a {@link ScalingThreadPoolExecutor} with the given thread factory.
   *
   * @param min Core thread pool size.
   * @param max Max number of threads allowed.
   * @param keepAliveTime Keep alive time for unused threads in milliseconds.
   * @param threadFactory thread factory to use.
   * @return A {@link ScalingThreadPoolExecutor}.
   */
  public static ScalingThreadPoolExecutor newScalingThreadPool(int min, int max, long keepAliveTime,
      ThreadFactory threadFactory) {
    ScalingQueue taskQueue = new ScalingQueue();
    ScalingThreadPoolExecutor pool =
        new ScalingThreadPoolExecutor(min, max, keepAliveTime, TimeUnit.MILLISECONDS, taskQueue, threadFactory);
    // Wire the rejection handler and hand the queue a reference to the pool; together these
    // let the queue coordinate with the executor (see ScalingQueue / ForceQueuePolicy).
    pool.setRejectedExecutionHandler(new ForceQueuePolicy());
    taskQueue.setThreadPoolExecutor(pool);
    return pool;
  }

  private ScalingThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
      BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory) {
    super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory);
  }
}
| 4,262 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/executors/MDCPropagatingRunnable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.executors;
import org.slf4j.MDC;
import java.util.Map;
/**
 * A {@link Runnable} wrapper that captures the SLF4J MDC context of the constructing thread
 * and installs it on the executing thread for the duration of {@link #run()}.
 */
public class MDCPropagatingRunnable implements Runnable {

  // Task to delegate to.
  private final Runnable delegate;
  // MDC context captured at construction time; may be null if none was set.
  private final Map<String, String> capturedContext;

  public MDCPropagatingRunnable(Runnable runnable) {
    this.delegate = runnable;
    this.capturedContext = MDC.getCopyOfContextMap();
  }

  @Override
  public void run() {
    // Remember the executing thread's own context so it can be restored afterwards.
    Map<String, String> previousContext = MDC.getCopyOfContextMap();
    if (this.capturedContext != null) {
      MDC.setContextMap(this.capturedContext);
    }
    try {
      this.delegate.run();
    } finally {
      if (previousContext == null) {
        MDC.clear();
      } else {
        MDC.setContextMap(previousContext);
      }
    }
  }
}
| 4,263 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/executors/MDCPropagatingExecutorService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.executors;
import com.google.common.util.concurrent.ForwardingListeningExecutorService;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
public class MDCPropagatingExecutorService extends ForwardingListeningExecutorService {
private ListeningExecutorService executorService;
public MDCPropagatingExecutorService(ExecutorService executorService) {
if (executorService instanceof ListeningExecutorService) {
this.executorService = (ListeningExecutorService)executorService;
} else {
this.executorService = MoreExecutors.listeningDecorator(executorService);
}
}
@Override
protected ListeningExecutorService delegate() {
return this.executorService;
}
@Override
public void execute(Runnable command) {
super.execute(new MDCPropagatingRunnable(command));
}
@Override
public <T> ListenableFuture<T> submit(Callable<T> task) {
return super.submit(new MDCPropagatingCallable<T>(task));
}
@Override
public ListenableFuture<?> submit(Runnable task) {
return super.submit(new MDCPropagatingRunnable(task));
}
@Override
public <T> ListenableFuture<T> submit(Runnable task, T result) {
return super.submit(new MDCPropagatingRunnable(task), result);
}
} | 4,264 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/executors/IteratorExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.executors;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Futures;
import org.apache.gobblin.util.Either;
import org.apache.gobblin.util.ExecutorsUtils;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
/**
* Executes tasks in an {@link Iterator}. Tasks need not be generated until they can be executed.
* @param <T>
*/
@Slf4j
public class IteratorExecutor<T> {

  // Completion service used to learn when submitted tasks finish.
  private final CompletionService<T> completionService;
  // Maximum number of tasks allowed in flight at once.
  private final int numThreads;
  // Fixed-size thread pool backing the completion service.
  private final ExecutorService executor;
  // Source of tasks; a task need not exist until it is about to be submitted.
  private final Iterator<Callable<T>> iterator;
  // Guards against executing the same instance twice (the executor is shut down after use).
  private boolean executed;

  public IteratorExecutor(Iterator<Callable<T>> runnableIterator, int numThreads, ThreadFactory threadFactory) {
    this.numThreads = numThreads;
    this.iterator = runnableIterator;
    this.executor = ExecutorsUtils.loggingDecorator(Executors.newFixedThreadPool(numThreads, threadFactory));
    this.completionService = new ExecutorCompletionService<>(this.executor);
    this.executed = false;
  }

  /**
   * Execute the tasks in the task {@link Iterator}. Blocks until all tasks are completed.
   *
   * <p>
   * Note: this method only guarantees tasks have finished, not that they have finished successfully. It is the caller's
   * responsibility to verify the returned futures are successful. Also see {@link #executeAndGetResults()} for different
   * semantics.
   * </p>
   *
   * @return a list of completed futures.
   * @throws InterruptedException
   */
  public List<Future<T>> execute() throws InterruptedException {
    List<Future<T>> futures = Lists.newArrayList();
    try {
      if (this.executed) {
        throw new RuntimeException(String.format("This %s has already been executed.", IteratorExecutor.class.getSimpleName()));
      }
      // Number of tasks submitted but not yet known to have completed.
      int activeTasks = 0;
      while (this.iterator.hasNext()) {
        try {
          futures.add(this.completionService.submit(this.iterator.next()));
          activeTasks++;
        } catch (Exception exception) {
          // if this.iterator.next fails, add an immediate fail future
          futures.add(Futures.<T>immediateFailedFuture(exception));
        }
        if (activeTasks == this.numThreads) {
          // All threads busy: wait for one completion before submitting the next task.
          this.completionService.take();
          activeTasks--;
        }
      }
      // Drain the remaining in-flight tasks.
      while (activeTasks > 0) {
        this.completionService.take();
        activeTasks--;
      }
    } finally {
      // Always shut the pool down and mark this instance used, even on interruption.
      ExecutorsUtils.shutdownExecutorService(this.executor, Optional.of(log), 10, TimeUnit.SECONDS);
      this.executed = true;
    }
    return futures;
  }

  /**
   * Execute the tasks in the task {@link Iterator}. Blocks until all tasks are completed. Gets the results of each
   * task, and for each task returns either its result or the thrown {@link ExecutionException}.
   *
   * @return a list containing for each task either its result or the {@link ExecutionException} thrown, in the same
   *          order as the input {@link Iterator}.
   * @throws InterruptedException
   */
  public List<Either<T, ExecutionException>> executeAndGetResults() throws InterruptedException {
    List<Either<T, ExecutionException>> results = Lists.newArrayList();
    List<Future<T>> futures = execute();
    for (Future<T> future : futures) {
      try {
        results.add(Either.<T, ExecutionException>left(future.get()));
      } catch (ExecutionException ee) {
        results.add(Either.<T, ExecutionException>right(ee));
      }
    }
    return results;
  }

  /**
   * Utility method that checks whether all tasks succeeded from the output of {@link #executeAndGetResults()}.
   * @return true if all tasks succeeded.
   */
  public static <T> boolean verifyAllSuccessful(List<Either<T, ExecutionException>> results) {
    // A result is a success iff it is the Left variant (a value, not an exception).
    return Iterables.all(results, new Predicate<Either<T, ExecutionException>>() {
      @Override
      public boolean apply(@Nullable Either<T, ExecutionException> input) {
        return input instanceof Either.Left;
      }
    });
  }

  /**
   * Log failures in the output of {@link #executeAndGetResults()}.
   * @param results output of {@link #executeAndGetResults()}
   * @param useLogger logger to log the messages into.
   * @param atMost will log at most this many errors.
   */
  public static <T> void logFailures(List<Either<T, ExecutionException>> results, Logger useLogger, int atMost) {
    // Fall back to this class's own logger when none is supplied.
    Logger actualLogger = useLogger == null ? log : useLogger;
    Iterator<Either<T, ExecutionException>> it = results.iterator();
    int printed = 0;
    while (it.hasNext()) {
      Either<T, ExecutionException> nextResult = it.next();
      if (nextResult instanceof Either.Right) {
        ExecutionException exc = ((Either.Right<T, ExecutionException>) nextResult).getRight();
        actualLogger.error("Iterator executor failure.", exc);
        printed++;
        if (printed >= atMost) {
          return;
        }
      }
    }
  }

  /**
   * Log failures in the output of {@link #executeAndGetResults()}, and also propagate exception to upper layer.
   * @param results output of {@link #executeAndGetResults()}
   * @param useLogger logger to log the messages into.
   * @param atMost will log at most this many errors.
   */
  public static <T> void logAndThrowFailures(List<Either<T, ExecutionException>> results, Logger useLogger, int atMost) {
    Logger actualLogger = useLogger == null ? log : useLogger;
    Iterator<Either<T, ExecutionException>> it = results.iterator();
    int printed = 0;
    ExecutionException exc = null;
    while (it.hasNext()) {
      Either<T, ExecutionException> nextResult = it.next();
      if (nextResult instanceof Either.Right) {
        exc = ((Either.Right<T, ExecutionException>) nextResult).getRight();
        actualLogger.error("Iterator executor failure.", exc);
        printed++;
        if (printed >= atMost) {
          break;
        }
      }
    }
    // Throw (wrapped) if any task failed; note only the most recently logged exception is
    // propagated, earlier ones are available in the log output above.
    if (printed > 0) {
      throw new RuntimeException(exc);
    }
  }
}
| 4,265 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/executors/MDCPropagatingScheduledExecutorService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.executors;
import com.google.common.base.Throwables;
import com.google.common.util.concurrent.AbstractFuture;
import com.google.common.util.concurrent.ForwardingListenableFuture;
import com.google.common.util.concurrent.ForwardingListeningExecutorService;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListenableFutureTask;
import com.google.common.util.concurrent.ListenableScheduledFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.ListeningScheduledExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.concurrent.Callable;
import java.util.concurrent.Delayed;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import static com.google.common.base.Preconditions.checkNotNull;
/**
 * A {@link ListeningScheduledExecutorService} decorator that captures the submitter's SLF4J MDC
 * context and installs it on the threads executing submitted or scheduled tasks.
 */
public class MDCPropagatingScheduledExecutorService extends ForwardingListeningExecutorService implements ListeningScheduledExecutorService {

  // The wrapped executor, decorated into a ListeningScheduledExecutorService if necessary.
  private final ListeningScheduledExecutorService executorService;

  public MDCPropagatingScheduledExecutorService(ScheduledExecutorService executorService) {
    if (executorService instanceof ListeningScheduledExecutorService) {
      this.executorService = (ListeningScheduledExecutorService)executorService;
    } else {
      this.executorService = MoreExecutors.listeningDecorator(executorService);
    }
  }

  @Override
  protected ListeningExecutorService delegate() {
    return executorService;
  }

  // Each submit/execute override wraps the task so the caller's MDC context is active while
  // the task runs.

  @Override
  public void execute(Runnable command) {
    super.execute(new MDCPropagatingRunnable(command));
  }

  @Override
  public <T> ListenableFuture<T> submit(Callable<T> task) {
    return super.submit(new MDCPropagatingCallable<T>(task));
  }

  @Override
  public ListenableFuture<?> submit(Runnable task) {
    return super.submit(new MDCPropagatingRunnable(task));
  }

  @Override
  public <T> ListenableFuture<T> submit(Runnable task, T result) {
    return super.submit(new MDCPropagatingRunnable(task), result);
  }

  @Override
  public ListenableScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
    // Wrap in a ListenableFutureTask (so listeners can attach), schedule that task on the
    // underlying executor, and bridge the listenable and scheduled futures together.
    ListenableFutureTask<Void> task = ListenableFutureTask.create(new MDCPropagatingRunnable(command), null);
    ScheduledFuture<?> scheduled = executorService.schedule(task, delay, unit);
    return new ListenableScheduledTask<>(task, scheduled);
  }

  @Override
  public <V> ListenableScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) {
    ListenableFutureTask<V> task = ListenableFutureTask.create(new MDCPropagatingCallable<>(callable));
    ScheduledFuture<?> scheduled = executorService.schedule(task, delay, unit);
    return new ListenableScheduledTask<>(task, scheduled);
  }

  @Override
  public ListenableScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) {
    // Periodic tasks never complete normally, so wrap in a future that can only fail.
    NeverSuccessfulListenableFutureTask task = new NeverSuccessfulListenableFutureTask(new MDCPropagatingRunnable(command));
    ScheduledFuture<?> scheduled = executorService.scheduleAtFixedRate(task, initialDelay, period, unit);
    return new ListenableScheduledTask<>(task, scheduled);
  }

  @Override
  public ListenableScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit) {
    NeverSuccessfulListenableFutureTask task = new NeverSuccessfulListenableFutureTask(new MDCPropagatingRunnable(command));
    ScheduledFuture<?> scheduled = executorService.scheduleWithFixedDelay(task, initialDelay, delay, unit);
    return new ListenableScheduledTask<>(task, scheduled);
  }

  /**
   * Bridges a {@link ListenableFuture} and the {@link ScheduledFuture} returned by the underlying
   * executor: listener support comes from the former; delay, ordering and cancellation are
   * delegated to the latter.
   */
  @SuppressFBWarnings("EQ_COMPARETO_USE_OBJECT_EQUALS")
  private static final class ListenableScheduledTask<V> extends ForwardingListenableFuture.SimpleForwardingListenableFuture<V> implements ListenableScheduledFuture<V> {

    private final ScheduledFuture<?> scheduledDelegate;

    public ListenableScheduledTask(ListenableFuture<V> listenableDelegate, ScheduledFuture<?> scheduledDelegate) {
      super(listenableDelegate);
      this.scheduledDelegate = scheduledDelegate;
    }

    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
      // Propagate cancellation to the scheduled future only if the listenable side accepted it.
      boolean cancelled = super.cancel(mayInterruptIfRunning);
      if (cancelled) {
        scheduledDelegate.cancel(mayInterruptIfRunning);
      }
      return cancelled;
    }

    @Override
    public long getDelay(TimeUnit unit) {
      return scheduledDelegate.getDelay(unit);
    }

    @Override
    public int compareTo(Delayed other) {
      return scheduledDelegate.compareTo(other);
    }
  }

  /**
   * Wraps a periodic task: its future never completes successfully (periodic tasks do not finish
   * normally) but is failed with the thrown error if a run of the task throws.
   */
  private static final class NeverSuccessfulListenableFutureTask extends AbstractFuture<Void> implements Runnable {

    private final Runnable delegate;

    public NeverSuccessfulListenableFutureTask(Runnable delegate) {
      this.delegate = checkNotNull(delegate);
    }

    @Override public void run() {
      try {
        delegate.run();
      } catch (Throwable t) {
        // Record the failure on the future, then rethrow so the scheduler also observes it.
        setException(t);
        throw Throwables.propagate(t);
      }
    }
  }
}
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/executors/MDCPropagatingCallable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.executors;
import org.slf4j.MDC;
import java.util.Map;
import java.util.concurrent.Callable;
/**
 * A {@link Callable} wrapper that captures the SLF4J MDC context of the constructing thread
 * and installs it on the executing thread for the duration of {@link #call()}.
 */
public class MDCPropagatingCallable<T> implements Callable<T> {

  // Task to delegate to.
  private final Callable<T> delegate;
  // MDC context captured at construction time; may be null if none was set.
  private final Map<String, String> capturedContext;

  public MDCPropagatingCallable(Callable<T> callable) {
    this.delegate = callable;
    this.capturedContext = MDC.getCopyOfContextMap();
  }

  @Override
  public T call() throws Exception {
    // Remember the executing thread's own context so it can be restored afterwards.
    Map<String, String> previousContext = MDC.getCopyOfContextMap();
    if (this.capturedContext != null) {
      MDC.setContextMap(this.capturedContext);
    }
    try {
      return this.delegate.call();
    } finally {
      if (previousContext == null) {
        MDC.clear();
      } else {
        MDC.setContextMap(previousContext);
      }
    }
  }
}
| 4,267 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/executors/ScalingQueue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
/**
* Implementation of {@link LinkedBlockingQueue} that is aware of a {@link ThreadPoolExecutor} and rejects insertions
* if there are no threads available. Used for {@link ScalingThreadPoolExecutor}.
*
* <p>
* Although this class is {@link java.io.Serializable} because it inherits from {@link LinkedBlockingQueue}, it
* is not intended to be serialized (e.g. executor is transient, so will not deserialize correctly).
* </p>
*/
/**
 * A {@link LinkedBlockingQueue} that cooperates with a {@link ThreadPoolExecutor}: an insertion
 * is refused whenever every pool thread is already accounted for, which pushes the executor to
 * grow the pool instead of queueing. Used for {@link ScalingThreadPoolExecutor}.
 *
 * <p>
 * Although this class is {@link java.io.Serializable} (inherited from {@link LinkedBlockingQueue}),
 * instances are not meant to be serialized: the executor reference is transient and would not
 * survive a round trip.
 * </p>
 */
class ScalingQueue extends LinkedBlockingQueue<Runnable> {
  private static final long serialVersionUID = -4522307109241425248L;

  /** The executor whose thread availability gates insertions into this queue. */
  private transient ThreadPoolExecutor executor;

  /** Creates a queue with a capacity of {@link Integer#MAX_VALUE}. */
  public ScalingQueue() {
    super();
  }

  /**
   * Creates a queue with the given (fixed) capacity.
   * @param capacity the capacity of this queue.
   */
  public ScalingQueue(int capacity) {
    super(capacity);
  }

  /** Associates this queue with the executor whose pool size it consults. */
  public synchronized void setThreadPoolExecutor(ThreadPoolExecutor executor) {
    this.executor = executor;
  }

  /**
   * Appends the runnable only when at least one pool thread is free to pick it up
   * (i.e. busy threads plus already-queued tasks is below the current pool size);
   * otherwise the offer is rejected.
   *
   * @param runnable the element to add.
   * @return true if the element was enqueued, else false
   * @see ThreadPoolExecutor#execute(Runnable)
   */
  @Override
  public synchronized boolean offer(Runnable runnable) {
    int occupiedSlots = this.executor.getActiveCount() + super.size();
    if (occupiedSlots >= this.executor.getPoolSize()) {
      return false;
    }
    return super.offer(runnable);
  }
}
| 4,268 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/executors/ForceQueuePolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
/**
* {@link java.util.concurrent.RejectedExecutionHandler} that just re-attempts to put into the queue.
*/
/**
 * {@link java.util.concurrent.RejectedExecutionHandler} that re-attempts to put the rejected
 * task into the executor's queue, blocking until the queue has capacity.
 */
public class ForceQueuePolicy implements RejectedExecutionHandler {
  /**
   * Puts the rejected runnable back onto the executor's work queue.
   *
   * @throws RejectedExecutionException if this thread is interrupted while waiting for queue
   *         capacity; the thread's interrupt status is restored before the exception is thrown
   */
  @Override
  public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
    try {
      // put() blocks when the queue is bounded and currently full.
      executor.getQueue().put(r);
    } catch (InterruptedException e) {
      // Restore the interrupt status so callers further up the stack can observe it
      // (the original code silently swallowed the interrupt flag).
      Thread.currentThread().interrupt();
      throw new RejectedExecutionException(e);
    }
  }
}
| 4,269 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/http/HttpLimiterKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.http;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
/**
* {@link HttpLimiterKey} used for http client throttling.
*/
/**
 * {@link HttpLimiterKey} used for http client throttling.
 */
public class HttpLimiterKey extends SharedLimiterKey {
  public HttpLimiterKey(String resourceId) {
    super(resourceId);
  }

  /**
   * Two keys are equal when they are of the same runtime class and equal according to
   * {@link SharedLimiterKey#equals(Object)}.
   */
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    return o != null && getClass() == o.getClass() && super.equals(o);
  }

  @Override
  public int hashCode() {
    return super.hashCode();
  }
}
| 4,270 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/hadoop/GobblinSequenceFileReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.hadoop;
import java.io.IOException;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
/**
* Override the {@link SequenceFile.Reader} mainly to
* override the {@link SequenceFile.Reader {@link #getValueClassName()}} so that
* we can handle the package name issue properly.
*/
@Slf4j
public class GobblinSequenceFileReader extends SequenceFile.Reader {
public GobblinSequenceFileReader(FileSystem fs, Path file,
Configuration conf) throws IOException {
super(fs, file, conf);
}
/** Returns the name of the value class. */
public String getValueClassName() {
if (super.getValueClassName().startsWith("gobblin.")) {
log.info("[We have] " + super.getValueClassName());
return "org.apache." + super.getValueClassName();
}
return super.getValueClassName();
}
}
| 4,271 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/hadoop/TokenUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.hadoop;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.gobblin.configuration.State;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Master;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.thrift.TException;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.regex.Pattern;
/**
* A utility class for obtain Hadoop tokens and Hive metastore tokens for Azkaban jobs.
*
* <p>
* This class is compatible with Hadoop 2.
* </p>
*/
@Slf4j
public class TokenUtils {
  private static final String USER_TO_PROXY = "tokens.user.to.proxy";
  private static final String KEYTAB_USER = "keytab.user";
  private static final String KEYTAB_LOCATION = "keytab.location";
  private static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication";
  public static final String OTHER_NAMENODES = "other_namenodes";
  public static final String TOKEN_RENEWER = "token_renewer";
  private static final String KERBEROS = "kerberos";
  private static final String YARN_RESOURCEMANAGER_PRINCIPAL = "yarn.resourcemanager.principal";
  private static final String YARN_RESOURCEMANAGER_ADDRESS = "yarn.resourcemanager.address";
  private static final String MAPRED_JOB_TRACKER = "mapred.job.tracker";
  private static final String MAPREDUCE_JOBTRACKER_ADDRESS = "mapreduce.jobtracker.address";
  // Matches fully-qualified kerberos principals of the form "user/host@REALM".
  private static final Pattern KEYTAB_USER_PATTERN = Pattern.compile(".*\\/.*@.*");
  private static final String KERBEROS_REALM = "kerberos.realm";

  /**
   * the key that will be used to set proper signature for each of the hcat token when multiple hcat
   * tokens are required to be fetched.
   */
  private static final String HIVE_TOKEN_SIGNATURE_KEY = "hive.metastore.token.signature";

  /**
   * User can specify the hcat location that they used specifically. It could contains addtional hcat location,
   * comma-separated.
   */
  private static final String USER_DEFINED_HIVE_LOCATIONS = "user.defined.hcatLocation";

  /**
   * Get Hadoop tokens (tokens for job history server, job tracker, hive and HDFS) using Kerberos keytab,
   * on behalf on a proxy user, embed tokens into a {@link UserGroupInformation} as returned result, persist in-memory
   * credentials if tokenFile specified
   *
   * Note that when a super-user is fetching tokens for other users,
   * {@link #fetchHcatToken(String, HiveConf, String, IMetaStoreClient)} getDelegationToken} explicitly
   * contains a string parameter indicating proxy user, while other hadoop services require impersonation first.
   *
   * @param state A {@link State} object that should contain properties.
   * @param tokenFile If present, the file will store materialized credentials.
   * @param ugi The {@link UserGroupInformation} that used to impersonate into the proxy user by a "doAs block".
   * @param targetUser The user to be impersonated as, for fetching hadoop tokens.
   * @return A {@link UserGroupInformation} containing negotiated credentials.
   */
  public static UserGroupInformation getHadoopAndHiveTokensForProxyUser(final State state, Optional<File> tokenFile,
      UserGroupInformation ugi, IMetaStoreClient client, String targetUser) throws IOException, InterruptedException {
    final Credentials cred = new Credentials();
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        getHadoopTokens(state, Optional.absent(), cred);
        return null;
      }
    });
    ugi.getCredentials().addAll(cred);
    // Will add hive tokens into ugi in this method.
    getHiveToken(state, client, cred, targetUser, ugi);
    if (tokenFile.isPresent()) {
      persistTokens(cred, tokenFile.get());
    }
    // at this point, tokens in ugi can be more than that in Credential object,
    // since hive token is not put in Credential object.
    return ugi;
  }

  /**
   * Log in from the configured Kerberos keytab and fetch HDFS delegation tokens for the local
   * namenode plus any {@link #OTHER_NAMENODES}, using the supplied renewer. Tokens are added to
   * {@code cred} and optionally persisted to {@code tokenFile}.
   */
  public static void getHadoopFSTokens(final State state, Optional<File> tokenFile, final Credentials cred, final String renewer)
      throws IOException, InterruptedException {
    Preconditions.checkArgument(state.contains(KEYTAB_USER), "Missing required property " + KEYTAB_USER);
    Preconditions.checkArgument(state.contains(KEYTAB_LOCATION), "Missing required property " + KEYTAB_LOCATION);
    Configuration configuration = new Configuration();
    configuration.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS);
    UserGroupInformation.setConfiguration(configuration);
    UserGroupInformation.loginUserFromKeytab(obtainKerberosPrincipal(state), state.getProp(KEYTAB_LOCATION));
    final Optional<String> userToProxy = Strings.isNullOrEmpty(state.getProp(USER_TO_PROXY)) ? Optional.<String>absent()
        : Optional.fromNullable(state.getProp(USER_TO_PROXY));
    final Configuration conf = new Configuration();
    log.info("Getting tokens for userToProxy " + userToProxy);
    List<String> remoteFSURIList = new ArrayList<>();
    if (state.contains(OTHER_NAMENODES)) {
      remoteFSURIList = state.getPropAsList(OTHER_NAMENODES);
    }
    getAllFSTokens(conf, cred, renewer, userToProxy, remoteFSURIList);
    if (tokenFile.isPresent()) {
      persistTokens(cred, tokenFile.get());
    }
  }

  /**
   * Get Hadoop tokens (tokens for job history server, job tracker and HDFS) using Kerberos keytab.
   *
   * @param state A {@link State} object that should contain property {@link #USER_TO_PROXY},
   * {@link #KEYTAB_USER} and {@link #KEYTAB_LOCATION}. To obtain tokens for
   * other namenodes, use property {@link #OTHER_NAMENODES} with comma separated HDFS URIs.
   * @param tokenFile If present, the file will store materialized credentials.
   * @param cred A im-memory representation of credentials.
   */
  public static void getHadoopTokens(final State state, Optional<File> tokenFile, final Credentials cred)
      throws IOException, InterruptedException {
    Preconditions.checkArgument(state.contains(KEYTAB_USER), "Missing required property " + KEYTAB_USER);
    Preconditions.checkArgument(state.contains(KEYTAB_LOCATION), "Missing required property " + KEYTAB_LOCATION);
    Configuration configuration = new Configuration();
    configuration.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS);
    UserGroupInformation.setConfiguration(configuration);
    UserGroupInformation.loginUserFromKeytab(obtainKerberosPrincipal(state), state.getProp(KEYTAB_LOCATION));
    final Optional<String> userToProxy = Strings.isNullOrEmpty(state.getProp(USER_TO_PROXY)) ? Optional.<String>absent()
        : Optional.fromNullable(state.getProp(USER_TO_PROXY));
    final Configuration conf = new Configuration();
    List<String> remoteFSURIList = new ArrayList<>();
    if (state.contains(OTHER_NAMENODES)) {
      remoteFSURIList = state.getPropAsList(OTHER_NAMENODES);
    }
    String renewer = state.getProp(TOKEN_RENEWER);
    log.info("Getting tokens for {}, using renewer: {}, including remote FS: {}", userToProxy, renewer, remoteFSURIList.toString());
    getJhToken(conf, cred);
    getJtTokens(conf, cred, userToProxy, state);
    getAllFSTokens(conf, cred, renewer, userToProxy, remoteFSURIList);
    if (tokenFile.isPresent()) {
      persistTokens(cred, tokenFile.get());
    }
  }

  /**
   * Obtain kerberos principal in a dynamic way, where the instance's value is determined by the hostname of the machine
   * that the job is currently running on.
   * It will be invoked when {@link #KEYTAB_USER} is not following pattern specified in {@link #KEYTAB_USER_PATTERN}.
   * @throws UnknownHostException
   */
  public static String obtainKerberosPrincipal(final State state) throws UnknownHostException {
    if (!state.getProp(KEYTAB_USER).matches(KEYTAB_USER_PATTERN.pattern())) {
      Preconditions.checkArgument(state.contains(KERBEROS_REALM));
      return state.getProp(KEYTAB_USER) + "/" + InetAddress.getLocalHost().getCanonicalHostName() + "@" + state.getProp(
          KERBEROS_REALM);
    } else {
      return state.getProp(KEYTAB_USER);
    }
  }

  /**
   * Fetch Hive metastore delegation tokens (for the default metastore plus any
   * {@link #USER_DEFINED_HIVE_LOCATIONS}) and add them to both {@code cred} and {@code ugi}.
   *
   * @param userToProxy The user that hiveClient is impersonating as to fetch the delegation tokens.
   * @param ugi The {@link UserGroupInformation} that to be added with negotiated credentials.
   */
  public static void getHiveToken(final State state, IMetaStoreClient hiveClient, Credentials cred,
      final String userToProxy, UserGroupInformation ugi) {
    try {
      // Fetch the delegation token with "service" field overwritten with the metastore.uri configuration.
      // org.apache.gobblin.hive.HiveMetaStoreClientFactory.getHiveConf(com.google.common.base.Optional<java.lang.String>)
      // sets the signature field to the same value to retrieve the token correctly.
      HiveConf hiveConf = new HiveConf();
      Token<DelegationTokenIdentifier> hcatToken =
          fetchHcatToken(userToProxy, hiveConf, hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname), hiveClient);
      cred.addToken(hcatToken.getService(), hcatToken);
      ugi.addToken(hcatToken);
      // Fetch extra Hcat location user specified.
      final List<String> extraHcatLocations =
          state.contains(USER_DEFINED_HIVE_LOCATIONS) ? state.getPropAsList(USER_DEFINED_HIVE_LOCATIONS)
              : Collections.EMPTY_LIST;
      if (!extraHcatLocations.isEmpty()) {
        log.info("Need to fetch extra metaStore tokens from hive.");
        // start to process the user inputs.
        for (final String thriftUrl : extraHcatLocations) {
          log.info("Fetching metaStore token from : " + thriftUrl);
          hiveConf = new HiveConf();
          hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, thriftUrl);
          hcatToken = fetchHcatToken(userToProxy, hiveConf, thriftUrl, hiveClient);
          cred.addToken(hcatToken.getService(), hcatToken);
          ugi.addToken(hcatToken);
          log.info("Successfully fetched token for:" + thriftUrl);
        }
      }
    } catch (final Throwable t) {
      final String message = "Failed to get hive metastore token." + t.getMessage() + t.getCause();
      log.error(message, t);
      // Propagate the original throwable as the cause so callers keep the full stack trace
      // (previously the cause was dropped and only the flattened message survived).
      throw new RuntimeException(message, t);
    }
  }

  /**
   * function to fetch hcat token as per the specified hive configuration and then store the token
   * in to the credential store specified .
   *
   * @param userToProxy String value indicating the name of the user the token will be fetched for.
   * @param hiveConf the configuration based off which the hive client will be initialized.
   */
  private static Token<DelegationTokenIdentifier> fetchHcatToken(final String userToProxy, final HiveConf hiveConf,
      final String tokenSignatureOverwrite, final IMetaStoreClient hiveClient)
      throws IOException, TException, InterruptedException {
    log.info(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname + ": " + hiveConf.get(
        HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname));
    log.info(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname + ": " + hiveConf.get(
        HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname));
    final Token<DelegationTokenIdentifier> hcatToken = new Token<>();
    hcatToken.decodeFromUrlString(
        hiveClient.getDelegationToken(userToProxy, UserGroupInformation.getLoginUser().getShortUserName()));
    // overwrite the value of the service property of the token if the signature
    // override is specified.
    // If the service field is set, do not overwrite that
    if (hcatToken.getService().getLength() <= 0 && tokenSignatureOverwrite != null
        && tokenSignatureOverwrite.trim().length() > 0) {
      hcatToken.setService(new Text(tokenSignatureOverwrite.trim().toLowerCase()));
      log.info(HIVE_TOKEN_SIGNATURE_KEY + ":" + tokenSignatureOverwrite);
    }
    log.info("Created hive metastore token for user:" + userToProxy + " with kind[" + hcatToken.getKind() + "]"
        + " and service[" + hcatToken.getService() + "]");
    return hcatToken;
  }

  /** Fetches a delegation token from the MapReduce job history server and adds it to {@code cred}. */
  private static void getJhToken(Configuration conf, Credentials cred) throws IOException {
    YarnRPC rpc = YarnRPC.create(conf);
    final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);
    log.debug("Connecting to HistoryServer at: " + serviceAddr);
    HSClientProtocol hsProxy =
        (HSClientProtocol) rpc.getProxy(HSClientProtocol.class, NetUtils.createSocketAddr(serviceAddr), conf);
    log.info("Pre-fetching JH token from job history server");
    Token<?> jhToken = null;
    try {
      jhToken = getDelegationTokenFromHS(hsProxy, conf);
    } catch (Exception exc) {
      throw new IOException("Failed to fetch JH token.", exc);
    }
    if (jhToken == null) {
      log.error("getDelegationTokenFromHS() returned null");
      throw new IOException("Unable to fetch JH token.");
    }
    log.info("Created JH token: " + jhToken.toString());
    log.info("Token kind: " + jhToken.getKind());
    log.info("Token id: " + Arrays.toString(jhToken.getIdentifier()));
    log.info("Token service: " + jhToken.getService());
    cred.addToken(jhToken.getService(), jhToken);
  }

  /**
   * Fetches a JobTracker delegation token, impersonating {@code userToProxy} when present.
   */
  private static void getJtTokens(final Configuration conf, final Credentials cred, final Optional<String> userToProxy,
      final State state) throws IOException, InterruptedException {
    if (userToProxy.isPresent()) {
      UserGroupInformation.createProxyUser(userToProxy.get(), UserGroupInformation.getLoginUser())
          .doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
              getJtTokensImpl(state, conf, cred);
              return null;
            }
          });
    } else {
      getJtTokensImpl(state, conf, cred);
    }
  }

  /** Fetches the JobTracker delegation token as the current UGI and adds it to {@code cred}. */
  private static void getJtTokensImpl(final State state, final Configuration conf, final Credentials cred)
      throws IOException {
    try {
      JobConf jobConf = new JobConf();
      JobClient jobClient = new JobClient(jobConf);
      log.info("Pre-fetching JT token from JobTracker");
      Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(getMRTokenRenewerInternal(jobConf));
      if (mrdt == null) {
        log.error("Failed to fetch JT token");
        throw new IOException("Failed to fetch JT token.");
      }
      log.info("Created JT token: " + mrdt.toString());
      log.info("Token kind: " + mrdt.getKind());
      log.info("Token id: " + Arrays.toString(mrdt.getIdentifier()));
      log.info("Token service: " + mrdt.getService());
      cred.addToken(mrdt.getService(), mrdt);
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  /**
   * Fetches delegation tokens for the local FS and every URI in {@code remoteFSURIList},
   * impersonating {@code userToProxy} when present.
   */
  public static void getAllFSTokens(final Configuration conf, final Credentials cred, final String renewer,
      final Optional<String> userToProxy, final List<String> remoteFSURIList) throws IOException, InterruptedException {
    if (userToProxy.isPresent()) {
      UserGroupInformation.createProxyUser(userToProxy.get(), UserGroupInformation.getLoginUser())
          .doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
              getAllFSTokensImpl(conf, cred, renewer, remoteFSURIList);
              return null;
            }
          });
    } else {
      getAllFSTokensImpl(conf, cred, renewer, remoteFSURIList);
    }
  }

  /**
   * Best-effort token fetch for the local and remote filesystems: an {@link IOException} is
   * logged rather than rethrown, so token acquisition failures do not abort the caller.
   */
  public static void getAllFSTokensImpl(Configuration conf, Credentials cred, String renewer, List<String> remoteFSURIList) {
    try {
      // Handles token for local namenode
      getLocalFSToken(conf, cred, renewer);
      // Handle token for remote namenodes if any
      getRemoteFSTokenFromURI(conf, cred, renewer, remoteFSURIList);
      log.debug("All credential tokens: " + cred.getAllTokens());
    } catch (IOException e) {
      log.error("Error getting or creating HDFS token with renewer: " + renewer, e);
    }
  }

  /**
   * Adds delegation tokens for the default filesystem to {@code cred}. When no renewer is
   * given, the MR token renewer derived from the job configuration is used.
   */
  public static void getLocalFSToken(Configuration conf, Credentials cred, String renewer) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (StringUtils.isEmpty(renewer)) {
      renewer = getMRTokenRenewerInternal(new JobConf()).toString();
      log.info("No renewer specified for FS: {}, taking default renewer: {}", fs.getUri(), renewer);
    }
    log.debug("Getting HDFS token for" + fs.getUri() + " with renewer: " + renewer);
    Token<?>[] fsTokens = fs.addDelegationTokens(renewer, cred);
    if (fsTokens != null) {
      for (Token<?> token : fsTokens) {
        log.info("FS Uri: " + fs.getUri() + " token: " + token);
      }
    }
  }

  /**
   * Adds delegation tokens for each remote namenode URI to {@code cred}. With an empty renewer,
   * delegates to {@link TokenCache#obtainTokensForNamenodes}; otherwise fetches per-filesystem
   * tokens with the explicit renewer.
   */
  public static void getRemoteFSTokenFromURI(Configuration conf, Credentials cred, String renewer, List<String> remoteNamenodesList)
      throws IOException {
    if (remoteNamenodesList == null || remoteNamenodesList.size() == 0) {
      log.debug("no remote namenode URI specified, not getting any tokens for remote namenodes: " + remoteNamenodesList);
      return;
    }
    log.debug("Getting tokens for remote namenodes: " + remoteNamenodesList);
    Path[] ps = new Path[remoteNamenodesList.size()];
    for (int i = 0; i < ps.length; i++) {
      ps[i] = new Path(remoteNamenodesList.get(i).trim());
    }
    if (StringUtils.isEmpty(renewer)) {
      TokenCache.obtainTokensForNamenodes(cred, ps, conf);
    } else {
      for (Path p : ps) {
        FileSystem otherNameNodeFS = p.getFileSystem(conf);
        final Token<?>[] tokens = otherNameNodeFS.addDelegationTokens(renewer, cred);
        if (tokens != null) {
          for (Token<?> token : tokens) {
            log.info("Got dt token for " + otherNameNodeFS.getUri() + "; " + token);
          }
        }
      }
    }
    log.info("Successfully fetched tokens for: " + remoteNamenodesList);
  }

  /** Writes the credentials to {@code tokenFile} in Hadoop's token-storage format. */
  private static void persistTokens(Credentials cred, File tokenFile) throws IOException {
    try (FileOutputStream fos = new FileOutputStream(tokenFile); DataOutputStream dos = new DataOutputStream(fos)) {
      cred.writeTokenStorageToStream(dos);
    }
    log.info("Tokens loaded in " + tokenFile.getAbsolutePath());
  }

  /** Requests a delegation token from the history server proxy, renewable by the RM principal. */
  private static Token<?> getDelegationTokenFromHS(HSClientProtocol hsProxy, Configuration conf) throws IOException {
    GetDelegationTokenRequest request =
        RecordFactoryProvider.getRecordFactory(null).newRecordInstance(GetDelegationTokenRequest.class);
    request.setRenewer(Master.getMasterPrincipal(conf));
    org.apache.hadoop.yarn.api.records.Token mrDelegationToken;
    mrDelegationToken = hsProxy.getDelegationToken(request).getDelegationToken();
    return ConverterUtils.convertFromYarn(mrDelegationToken, hsProxy.getConnectAddress());
  }

  /**
   * Derives the token renewer principal from the RM/JobTracker configuration; falls back to a
   * placeholder renewer when no service principal is configured (non-secure clusters).
   */
  public static Text getMRTokenRenewerInternal(JobConf jobConf) throws IOException {
    String servicePrincipal = jobConf.get(YARN_RESOURCEMANAGER_PRINCIPAL, jobConf.get(JTConfig.JT_USER_NAME));
    Text renewer;
    if (servicePrincipal != null) {
      String target = jobConf.get(YARN_RESOURCEMANAGER_ADDRESS, jobConf.get(MAPREDUCE_JOBTRACKER_ADDRESS));
      if (target == null) {
        target = jobConf.get(MAPRED_JOB_TRACKER);
      }
      String addr = NetUtils.createSocketAddr(target).getHostName();
      renewer = new Text(SecurityUtil.getServerPrincipal(servicePrincipal, addr));
    } else {
      // No security
      renewer = new Text("azkaban mr tokens");
    }
    return renewer;
  }
}
| 4,272 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/concurrent/TaskScheduler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.concurrent;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Iterables;
import lombok.Synchronized;
/**
* Base class for a task scheduler that can run {@link ScheduledTask}s periodically. Subclasses can implement
* {@link #startImpl(Optional)} and {@link #closeImpl()}.
*
* <p>Implementations of this interface are expected to be thread-safe, and can be safely accessed
* by multiple concurrent threads.</p>
*
* @author joelbaranick
*/
public abstract class TaskScheduler<K, T extends ScheduledTask<K>> implements Closeable {
private final Cache<K, CancellableTask<K, T>> cancellableTaskMap = CacheBuilder.newBuilder().build();
private volatile boolean isStarted = false;
protected TaskScheduler() {}
/**
* Start the {@link TaskScheduler}.
*
* @param name the name of the {@link TaskScheduler}
*/
@Synchronized
final void start(Optional<String> name) {
if (!this.isStarted) {
this.startImpl(name);
this.isStarted = true;
}
}
/**
* Start the {@link TaskScheduler}.
*
* @param name the name of the {@link TaskScheduler}
*/
abstract void startImpl(Optional<String> name);
/**
* Schedules a subclass of {@link ScheduledTask} to run periodically.
*
* @param task the subclass of {@link ScheduledTask} to run every period
* @param period the period between successive executions of the task
* @param unit the time unit of the initialDelay and period parameters
*/
public final void schedule(final T task, final long period, final TimeUnit unit) throws IOException {
Preconditions.checkArgument(this.isStarted, "TaskScheduler is not started");
try {
CancellableTask<K, T> cancellableTask =
this.cancellableTaskMap.get(task.getKey(), new Callable<CancellableTask<K, T>>() {
@Override
public CancellableTask<K, T> call() {
return scheduleImpl(task, period, unit);
}
});
if (cancellableTask.getScheduledTask() != task) {
throw new IOException("Failed to schedule task with duplicate key");
}
} catch (ExecutionException e) {
throw new IOException("Failed to schedule task", e);
}
}
/**
* Schedules a subclass of {@link ScheduledTask} to run periodically.
*
* @param task the subclass of {@link ScheduledTask} to run every period
* @param period the period between successive executions of the task
* @param unit the time unit of the period parameter
*/
abstract CancellableTask<K, T> scheduleImpl(T task, long period, TimeUnit unit);
/**
* Gets all {@link ScheduledTask}s.
*
* @return the {@link ScheduledTask}s
*/
public final Iterable<T> getScheduledTasks() {
return Iterables.transform(this.cancellableTaskMap.asMap().values(), new Function<CancellableTask<K, T>, T>() {
@Override
public T apply(CancellableTask<K, T> cancellableTask) {
return cancellableTask.getScheduledTask();
}
});
}
/**
* Gets the {@link ScheduledTask} with the specified key.
*
* @return the {@link ScheduledTask}
*/
public final Optional<T> getScheduledTask(K key) {
CancellableTask<K, T> cancellableTask = this.cancellableTaskMap.getIfPresent(key);
if (cancellableTask != null) {
return Optional.of(cancellableTask.getScheduledTask());
}
return Optional.absent();
}
/**
* Attempts to cancel the specified {@link ScheduledTask}.
*
* @param task the {@link ScheduledTask} to cancel
* @return true if the {@link ScheduledTask} was canceled; otherwise, false
*/
public boolean cancel(T task) {
CancellableTask<K, T> cancellableTask = this.cancellableTaskMap.getIfPresent(task.getKey());
if (cancellableTask != null && cancellableTask.getScheduledTask() == task && cancellableTask.cancel()) {
this.cancellableTaskMap.invalidate(task.getKey());
return true;
}
return false;
}
/**
* Closes this {@link TaskScheduler}, ensuring that new tasks cannot be created
* and cancelling existing tasks.
*
* @throws IOException if an I/O error occurs
*/
@Override
@Synchronized
public final void close() throws IOException {
if (this.isStarted) {
this.isStarted = false;
this.closeImpl();
}
}
  /**
   * Closes the underlying scheduler implementation, cancelling its tasks. Called at
   * most once, under the {@code close} lock, after the scheduler is marked stopped.
   *
   * @throws IOException if an I/O error occurs
   */
  abstract void closeImpl() throws IOException;
}
| 4,273 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/concurrent/CancellableTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.concurrent;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
* The wrapper around a {@link ScheduledTask} whose execution can be cancelled.
*
* @param <K> the type of the key of the {@link ScheduledTask}
* @param <T> the type of the {@link ScheduledTask}
* @author joelbaranick
*/
@AllArgsConstructor
abstract class CancellableTask<K, T extends ScheduledTask<K>> {
@Getter
private T scheduledTask;
/**
* Attempts to cancel execution of this task. If the task
* has been executed or cancelled already, it will return
* with no side effect.
*
* @return true if the task was cancelled; otherwise, false
*/
abstract boolean cancel();
} | 4,274 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/concurrent/TaskSchedulerFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.concurrent;
import com.google.common.base.Optional;
/**
 * A factory which can be used to get an instance of {@link TaskScheduler}.
 *
 * @author joelbaranick
 */
public class TaskSchedulerFactory {

  // Static factory; not meant to be instantiated.
  private TaskSchedulerFactory() {}

  /**
   * Gets an instance of the {@link TaskScheduler} with the specified type and ensures that it is started. If
   * the type is unknown an instance of the default {@link TaskScheduler} will be returned.
   *
   * @param type the type of the {@link TaskScheduler}
   * @param name the name associated with threads created by the {@link TaskScheduler}
   * @param <K> the type of the key for the {@link ScheduledTask}
   * @param <T> the type of the {@link ScheduledTask}
   * @return a started instance of {@link TaskScheduler}
   */
  public static <K, T extends ScheduledTask<K>> TaskScheduler<K, T> get(String type, Optional<String> name) {
    return get(TaskSchedulerType.parse(type), name);
  }

  /**
   * Gets an instance of the {@link TaskScheduler} with the specified type and ensures that it is started.
   *
   * @param type the type of the {@link TaskScheduler}
   * @param name the name associated with threads created by the {@link TaskScheduler}
   * @param <K> the type of the key for the {@link ScheduledTask}
   * @param <T> the type of the {@link ScheduledTask}
   * @return a started instance of {@link TaskScheduler}
   * @throws RuntimeException if the scheduler class cannot be instantiated
   */
  public static <K, T extends ScheduledTask<K>> TaskScheduler<K, T> get(TaskSchedulerType type, Optional<String> name) {
    TaskScheduler<K, T> taskScheduler;
    try {
      // Class.newInstance() is deprecated and rethrows any checked exception from the
      // no-arg constructor without wrapping it; go through the declared constructor
      // instead and catch the full reflective-failure hierarchy.
      taskScheduler = type.getTaskSchedulerClass().getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
      // Report the scheduler type: the previous message interpolated the guava
      // Optional name, which stringifies as "Optional.of(...)".
      throw new RuntimeException("Unable to instantiate task scheduler of type '" + type + "'.", e);
    }
    taskScheduler.start(name);
    return taskScheduler;
  }
}
| 4,275 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/concurrent/TaskSchedulerType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.concurrent;
import java.util.Locale;

import org.apache.commons.lang.StringUtils;

import com.google.common.base.Enums;

import lombok.AllArgsConstructor;
import lombok.Getter;
/**
 * The types of supported {@link TaskScheduler}s.
 *
 * @author joelbaranick
 */
@AllArgsConstructor
public enum TaskSchedulerType {
  /**
   * A {@link TaskScheduler} based on a {@link java.util.concurrent.ScheduledExecutorService}. This
   * is the default {@link TaskScheduler}.
   */
  SCHEDULEDEXECUTORSERVICE(ScheduledExecutorServiceTaskScheduler.class),

  /**
   * A {@link TaskScheduler} based on a {@link org.jboss.netty.util.HashedWheelTimer}.
   */
  HASHEDWHEELTIMER(HashedWheelTimerTaskScheduler.class);

  @Getter
  private final Class<? extends TaskScheduler> taskSchedulerClass;

  /**
   * Return the {@link TaskSchedulerType} with the specified name. If the specified name
   * does not map to a {@link TaskSchedulerType}, then {@link #SCHEDULEDEXECUTORSERVICE}
   * will be returned.
   *
   * @param name the name of the {@link TaskSchedulerType}
   * @return the specified {@link TaskSchedulerType} or {@link #SCHEDULEDEXECUTORSERVICE}
   */
  public static TaskSchedulerType parse(String name) {
    if (StringUtils.isEmpty(name)) {
      return SCHEDULEDEXECUTORSERVICE;
    }
    // Upper-case with a fixed locale: the default-locale toUpperCase() mangles names
    // under locales such as Turkish ("i" -> dotted capital I), which would make
    // Enums.getIfPresent miss and silently fall back to the default scheduler.
    return Enums.getIfPresent(TaskSchedulerType.class, name.toUpperCase(Locale.ROOT)).or(SCHEDULEDEXECUTORSERVICE);
  }
}
| 4,276 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/concurrent/HashedWheelTimerTaskScheduler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.concurrent;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.jboss.netty.util.HashedWheelTimer;
import org.jboss.netty.util.Timeout;
import org.jboss.netty.util.Timer;
import org.jboss.netty.util.TimerTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import com.google.common.base.Optional;
import org.apache.gobblin.util.ExecutorsUtils;
import lombok.Synchronized;
/**
 * An implementation of {@link TaskScheduler} which schedules {@link ScheduledTask}s on an instance
 * of {@link HashedWheelTimer}.
 *
 * @param <K> the type of the key for the {@link ScheduledTask}s
 * @param <T> the type of the {@link ScheduledTask}s
 * @author joelbaranick
 */
class HashedWheelTimerTaskScheduler<K, T extends ScheduledTask<K>> extends TaskScheduler<K, T> {
  private static final Logger LOGGER = LoggerFactory.getLogger(HashedWheelTimerTaskScheduler.class);

  // A single daemon timer shared by all instances of this scheduler: a HashedWheelTimer
  // multiplexes many timeouts onto one thread, so one shared wheel serves the JVM.
  private static HashedWheelTimer executor = new HashedWheelTimer(
      ExecutorsUtils.newDaemonThreadFactory(Optional.of(LOGGER), Optional.of("HashedWheelTimerTaskScheduler")));

  /**
   * Instantiates a new instance of {@link HashedWheelTimerTaskScheduler}.
   */
  HashedWheelTimerTaskScheduler() {}

  /**
   * Start the {@link TaskScheduler}. A no-op: the shared {@link HashedWheelTimer} is
   * created eagerly in static state and needs no per-instance startup.
   *
   * @param name the name of the {@link TaskScheduler} (unused)
   */
  @Override
  final void startImpl(Optional<String> name) {}

  /**
   * Schedules a subclass of {@link ScheduledTask} to run periodically.
   *
   * @param task the subclass of {@link ScheduledTask} to run every period
   * @param period the period between successive executions of the task
   * @param unit the time unit of the period parameter
   */
  @Override
  final CancellableTask<K, T> scheduleImpl(T task, long period, TimeUnit unit) {
    // The HashedWheelTimerTask constructor arms the first timeout itself.
    return new HashedWheelTimerTask<>(executor, task, period, unit);
  }

  /**
   * Closes this {@link TaskScheduler}, cancelling all of its scheduled tasks. The shared
   * timer itself is left running, since other instances may still be using it.
   *
   * @throws IOException if an I/O error occurs
   */
  @Override
  final void closeImpl() throws IOException {
    for (T scheduledTask : this.getScheduledTasks()) {
      this.cancel(scheduledTask);
    }
  }

  /**
   * The concrete implementation of {@link TimerTask} which is used to schedule a {@link ScheduledTask}
   * on a {@link HashedWheelTimer}. A timer timeout fires only once, so this task re-arms
   * itself after each run; consequently the schedule is fixed-delay (the period is measured
   * from the end of one execution to the start of the next), not fixed-rate.
   *
   * @param <K2> the type of the key of the {@link ScheduledTask}
   * @param <T2> the type of the {@link ScheduledTask}
   * @author joelbaranick
   */
  private class HashedWheelTimerTask<K2, T2 extends ScheduledTask<K2>> extends CancellableTask<K2, T2>
      implements TimerTask {
    private final HashedWheelTimer timer;
    private final T2 task;
    private final long period;
    private final TimeUnit unit;
    // MDC diagnostic context captured at scheduling time, so logging done by the task
    // carries the scheduling thread's context instead of the timer thread's.
    private final Map<String, String> context;
    // The currently armed timeout; null once cancel() has run. Both writers (run and
    // cancel) hold the @Synchronized lock; volatile for cross-thread visibility.
    private volatile Timeout future;

    /**
     * Instantiates a new instance of {@link HashedWheelTimerTask} and arms the first timeout.
     *
     * @param timer the {@link HashedWheelTimer} that the {@link HashedWheelTimerTask} is associated to.
     * @param task the {@link ScheduledTask} to run.
     * @param period the period between successive executions of the task
     * @param unit the time unit of the period parameter
     */
    HashedWheelTimerTask(HashedWheelTimer timer, T2 task, long period, TimeUnit unit) {
      super(task);
      this.timer = timer;
      this.task = task;
      this.period = period;
      this.unit = unit;
      this.context = MDC.getCopyOfContextMap();
      this.future = this.timer.newTimeout(this, this.period, this.unit);
    }

    /**
     * Executed after the delay specified with
     * {@link Timer#newTimeout(TimerTask, long, TimeUnit)}. Runs one iteration of the
     * task and, unless the task has been cancelled, arms the next timeout.
     *
     * @param timeout a handle which is associated with this task
     */
    @Override
    @Synchronized
    public void run(Timeout timeout) throws Exception {
      // Swap in the captured MDC context for the duration of this run.
      Map<String, String> originalContext = MDC.getCopyOfContextMap();
      if (this.context != null) {
        MDC.setContextMap(context);
      }
      try {
        this.task.runOneIteration();
      } finally {
        // Re-arm only if cancel() has not nulled the handle; @Synchronized on both
        // run() and cancel() keeps the check-then-arm atomic w.r.t. cancellation.
        if (this.future != null) {
          this.future = this.timer.newTimeout(this, this.period, this.unit);
        }
        // Restore the timer thread's original MDC context.
        if (originalContext != null) {
          MDC.setContextMap(originalContext);
        } else {
          MDC.clear();
        }
      }
    }

    /**
     * Attempts to cancel execution of this task. If the task
     * has been executed or cancelled already, it will return
     * with no side effect.
     *
     * @return true if the task was cancelled; otherwise, false
     */
    @Override
    @Synchronized
    public boolean cancel() {
      if (this.future != null) {
        this.future.cancel();
        // Nulling the handle is what stops run() from re-arming the next timeout.
        this.future = null;
      }
      return true;
    }
  }
}
| 4,277 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/concurrent/ScheduledTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.concurrent;
/**
 * A task which can be scheduled to run recurrently by an instance of {@link TaskScheduler}.
 *
 * @param <K> the type of the key used to uniquely identify this task
 * @author joelbaranick
 */
public interface ScheduledTask<K> {
  /**
   * The unique key used to identify this task within a {@link TaskScheduler}.
   *
   * @return the unique key
   */
  K getKey();

  /**
   * Run one iteration of the scheduled task. Invoked once per scheduling period.
   */
  void runOneIteration();
}
| 4,278 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/concurrent/AutoResetEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.concurrent;
import java.util.concurrent.TimeUnit;
public class AutoResetEvent {
private final Object syncObject = new Object();
private boolean state;
public AutoResetEvent() {
this(false);
}
public AutoResetEvent(boolean initialState) {
this.state = initialState;
}
public void waitOne() throws InterruptedException {
synchronized (this.syncObject) {
while (!this.state) {
this.syncObject.wait();
}
this.state = false;
}
}
public boolean waitOne(int timeout, TimeUnit timeUnit) throws InterruptedException {
long waitTime = timeUnit.toNanos(timeout);
long startTime = System.nanoTime();
synchronized (this.syncObject) {
while (!this.state) {
long remainingTimeoutNs = waitTime - (System.nanoTime() - startTime);
if (remainingTimeoutNs <= 0) {
return false;
}
long remainingTimeoutMs = 0;
if (remainingTimeoutNs > 999999) {
remainingTimeoutMs = remainingTimeoutNs / 1000000;
remainingTimeoutNs = remainingTimeoutNs % 1000000;
}
this.syncObject.wait(remainingTimeoutMs, (int) remainingTimeoutNs);
}
this.state = false;
return true;
}
}
public void set() {
synchronized (this.syncObject) {
this.state = true;
this.syncObject.notify();
}
}
}
| 4,279 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/concurrent/ScheduledExecutorServiceTaskScheduler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.concurrent;
import java.io.IOException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import org.apache.gobblin.util.ExecutorsUtils;
/**
* An implementation of {@link TaskScheduler} which schedules @{link ScheduledTask}s on an instance
* of {@link ScheduledExecutorService}.
*
* @param <K> the type of the key for the {@link ScheduledTask}s
* @param <T> the type of the {@link ScheduledTask}s
* @author joelbaranick
*/
class ScheduledExecutorServiceTaskScheduler<K, T extends ScheduledTask<K>> extends TaskScheduler<K, T> {
private static final Logger LOGGER = LoggerFactory.getLogger(ScheduledExecutorServiceTaskScheduler.class);
private ScheduledExecutorService executorService;
/**
* Instantiates a new instance of {@link ScheduledExecutorServiceTaskScheduler}.
*/
ScheduledExecutorServiceTaskScheduler() {}
/**
* Start the {@link TaskScheduler}.
*
* @param name the name of the {@link TaskScheduler}
*/
@Override
final void startImpl(Optional<String> name) {
this.executorService =
ExecutorsUtils.loggingDecorator(Executors.newScheduledThreadPool(0,
ExecutorsUtils.newDaemonThreadFactory(Optional.of(LOGGER), name)));
}
/**
* Schedules a subclass of {@link ScheduledTask} to run periodically.
*
* @param task the subclass of {@link ScheduledTask} to run every period
* @param period the period between successive executions of the task
* @param unit the time unit of the period parameter
*/
@Override
final CancellableTask<K, T> scheduleImpl(final T task, long period, TimeUnit unit) {
final ScheduledFuture<?> future = this.executorService.scheduleAtFixedRate(new RunnableTask(task), 0, period, unit);
return new CancellableScheduledFuture<>(task, future);
}
/**
* Closes this {@link TaskScheduler}, ensuring that new tasks cannot be created
* and cancelling existing tasks.
*
* @throws IOException if an I/O error occurs
*/
@Override
final void closeImpl() throws IOException {
ExecutorsUtils.shutdownExecutorService(this.executorService, Optional.of(LOGGER));
}
/**
* An implementation of {@link Runnable} which will run the specified {@link ScheduledTask}.
*/
private class RunnableTask implements Runnable {
private final T task;
/**
* Instantiates a new {@link RunnableTask}.
*
* @param task the {@link ScheduledTask} to run
*/
public RunnableTask(T task) {
this.task = task;
}
/**
* When an object implementing interface <code>Runnable</code> is used
* to create a thread, starting the thread causes the object's
* <code>run</code> method to be called in that separately executing
* thread.
* <p>
* The general contract of the method <code>run</code> is that it may
* take any action whatsoever.
*
* @see java.lang.Thread#run()
*/
@Override
public void run() {
this.task.runOneIteration();
}
}
/**
* An implementation of {@link CancellableTask} which can cancel the underlying {@link ScheduledFuture}.
*
* @param <K2> the type of the key of the {@link ScheduledTask}
* @param <T2> the type of the {@link ScheduledTask}
*/
private class CancellableScheduledFuture<K2, T2 extends ScheduledTask<K2>> extends CancellableTask<K2, T2> {
private final ScheduledFuture<?> future;
/**
* Instantiates a new {@link CancellableScheduledFuture}.
*
* @param task the underlying {@link ScheduledTask}
* @param future the underlying {@link ScheduledFuture}
*/
public CancellableScheduledFuture(T2 task, ScheduledFuture<?> future) {
super(task);
this.future = future;
}
/**
* Attempts to cancel execution of this task. If the task
* has been executed or cancelled already, it will return
* with no side effect.
*
* @return true if the task was cancelled; otherwise, false
*/
@Override
public boolean cancel() {
this.future.cancel(true);
return true;
}
}
}
| 4,280 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/guid/HasGuid.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.guid;
import java.io.IOException;
/**
 * Represents an object for which we can compute a unique, replicable {@link Guid}.
 */
public interface HasGuid {
  /**
   * Computes the {@link Guid} for this object. Implementations are expected to be
   * replicable: the same object state yields the same {@link Guid}.
   *
   * @return the {@link Guid} for this object.
   * @throws IOException if the guid cannot be computed
   */
  public Guid guid() throws IOException;
}
| 4,281 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/guid/Guid.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.guid;
import lombok.EqualsAndHashCode;
import java.io.IOException;
import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Hex;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.ArrayUtils;
import com.google.common.base.Charsets;
/**
 * Class wrapping a byte array representing a guid. A {@link Guid} is intended to uniquely identify
 * objects in a replicable way.
 */
@EqualsAndHashCode
public class Guid {

  public static final int GUID_LENGTH = 20;

  final byte[] sha;

  /**
   * Creates a {@link Guid} by computing the sha of the input bytes.
   */
  public Guid(byte[] bytes) {
    this(bytes, false);
  }

  /**
   * @param bytes byte array.
   * @param isSha true if bytes are already a sha.
   */
  private Guid(byte[] bytes, boolean isSha) {
    this.sha = isSha ? bytes : computeGuid(bytes);
  }

  /**
   * Generate a {@link Guid} for an array of {@link HasGuid}.
   * @param objs array of {@link HasGuid}.
   * @return a single {@link Guid} for the array.
   * @throws IOException
   */
  public static Guid fromHasGuid(HasGuid... objs) throws IOException {
    byte[][] shas = new byte[objs.length][];
    int index = 0;
    for (HasGuid obj : objs) {
      shas[index++] = obj.guid().sha;
    }
    return fromByteArrays(shas);
  }

  /**
   * Generate a {@link Guid} for an array of Strings.
   * @param strings array of Strings.
   * @return a single {@link Guid} for the array.
   * @throws IOException
   */
  public static Guid fromStrings(String... strings) throws IOException {
    if (strings == null || strings.length == 0) {
      throw new IOException("Attempting to compute guid for an empty array.");
    }
    String joined = StringUtils.join(strings);
    return new Guid(joined.getBytes(Charsets.UTF_8));
  }

  /**
   * Generate a {@link Guid} for an array of byte arrays.
   * @param byteArrays array of byte arrays.
   * @return a single {@link Guid} for the array.
   * @throws IOException
   */
  public static Guid fromByteArrays(byte[]... byteArrays) throws IOException {
    if (byteArrays == null || byteArrays.length == 0) {
      throw new IOException("Attempting to compute guid for an empty array.");
    }
    if (byteArrays.length == 1) {
      return new Guid(byteArrays[0]);
    }
    // Concatenate all chunks into one buffer, then hash the result.
    int totalLength = 0;
    for (byte[] chunk : byteArrays) {
      totalLength += (chunk == null ? 0 : chunk.length);
    }
    byte[] concatenated = new byte[totalLength];
    int offset = 0;
    for (byte[] chunk : byteArrays) {
      if (chunk != null) {
        System.arraycopy(chunk, 0, concatenated, offset, chunk.length);
        offset += chunk.length;
      }
    }
    return new Guid(concatenated);
  }

  /**
   * Reverse of {@link #toString}. Deserializes a {@link Guid} from a previously serialized one.
   * @param str Serialized {@link Guid}.
   * @return deserialized {@link Guid}.
   * @throws IOException
   */
  public static Guid deserialize(String str) throws IOException {
    if (str.length() != 2 * GUID_LENGTH) {
      throw new IOException("String is not an encoded guid.");
    }
    try {
      byte[] decoded = Hex.decodeHex(str.toCharArray());
      return new Guid(decoded, true);
    } catch (DecoderException de) {
      throw new IOException(de);
    }
  }

  /**
   * Combine multiple {@link Guid}s into a single {@link Guid}.
   * @throws IOException
   */
  public static Guid combine(Guid... guids) throws IOException {
    byte[][] shas = new byte[guids.length][];
    int index = 0;
    for (Guid guid : guids) {
      shas[index++] = guid.sha;
    }
    return fromByteArrays(shas);
  }

  /**
   * Creates a new {@link Guid} which is a unique, replicable representation of the pair (this, byteArrays).
   * @param byteArrays an array of byte arrays.
   * @return a new {@link Guid}.
   * @throws IOException
   */
  public Guid append(byte[]... byteArrays) throws IOException {
    if (byteArrays == null || byteArrays.length == 0) {
      return this;
    }
    // Append this guid's sha as the last element, matching the other append overloads.
    byte[][] withSha = new byte[byteArrays.length + 1][];
    System.arraycopy(byteArrays, 0, withSha, 0, byteArrays.length);
    withSha[byteArrays.length] = this.sha;
    return fromByteArrays(withSha);
  }

  /**
   * Creates a new {@link Guid} which is a unique, replicable representation of the pair (this, guids). Equivalent to
   * combine(guid1, guid2, ..., this)
   * @param guids an array of {@link Guid}.
   * @return a new {@link Guid}.
   * @throws IOException
   */
  public Guid append(Guid... guids) throws IOException {
    if (guids == null || guids.length == 0) {
      return this;
    }
    Guid[] withThis = new Guid[guids.length + 1];
    System.arraycopy(guids, 0, withThis, 0, guids.length);
    withThis[guids.length] = this;
    return combine(withThis);
  }

  /**
   * Creates a new {@link Guid} which is a unique, replicable representation of the pair (this, objs).
   * @param objs an array of {@link HasGuid}.
   * @return a new {@link Guid}.
   * @throws IOException
   */
  public Guid append(HasGuid... objs) throws IOException {
    if (objs == null || objs.length == 0) {
      return this;
    }
    HasGuid[] withThis = new HasGuid[objs.length + 1];
    System.arraycopy(objs, 0, withThis, 0, objs.length);
    withThis[objs.length] = new SimpleHasGuid(this);
    return fromHasGuid(withThis);
  }

  /**
   * Serializes the guid into a hex string. The original {@link Guid} can be recovered using {@link #deserialize}.
   */
  @Override
  public String toString() {
    return Hex.encodeHexString(this.sha);
  }

  // DigestUtils.sha is deprecated for sha1, but sha1 is not available in old versions of commons codec
  @SuppressWarnings("deprecation")
  private static byte[] computeGuid(byte[] bytes) {
    return DigestUtils.sha(bytes);
  }

  /** A trivial {@link HasGuid} that returns a pre-computed {@link Guid}. */
  static class SimpleHasGuid implements HasGuid {
    private final Guid guid;

    public SimpleHasGuid(Guid guid) {
      this.guid = guid;
    }

    @Override
    public Guid guid() throws IOException {
      return this.guid;
    }
  }
}
| 4,282 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/deprecation/DeprecationUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.deprecation;
import java.util.List;
import org.apache.gobblin.configuration.State;
import lombok.extern.slf4j.Slf4j;
/**
 * Utilities to handle deprecations in Gobblin.
 */
@Slf4j
public class DeprecationUtils {

  // Utility class; prevent instantiation (the implicit public constructor served no purpose).
  private DeprecationUtils() {}

  /**
   * Sets an option in a {@link State} to the first available value of a list of deprecatedKeys. For example, if
   * an option "optiona" was previously called "optionb" or "optionc",
   * calling {@link #renameDeprecatedKeys(State, String, List)} will search for the first available key-value pair
   * with key optiona, optionb, or optionc, and set optiona to that value.
   *
   * <p>If the current key is already present, the {@link State} is left untouched.</p>
   *
   * @param state {@link State} to modify.
   * @param currentKey current name of an option.
   * @param deprecatedKeys all other names that {@link State}s could use to refer to that option.
   */
  public static void renameDeprecatedKeys(State state, String currentKey, List<String> deprecatedKeys) {
    if (state.contains(currentKey)) {
      return;
    }
    for (String oldKey : deprecatedKeys) {
      if (state.contains(oldKey)) {
        log.info("Copying the value of deprecated key " + oldKey + " into key " + currentKey);
        state.setProp(currentKey, state.getProp(oldKey));
        return;
      }
    }
  }
}
| 4,283 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/orc/AvroOrcSchemaConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.orc;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.avro.Schema;
import org.apache.gobblin.util.AvroSchemaUtils;
import org.apache.orc.TypeDescription;
/**
 * A utility class that provides a method to convert {@link Schema} into {@link TypeDescription}.
 */
public class AvroOrcSchemaConverter {

  /**
   * Translates an Avro {@link Schema} into the equivalent ORC {@link TypeDescription}.
   *
   * Nullable unions (a NULL type paired with exactly one other member) are flattened to the
   * non-null member type, since every ORC type is already nullable.
   */
  public static TypeDescription getOrcSchema(Schema avroSchema) {
    final Schema.Type avroType = avroSchema.getType();
    switch (avroType) {
      case BOOLEAN:
        return TypeDescription.createBoolean();
      case INT:
        return TypeDescription.createInt();
      case LONG:
        return TypeDescription.createLong();
      case FLOAT:
        return TypeDescription.createFloat();
      case DOUBLE:
        return TypeDescription.createDouble();
      case STRING:
      case ENUM:
        // Enums are represented as String for now.
        return TypeDescription.createString();
      case BYTES:
      case FIXED:
        return getTypeDescriptionForBinarySchema(avroSchema);
      case NULL:
        // An empty union represents the null type.
        return TypeDescription.createUnion();
      case ARRAY:
        return TypeDescription.createList(getOrcSchema(avroSchema.getElementType()));
      case MAP:
        // In Avro maps, keys are always strings.
        return TypeDescription.createMap(
            TypeDescription.createString(), getOrcSchema(avroSchema.getValueType()));
      case RECORD:
        final TypeDescription recordStruct = TypeDescription.createStruct();
        for (Schema.Field field : avroSchema.getFields()) {
          final TypeDescription memberType = getOrcSchema(field.schema());
          if (memberType == null) {
            throw new IllegalStateException("Should never get a null type as fieldType.");
          }
          recordStruct.addField(field.name(), memberType);
        }
        return recordStruct;
      case UNION:
        final List<Schema> nonNullMembers = getNonNullMembersOfUnion(avroSchema);
        if (isNullableUnion(avroSchema, nonNullMembers)) {
          // Avro encodes "nullable" as a union of NULL with exactly one other member; ORC types
          // support nullability natively, so use the member type directly.
          return getOrcSchema(nonNullMembers.get(0));
        }
        // A genuine union type: model it as an actual ORC union of the non-null members.
        final TypeDescription orcUnion = TypeDescription.createUnion();
        for (final Schema member : nonNullMembers) {
          orcUnion.addUnionChild(getOrcSchema(member));
        }
        return orcUnion;
      default:
        throw new IllegalStateException(String.format("Unrecognized Avro type: %s", avroType.getName()));
    }
  }

  /**
   * Get the {@link TypeDescription} for a binary schema type (BYTES/FIXED).
   *
   * This is based on logic from org.apache.hadoop.hive.serde2.avro.SchemaToTypeInfo#generateTypeInfo.
   *
   * @return a decimal TypeDescription (with the schema's declared scale and precision) when the
   *         "logicalType" property is "decimal", otherwise a plain binary TypeDescription.
   */
  private static TypeDescription getTypeDescriptionForBinarySchema(Schema avroSchema) {
    if (!"decimal".equalsIgnoreCase(avroSchema.getProp("logicalType"))) {
      return TypeDescription.createBinary();
    }
    int scale = AvroSchemaUtils.getValueAsInteger(avroSchema, "scale");
    int precision = AvroSchemaUtils.getValueAsInteger(avroSchema, "precision");
    return TypeDescription.createDecimal().withScale(scale).withPrecision(precision);
  }

  /**
   * Distinguishes a nullable union from a single-member non-nullable union: the former is
   * "flattened" to its member type, while the latter must be preserved as a union.
   *
   * @param unionSchema the union schema under examination
   * @param nonNullMembers the union's members excluding NULL
   * @return true iff the union has exactly two members, exactly one of which is non-null.
   */
  private static boolean isNullableUnion(Schema unionSchema, List<Schema> nonNullMembers) {
    return unionSchema.getTypes().size() == 2 && nonNullMembers.size() == 1;
  }

  /**
   * Returns every member of the union whose type is not NULL, in declaration order. Useful because
   * Avro represents a nullable field as a union whose first member is null.
   */
  public static List<Schema> getNonNullMembersOfUnion(Schema unionSchema) {
    return unionSchema.getTypes().stream()
        .filter(member -> member.getType() != Schema.Type.NULL)
        .collect(Collectors.toList());
  }

  /**
   * Strips the "null" marker out of a union {@link Schema} (Avro's way of flagging a nullable
   * type). Keeping the member list free of null keeps union-member indexes consistent with the
   * corresponding {@link TypeDescription}.
   */
  public static Schema sanitizeNullableSchema(Schema avroSchema) {
    if (avroSchema.getType() != Schema.Type.UNION) {
      return avroSchema;
    }
    // A nullable union collapses to its sole non-null member; otherwise rebuild the union
    // without the null entry.
    List<Schema> members = getNonNullMembersOfUnion(avroSchema);
    return isNullableUnion(avroSchema, members) ? members.get(0) : Schema.createUnion(members);
  }
}
| 4,284 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/service/StandardServiceConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.service;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.EqualsAndHashCode;
import lombok.Getter;
/**
 * A wrapper around a typesafe {@link Config} object providing uniformly named, standard
 * configuration settings for services: start-up and shut-down timeouts.
 */
@Getter @EqualsAndHashCode
public class StandardServiceConfig {
  public static final String STARTUP_PREFIX = "startUp";
  public static final String TIMEOUT_MS_KEY = "timeoutMs";
  public static final String STARTUP_TIMEOUT_MS_PROP = STARTUP_PREFIX + "." + TIMEOUT_MS_KEY;
  public static final String SHUTDOWN_PREFIX = "shutDown";
  public static final String SHUTDOWN_TIMEOUT_MS_PROP = SHUTDOWN_PREFIX + "." + TIMEOUT_MS_KEY;

  /** Fallback configuration: both timeouts default to 5 minutes. */
  public static final Config DEFAULT_CFG =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(STARTUP_TIMEOUT_MS_PROP, 5 * 60 * 1000)   // 5 minutes
          .put(SHUTDOWN_TIMEOUT_MS_PROP, 5 * 60 * 1000)  // 5 minutes
          .build());

  private final long startUpTimeoutMs;
  private final long shutDownTimeoutMs;

  /**
   * Constructor from a typesafe config object.
   * @param serviceCfg the service configuration; must be local, i.e. any service namespace
   *        prefix should be removed using {@link Config#getConfig(String)}.
   **/
  public StandardServiceConfig(Config serviceCfg) {
    // Missing settings fall back to DEFAULT_CFG.
    Config resolvedCfg = serviceCfg.withFallback(DEFAULT_CFG);
    this.startUpTimeoutMs = resolvedCfg.getLong(STARTUP_TIMEOUT_MS_PROP);
    this.shutDownTimeoutMs = resolvedCfg.getLong(SHUTDOWN_TIMEOUT_MS_PROP);
  }
}
| 4,285 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/event/ContainerHealthCheckFailureEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An event type to signal failure of a container health check. This event can be generated from anywhere
* inside the application. This event is intended to be emitted
* over an {@link com.google.common.eventbus.EventBus} instance.
*/
package org.apache.gobblin.util.event;
import java.util.Map;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import lombok.Getter;
public class ContainerHealthCheckFailureEvent {
  public static final String CONTAINER_HEALTH_CHECK_EVENT_BUS_NAME = "ContainerHealthCheckEventBus";

  /** Context of emission of this event, like the task's state. */
  @Getter
  private final Config config;

  /** Name of the class that generated this failure event. */
  @Getter
  private final String className;

  /** Free-form key/value details attached by the reporter via {@link #addMetadata}. */
  @Getter
  private final Map<String, String> metadata = Maps.newHashMap();

  public ContainerHealthCheckFailureEvent(Config config, String className) {
    this.config = config;
    this.className = className;
  }

  /** Attaches one key/value detail to this event. */
  public void addMetadata(String key, String value) {
    this.metadata.put(key, value);
  }
}
| 4,286 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/iterators/InterruptibleIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.iterators;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.Callable;
import lombok.RequiredArgsConstructor;
/**
 * An iterator that allows ending prematurely (i.e. {@link #hasNext()} becomes false) if a boolean
 * {@link Callable} becomes true.
 */
public class InterruptibleIterator<T> implements Iterator<T> {

  private final Iterator<T> iterator;
  private final Callable<Boolean> interrupt;
  /** Once {@link #hasNext()} has answered true we are committed and may no longer interrupt. */
  private boolean promisedNext = false;

  public InterruptibleIterator(Iterator<T> iterator, Callable<Boolean> interrupt) {
    this.iterator = iterator;
    this.interrupt = interrupt;
  }

  @Override
  public boolean hasNext() {
    if (this.promisedNext) {
      // Already promised an element to the caller; the interrupt no longer applies.
      return true;
    }
    try {
      if (this.iterator.hasNext() && !this.interrupt.call()) {
        this.promisedNext = true;
        return true;
      }
      return false;
    } catch (Exception exception) {
      // Callable.call declares a checked Exception; surface it unchecked.
      throw new RuntimeException(exception);
    }
  }

  @Override
  public T next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    // The promise is consumed by handing out this element.
    this.promisedNext = false;
    return this.iterator.next();
  }

  @Override
  public void remove() {
    this.iterator.remove();
  }
}
| 4,287 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/reflection/RestrictedFieldAccessingUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.reflection;
import java.lang.reflect.Field;
import java.util.Arrays;
/**
 * These are hacky methods that should only be used when access modifiers prevent reaching fields
 * that are essential for application code to access.
 */
public class RestrictedFieldAccessingUtils {
  private RestrictedFieldAccessingUtils() {
  }

  /**
   * Reads the (possibly non-public) field named {@code fieldName} declared directly on
   * {@code clazz} from {@code containingObj}, using java-reflection.
   *
   * @param containingObj the instance to read the field from
   * @param fieldName name of a field declared on {@code clazz} itself (superclasses not searched)
   * @param clazz the class declaring the field
   * @return the field's current value in {@code containingObj}
   * @throws NoSuchFieldException if {@code clazz} does not declare the field
   * @throws IllegalAccessException if access cannot be forced (e.g. denied by the module system)
   */
  public static Object getRestrictedFieldByReflection(Object containingObj, String fieldName, Class<?> clazz)
      throws NoSuchFieldException, IllegalAccessException {
    Field field = clazz.getDeclaredField(fieldName);
    field.setAccessible(true);
    return field.get(containingObj);
  }

  /**
   * Like {@link #getRestrictedFieldByReflection}, but also searches superclasses: walks up from
   * {@code clazz} and reads the field from the nearest type declaring it.
   *
   * @throws NoSuchFieldException if neither {@code clazz} nor any ancestor declares the field
   */
  public static Object getRestrictedFieldByReflectionRecursively(Object containingObj, String fieldName, Class<?> clazz)
      throws NoSuchFieldException, IllegalAccessException {
    // Stop at the top of the hierarchy. Compare identities instead of canonical names:
    // getCanonicalName() returns null for local/anonymous classes (NPE in the old check), and a
    // null clazz guards the getSuperclass() == null case (e.g. interfaces).
    if (clazz == null || clazz == Object.class) {
      throw new NoSuchFieldException(
          String.format("Field %s doesn't exist in specified class and its ancestors", fieldName));
    }
    boolean declaredHere = Arrays.stream(clazz.getDeclaredFields())
        .anyMatch(field -> field.getName().equals(fieldName));
    return declaredHere
        ? getRestrictedFieldByReflection(containingObj, fieldName, clazz)
        : getRestrictedFieldByReflectionRecursively(containingObj, fieldName, clazz.getSuperclass());
  }
}
| 4,288 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/util/reflection/GobblinConstructorUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.reflection;
import java.lang.reflect.InvocationTargetException;
import java.util.Arrays;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.reflect.ConstructorUtils;
/**
 * Helper methods to instantiate classes
 */
@Slf4j
public class GobblinConstructorUtils {

  /**
   * Convenience method on top of {@link ConstructorUtils#invokeConstructor(Class, Object[])} that
   * instantiates {@code cls} from the first candidate argument list whose runtime types match an
   * accessible constructor. Each {@link List} in {@code constructorArgs} holds the arguments for
   * one constructor of {@code cls}; array order defines the priority.
   *
   * @param cls the class to be instantiated
   * @param constructorArgs candidate constructor argument lists, highest priority first
   * @return a new instance of {@code cls}
   * @throws NoSuchMethodException if no candidate matches an accessible constructor
   */
  @SafeVarargs
  public static <T> T invokeFirstConstructor(Class<T> cls, List<Object>... constructorArgs)
      throws NoSuchMethodException, IllegalAccessException, InvocationTargetException, InstantiationException {
    for (List<Object> candidate : constructorArgs) {
      Class<?>[] signature = candidate.stream().map(Object::getClass).toArray(Class<?>[]::new);
      if (ConstructorUtils.getMatchingAccessibleConstructor(cls, signature) != null) {
        return ConstructorUtils.invokeConstructor(cls, candidate.toArray());
      }
    }
    throw new NoSuchMethodException("No accessible constructor found");
  }

  /**
   * Instantiates {@code cls} with the longest usable prefix of {@code args}: the constructor
   * matching the first k arguments is searched for every k from args.length down to 0, and the
   * first match is invoked.
   *
   * For example, {@link #invokeLongestConstructor}(cls, myString, myInt) first attempts
   * <init>(String, int), then <init>(String), and finally <init>().
   *
   * @param cls the class to instantiate.
   * @param args the arguments to use for instantiation.
   * @throws ReflectiveOperationException if no prefix of {@code args} matches a constructor
   */
  public static <T> T invokeLongestConstructor(Class<T> cls, Object... args) throws ReflectiveOperationException {
    Class<?>[] fullSignature = new Class<?>[args.length];
    for (int idx = 0; idx < args.length; idx++) {
      fullSignature[idx] = args[idx].getClass();
    }

    for (int prefixLen = args.length; prefixLen >= 0; prefixLen--) {
      Class<?>[] prefix = Arrays.copyOfRange(fullSignature, 0, prefixLen);
      if (ConstructorUtils.getMatchingAccessibleConstructor(cls, prefix) == null) {
        continue;
      }
      log.debug(
          String.format("Found accessible constructor for class %s with parameter types %s.", cls,
              Arrays.toString(prefix)));
      return ConstructorUtils.invokeConstructor(cls, Arrays.copyOfRange(args, 0, prefixLen));
    }
    throw new NoSuchMethodException(String.format("No accessible constructor for class %s with parameters a subset of %s.",
        cls, Arrays.toString(fullSignature)));
  }

  /**
   * Utility method to create an instance of {@code clsName} using the constructor matching
   * {@code args}, cast to {@code superType}.
   *
   * @param superType supertype of {@code clsName}; the new instance is cast to it
   * @param clsName complete canonical name of the class to be instantiated
   * @param args constructor args to be used
   * @return a new instance of {@code clsName}
   * @throws IllegalArgumentException wrapping any reflective failure (missing class or
   *         constructor, inaccessible constructor, or instantiation error)
   */
  @SuppressWarnings("unchecked")
  public static <T> T invokeConstructor(final Class<T> superType, final String clsName, Object... args) {
    try {
      return (T) ConstructorUtils.invokeConstructor(Class.forName(clsName), args);
    } catch (ReflectiveOperationException e) {
      // Common supertype of all five reflective failure modes the call can raise.
      throw new IllegalArgumentException(e);
    }
  }
}
| 4,289 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime/cli/ConstructorAndPublicMethodsCliObjectFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.cli;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Modifier;
import java.util.Map;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import com.google.common.collect.Maps;
/**
 * A helper class for automatically inferring {@link Option}s from the constructor and public methods in a class.
 *
 * For method inference, see {@link PublicMethodsCliObjectFactory}.
 *
 * {@link Option}s are inferred from the constructor as follows:
 * 1. The helper will search for exactly one constructor with only String arguments and which is annotated with
 * {@link CliObjectSupport}.
 * 2. For each parameter of the constructor, the helper will create a required {@link Option}.
 *
 */
public class ConstructorAndPublicMethodsCliObjectFactory<T> extends PublicMethodsCliObjectFactory<T> {

  // The single @CliObjectSupport-annotated, all-String constructor used to build instances.
  private final Constructor<? extends T> constructor;
  // Maps a CLI option name to the positional index of the constructor argument it supplies.
  private final Map<String, Integer> constructoArgumentsMap;
  // Method-inferred options (from the superclass) plus the constructor-inferred ones.
  private final Options options;

  public ConstructorAndPublicMethodsCliObjectFactory(Class<? extends T> klazz) {
    super(klazz);
    this.constructoArgumentsMap = Maps.newHashMap();
    // Start from the options the superclass inferred from public methods...
    this.options = super.getOptions();
    // ...then add one required option per constructor parameter. Also populates
    // constructoArgumentsMap as a side effect, so it must run after the map is created.
    this.constructor = inferConstructorOptions(this.options);
  }

  @Override
  public T constructObject(CommandLine cli) throws IOException {
    return buildInstance(cli);
  }

  @Override
  public Options getOptions() {
    return this.options;
  }

  /**
   * Builds an instance of T using the selected constructor, taking the constructor parameters from
   * the {@link CommandLine}: each option mapped in constructoArgumentsMap fills the corresponding
   * positional argument.
   *
   * Note: callers normally reach this through {@code buildObject(CommandLine)}, which afterwards
   * also applies the method-based options to the constructed object.
   */
  private T buildInstance(CommandLine cli) {
    String[] constructorArgs = new String[this.constructor.getParameterTypes().length];
    for (Option option : cli.getOptions()) {
      if (this.constructoArgumentsMap.containsKey(option.getOpt())) {
        int idx = this.constructoArgumentsMap.get(option.getOpt());
        constructorArgs[idx] = option.getValue();
      }
    }

    T embeddedGobblin;
    try {
      embeddedGobblin = this.constructor.newInstance((Object[]) constructorArgs);
      return embeddedGobblin;
    } catch (IllegalAccessException | InvocationTargetException | InstantiationException exc) {
      throw new RuntimeException("Could not instantiate " + this.klazz.getName(), exc);
    }
  }

  /**
   * Finds the unique usable constructor (see canUseConstructor) and registers one required,
   * single-argument {@link Option} per constructor parameter, named after the parameter's
   * {@link CliObjectSupport#argumentNames()} entry.
   */
  private Constructor<? extends T> inferConstructorOptions(Options otherOptions) {
    Constructor<? extends T> selectedConstructor = null;
    for (Constructor<?> constructor : this.klazz.getConstructors()) {
      if (canUseConstructor(constructor)) {
        if (selectedConstructor == null) {
          // Safe: a constructor obtained from this.klazz (a Class<? extends T>) produces a T.
          selectedConstructor = (Constructor<? extends T>) constructor;
        } else {
          throw new RuntimeException("Multiple usable constructors for " + this.klazz.getName());
        }
      }
    }
    if (selectedConstructor == null) {
      throw new RuntimeException("There is no usable constructor for " + this.klazz.getName());
    }

    int constructorIdx = 0;
    for (String argument : selectedConstructor.getAnnotation(CliObjectSupport.class).argumentNames()) {
      Option option = Option.builder(argument).required().hasArg().build();
      otherOptions.addOption(option);
      constructoArgumentsMap.put(option.getOpt(), constructorIdx);
      constructorIdx++;
    }

    return selectedConstructor;
  }

  /**
   * A constructor is usable iff it is public, annotated with {@link CliObjectSupport}, takes only
   * String parameters, and declares exactly one argument name per parameter.
   */
  private boolean canUseConstructor(Constructor<?> constructor) {
    if (!Modifier.isPublic(constructor.getModifiers())) {
      return false;
    }
    if (!constructor.isAnnotationPresent(CliObjectSupport.class)) {
      return false;
    }
    for (Class<?> param : constructor.getParameterTypes()) {
      if (param != String.class) {
        return false;
      }
    }
    return constructor.getParameterTypes().length ==
        constructor.getAnnotation(CliObjectSupport.class).argumentNames().length;
  }
}
| 4,290 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime/cli/CliObjectOption.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.cli;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Specify additional information to use when building a CLI option from a method.
 * This will only be respected on public methods with none or exactly one {@link String} parameter.
 */
@Retention(value= RetentionPolicy.RUNTIME) @Target(value= {ElementType.METHOD})
public @interface CliObjectOption {

  /**
   * The name of the option in cli (e.g. if name="myName", then CLI users would call "-myName" to activate the option).
   * When empty (the default), the annotated method's own name is used as the option name.
   */
  String name() default "";

  /**
   * A description for the option; used as the description of the generated CLI option.
   */
  String description() default "";
}
| 4,291 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime/cli/GobblinCli.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.cli;
import com.google.common.collect.Sets;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.util.ClassAliasResolver;
/**
 * Instantiates a {@link CliApplication} and runs it.
 */
public class GobblinCli {

  public static void main(String[] args) {
    ClassAliasResolver<CliApplication> resolver = new ClassAliasResolver<>(CliApplication.class);

    // No command, or an explicit help flag: print the command list and exit normally.
    if (args.length == 0 || "-h".equals(args[0]) || "--help".equals(args[0])) {
      printUsage(resolver);
      return;
    }

    String alias = args[0];
    try {
      resolver.resolveClass(alias).newInstance().run(args);
    } catch (ReflectiveOperationException roe) {
      System.err.println("Could not find an application with alias " + alias);
      printUsage(resolver);
      System.exit(1);
    } catch (Throwable t) {
      System.out.println("Error: " + t.getMessage());
      t.printStackTrace();
      System.exit(2);
    }
  }

  private static void printUsage(ClassAliasResolver<CliApplication> resolver) {
    System.out.println("Usage: gobblin cli <command>");
    System.out.println("Available commands:");
    for (Alias alias : resolver.getAliasObjects()) {
      System.out.println(String.format("\t%s\t\t%s", alias.value(), alias.description()));
    }
  }
}
| 4,292 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime/cli/PublicMethodsCliObjectFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.cli;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
* A helper class for automatically inferring {@link Option}s from the public methods in a class.
*
* For each public method in the class to infer with exactly zero or one String parameter, the helper will create
* an optional {@link Option}. Using the annotation {@link CliObjectOption} the helper can automatically
* add a description to the {@link Option}. Annotating a method with {@link NotOnCli} will prevent the helper from
* creating an {@link Option} from it.
*/
@Slf4j
public abstract class PublicMethodsCliObjectFactory<T> implements CliObjectFactory<T> {
private static final Option HELP = Option.builder("h").longOpt("help").build();
private static final Option USE_LOG = Option.builder("l").desc("Uses log to print out erros in the base CLI code.").build();
private static final List<String> BLACKLISTED_FROM_CLI = ImmutableList.of(
"getClass", "hashCode", "notify", "notifyAll", "toString", "wait"
);
protected final Class<? extends T> klazz;
@Getter
private final Options options;
private final Map<String, Method> methodsMap;
  /**
   * Builds a factory for {@code klazz}, inferring one CLI option per eligible public method.
   */
  public PublicMethodsCliObjectFactory(Class<? extends T> klazz) {
    this.klazz = klazz;
    this.methodsMap = Maps.newHashMap();
    // Must run last: inferOptionsFromMethods populates this.methodsMap as a side effect.
    this.options = inferOptionsFromMethods();
  }
@Override
public T buildObject(CommandLine cli) {
try {
T obj = constructObject(cli);
applyCommandLineOptions(cli, obj);
return obj;
} catch (IOException exc) {
throw new RuntimeException("Could not instantiate " + this.klazz.getSimpleName(), exc);
}
}
  /**
   * Parses {@code args} (starting at {@code offset}) against this factory's options plus the
   * built-in -h/--help and -l flags, and builds the object.
   *
   * @throws IOException wrapping a parse failure or a construction failure; a bare
   *         HelpArgumentFound is thrown when the user asked for help.
   */
  @Override
  public T buildObject(String[] args, int offset, boolean printUsage, String usage) throws IOException {
    // Combine the driver-level flags (help, log) with the options inferred from the class.
    Options options = new Options();
    options.addOption(HELP);
    options.addOption(USE_LOG);
    for (Option opt : getOptions().getOptions()) {
      options.addOption(opt);
    }

    CommandLine cli;
    try {
      CommandLineParser parser = new DefaultParser();
      // Skip the first `offset` entries (e.g. the command name itself).
      cli = parser.parse(options, Arrays.copyOfRange(args, offset, args.length));
    } catch (ParseException pe) {
      if (printUsage) {
        System.out.println("Command line parse exception: " + pe.getMessage());
        printUsage(usage, options);
      }
      throw new IOException(pe);
    }

    if (cli.hasOption(HELP.getOpt())) {
      if (printUsage) {
        printUsage(usage, options);
      }
      // Signals "user asked for help" so callers can distinguish it from a real failure.
      throw new HelpArgumentFound();
    }

    try {
      return buildObject(cli);
    } catch (Throwable t) {
      // -l routes the failure to the logger (full stack trace); otherwise just print the message.
      if (cli.hasOption(USE_LOG.getOpt())) {
        log.error("Failed to instantiate " + this.klazz.getName(), t);
      } else {
        System.out.println("Error: " + t.getMessage());
      }
      if (printUsage) {
        printUsage(usage, options);
      }
      throw new IOException(t);
    }
  }
protected abstract T constructObject(CommandLine cli) throws IOException;
@Override
public String getUsageString() {
return "[OPTIONS]";
}
/**
* For each method for which the helper created an {@link Option} and for which the input {@link CommandLine} contains
* that option, this method will automatically call the method on the input object with the correct
* arguments.
*/
public void applyCommandLineOptions(CommandLine cli, T embeddedGobblin) {
try {
for (Option option : cli.getOptions()) {
if (!this.methodsMap.containsKey(option.getOpt())) {
// Option added by cli driver itself.
continue;
}
if (option.hasArg()) {
this.methodsMap.get(option.getOpt()).invoke(embeddedGobblin, option.getValue());
} else {
this.methodsMap.get(option.getOpt()).invoke(embeddedGobblin);
}
}
} catch (IllegalAccessException | InvocationTargetException exc) {
throw new RuntimeException("Could not apply options to " + embeddedGobblin.getClass().getName(), exc);
}
}
private Options inferOptionsFromMethods() {
Options options = new Options();
for (Method method : klazz.getMethods()) {
if (canUseMethod(method)) {
CliObjectOption annotation = method.isAnnotationPresent(CliObjectOption.class) ?
method.getAnnotation(CliObjectOption.class) : null;
String optionName = annotation == null || Strings.isNullOrEmpty(annotation.name())
? method.getName() : annotation.name();
String description = annotation == null ? "" : annotation.description();
Option.Builder builder = Option.builder(optionName).desc(description);
boolean hasArg = method.getParameterTypes().length > 0;
if (hasArg) {
builder.hasArg();
}
Option option = builder.build();
options.addOption(option);
this.methodsMap.put(option.getOpt(), method);
}
}
return options;
}
private boolean canUseMethod(Method method) {
if (!Modifier.isPublic(method.getModifiers())) {
return false;
}
if (BLACKLISTED_FROM_CLI.contains(method.getName())) {
return false;
}
if (method.isAnnotationPresent(NotOnCli.class)) {
return false;
}
Class<?>[] parameters = method.getParameterTypes();
if (parameters.length >= 2) {
return false;
}
if (parameters.length == 1 && parameters[0] != String.class) {
return false;
}
return true;
}
private void printUsage(String usage, Options options) {
HelpFormatter formatter = new HelpFormatter();
formatter.setOptionComparator(new Comparator<Option>() {
@Override
public int compare(Option o1, Option o2) {
if (o1.isRequired() && !o2.isRequired()) {
return -1;
}
if (!o1.isRequired() && o2.isRequired()) {
return 1;
}
return o1.getOpt().compareTo(o2.getOpt());
}
});
formatter.printHelp(usage, options);
}
}
| 4,293 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime/cli/CliApplication.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.cli;
/**
 * An application that can be called by {@link GobblinCli}.
 */
public interface CliApplication {
  /**
   * Run this application. Note the array of args contains the alias of this application as its first argument.
   */
  void run(String[] args) throws Exception;
}
| 4,294 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime/cli/CliObjectFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.cli;
import java.io.IOException;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Options;
/**
 * A factory used to create an object from CLI arguments backed by commons-cli library.
 */
public interface CliObjectFactory<T> {

  /**
   * @return an instance of {@link Options} understood by this factory.
   */
  Options getOptions();

  /**
   * Build an instance of T from the parsed {@link CommandLine}. The input {@link CommandLine} was parsed with
   * the output of {@link Options}, but may include additional {@link org.apache.commons.cli.Option}s added by the driver.
   */
  T buildObject(CommandLine cli);

  /**
   * Build an instance of T from the input arguments.
   * @param args input arguments to the application.
   * @param offset will only start processing from this argument number.
   * @param printUsage if true, a failure or -h will cause help to be printed and an exception to be thrown.
   * @param usage usage String to be printed at the beginning of help message.
   */
  T buildObject(String[] args, int offset, boolean printUsage, String usage) throws IOException;

  /**
   * Get a usage string for display on the command line. The output of this method will be appended to the base string
   * "gobblin run {@code <appName>}". This should specify required options or parameters.
   */
  String getUsageString();

  /**
   * Thrown if help argument (-h) was found in the app arguments.
   */
  // Members of an interface are implicitly public, and nested classes implicitly static;
  // the redundant modifiers were dropped.
  class HelpArgumentFound extends RuntimeException {
  }
}
| 4,295 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime/cli/CliObjectSupport.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.cli;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks the constructor that the CLI machinery should use when instantiating the object.
 *
 * <p>The annotation is honored only on constructors whose parameters are all of type {@link String};
 * annotating more than one such constructor in a class is an error. The {@link #argumentNames()}
 * array must contain exactly one entry per constructor parameter.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.CONSTRUCTOR)
public @interface CliObjectSupport {

  /**
   * Display names of the constructor parameters, used as option names on the CLI. Its length
   * must match the constructor's parameter count exactly.
   */
  String[] argumentNames() default {};
}
| 4,296 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/runtime/cli/NotOnCli.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.cli;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks a public method that must be skipped when inferring options for the CLI.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface NotOnCli {
}
| 4,297 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/fsm/StateWithCallbacks.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.fsm;
import javax.annotation.Nullable;
/**
 * A state for a {@link FiniteStateMachine} which supports callbacks when entering and leaving the state.
 * Both callbacks default to no-ops, so implementors may override only the one they need.
 * @param <T> supertype of states in the FSM.
 */
public interface StateWithCallbacks<T> {
  /**
   * Called when an FSM reaches this state.
   * @param previousState the previous state of the machine; null when this state is the
   *                      machine's initial state (there is no predecessor).
   */
  default void onEnterState(@Nullable T previousState) {
    // do nothing
  }
  /**
   * Called when an FSM leaves this state.
   * @param nextState the next state of the machine.
   */
  default void onLeaveState(T nextState) {
    // do nothing
  }
}
| 4,298 |
0 | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-utility/src/main/java/org/apache/gobblin/fsm/FiniteStateMachine.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.fsm;
import java.io.Closeable;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.SetMultimap;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
 * An implementation of a basic FiniteStateMachine that allows keeping track of its state and gating certain
* logic on whether a transition is valid or not.
*
* This class is useful in situations where logic is complex, possibly multi-threaded, and can take multiple paths. Certain
* pieces of logic (for example running a job, publishing a dataset, etc) can only happen if other actions ended correctly,
* and the FSM is a way of simplifying the encoding and verification of those conditions. It is understood that state
* transitions may not be instantaneous, and that other state transitions should not start until the current one has
* been resolved.
*
* All public methods of this class will wait until the FSM is in a non-transitioning state. If multiple transitions are
* queued at the same time, the order in which they are executed is essentially random.
*
* The states supported by FSM can be enums or instances of any base type. The legality of a transition is determined
* by equality, i.e. if a transition A -> B is legal, the current state is A' and the desired end state is B', the transition
* will be legal if A.equals(A') && B.equals(B'). This allows for storing additional information into the current state
* as long as it does not affect the equality check (i.e. fields that are not compared in the equals check can store
* state metadata, etc.).
*
* Suggested Usage:
* FiniteStateMachine<MySymbols> fsm = new FiniteStateMachine.Builder().addTransition(START_SYMBOL, END_SYMBOL).build(initialSymbol);
*
* try (Transition transition = fsm.startTransition(MY_END_STATE)) {
* try {
* // my logic
* } catch (MyException exc) {
* transition.changeEndState(MY_ERROR);
* }
* } catch (UnallowedTransitionException exc) {
* // Cannot execute logic because it's an illegal transition!
* } catch (ReentrantStableStateWait exc) {
* // Somewhere in the logic an instruction tried to do an operation with the fsm that would likely cause a deadlock
* } catch (AbandonedTransitionException exc) {
* // Another thread initiated a transition and became inactive ending the transition
* } catch (InterruptedException exc) {
* // Could not start transition because thread got interrupted while waiting for a non-transitioning state
* } catch (FailedTransitionCallbackException exc) {
* // A callback in the transition start or end states has failed.
* exc.getTransition().changeEndState(MY_ERROR).closeWithoutCallbacks(); // example handling
* }
*
* @param <T>
*/
@Slf4j
public class FiniteStateMachine<T> {

  /**
   * Used to build a {@link FiniteStateMachine} instance.
   */
  public static class Builder<T> {
    private final SetMultimap<T, T> allowedTransitions;
    private final Set<T> universalEnds;
    private T errorState;

    public Builder() {
      this.allowedTransitions = HashMultimap.create();
      this.universalEnds = new HashSet<>();
    }

    /**
     * Add a legal transition to the {@link FiniteStateMachine}.
     */
    public Builder<T> addTransition(T startState, T endState) {
      this.allowedTransitions.put(startState, endState);
      return this;
    }

    /**
     * Specify that a state is a valid end state for a transition starting from any state. Useful for example for
     * error states.
     */
    public Builder<T> addUniversalEnd(T state) {
      this.universalEnds.add(state);
      return this;
    }

    /**
     * Specify the error state to which this machine can transition if nothing else is possible. Note the error state
     * is always an allowed end state.
     */
    public Builder<T> errorState(T state) {
      this.errorState = state;
      return this;
    }

    /**
     * Build a {@link FiniteStateMachine} starting at the given initial state.
     */
    public FiniteStateMachine<T> build(T initialState) {
      return new FiniteStateMachine<>(this.allowedTransitions, this.universalEnds, this.errorState, initialState);
    }
  }

  private final SetMultimap<T, T> allowedTransitions;
  private final Set<T> universalEnds;
  private final T errorState;
  // Guards currentState/currentTransition; the condition on the write lock is signalled
  // whenever a transition completes.
  private final ReentrantReadWriteLock lock;
  private final Condition condition;
  private final T initialState;

  private volatile T currentState;
  // Non-null exactly while a transition is in progress.
  private volatile Transition currentTransition;

  protected FiniteStateMachine(SetMultimap<T, T> allowedTransitions, Set<T> universalEnds, T errorState, T initialState) {
    this.allowedTransitions = allowedTransitions;
    this.universalEnds = universalEnds;
    this.errorState = errorState;
    this.lock = new ReentrantReadWriteLock();
    this.condition = this.lock.writeLock().newCondition();
    this.initialState = initialState;
    this.currentState = initialState;
    // The initial state has no predecessor, hence the null previous state.
    if (this.currentState instanceof StateWithCallbacks) {
      ((StateWithCallbacks) this.currentState).onEnterState(null);
    }
  }

  /**
   * Start a transition to the end state specified. The returned {@link Transition} object is a closeable that will finalize
   * the transition when it is closed. While the transition is open, no other transition can start.
   *
   * It is recommended to call this method only within a try-with-resource block to ensure the transition is closed.
   *
   * @throws UnallowedTransitionException If the transition is not allowed.
   * @throws InterruptedException if the thread got interrupted while waiting for a non-transitioning state.
   */
  public Transition startTransition(T endState) throws UnallowedTransitionException, InterruptedException {
    // Acquire the lock outside the try block so the finally clause never tries to unlock
    // a lock this thread failed to acquire.
    this.lock.writeLock().lock();
    try {
      while (isTransitioning()) {
        this.condition.await();
      }
      if (!isAllowedTransition(this.currentState, endState)) {
        throw new UnallowedTransitionException(this.currentState, endState);
      }
      Transition transition = new Transition(endState);
      this.currentTransition = transition;
      return transition;
    } finally {
      this.lock.writeLock().unlock();
    }
  }

  /**
   * Transition immediately to the given end state. This is essentially {@link #startTransition(Object)} immediately
   * followed by {@link Transition#close()}.
   *
   * @throws UnallowedTransitionException if the transition is not allowed.
   * @throws InterruptedException if the thread got interrupted while waiting for a non-transitioning state.
   */
  public void transitionImmediately(T endState) throws UnallowedTransitionException, InterruptedException, FailedTransitionCallbackException {
    Transition transition = startTransition(endState);
    transition.close();
  }

  /**
   * Transition immediately to the given end state if the transition is allowed.
   *
   * @return true if the transition happened.
   * @throws InterruptedException if the thread got interrupted while waiting for a non-transitioning state.
   */
  public boolean transitionIfAllowed(T endState) throws InterruptedException, FailedTransitionCallbackException {
    try {
      transitionImmediately(endState);
    } catch (UnallowedTransitionException exc) {
      return false;
    }
    return true;
  }

  /**
   * Get the current state. This method will wait until the FSM is in a non-transitioning state (although a transition
   * may start immediately after).
   * @throws InterruptedException if the thread got interrupted while waiting for a non-transitioning state.
   */
  public T getCurrentState() throws InterruptedException {
    // Need to get lock to make sure we're not in transitioning state.
    // Acquired outside the try block so finally cannot unlock an unheld lock.
    this.lock.readLock().lock();
    try {
      waitForNonTransitioningReadLock();
      return this.currentState;
    } finally {
      this.lock.readLock().unlock();
    }
  }

  @VisibleForTesting
  T getCurrentStateEvenIfTransitioning() {
    return this.currentState;
  }

  /**
   * @return A clone of this FSM starting at the initial state of the FSM.
   */
  public FiniteStateMachine<T> cloneAtInitialState() {
    return new FiniteStateMachine<>(this.allowedTransitions, this.universalEnds, this.errorState, this.initialState);
  }

  /**
   * @return A clone of this FSM starting at the current state of the FSM.
   * @throws InterruptedException if the thread got interrupted while waiting for a non-transitioning state.
   */
  public FiniteStateMachine<T> cloneAtCurrentState() throws InterruptedException {
    // Acquired outside the try block so finally cannot unlock an unheld lock.
    this.lock.readLock().lock();
    try {
      waitForNonTransitioningReadLock();
      return new FiniteStateMachine<>(this.allowedTransitions, this.universalEnds, this.errorState, this.currentState);
    } finally {
      this.lock.readLock().unlock();
    }
  }

  /**
   * Waits for a read lock in a non-transitioning state. The caller MUST hold the read lock before calling this method.
   * @throws InterruptedException
   */
  private void waitForNonTransitioningReadLock() throws InterruptedException {
    if (isTransitioning()) {
      this.lock.readLock().unlock();
      // To use the condition, need to upgrade to a write lock
      this.lock.writeLock().lock();
      try {
        while (isTransitioning()) {
          this.condition.await();
        }
        // After non-transitioning state, downgrade again to read-lock
        this.lock.readLock().lock();
      } finally {
        this.lock.writeLock().unlock();
      }
    }
  }

  /**
   * @return whether a transition is currently in progress.
   * @throws ReentrantStableStateWait if called from the thread that owns the open transition (deadlock guard).
   * @throws AbandonedTransitionException if the transition's owner thread died without closing it.
   */
  private boolean isTransitioning() {
    if (this.currentTransition != null && Thread.currentThread().equals(this.currentTransition.ownerThread)) {
      throw new ReentrantStableStateWait(
          "Tried to check for non-transitioning state from a thread that had already initiated a transition, "
              + "this may indicate a deadlock. To change end state use Transition.changeEndState() instead.");
    }
    if (this.currentTransition != null && !this.currentTransition.ownerThread.isAlive()) {
      throw new AbandonedTransitionException(this.currentTransition.ownerThread);
    }
    return this.currentTransition != null;
  }

  /**
   * @return true when the transition startState -> endState is legal: the end state is the error state,
   * a universal end state, or explicitly registered for the start state.
   */
  protected boolean isAllowedTransition(T startState, T endState) {
    if (endState.equals(this.errorState)) {
      return true;
    }
    if (this.universalEnds.contains(endState)) {
      return true;
    }
    Set<T> endStates = this.allowedTransitions.get(startState);
    return endStates != null && endStates.contains(endState);
  }

  /**
   * A handle used for controlling the transition of the {@link FiniteStateMachine}. Note if this handle is lost the
   * {@link FiniteStateMachine} will likely go into an invalid state.
   */
  public class Transition implements Closeable {
    // Thread that initiated the transition; used to detect re-entrancy and abandonment.
    private final Thread ownerThread;
    private volatile T endState;
    private volatile boolean closed;

    private Transition(T endState) {
      this.ownerThread = Thread.currentThread();
      this.endState = endState;
      this.closed = false;
    }

    /**
     * Get the state at the beginning of this transition.
     */
    public T getStartState() {
      if (this.closed) {
        throw new IllegalStateException("Transition already closed.");
      }
      return FiniteStateMachine.this.currentState;
    }

    /**
     * Change the end state of the transition. The new end state must be a legal transition for the state when the
     * {@link Transition} was created.
     *
     * @throws UnallowedTransitionException if the new end state is not an allowed transition.
     */
    public synchronized void changeEndState(T endState) throws UnallowedTransitionException {
      if (this.closed) {
        throw new IllegalStateException("Transition already closed.");
      }
      if (!isAllowedTransition(FiniteStateMachine.this.currentState, endState)) {
        throw new UnallowedTransitionException(FiniteStateMachine.this.currentState, endState);
      }
      this.endState = endState;
    }

    /**
     * Change the end state of the transition to the FSM error state.
     */
    public synchronized void switchEndStateToErrorState() {
      this.endState = FiniteStateMachine.this.errorState;
    }

    /**
     * Close the current transition moving the {@link FiniteStateMachine} to the end state and releasing all locks.
     *
     * @throws FailedTransitionCallbackException when start or end state callbacks fail. Note if this exception is thrown
     * the transition is not complete and the error must be handled to complete it.
     */
    @Override
    public void close() throws FailedTransitionCallbackException {
      doClose(true);
    }

    /**
     * Close the current transition moving the {@link FiniteStateMachine} to the end state and releasing all locks without
     * calling any callbacks. This method should only be called after a {@link #close()} has failed and the failure
     * cannot be handled.
     */
    public void closeWithoutCallbacks() {
      try {
        doClose(false);
      } catch (FailedTransitionCallbackException exc) {
        throw new IllegalStateException(String.format("Close without callbacks threw a %s. This is an error in code.",
            FailedTransitionCallbackException.class), exc);
      }
    }

    /**
     * Run state callbacks (if requested), commit the end state, clear the in-progress transition
     * and wake up any threads waiting for a non-transitioning state.
     */
    private synchronized void doClose(boolean withCallbacks) throws FailedTransitionCallbackException {
      if (this.closed) {
        return;
      }
      // Acquire the lock outside the try block so the finally clause never tries to unlock
      // a lock this thread failed to acquire.
      FiniteStateMachine.this.lock.writeLock().lock();
      try {
        try {
          if (withCallbacks && getStartState() instanceof StateWithCallbacks) {
            ((StateWithCallbacks<T>) getStartState()).onLeaveState(this.endState);
          }
        } catch (Throwable t) {
          throw new FailedTransitionCallbackException(this, FailedCallback.START_STATE, t);
        }
        try {
          if (withCallbacks && this.endState instanceof StateWithCallbacks) {
            ((StateWithCallbacks) this.endState).onEnterState(getStartState());
          }
        } catch (Throwable t) {
          throw new FailedTransitionCallbackException(this, FailedCallback.END_STATE, t);
        }
        this.closed = true;
        FiniteStateMachine.this.currentState = this.endState;
        FiniteStateMachine.this.currentTransition = null;
        FiniteStateMachine.this.condition.signalAll();
      } finally {
        FiniteStateMachine.this.lock.writeLock().unlock();
      }
    }
  }

  /**
   * If a transition is not allowed to happen.
   */
  @Getter
  public static class UnallowedTransitionException extends Exception {
    private final Object startState;
    private final Object endState;

    public UnallowedTransitionException(Object startState, Object endState) {
      super(String.format("Unallowed transition: %s -> %s", startState, endState));
      this.startState = startState;
      this.endState = endState;
    }
  }

  /**
   * Thrown when a thread that has started a transition is waiting for a non-transitioning state, which is a deadlock situation.
   */
  public static class ReentrantStableStateWait extends RuntimeException {
    public ReentrantStableStateWait(String message) {
      super(message);
    }
  }

  /**
   * Thrown when a transition was initiated by a thread that no longer exists, likely implying that the transition can
   * never be closed.
   */
  // @Getter added for consistency with the sibling exceptions: previously startingThread was
  // stored but inaccessible to callers.
  @Getter
  public static class AbandonedTransitionException extends RuntimeException {
    private final Thread startingThread;

    public AbandonedTransitionException(Thread startingThread) {
      super(String.format("Thread %s initiated a transition but became inactive before closing it.", startingThread));
      this.startingThread = startingThread;
    }
  }

  /** Identifies which callback failed when closing a transition. */
  public enum FailedCallback {
    START_STATE, END_STATE
  }

  /**
   * Thrown when the callbacks when closing a transition fail.
   */
  @Getter
  public static class FailedTransitionCallbackException extends IOException {
    private final FiniteStateMachine.Transition transition;
    private final FailedCallback failedCallback;
    private final Throwable originalException;

    public FailedTransitionCallbackException(FiniteStateMachine<?>.Transition transition, FailedCallback failedCallback,
        Throwable originalException) {
      super("Failed callbacks when ending transition.", originalException);
      this.transition = transition;
      this.failedCallback = failedCallback;
      this.originalException = originalException;
    }
  }
}
| 4,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.