index
int64
0
0
repo_id
stringlengths
26
205
file_path
stringlengths
51
246
content
stringlengths
8
433k
__index_level_0__
int64
0
10k
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/KeyedInstance.java
package netflix.ocelli;

import rx.Observable;
import rx.Observable.Transformer;
import rx.functions.Func1;
import rx.observables.GroupedObservable;

/**
 * Pairing of a partition key {@code K} with an {@link Instance}.  Used internally by
 * {@link #partitionBy(Func1)} to tag each instance with the partition(s) it belongs to
 * before grouping.
 */
public class KeyedInstance<K, T> {
    // The partition this member was assigned to.
    private final K key;
    // The member being partitioned.
    private final Instance<T> member;

    KeyedInstance(K key, Instance<T> member) {
        this.key = key;
        this.member = member;
    }

    /**
     * Partition Members into multiple partitions based on a partition function.
     * It's possible for a member to exist in multiple partitions.  Each partition
     * is a GroupedObservable of members for that partition alone.  This stream can
     * be fed into a load balancer.
     *
     * Partitions are useful in the following use cases.
     *
     * 1.  Hosts are grouped into VIPs where each VIP subset can service a certain subset of
     *     requests.  In the example below API's provided by vip1 can be serviced by Hosts 1,2,3
     *     whereas API's provided by vip2 can only be serviced by hosts 2 and 3.
     *
     *     VIP : F(Host) -&gt; O(vip)  Multiple vips
     *
     *      &lt;vip1&gt; Host1, Host2, Host3
     *      &lt;vip2&gt; Host2, Host3
     *
     * 2.  Shard or hash aware clients using consistent hashing (ex. Cassandra) or sharding (ex. EvCache)
     *     will opt to send traffic only to nodes that can own the data.  The partitioner function
     *     will return the tokenRangeId or shardId for each host.  Note that for replication factor of 1
     *     each shard will contain only 1 host while for higher replication factors each shard will contain
     *     multiple hosts (equal to the number of shards) and that these hosts will overlap.
     *
     *      &lt;range1&gt; Host1, Host2
     *      &lt;range2&gt; Host2, Host3
     *      &lt;range3&gt; Host3, Host4
     *      &lt;range4&gt; Host4, Host5
     *
     * @author elandau
     *
     * @param <C> Client type
     * @param <K> The partition key
     */
    public static <K, T> Transformer<Instance<T>, GroupedObservable<K, Instance<T>>> partitionBy(final Func1<T, Observable<K>> partitioner) {
        return new Transformer<Instance<T>, GroupedObservable<K, Instance<T>>>() {
            @Override
            public Observable<GroupedObservable<K, Instance<T>>> call(final Observable<Instance<T>> o) {
                return o
                    // Expand each instance into one KeyedInstance per partition key it maps to
                    // (the partitioner may emit multiple keys for a single member).
                    .flatMap(new Func1<Instance<T>, Observable<KeyedInstance<K, T>>>() {
                        @Override
                        public Observable<KeyedInstance<K, T>> call(final Instance<T> member) {
                            return partitioner
                                .call(member.getValue())
                                .map(new Func1<K, KeyedInstance<K, T>>() {
                                    @Override
                                    public KeyedInstance<K, T> call(K key) {
                                        return new KeyedInstance<K, T>(key, member);
                                    }
                                });
                        }
                    })
                    // Group by partition key, unwrapping back to the raw Instance<T> for each group.
                    .groupBy(
                        new Func1<KeyedInstance<K, T>, K>() {
                            @Override
                            public K call(KeyedInstance<K, T> t1) {
                                return t1.key;
                            }
                        },
                        new Func1<KeyedInstance<K, T>, Instance<T>>() {
                            @Override
                            public Instance<T> call(KeyedInstance<K, T> t1) {
                                return t1.member;
                            }
                        });
            }
        };
    }
}
6,200
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/Instance.java
package netflix.ocelli; import rx.Observable; /** * An {@link Instance} encapsulates a generic entity as well as its lifecycle. Lifecycle * is managed as an Observable<Void> that onCompletes when the entity is no longer in the * pool. This technique is also used to introduce topologies and quarantine logic for any * client type where each incarnation of the entity within the load balancer has its own * lifecycle. * * Instance is used internally in Ocelli and should not be created directly other than * for implementing specific entity registry solutions, such as Eureka. * * @see LoadBalancer * * @author elandau * * @param <T> */ public abstract class Instance<T> { public static <T> Instance<T> create(final T value, final Observable<Void> lifecycle) { return new Instance<T>() { @Override public Observable<Void> getLifecycle() { return lifecycle; } @Override public T getValue() { return value; } }; } /** * Return the lifecycle for this object * @return */ public abstract Observable<Void> getLifecycle(); /** * Return the instance object which could be an address or an actual client implementation * @return */ public abstract T getValue(); public String toString() { return "Instance[" + getValue() + "]"; } }
6,201
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/InstanceEvent.java
package netflix.ocelli; @SuppressWarnings("rawtypes") public class InstanceEvent<T extends Enum> extends AbstractLoadBalancerEvent<T> { public enum EventType implements MetricEventType { /* Connection specific events. */ ExecutionSuccess(true, false, Void.class), ExecutionFailed(true, true, Void.class), ; private final boolean isTimed; private final boolean isError; private final Class<?> optionalDataType; EventType(boolean isTimed, boolean isError, Class<?> optionalDataType) { this.isTimed = isTimed; this.isError = isError; this.optionalDataType = optionalDataType; } @Override public boolean isTimed() { return isTimed; } @Override public boolean isError() { return isError; } @Override public Class<?> getOptionalDataType() { return optionalDataType; } } public static final InstanceEvent<EventType> EXECUTION_SUCCESS = from(EventType.ExecutionSuccess); public static final InstanceEvent<EventType> EXECUTION_FAILED = from(EventType.ExecutionFailed); /*Always refer to as constants*/protected InstanceEvent(T name, boolean isTimed, boolean isError) { super(name, isTimed, isError); } private static InstanceEvent<EventType> from(EventType type) { return new InstanceEvent<EventType>(type, type.isTimed(), type.isError()); } }
6,202
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/LoadBalancerEventListener.java
package netflix.ocelli; import java.util.concurrent.TimeUnit; public interface LoadBalancerEventListener <E extends LoadBalancerEvent<?>> { int NO_DURATION = -1; Object NO_VALUE = null; Throwable NO_ERROR = null; TimeUnit NO_TIME_UNIT = null; /** * Event callback for any {@link MetricsEvent}. The parameters passed are all the contextual information possible for * any event. There presence or absence will depend on the type of event. * * @param event Event for which this callback has been invoked. This will never be {@code null} * @param duration If the passed event is {@link MetricsEvent#isTimed()} then the actual duration, else * {@link #NO_DURATION} * @param timeUnit The time unit for the duration, if exists, else {@link #NO_TIME_UNIT} * @param throwable If the passed event is {@link MetricsEvent#isError()} then the cause of the error, else * {@link #NO_ERROR} * @param value If the passed event requires custom object to be passed, then that object, else {@link #NO_VALUE} */ void onEvent(E event, long duration, TimeUnit timeUnit, Throwable throwable, Object value); /** * Marks the end of all event callbacks. No methods on this listener will ever be called once this method is called. */ void onCompleted(); /** * A callback when this listener is subscribed to a {@link MetricEventsPublisher}. */ void onSubscribe(); }
6,203
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/LoadBalancerEvent.java
package netflix.ocelli; @SuppressWarnings("rawtypes") public interface LoadBalancerEvent <T extends Enum> { T getType(); boolean isTimed(); boolean isError(); /** * This interface is a "best-practice" rather than a contract as a more strongly required contract is for the event * type to be an enum. */ interface MetricEventType { boolean isTimed(); boolean isError(); Class<?> getOptionalDataType(); } }
6,204
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/SnapshotToInstance.java
package netflix.ocelli; import rx.Observable; import rx.Observable.Transformer; import rx.functions.Func1; import rx.functions.Func2; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; /** * Utility class to convert a full snapshot of pool members, T, to an Observable<Instance<T>> * by diffing the new list with the last list. Any {@link Instance} that has been removed * will have it's lifecycle terminated. * * @author elandau * * @param <T> */ public class SnapshotToInstance<T> implements Transformer<List<T>, Instance<T>> { public class State { final Map<T, CloseableInstance<T>> members; final List<Instance<T>> newInstances; public State() { members = new HashMap<T, CloseableInstance<T>>(); newInstances = Collections.emptyList(); } public State(State toCopy) { members = new HashMap<T, CloseableInstance<T>>(toCopy.members); newInstances = new ArrayList<>(); } } @Override public Observable<Instance<T>> call(Observable<List<T>> snapshots) { return snapshots.scan(new State(), new Func2<State, List<T>, State>() { @Override public State call(State state, List<T> instances) { State newState = new State(state); Set<T> keysToRemove = new HashSet<T>(newState.members.keySet()); for (T ii : instances) { keysToRemove.remove(ii); if (!newState.members.containsKey(ii)) { CloseableInstance<T> member = CloseableInstance.from(ii); newState.members.put(ii, member); newState.newInstances.add(member); } } for (T tKey : keysToRemove) { CloseableInstance<T> removed = newState.members.remove(tKey); if (null != removed) { removed.close(); } } return newState; } }).concatMap(new Func1<State, Observable<Instance<T>>>() { @Override public Observable<Instance<T>> call(State state) { return Observable.from(state.newInstances); } }); } }
6,205
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/Host.java
package netflix.ocelli; import java.util.Map; /** * A class for expressing a host. * * @author Nitesh Kant */ public class Host { private String hostName; private int port; private Map<String, String> attributes; public Host(String hostName, int port) { this.hostName = hostName; this.port = port; } public Host(String hostName, int port, Map<String, String> attributes) { this.hostName = hostName; this.port = port; this.attributes = attributes; } public String getHostName() { return hostName; } public int getPort() { return port; } public String getAttributes(String key, String defaultValue) { if (attributes != null && attributes.containsKey(key)) { return attributes.get(key); } return defaultValue; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof Host)) { return false; } Host host = (Host) o; return port == host.port && !(hostName != null ? !hostName.equals(host.hostName) : host.hostName != null); } @Override public int hashCode() { int result = hostName != null ? hostName.hashCode() : 0; result = 31 * result + port; return result; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("Host [") .append("hostName=").append(hostName) .append(", port=").append(port); if (attributes != null && attributes.isEmpty()) { sb.append(", attr=").append(attributes); } return sb.append("]").toString(); } }
6,206
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/DelayStrategy.java
package netflix.ocelli; /** * Strategy to determine the backoff delay after N consecutive failures. * * @author elandau * */ public interface DelayStrategy { long get(int consecutiveFailures); }
6,207
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/InstanceQuarantiner.java
package netflix.ocelli;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Scheduler;
import rx.Subscriber;
import rx.functions.Action0;
import rx.functions.Func0;
import rx.functions.Func1;
import rx.subjects.BehaviorSubject;

/**
 * To be used as a flatMap on a stream of Instance&lt;T&gt; the quarantiner converts the Instance&lt;T&gt;
 * to an Observable&lt;Instance&lt;T&gt;&gt; where each emitted item represents an active Instance&lt;T&gt; based
 * on a failure detector specific to T.  When failure is detected the emitted instance's lifecycle
 * will be terminated and a new Instance&lt;T&gt; based on the original Instance&lt;T&gt; (passed to the flatMap)
 * will be emitted after a configurable quarantine time.
 *
 * @author elandau
 *
 * @param <T>
 */
public class InstanceQuarantiner<T> implements Func1<Instance<T>, Observable<Instance<T>>> {
    private static final Logger LOG = LoggerFactory.getLogger(InstanceQuarantiner.class);

    // Builds each new incarnation of the client from the primary instance.
    private final IncarnationFactory<T> factory;
    // Determines how long to quarantine after N consecutive failures (milliseconds).
    private final DelayStrategy backoffStrategy;
    // Scheduler on which the quarantine delay is applied.
    private final Scheduler scheduler;

    /**
     * Factory for creating a new incarnation of a server based on the primary incarnation.
     *
     * @author elandau
     *
     * @param <T>
     */
    public interface IncarnationFactory<T> {
        /**
         * @param value The primary client instance
         * @param listener Listener to invoke with success and failure events
         * @param lifecycle Composite of this incarnation's failure-detected lifecycle and membership
         * @return the new incarnation
         */
        public T create(T value, InstanceEventListener listener, Observable<Void> lifecycle);
    }

    // NOTE(review): not referenced within this class — presumably part of the public API
    // for callers; verify before removing.
    public interface ShutdownAction<T> {
        void shutdown(T object);
    }

    /**
     * Create a new InstanceQuarantiner.
     *
     * @param factory         factory for new incarnations of a client
     * @param backoffStrategy quarantine delay per consecutive-failure count
     * @param scheduler       scheduler used for the quarantine delay
     */
    public static <T> InstanceQuarantiner<T> create(
            IncarnationFactory<T> factory,
            DelayStrategy backoffStrategy,
            Scheduler scheduler) {
        return new InstanceQuarantiner<T>(factory, backoffStrategy, scheduler);
    }

    public InstanceQuarantiner(IncarnationFactory<T> factory, DelayStrategy backoffStrategy, Scheduler scheduler) {
        this.factory = factory;
        this.scheduler = scheduler;
        this.backoffStrategy = backoffStrategy;
    }

    /**
     * Metrics for a single incarnation of a client type
     */
    static class IncarnationMetrics extends InstanceEventListener {
        // Invoked once, on the first failure, to end this incarnation's lifecycle.
        final Action0 shutdownAction;
        // Latches after the first failure so the incarnation is shut down exactly once.
        final AtomicBoolean failed = new AtomicBoolean(false);
        // Shared, cross-incarnation metrics to which events are forwarded.
        final InstanceMetrics parent;

        public IncarnationMetrics(InstanceMetrics parent, Action0 shutdownAction) {
            this.parent = parent;
            this.shutdownAction = shutdownAction;
        }

        @Override
        protected void onExecutionFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            // Only the FIRST failure triggers shutdown and is counted; later events on a
            // dead incarnation are ignored so the parent's counters aren't skewed.
            if (failed.compareAndSet(false, true)) {
                parent.onExecutionFailed(duration, timeUnit, throwable);
                shutdownAction.call();
            }
        }

        @Override
        protected void onExecutionSuccess(long duration, TimeUnit timeUnit) {
            // Successes are only forwarded while the incarnation is still alive.
            if (!failed.get()) {
                parent.onExecutionSuccess(duration, timeUnit);
            }
        }
    }

    /**
     * Metrics shared across all incarnations of a client type
     */
    static class InstanceMetrics extends InstanceEventListener {
        // Consecutive failure count; reset to 0 by any success.  Feeds the backoff strategy.
        private AtomicInteger consecutiveFailures = new AtomicInteger();

        @Override
        protected void onExecutionFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            consecutiveFailures.incrementAndGet();
        }

        @Override
        protected void onExecutionSuccess(long duration, TimeUnit timeUnit) {
            consecutiveFailures.set(0);
        }
    }

    @Override
    public Observable<Instance<T>> call(final Instance<T> primaryInstance) {
        return Observable.create(new OnSubscribe<Instance<T>>() {
            @Override
            public void call(final Subscriber<? super Instance<T>> s) {
                // Metrics shared across every incarnation created for this primary instance.
                final InstanceMetrics instanceMetrics = new InstanceMetrics();
                // First incarnation is created immediately; subsequent ones are delayed.
                final AtomicBoolean first = new AtomicBoolean(true);

                s.add(Observable
                    // defer() so that each repeat() iteration creates a fresh incarnation.
                    .defer(new Func0<Observable<Instance<T>>>() {
                        @Override
                        public Observable<Instance<T>> call() {
                            LOG.info("Creating next incarnation of '{}'", primaryInstance.getValue());
                            final BehaviorSubject<Void> incarnationLifecycle = BehaviorSubject.create();
                            final IncarnationMetrics metrics = new IncarnationMetrics(
                                    instanceMetrics,
                                    // TODO: Make this a policy
                                    new Action0() {
                                        @Override
                                        public void call() {
                                            // First failure ends this incarnation's lifecycle.
                                            incarnationLifecycle.onCompleted();
                                        }
                                    });
                            // The incarnation lifecycle is tied to the main instance membership as well as failure
                            // detection
                            // TODO: Can we do this without cache?
                            final Observable<Void> lifecycle = incarnationLifecycle.ambWith(primaryInstance.getLifecycle()).cache();
                            Observable<Instance<T>> o = Observable.just(Instance.create(factory.create(primaryInstance.getValue(), metrics, lifecycle), lifecycle));
                            // After the first incarnation, apply the quarantine backoff
                            // before re-emitting a replacement.
                            if (!first.compareAndSet(true, false)) {
                                long delay = backoffStrategy.get(instanceMetrics.consecutiveFailures.get());
                                o = o.delaySubscription(delay, TimeUnit.MILLISECONDS, scheduler);
                            }
                            return o;
                        }
                    })
                    // Emit the incarnation to the subscriber, then wait for its lifecycle
                    // to complete before repeat() creates the next one.
                    .concatMap(new Func1<Instance<T>, Observable<Void>>() {
                        @Override
                        public Observable<Void> call(final Instance<T> instance) {
                            s.onNext(instance);
                            return instance.getLifecycle();
                        }
                    })
                    .repeat()
                    // Stop producing incarnations once the primary instance leaves the pool.
                    .takeUntil(primaryInstance.getLifecycle())
                    .subscribe());
            }
        });
    }
}
6,208
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/InstanceCollector.java
package netflix.ocelli;

import rx.Observable;
import rx.Observable.Operator;
import rx.Observable.Transformer;
import rx.Subscriber;
import rx.Subscription;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.functions.Func0;
import rx.functions.Func1;
import rx.subscriptions.CompositeSubscription;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

/**
 * From a list of Instance&lt;T&gt; maintain a List of active Instance&lt;T&gt;.  Add when T is up and remove
 * when T is either down or Instance failed or completed.
 *
 * @author elandau
 *
 * @param <T>
 */
public class InstanceCollector<T> implements Transformer<Instance<T>, List<T>> {

    // Supplies the backing map of live instances to their lifecycle subscriptions.
    private final Func0<Map<T, Subscription>> instanceStoreFactory;

    private InstanceCollector(Func0<Map<T, Subscription>> instanceStoreFactory) {
        this.instanceStoreFactory = instanceStoreFactory;
    }

    /** Create a collector backed by a ConcurrentHashMap. */
    public static <T> InstanceCollector<T> create() {
        return create(new Func0<Map<T, Subscription>>() {
            @Override
            public Map<T, Subscription> call() {
                return new ConcurrentHashMap<T, Subscription>();
            }
        });
    }

    /** Create a collector backed by a caller-supplied map factory. */
    public static <T> InstanceCollector<T> create(Func0<Map<T, Subscription>> instanceStoreFactory) {
        return new InstanceCollector<T>(instanceStoreFactory);
    }

    // TODO: Move this into a utils package
    /**
     * Action that mirrors a stream of instances into {@code map}, keyed by
     * {@code keyFunc}, removing each entry when its lifecycle completes.
     */
    public static <K, T> Action1<Instance<T>> toMap(final Map<K, T> map, final Func1<T, K> keyFunc) {
        return new Action1<Instance<T>>() {
            @Override
            public void call(final Instance<T> t1) {
                map.put(keyFunc.call(t1.getValue()), t1.getValue());
                // Bug fix (1): doOnCompleted() only decorates the observable — without a
                // subscribe() the removal action was never executed.
                // Bug fix (2): the map is keyed by keyFunc(value), so removal must use the
                // computed key, not the raw value.
                t1.getLifecycle().doOnCompleted(new Action0() {
                    @Override
                    public void call() {
                        map.remove(keyFunc.call(t1.getValue()));
                    }
                }).subscribe();
            }
        };
    }

    @Override
    public Observable<List<T>> call(Observable<Instance<T>> o) {
        return o.lift(new Operator<Set<T>, Instance<T>>() {
            @Override
            public Subscriber<? super Instance<T>> call(final Subscriber<? super Set<T>> s) {
                // Track per-instance lifecycle subscriptions so they are released both on
                // removal and when the downstream unsubscribes.
                final CompositeSubscription cs = new CompositeSubscription();
                final Map<T, Subscription> instances = instanceStoreFactory.call();
                s.add(cs);
                return new Subscriber<Instance<T>>() {
                    @Override
                    public void onCompleted() {
                        s.onCompleted();
                    }

                    @Override
                    public void onError(Throwable e) {
                        s.onError(e);
                    }

                    @Override
                    public void onNext(final Instance<T> t) {
                        // When the instance's lifecycle completes, drop it and re-emit
                        // the current membership snapshot.
                        Subscription sub = t.getLifecycle().doOnCompleted(new Action0() {
                            @Override
                            public void call() {
                                Subscription sub = instances.remove(t.getValue());
                                cs.remove(sub);
                                s.onNext(instances.keySet());
                            }
                        }).subscribe();
                        instances.put(t.getValue(), sub);
                        s.onNext(instances.keySet());
                    }
                };
            }
        })
        .map(new Func1<Set<T>, List<T>>() {
            @Override
            public List<T> call(Set<T> instances) {
                ArrayList<T> snapshot = new ArrayList<T>(instances.size());
                snapshot.addAll(instances);
                // Make an immutable copy of the list
                return Collections.unmodifiableList(snapshot);
            }
        });
    }
}
6,209
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/util/SingleMetric.java
package netflix.ocelli.util; /** * Contract for tracking a single Metric. For example, a SingleMetric may track an exponential moving * average where add() is called for each new sample and get() is called to get the current * exponential moving average * * @author elandau * * @param <T> */ public interface SingleMetric<T> { /** * Add a new sample * @param sample */ void add(T sample); /** * Reset the value to default */ void reset(); /** * @return The latest calculated value */ T get(); }
6,210
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/util/RpsEstimator.java
package netflix.ocelli.util; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; public class RpsEstimator { private static final double MICROS_PER_SECOND = TimeUnit.MICROSECONDS.convert(1, TimeUnit.SECONDS); private AtomicLong sampleCounter = new AtomicLong(); private AtomicLong lastCheckpoint = new AtomicLong(); private volatile long estimatedRps; private volatile long nextCheckpoint; private volatile long lastFlushTime = System.nanoTime(); public static class State { long count; long rps; } public RpsEstimator(long initialRps) { this.estimatedRps = 0; this.nextCheckpoint = 1000; } public State addSample() { long counter = sampleCounter.incrementAndGet(); if (counter - lastCheckpoint.get() == nextCheckpoint) { long count = counter - lastCheckpoint.get(); synchronized (this) { lastCheckpoint.set(counter); long now = System.nanoTime(); estimatedRps = (long) (count * MICROS_PER_SECOND / (lastFlushTime - now)); lastFlushTime = now; nextCheckpoint = estimatedRps; State state = new State(); state.count = count; state.rps = estimatedRps; return state; } } else if (counter - lastCheckpoint.get() > 2 * estimatedRps) { nextCheckpoint = (counter - lastCheckpoint.get()) * 2; } return null; } long getSampleCount() { return sampleCounter.get(); } long getRps() { return estimatedRps; } }
6,211
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/util/RxUtil.java
package netflix.ocelli.util; import java.util.Collection; import java.util.LinkedHashSet; import java.util.List; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.Observable.OnSubscribe; import rx.Observable.Operator; import rx.Observer; import rx.Subscriber; import rx.functions.Action0; import rx.functions.Action1; import rx.functions.Func0; import rx.functions.Func1; import rx.functions.FuncN; import rx.subscriptions.Subscriptions; public class RxUtil { private static final Logger LOG = LoggerFactory.getLogger(RxUtil.class); private interface Action01<T> extends Action1<T>, Action0 {} /** * Increment a stateful counter outside the stream. * * {code * <pre> * observable * .doOnNext(RxUtil.increment(mycounter)) * </pre> * } * * @param metric * @return */ public static <T> Action01<T> increment(final AtomicLong metric) { return new Action01<T>() { @Override public void call(T t1) { metric.incrementAndGet(); } @Override public void call() { metric.incrementAndGet(); } }; } public static <T> Action01<T> increment(final AtomicInteger metric) { return new Action01<T>() { @Override public void call(T t1) { metric.incrementAndGet(); } @Override public void call() { metric.incrementAndGet(); } }; } /** * Decrement a stateful counter outside the stream. 
* * {code * <pre> * observable * .doOnNext(RxUtil.decrement(mycounter)) * </pre> * } * * @param metric * @return */ public static <T> Action01<T> decrement(final AtomicLong metric) { return new Action01<T>() { @Override public void call(T t1) { metric.decrementAndGet(); } @Override public void call() { metric.decrementAndGet(); } }; } /** * Trace each item emitted on the stream with a given label. * Will log the file and line where the trace occurs. * * {code * <pre> * observable * .doOnNext(RxUtil.trace("next: ")) * </pre> * } * * @param label */ public static <T> Action01<T> trace(String label) { final String caption = getSourceLabel(label); final AtomicLong counter = new AtomicLong(); return new Action01<T>() { @Override public void call(T t1) { LOG.trace("{} ({}) {}", caption, counter.incrementAndGet(), t1); } @Override public void call() { LOG.trace("{} ({}) {}", caption, counter.incrementAndGet()); } }; } /** * Log info line for each item emitted on the stream with a given label. * Will log the file and line where the trace occurs. * * {code * <pre> * observable * .doOnNext(RxUtil.info("next: ")) * </pre> * } * * @param label */ public static <T> Action01<T> info(String label) { final String caption = getSourceLabel(label); final AtomicLong counter = new AtomicLong(); return new Action01<T>() { @Override public void call(T t1) { LOG.info("{} ({}) {}", caption, counter.incrementAndGet(), t1); } @Override public void call() { LOG.info("{} ({})", caption, counter.incrementAndGet()); } }; } /** * Action to sleep in the middle of a pipeline. This is normally used in tests to * introduce an artifical delay. 
* @param timeout * @param units * @return */ public static <T> Action01<T> sleep(final long timeout, final TimeUnit units) { return new Action01<T>() { @Override public void call(T t1) { call(); } @Override public void call() { try { units.sleep(timeout); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } }; } /** * Decrement a countdown latch for each item * * {code * <pre> * observable * .doOnNext(RxUtil.decrement(latch)) * </pre> * } */ public static <T> Action01<T> countdown(final CountDownLatch latch) { return new Action01<T>() { @Override public void call(T t1) { latch.countDown(); } @Override public void call() { latch.countDown(); } }; } /** * Log the request rate at the given interval. * * {code * <pre> * observable * .lift(RxUtil.rate("items per 10 seconds", 10, TimeUnit.SECONDS)) * </pre> * } * * @param label * @param interval * @param units */ public static String[] TIME_UNIT = {"ns", "us", "ms", "s", "m", "h", "d"}; public static <T> Operator<T, T> rate(final String label, final long interval, final TimeUnit units) { final String caption = getSourceLabel(label); return new Operator<T, T>() { @Override public Subscriber<? super T> call(final Subscriber<? super T> child) { final AtomicLong counter = new AtomicLong(); final String sUnits = (interval == 1) ? TIME_UNIT[units.ordinal()] : String.format("({} {})", interval, TIME_UNIT[units.ordinal()]); child.add( Observable.interval(interval, units) .subscribe(new Action1<Long>() { @Override public void call(Long t1) { LOG.info("{} {} / {}", caption, counter.getAndSet(0), sUnits); } })); return new Subscriber<T>(child) { @Override public void onCompleted() { if (!isUnsubscribed()) child.onCompleted(); } @Override public void onError(Throwable e) { if (!isUnsubscribed()) child.onError(e); } @Override public void onNext(T t) { counter.incrementAndGet(); if (!isUnsubscribed()) child.onNext(t); } }; } }; } /** * Log error line when an error occurs. 
* Will log the file and line where the trace occurs. * * {code * <pre> * observable * .doOnError(RxUtil.error("Stream broke")) * </pre> * } * * @param label */ public static Action1<Throwable> error(String label) { final String caption = getSourceLabel(label); final AtomicLong counter = new AtomicLong(); return new Action1<Throwable>() { @Override public void call(Throwable t1) { LOG.error("{} ({}) {}", caption, counter.incrementAndGet(), t1); } }; } /** * Log a warning line when an error occurs. * Will log the file and line where the trace occurs. * * {code * <pre> * observable * .doOnError(RxUtil.warn("Stream broke")) * </pre> * } * * @param label */ public static Action1<Throwable> warn(String label) { final String caption = getSourceLabel(label); final AtomicLong counter = new AtomicLong(); return new Action1<Throwable>() { @Override public void call(Throwable t1) { LOG.warn("{} ({}) {}", caption, counter.incrementAndGet(), t1); } }; } public static <T> Func1<List<T>, Boolean> listNotEmpty() { return new Func1<List<T>, Boolean>() { @Override public Boolean call(List<T> t1) { return !t1.isEmpty(); } }; } /** * Filter out any collection that is empty. * * {code * <pre> * observable * .filter(RxUtil.collectionNotEmpty()) * </pre> * } */ public static <T> Func1<Collection<T>, Boolean> collectionNotEmpty() { return new Func1<Collection<T>, Boolean>() { @Override public Boolean call(Collection<T> t1) { return !t1.isEmpty(); } }; } /** * Operator that acts as a pass through. Use this when you want the operator * to be interchangable with the default implementation being a single passthrough. * * {code * <pre> * Operator<T,T> customOperator = RxUtil.passthrough(); * observable * .lift(customOperator) * </pre> * } */ public static <T> Operator<T, T> passthrough() { return new Operator<T, T>() { @Override public Subscriber<? super T> call(final Subscriber<? 
super T> o) { return o; } }; } /** * Cache all items and emit a single LinkedHashSet with all data when onComplete is called * @return */ public static <T> Operator<Set<T>, T> toLinkedHashSet() { return new Operator<Set<T>, T>() { @Override public Subscriber<? super T> call(final Subscriber<? super Set<T>> o) { final Set<T> set = new LinkedHashSet<T>(); return new Subscriber<T>() { @Override public void onCompleted() { o.onNext(set); o.onCompleted(); } @Override public void onError(Throwable e) { o.onError(e); } @Override public void onNext(T t) { set.add(t); } }; } }; } private static String getSourceLabel(String label) { StackTraceElement[] stack = Thread.currentThread().getStackTrace(); StackTraceElement element = stack[3]; return "(" + element.getFileName() + ":" + element.getLineNumber() + ") " + label; } /** * Filter that returns true whenever an external state is true * {code * <pre> * final AtomicBoolean condition = new AtomicBoolean(); * * observable * .filter(RxUtil.isTrue(condition)) * </pre> * } * @param condition */ public static <T> Func1<T, Boolean> isTrue(final AtomicBoolean condition) { return new Func1<T, Boolean>() { @Override public Boolean call(T t1) { return condition.get(); } }; } /** * Filter that returns true whenever an external state is false * {code * <pre> * final AtomicBoolean condition = new AtomicBoolean(); * * observable * .filter(RxUtil.isTrue(condition)) * </pre> * } * @param condition */ public static <T> Func1<T, Boolean> isFalse(final AtomicBoolean condition) { return new Func1<T, Boolean>() { @Override public Boolean call(T t1) { return !condition.get(); } }; } /** * Filter that returns true whenever a CAS operation on an external static * AtomicBoolean succeeds * {code * <pre> * final AtomicBoolean condition = new AtomicBoolean(); * * observable * .filter(RxUtil.isTrue(condition)) * </pre> * } * @param condition */ public static Func1<? 
super Long, Boolean> compareAndSet(final AtomicBoolean condition, final boolean expect, final boolean value) { return new Func1<Long, Boolean>() { @Override public Boolean call(Long t1) { return condition.compareAndSet(expect, value); } }; } /** * Simple operation that sets an external condition for each emitted item * {code * <pre> * final AtomicBoolean condition = new AtomicBoolean(); * * observable * .doOnNext(RxUtil.set(condition, true)) * </pre> * } * @param condition * @param value * @return */ public static <T> Action01<T> set(final AtomicBoolean condition, final boolean value) { return new Action01<T>() { @Override public void call(T t1) { condition.set(value); } @Override public void call() { condition.set(value); } }; } /** * Filter that always returns a constant value. Use this to create a default filter * when the filter implementation is plugable. * * @param constant * @return */ public static <T> Func1<T, Boolean> constantFilter(final boolean constant) { return new Func1<T, Boolean>() { @Override public Boolean call(T t1) { return constant; } }; } /** * Observable factory to be used with {@link Observable.defer()} which will round robin * through a list of {@link Observable}'s so that each subscribe() returns the next * {@link Observable} in the list. * * @param sources * @return */ public static <T> Func0<Observable<T>> roundRobinObservableFactory(@SuppressWarnings("unchecked") final Observable<T> ... sources) { return new Func0<Observable<T>>() { final AtomicInteger count = new AtomicInteger(); @Override public Observable<T> call() { int index = count.getAndIncrement() % sources.length; return sources[index]; } }; } public static <T> Observable<Observable<T>> onSubscribeChooseNext(final Observable<T> ... sources) { return Observable.create(new OnSubscribe<Observable<T>>() { private AtomicInteger count = new AtomicInteger(); @Override public void call(Subscriber<? 
super Observable<T>> t1) { int index = count.getAndIncrement(); if (index < sources.length) { t1.onNext(sources[index]); } t1.onCompleted(); } }); } /** * Given a list of observables that emit a boolean condition AND all conditions whenever * any condition changes and emit the resulting condition when the final condition changes. * @param sources * @return */ public static Observable<Boolean> conditionAnder(List<Observable<Boolean>> sources) { return Observable.combineLatest(sources, new FuncN<Observable<Boolean>>() { @Override public Observable<Boolean> call(Object... args) { return Observable.from(args).cast(Boolean.class).firstOrDefault(true, new Func1<Boolean, Boolean>() { @Override public Boolean call(Boolean status) { return !status; } }); } }) .flatMap(new Func1<Observable<Boolean>, Observable<Boolean>>() { @Override public Observable<Boolean> call(Observable<Boolean> t1) { return t1; } }) .distinctUntilChanged(); } /** * Trace all parts of an observable's state and especially when * notifications are discarded due to being unsubscribed. This should * only used for debugging purposes. * @param label * @return */ public static <T> Operator<T, T> uberTracer(String label) { final String caption = getSourceLabel(label); return new Operator<T, T>() { @Override public Subscriber<? super T> call(final Subscriber<? 
super T> s) { s.add(Subscriptions.create(new Action0() { @Override public void call() { LOG.info("{} unsubscribing", caption); } })); return new Subscriber<T>(s) { private AtomicLong completedCounter = new AtomicLong(); private AtomicLong nextCounter = new AtomicLong(); private AtomicLong errorCounter = new AtomicLong(); @Override public void onCompleted() { if (!s.isUnsubscribed()) { s.onCompleted(); } else { LOG.info("{} ({}) Discarding onCompleted", caption, completedCounter.incrementAndGet()); } } @Override public void onError(Throwable e) { if (!s.isUnsubscribed()) { s.onCompleted(); } else { LOG.info("{} ({}) Discarding onError", caption, errorCounter.incrementAndGet()); } } @Override public void onNext(T t) { if (!s.isUnsubscribed()) { s.onNext(t); } else { LOG.info("{} ({}) Discarding onNext", caption, nextCounter.incrementAndGet()); } } }; } }; } /** * Utility to call an action when any event occurs regardless that event * @param action * @return */ public static <T> Observer<T> onAny(final Action0 action) { return new Observer<T>() { @Override public void onCompleted() { action.call(); } @Override public void onError(Throwable e) { action.call(); } @Override public void onNext(T t) { action.call(); } } ; } public static <T> Action01<T> acquire(final Semaphore sem) { return new Action01<T>() { @Override public void call(T t1) { call(); } @Override public void call() { try { sem.acquire(); } catch (InterruptedException e) { // TODO Auto-generated catch block e.printStackTrace(); } } }; } public static <T> Action01<T> release(final Semaphore sem) { return new Action01<T>() { @Override public void call(T t1) { call(); } @Override public void call() { sem.release(); } }; } public static <T> Action1<T> set(final AtomicReference<T> ref) { return new Action1<T>() { @Override public void call(T t1) { ref.set(t1); } }; } }
6,212
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/util/Stopwatch.java
import java.util.concurrent.TimeUnit;

/**
 * A stopwatch that begins counting the moment it is created and reports how much
 * time has passed since then. Deliberately minimal: there is no stop, restart or
 * clear operation — implementations simply answer "how long since construction?".
 *
 * @author elandau
 * @see {@link Stopwatches}
 */
public interface Stopwatch {
    /**
     * Time elapsed since this stopwatch was created.
     *
     * @param units unit in which to express the elapsed time
     * @return elapsed time converted to {@code units}
     */
    long elapsed(TimeUnit units);
}
6,213
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/util/AtomicDouble.java
import java.util.concurrent.atomic.AtomicLong;

/**
 * Atomic double for use in non-blocking algorithms (@see ExponentialAverage).
 * Backed by an AtomicLong holding the IEEE-754 bit pattern.
 *
 * NOTE: compareAndSet compares bit patterns, not numeric values: NaN == NaN
 * succeeds, while +0.0 vs -0.0 fails (they have different bit patterns).
 *
 * @author elandau
 */
public class AtomicDouble {
    // FIX: made final — the reference is never reassigned, and final guarantees
    // safe publication of the backing AtomicLong.
    private final AtomicLong bits;

    public AtomicDouble() {
        this(0.0);
    }

    public AtomicDouble(double initialValue) {
        bits = new AtomicLong(Double.doubleToLongBits(initialValue));
    }

    /**
     * Atomically set to {@code update} if the current bit pattern equals that of
     * {@code expect}.
     *
     * @return true if the swap succeeded
     */
    public final boolean compareAndSet(double expect, double update) {
        return bits.compareAndSet(Double.doubleToLongBits(expect),
                                  Double.doubleToLongBits(update));
    }

    public final void set(double newValue) {
        bits.set(Double.doubleToLongBits(newValue));
    }

    public final double get() {
        return Double.longBitsToDouble(bits.get());
    }

    /** Atomically replace the value, returning the previous one. */
    public final double getAndSet(double newValue) {
        return Double.longBitsToDouble(bits.getAndSet(Double.doubleToLongBits(newValue)));
    }

    /** May fail spuriously; see {@link AtomicLong#weakCompareAndSet}. */
    public final boolean weakCompareAndSet(double expect, double update) {
        return bits.weakCompareAndSet(Double.doubleToLongBits(expect),
                                      Double.doubleToLongBits(update));
    }

    public double doubleValue() {
        // FIX: dropped the redundant (double) cast of an already-double value.
        return get();
    }

    /** Value truncated toward zero, as by a narrowing (int) cast. */
    public int intValue() {
        return (int) get();
    }

    /** Value truncated toward zero, as by a narrowing (long) cast. */
    public long longValue() {
        return (long) get();
    }
}
6,214
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/util/StateMachine.java
package netflix.ocelli.util; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.Observable.OnSubscribe; import rx.Subscriber; import rx.functions.Action1; import rx.functions.Action2; import rx.functions.Func0; import rx.functions.Func1; import rx.subjects.PublishSubject; public class StateMachine<T, E> implements Action1<E> { private static final Logger LOG = LoggerFactory.getLogger(StateMachine.class); public static class State<T, E> { private String name; private Func1<T, Observable<E>> enter; private Func1<T, Observable<E>> exit; private Map<E, State<T, E>> transitions = new HashMap<E, State<T, E>>(); private Set<E> ignore = new HashSet<E>(); public static <T, E> State<T, E> create(String name) { return new State<T, E>(name); } public State(String name) { this.name = name; } public State<T, E> onEnter(Func1<T, Observable<E>> func) { this.enter = func; return this; } public State<T, E> onExit(Func1<T, Observable<E>> func) { this.exit = func; return this; } public State<T, E> transition(E event, State<T, E> state) { transitions.put(event, state); return this; } public State<T, E> ignore(E event) { ignore.add(event); return this; } Observable<E> enter(T context) { if (enter != null) return enter.call(context); return Observable.empty(); } Observable<E> exit(T context) { if (exit != null) exit.call(context); return Observable.empty(); } State<T, E> next(E event) { return transitions.get(event); } public String toString() { return name; } } private volatile State<T, E> state; private final T context; private final PublishSubject<E> events = PublishSubject.create(); public static <T, E> StateMachine<T, E> create(T context, State<T, E> initial) { return new StateMachine<T, E>(context, initial); } public StateMachine(T context, State<T, E> initial) { this.state = initial; this.context = context; } public Observable<Void> start() { return 
Observable.create(new OnSubscribe<Void>() { @Override public void call(Subscriber<? super Void> sub) { sub.add(events.collect(new Func0<T>() { @Override public T call() { return context; } }, new Action2<T, E>() { @Override public void call(T context, E event) { LOG.trace("{} : {}({})", context, state, event); final State<T, E> next = state.next(event); if (next != null) { state.exit(context); state = next; next.enter(context).subscribe(StateMachine.this); } else if (!state.ignore.contains(event)) { LOG.warn("Unexpected event {} in state {} for {} ", event, state, context); } } }) .subscribe()); state.enter(context); } }); } @Override public void call(E event) { events.onNext(event); } public State<T, E> getState() { return state; } public T getContext() { return context; } }
6,215
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/util/RandomBlockingQueue.java
import java.util.AbstractQueue;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

/**
 * Unbounded queue that hands elements back in (approximately) random order.
 *
 * Randomization happens on insert: each new element is written to a random slot
 * and the displaced element is appended to the end, so removing from the end
 * yields a randomized element without any shifting. Blocking take/poll semantics
 * are provided via a ReentrantLock + notEmpty condition. Since the queue is
 * unbounded, put/offer never block.
 */
public class RandomBlockingQueue<E> extends AbstractQueue<E> {
    /** The queued items; order is randomized at insert time. */
    private final List<E> items = new ArrayList<E>();

    private final Random rand = new Random();

    /** Main lock guarding all access */
    final ReentrantLock lock;

    /** Condition for waiting takes */
    private final Condition notEmpty;

    public RandomBlockingQueue() {
        this(false);
    }

    /**
     * @param fair whether the internal lock uses a fair ordering policy
     */
    public RandomBlockingQueue(boolean fair) {
        lock = new ReentrantLock(fair);
        notEmpty = lock.newCondition();
    }

    private static void checkNotNull(Object v) {
        if (v == null)
            throw new NullPointerException();
    }

    @SuppressWarnings("unchecked")
    static <E> E cast(Object item) {
        return (E) item;
    }

    /**
     * Inserts the element at a random position, pushing the displaced element to
     * the back. Call only when holding lock.
     */
    private void insert(E x) {
        if (items.isEmpty()) {
            items.add(x);
        }
        else {
            int index = rand.nextInt(items.size());
            items.add(items.get(index));
            items.set(index, x);
        }
        notEmpty.signal();
    }

    /**
     * Removes and returns the last element; randomness was already established at
     * insert time. Call only when holding lock and when non-empty.
     */
    private E extract() {
        return items.remove(items.size() - 1);
    }

    @Override
    public boolean offer(E e) {
        checkNotNull(e);
        final ReentrantLock lock = this.lock;
        lock.lock();
        try {
            insert(e);
            return true;
        } finally {
            lock.unlock();
        }
    }

    /** Never blocks: the queue is unbounded. */
    public void put(E e) throws InterruptedException {
        offer(e);
    }

    /** Never blocks: the queue is unbounded; always returns true. */
    public boolean offer(E e, long timeout, TimeUnit unit) {
        offer(e);
        return true;
    }

    @Override
    public E poll() {
        final ReentrantLock lock = this.lock;
        lock.lock();
        try {
            return items.isEmpty() ? null : extract();
        } finally {
            lock.unlock();
        }
    }

    /** Blocks until an element becomes available. */
    public E take() throws InterruptedException {
        final ReentrantLock lock = this.lock;
        lock.lockInterruptibly();
        try {
            while (items.isEmpty())
                notEmpty.await();
            return extract();
        } finally {
            lock.unlock();
        }
    }

    /** Blocks up to the given timeout; returns null on timeout. */
    public E poll(long timeout, TimeUnit unit) throws InterruptedException {
        long nanos = unit.toNanos(timeout);
        final ReentrantLock lock = this.lock;
        lock.lockInterruptibly();
        try {
            while (items.isEmpty()) {
                if (nanos <= 0)
                    return null;
                nanos = notEmpty.awaitNanos(nanos);
            }
            return extract();
        } finally {
            lock.unlock();
        }
    }

    /** Returns (without removing) a random element, or null if empty. */
    @Override
    public E peek() {
        final ReentrantLock lock = this.lock;
        lock.lock();
        try {
            return items.isEmpty() ? null : items.get(rand.nextInt(items.size()));
        } finally {
            lock.unlock();
        }
    }

    /**
     * Returns the number of elements in this queue.
     */
    @Override
    public int size() {
        final ReentrantLock lock = this.lock;
        lock.lock();
        try {
            return items.size();
        } finally {
            lock.unlock();
        }
    }

    /**
     * Remaining capacity. The queue is unbounded, so this is
     * Integer.MAX_VALUE minus the current size.
     */
    public int remainingCapacity() {
        final ReentrantLock lock = this.lock;
        lock.lock();
        try {
            return Integer.MAX_VALUE - items.size();
        } finally {
            lock.unlock();
        }
    }

    /**
     * Removes a single instance of the specified element, if present.
     *
     * @return true if this queue changed as a result of the call
     */
    @Override
    public boolean remove(Object o) {
        if (o == null)
            return false;
        final ReentrantLock lock = this.lock;
        lock.lock();
        try {
            return this.items.remove(o);
        } finally {
            lock.unlock();
        }
    }

    /**
     * @return true if this queue contains an element equal to {@code o}
     */
    @Override
    public boolean contains(Object o) {
        if (o == null)
            return false;
        final ReentrantLock lock = this.lock;
        lock.lock();
        try {
            return items.contains(o);
        } finally {
            lock.unlock();
        }
    }

    /**
     * Snapshot of the queue contents as a freshly allocated array (safe to modify).
     */
    @Override
    public Object[] toArray() {
        final ReentrantLock lock = this.lock;
        lock.lock();
        try {
            return this.items.toArray(new Object[items.size()]);
        } finally {
            lock.unlock();
        }
    }

    /**
     * Snapshot of the queue contents into {@code a} (or a new array of the same
     * runtime type if {@code a} is too small); see {@link Collection#toArray(Object[])}.
     *
     * @throws NullPointerException if the specified array is null
     */
    @Override
    public <T> T[] toArray(T[] a) {
        final ReentrantLock lock = this.lock;
        lock.lock();
        try {
            return items.toArray(a);
        } finally {
            lock.unlock();
        }
    }

    @Override
    public String toString() {
        final ReentrantLock lock = this.lock;
        lock.lock();
        try {
            return items.toString();
        } finally {
            lock.unlock();
        }
    }

    /**
     * Atomically removes all of the elements from this queue.
     */
    @Override
    public void clear() {
        final ReentrantLock lock = this.lock;
        lock.lock();
        try {
            this.items.clear();
        } finally {
            lock.unlock();
        }
    }

    /**
     * Drain every element into {@code c}.
     *
     * FIX: the original called items.removeAll(c) — removing from the queue
     * whatever was already in the target — and never added anything to {@code c}.
     *
     * @return the number of elements transferred
     * @throws NullPointerException     if {@code c} is null
     * @throws IllegalArgumentException if {@code c} is this queue
     */
    public int drainTo(Collection<? super E> c) {
        return drainTo(c, Integer.MAX_VALUE);
    }

    /**
     * Drain up to {@code maxElements} elements into {@code c}.
     *
     * FIX: besides the removeAll bug above, the original also inverted the
     * maxElements clamp ({@code maxElements < size} raised the cap instead of
     * lowering it).
     *
     * @return the number of elements transferred
     * @throws NullPointerException     if {@code c} is null
     * @throws IllegalArgumentException if {@code c} is this queue
     */
    public int drainTo(Collection<? super E> c, int maxElements) {
        checkNotNull(c);
        if (c == this)
            throw new IllegalArgumentException();
        if (maxElements <= 0)
            return 0;
        final ReentrantLock lock = this.lock;
        lock.lock();
        try {
            int n = Math.min(maxElements, items.size());
            for (int i = 0; i < n; i++) {
                c.add(extract());
            }
            return n;
        } finally {
            lock.unlock();
        }
    }

    /**
     * Iteration is not supported: element order is intentionally randomized.
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public Iterator<E> iterator() {
        throw new UnsupportedOperationException();
    }
}
6,216
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/loadbalancer/ChoiceOfTwoLoadBalancer.java
package netflix.ocelli.loadbalancer; import netflix.ocelli.LoadBalancerStrategy; import java.util.Comparator; import java.util.List; import java.util.NoSuchElementException; import java.util.Random; /** * This selector chooses 2 random hosts and picks the host with the 'best' * performance where that determination is deferred to a customizable function. * * This implementation is based on the paper 'The Power of Two Choices in * Randomized Load Balancing' http://www.eecs.harvard.edu/~michaelm/postscripts/tpds2001.pdf * This paper states that selecting the best of 2 random servers results in an * exponential improvement over selecting a single random node (also includes * round robin) but that adding a third (or more) servers does not yield a significant * performance improvement. * * @author elandau * * @param <T> */ public class ChoiceOfTwoLoadBalancer<T> implements LoadBalancerStrategy<T> { public static <T> ChoiceOfTwoLoadBalancer<T> create(final Comparator<T> func) { return new ChoiceOfTwoLoadBalancer<T>(func); } private final Comparator<T> func; private final Random rand = new Random(); public ChoiceOfTwoLoadBalancer(final Comparator<T> func2) { func = func2; } /** * @throws NoSuchElementException */ @Override public T choose(List<T> candidates) { if (candidates.isEmpty()) { throw new NoSuchElementException("No servers available in the load balancer"); } else if (candidates.size() == 1) { return candidates.get(0); } else { int pos = rand.nextInt(candidates.size()); T first = candidates.get(pos); T second = candidates.get((rand.nextInt(candidates.size()-1) + pos + 1) % candidates.size()); return func.compare(first, second) >= 0 ? first : second; } } }
6,217
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/loadbalancer/RandomWeightedLoadBalancer.java
package netflix.ocelli.loadbalancer;

import netflix.ocelli.LoadBalancerStrategy;
import netflix.ocelli.loadbalancer.weighting.ClientsAndWeights;
import netflix.ocelli.loadbalancer.weighting.WeightingStrategy;

import java.util.Collections;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Random;

/**
 * Select the next element using a random number.
 *
 * The weights are sorted such as that each cell in the array represents the
 * sum of the previous weights plus its weight. This structure makes it
 * possible to do a simple binary search using a random number from 0 to
 * total weights.
 *
 * Runtime complexity is O(log N)
 *
 * @author elandau
 */
public class RandomWeightedLoadBalancer<T> implements LoadBalancerStrategy<T> {
    public static <T> RandomWeightedLoadBalancer<T> create(final WeightingStrategy<T> strategy) {
        return new RandomWeightedLoadBalancer<T>(strategy);
    }

    // Converts the candidate list into a cumulative weight list (see ClientsAndWeights).
    private final WeightingStrategy<T> strategy;
    private final Random rand = new Random();

    public RandomWeightedLoadBalancer(final WeightingStrategy<T> strategy) {
        this.strategy = strategy;
    }

    /**
     * Pick a client with probability proportional to its weight.
     *
     * @throws NoSuchElementException if the candidate list is empty
     */
    @Override
    public T choose(List<T> local) {
        final ClientsAndWeights<T> caw = strategy.call(local);
        if (caw.isEmpty()) {
            throw new NoSuchElementException("No servers available in the load balancer");
        }

        int total = caw.getTotalWeights();
        if (total == 0) {
            // No weight information (e.g. equal-weight strategy): fall back to
            // a uniformly random pick.
            return caw.getClient(rand.nextInt(caw.size()));
        }

        // The weight list is cumulative, so binarySearch either finds the drawn
        // value exactly (then the next bucket, pos+1, owns it) or returns the
        // insertion point encoded as -(index)-1 (decoded with -pos-1).
        int pos = Collections.binarySearch(caw.getWeights(), rand.nextInt(total));
        return caw.getClient(pos >= 0? pos+1 : -pos - 1);
    }
}
6,218
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/loadbalancer/RoundRobinLoadBalancer.java
package netflix.ocelli.loadbalancer;

import netflix.ocelli.LoadBalancerStrategy;

import java.util.List;
import java.util.NoSuchElementException;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Very simple LoadBalancer that round robins over the list of active clients
 * passed to {@link #choose(List)}, starting from a (possibly random) seed
 * position.
 *
 * @author elandau
 */
public class RoundRobinLoadBalancer<T> implements LoadBalancerStrategy<T> {
    public static <T> RoundRobinLoadBalancer<T> create() {
        return create(new Random().nextInt(1000));
    }

    public static <T> RoundRobinLoadBalancer<T> create(int seedPosition) {
        return new RoundRobinLoadBalancer<T>(seedPosition);
    }

    // Monotonically increasing cursor; eventually wraps past Integer.MAX_VALUE.
    private final AtomicInteger position;

    public RoundRobinLoadBalancer() {
        position = new AtomicInteger(new Random().nextInt(1000));
    }

    public RoundRobinLoadBalancer(int seedPosition) {
        position = new AtomicInteger(seedPosition);
    }

    /**
     * @throws NoSuchElementException if there are no servers to choose from
     */
    @Override
    public T choose(List<T> local) {
        if (local.isEmpty()) {
            throw new NoSuchElementException("No servers available in the load balancer");
        }
        // BUGFIX: the previous code used Math.abs(position.incrementAndGet()),
        // but Math.abs(Integer.MIN_VALUE) is still negative, so once the
        // counter wrapped the modulo produced a negative index and get()
        // threw IndexOutOfBoundsException. Masking the sign bit always yields
        // a value in [0, Integer.MAX_VALUE].
        int pos = position.incrementAndGet() & Integer.MAX_VALUE;
        return local.get(pos % local.size());
    }
}
6,219
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/loadbalancer
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/loadbalancer/weighting/LinearWeightingStrategy.java
package netflix.ocelli.loadbalancer.weighting;

import rx.functions.Func1;

import java.util.ArrayList;
import java.util.List;

/**
 * Weighting strategy that uses the supplied function's value for each client
 * directly as its weight, emitting the weights as a running (cumulative) sum
 * in the form expected by {@link ClientsAndWeights}.
 *
 * @param <C> client type
 */
public class LinearWeightingStrategy<C> implements WeightingStrategy<C> {

    private final Func1<C, Integer> func;

    public LinearWeightingStrategy(Func1<C, Integer> func) {
        this.func = func;
    }

    @Override
    public ClientsAndWeights<C> call(List<C> clients) {
        final ArrayList<Integer> cumulative = new ArrayList<Integer>(clients.size());
        int runningTotal = 0;
        // Single pass: evaluate each client's weight and record the running sum.
        for (C client : clients) {
            runningTotal += func.call(client);
            cumulative.add(runningTotal);
        }
        return new ClientsAndWeights<C>(clients, cumulative);
    }
}
6,220
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/loadbalancer
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/loadbalancer/weighting/ClientsAndWeights.java
package netflix.ocelli.loadbalancer.weighting;

import java.util.List;

/**
 * Pairing of a client list with an optional list of cumulative weights.
 * A {@code null} or empty weight list means no weight information is
 * available (total weight 0) and all clients are treated as equal.
 *
 * @param <C> client type
 */
public class ClientsAndWeights<C> {
    private final List<C> clients;
    private final List<Integer> weights;

    public ClientsAndWeights(List<C> clients, List<Integer> weights) {
        this.clients = clients;
        this.weights = weights;
    }

    public List<C> getClients() {
        return clients;
    }

    public List<Integer> getWeights() {
        return weights;
    }

    public boolean isEmpty() {
        return clients.isEmpty();
    }

    public int size() {
        return clients.size();
    }

    /**
     * @return the sum of all weights, which for a cumulative weight list is
     *         simply the last entry; 0 when no weights were supplied
     */
    public int getTotalWeights() {
        if (weights == null || weights.isEmpty()) {
            return 0;
        }
        return weights.get(weights.size() - 1);
    }

    public C getClient(int index) {
        return clients.get(index);
    }

    /**
     * @return the (cumulative) weight at {@code index}, or 0 when no weights
     *         were supplied
     */
    public int getWeight(int index) {
        return weights == null ? 0 : weights.get(index);
    }

    @Override
    public String toString() {
        return "ClientsAndWeights [clients=" + clients + ", weights=" + weights + ']';
    }
}
6,221
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/loadbalancer
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/loadbalancer/weighting/EqualWeightStrategy.java
package netflix.ocelli.loadbalancer.weighting;

import java.util.List;

/**
 * Degenerate weighting strategy under which every client carries the same
 * weight.
 *
 * @param <C> client type
 */
public class EqualWeightStrategy<C> implements WeightingStrategy<C> {
    @Override
    public ClientsAndWeights<C> call(List<C> clients) {
        // A null weight list signals to ClientsAndWeights consumers that all
        // clients are equally weighted.
        return new ClientsAndWeights<C>(clients, null);
    }
}
6,222
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/loadbalancer
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/loadbalancer/weighting/WeightingStrategy.java
package netflix.ocelli.loadbalancer.weighting;

import java.util.List;

import rx.functions.Func1;

/**
 * Contract for strategy to determine client weights from a list of clients.
 *
 * Implementations produce a {@link ClientsAndWeights}; by convention the
 * weight list is cumulative (each entry is the running sum of weights so
 * far) or {@code null} when all clients are equal.
 *
 * @author elandau
 *
 * @param <C> client type
 */
public interface WeightingStrategy<C> extends Func1<List<C>, ClientsAndWeights<C>> {
    /**
     * Run the weighting algorithm on the active set of clients and their associated statistics and
     * return an object containing the weights
     */
    ClientsAndWeights<C> call(List<C> clients);
}
6,223
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/loadbalancer
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/loadbalancer/weighting/InverseMaxWeightingStrategy.java
package netflix.ocelli.loadbalancer.weighting; import java.util.ArrayList; import java.util.List; import rx.functions.Func1; /** * Weighting strategy that gives an inverse weight to the highest rate. Using * this strategy higher input values receive smaller weights. * * For example, if the weight is based on pending requests then an input of * [1, 5, 10, 1] pending request counts would yield the following weights * [10, 6, 1, 10] using the formula : w(i) = max - w(i) + 1 * * Note that 1 is added to ensure that we don't have a 0 weight, which is invalid. * * @author elandau * * @param <C> */ public class InverseMaxWeightingStrategy<C> implements WeightingStrategy<C> { private Func1<C, Integer> func; public InverseMaxWeightingStrategy(Func1<C, Integer> func) { this.func = func; } @Override public ClientsAndWeights<C> call(List<C> clients) { ArrayList<Integer> weights = new ArrayList<Integer>(clients.size()); if (clients.size() > 0) { Integer max = 0; for (int i = 0; i < clients.size(); i++) { int weight = func.call(clients.get(i)); if (weight > max) { max = weight; } weights.add(i, weight); } int sum = 0; for (int i = 0; i < weights.size(); i++) { sum += (max - weights.get(i)) + 1; weights.set(i, sum); } } return new ClientsAndWeights<C>(clients, weights); } }
6,224
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/topologies/RingTopology.java
package netflix.ocelli.topologies;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import netflix.ocelli.CloseableInstance;
import netflix.ocelli.Instance;
import netflix.ocelli.InstanceToNotification;
import netflix.ocelli.InstanceToNotification.InstanceNotification;
import rx.Observable;
import rx.Observable.Transformer;
import rx.Scheduler;
import rx.functions.Action0;
import rx.functions.Func1;
import rx.functions.Func2;
import rx.schedulers.Schedulers;

/**
 * The ring topology uses consistent hashing to arrange all hosts in a predictable ring
 * topology such that each client instance will be located in a uniformly distributed
 * fashion around the ring. The client will then target the next N hosts after its location.
 *
 * This type of topology ensures that each client instance communicates with a subset of
 * hosts in such a manner that the overall load shall be evenly distributed.
 *
 * @author elandau
 *
 * @param <K> ring key type (must be comparable to establish ring order)
 * @param <T> member value type
 */
public class RingTopology<K extends Comparable<K>, T> implements Transformer<Instance<T>, Instance<T>> {
    private final Func1<Integer, Integer> countFunc;
    private final Func1<T, K> keyFunc;
    // Sentinel entry marking this client's own position on the ring; it is
    // always a member of the ring but never emitted downstream.
    private final Entry local;

    /**
     * A node on the ring: pairs a ring key with its member value and manages
     * the lifecycle of the CloseableInstance emitted for it downstream.
     */
    class Entry implements Comparable<Entry> {
        final K key;
        final T value;
        CloseableInstance<T> instance;

        public Entry(K key) {
            this(key, null);
        }

        public Entry(K key, T value) {
            this.key = key;
            this.value = value;
        }

        @Override
        public int compareTo(Entry o) {
            // Ring order is purely by key.
            return key.compareTo(o.key);
        }

        public void setInstance(CloseableInstance<T> instance) {
            // Closing the previous instance signals removal to any downstream
            // load balancer that was tracking it.
            if (this.instance != null) {
                this.instance.close();
            }
            this.instance = instance;
        }

        public void closeInstance() {
            setInstance(null);
        }

        public String toString() {
            return key.toString();
        }
    }

    /**
     * Mutable accumulator used by the scan() below. Holds the full ring plus
     * the currently 'active' subset (the N neighbors after our position).
     *
     * NOTE(review): the same State object is mutated across scan() emissions,
     * so the composed Observable assumes sequential, single-subscriber use —
     * confirm before sharing.
     */
    class State {
        // Complete hash ordered list of all existing instances
        final List<Entry> ring = new ArrayList<Entry>();

        // Lookup of all entries in the active list
        final Map<K, Entry> active = new HashMap<K, Entry>();

        State() {
            ring.add(local);
        }

        /**
         * Recompute the active neighbor set and emit newly-added instances.
         * Entries that left the active set are closed (and thus removed
         * downstream) once the emission completes.
         */
        Observable<Instance<T>> update() {
            Collections.sort(ring);

            // Get the starting position in the ring (just after our own slot)
            // and the number of entries that should be active. 'local' is
            // always present, hence the size() - 1 when sizing the neighbor
            // count.
            int pos = Collections.binarySearch(ring, local) + 1;
            int count = Math.min(ring.size() - 1, countFunc.call(ring.size() - 1));

            // Determine the current 'active' set by walking clockwise from pos.
            Set<Entry> current = new HashSet<Entry>();
            for (int i = 0; i < count; i++) {
                current.add(ring.get((pos + i) % ring.size()));
            }

            // Determine Entries that have either been added or removed
            Set<Entry> added = new HashSet<Entry>(current);
            added.removeAll(active.values());

            final Set<Entry> removed = new HashSet<Entry>(active.values());
            removed.removeAll(current);

            // Update the active list
            for (Entry entry : added) {
                active.put(entry.key, entry);
            }

            return Observable
                // New instance will be added immediately
                .from(added)
                .map(new Func1<Entry, Instance<T>>() {
                    @Override
                    public Instance<T> call(Entry entry) {
                        CloseableInstance<T> instance = CloseableInstance.from(entry.value);
                        entry.setInstance(instance);
                        return instance;
                    }
                })
                .doOnCompleted(new Action0() {
                    @Override
                    public void call() {
                        // Deferred until all additions are emitted so a
                        // downstream consumer never sees an empty interim set.
                        for (Entry entry : removed) {
                            entry.closeInstance();
                            active.remove(entry.key);
                        }
                    }
                });
        }

        public void add(Instance<T> value) {
            ring.add(new Entry(keyFunc.call(value.getValue()), value.getValue()));
        }

        public void remove(Instance<T> value) {
            K key = keyFunc.call(value.getValue());
            int pos = Collections.binarySearch(ring, new Entry(key));
            if (pos >= 0) {
                ring.remove(pos).closeInstance();
                active.remove(key);
            }
        }
    }

    public static <K extends Comparable<K>, T> RingTopology<K, T> create(final K localKey, final Func1<T, K> keyFunc, Func1<Integer, Integer> countFunc) {
        return new RingTopology<K, T>(localKey, keyFunc, countFunc);
    }

    public static <K extends Comparable<K>, T> RingTopology<K, T> create(final K localKey, final Func1<T, K> keyFunc, Func1<Integer, Integer> countFunc, Scheduler scheduler) {
        return new RingTopology<K, T>(localKey, keyFunc, countFunc, scheduler);
    }

    public RingTopology(final K localKey, final Func1<T, K> keyFunc, Func1<Integer, Integer> countFunc) {
        this(localKey, keyFunc, countFunc, Schedulers.computation());
    }

    // Visible for testing
    // NOTE(review): the scheduler parameter is accepted but never used in this
    // constructor — confirm whether scheduled updates were intended.
    public RingTopology(final K localKey, final Func1<T, K> keyFunc, Func1<Integer, Integer> countFunc, Scheduler scheduler) {
        this.local = new Entry(localKey);
        this.countFunc = countFunc;
        this.keyFunc = keyFunc;
    }

    @Override
    public Observable<Instance<T>> call(Observable<Instance<T>> o) {
        return o
            // Expand each Instance into add/remove notifications...
            .flatMap(InstanceToNotification.<T>create())
            // ...fold them into the (mutable) ring State...
            .scan(new State(), new Func2<State, InstanceNotification<T>, State>() {
                @Override
                public State call(State state, InstanceNotification<T> instance) {
                    switch (instance.getKind()) {
                    case OnAdd:
                        state.add(instance.getInstance());
                        break;
                    case OnRemove:
                        state.remove(instance.getInstance());
                        break;
                    }
                    return state;
                }
            })
            // ...and emit the instances that became active after each change.
            .concatMap(new Func1<State, Observable<Instance<T>>>() {
                @Override
                public Observable<Instance<T>> call(State state) {
                    return state.update();
                }
            });
    }
}
6,225
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/functions/Retrys.java
package netflix.ocelli.functions; import java.util.concurrent.TimeUnit; import netflix.ocelli.retrys.ExponentialBackoff; import rx.Observable; import rx.functions.Func1; public abstract class Retrys { public static Func1<Throwable, Boolean> ALWAYS = new Func1<Throwable, Boolean>() { @Override public Boolean call(Throwable t1) { return true; } }; public static Func1<Throwable, Boolean> NEVER = new Func1<Throwable, Boolean>() { @Override public Boolean call(Throwable t1) { return false; } }; /** * Exponential backoff * @param maxRetrys * @param timeslice * @param units * @return */ public static Func1<Observable<? extends Throwable>, Observable<?>> exponentialBackoff(final int maxRetrys, final long timeslice, final TimeUnit units) { return new ExponentialBackoff(maxRetrys, timeslice, -1, units, ALWAYS); } /** * Bounded exponential backoff * @param maxRetrys * @param timeslice * @param units * @return */ public static Func1<Observable<? extends Throwable>, Observable<?>> exponentialBackoff(final int maxRetrys, final long timeslice, final TimeUnit units, final long maxDelay) { return new ExponentialBackoff(maxRetrys, timeslice, maxDelay, units, ALWAYS); } }
6,226
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/functions/Actions.java
package netflix.ocelli.functions;

import java.util.concurrent.atomic.AtomicBoolean;

import rx.functions.Action0;

/**
 * Factory methods for common {@link Action0} decorators.
 */
public abstract class Actions {
    /**
     * Wrap an action so that it executes at most once, regardless of how many
     * times (or from how many threads) the returned action is invoked.
     *
     * @param delegate action to guard
     * @return action forwarding only the first call to {@code delegate}
     */
    public static Action0 once(final Action0 delegate) {
        return new Action0() {
            private final AtomicBoolean invoked = new AtomicBoolean(false);

            @Override
            public void call() {
                // compareAndSet succeeds for exactly one caller.
                if (invoked.compareAndSet(false, true)) {
                    delegate.call();
                }
            }
        };
    }
}
6,227
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/functions/Functions.java
package netflix.ocelli.functions; import rx.functions.Func1; public abstract class Functions { public static Func1<Integer, Integer> log() { return new Func1<Integer, Integer>() { @Override public Integer call(Integer t1) { return (int)Math.ceil(Math.log(t1)); } }; } public static Func1<Integer, Integer> memoize(final Integer value) { return new Func1<Integer, Integer>() { @Override public Integer call(Integer t1) { return Math.min(value, t1); } }; } public static Func1<Integer, Integer> log_log() { return new Func1<Integer, Integer>() { @Override public Integer call(Integer t1) { return (int)Math.ceil(Math.log(Math.log(t1))); } }; } public static Func1<Integer, Integer> identity() { return new Func1<Integer, Integer>() { @Override public Integer call(Integer t1) { return t1; } }; } public static Func1<Integer, Integer> sqrt() { return new Func1<Integer, Integer>() { @Override public Integer call(Integer t1) { return (int)Math.ceil(Math.sqrt((double)t1)); } }; } public static Func1<Integer, Integer> root(final double pow) { return new Func1<Integer, Integer>() { @Override public Integer call(Integer t1) { return (int)Math.ceil(Math.pow((double)t1, 1/pow)); } }; } }
6,228
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/functions/Connectors.java
package netflix.ocelli.functions; import rx.Observable; import rx.functions.Func1; public abstract class Connectors { public static <C> Func1<C, Observable<Void>> never() { return new Func1<C, Observable<Void>>() { @Override public Observable<Void> call(C client) { return Observable.never(); } }; } public static <C> Func1<C, Observable<Void>> immediate() { return new Func1<C, Observable<Void>>() { @Override public Observable<Void> call(C client) { return Observable.empty(); } }; } public static <C> Func1<C, Observable<Void>> failure(final Throwable t) { return new Func1<C, Observable<Void>>() { @Override public Observable<Void> call(C client) { return Observable.error(t); } }; } }
6,229
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/functions/Topologies.java
package netflix.ocelli.functions;

import netflix.ocelli.topologies.RingTopology;
import rx.functions.Func1;

/**
 * Convenience class for creating different topologies that filter clients into
 * a specific arrangement that limits the set of clients this instance will
 * communicate with.
 *
 * @author elandau
 */
public abstract class Topologies {
    /**
     * Create a consistent-hash ring topology.
     *
     * @param id        key identifying the local instance's position on the ring
     * @param idFunc    function deriving a ring key from a client
     * @param countFunc function mapping the total instance count to the number
     *                  of neighbors this instance should communicate with
     */
    public static <T, K extends Comparable<K>> RingTopology<K, T> ring(K id, Func1<T, K> idFunc, Func1<Integer, Integer> countFunc) {
        return new RingTopology<K, T>(id, idFunc, countFunc);
    }
}
6,230
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/functions/Weightings.java
package netflix.ocelli.functions;

import rx.functions.Func1;
import netflix.ocelli.loadbalancer.weighting.EqualWeightStrategy;
import netflix.ocelli.loadbalancer.weighting.InverseMaxWeightingStrategy;
import netflix.ocelli.loadbalancer.weighting.LinearWeightingStrategy;
import netflix.ocelli.loadbalancer.weighting.WeightingStrategy;

/**
 * Factory methods for the built-in {@link WeightingStrategy} implementations.
 */
public abstract class Weightings {
    /** @return strategy that gives every client the same weight */
    public static <C> WeightingStrategy<C> uniform() {
        return new EqualWeightStrategy<C>();
    }

    /**
     * @param func extracts a client's weight
     * @return strategy using the function's output directly as the weight
     */
    public static <C> WeightingStrategy<C> identity(Func1<C, Integer> func) {
        return new LinearWeightingStrategy<C>(func);
    }

    /**
     * @param func extracts a client's raw metric (e.g. pending request count)
     * @return strategy weighting each client against the maximum of all
     *         clients' metrics, so larger metric values get smaller weights
     */
    public static <C> WeightingStrategy<C> inverseMax(Func1<C, Integer> func) {
        return new InverseMaxWeightingStrategy<C>(func);
    }
}
6,231
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/functions/Stopwatches.java
package netflix.ocelli.functions;

import java.util.concurrent.TimeUnit;

import netflix.ocelli.util.Stopwatch;
import rx.functions.Func0;

/**
 * Factories for {@link Stopwatch} instances, expressed as
 * {@code Func0<Stopwatch>} so a fresh stopwatch can be created per timed
 * operation.
 *
 * @author elandau
 */
public class Stopwatches {
    /**
     * @return factory producing stopwatches backed by {@link System#nanoTime()};
     *         each stopwatch starts counting the moment it is created
     */
    public static Func0<Stopwatch> systemNano() {
        return new Func0<Stopwatch>() {
            @Override
            public Stopwatch call() {
                final long start = System.nanoTime();
                return new Stopwatch() {
                    @Override
                    public long elapsed(TimeUnit units) {
                        return units.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS);
                    }
                };
            }
        };
    }
}
6,232
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/functions/Limiters.java
package netflix.ocelli.functions;

import netflix.ocelli.stats.ExponentialAverage;
import rx.functions.Func1;

/**
 * Factories for limiter functions that decide whether an operation may
 * proceed.
 */
public abstract class Limiters {
    /**
     * Guard against excessive backup requests using an exponential moving
     * average on a per-sample basis, which is incremented by 1 for each
     * primary request and by 0 for each backup request. A backup request is
     * allowed as long as the average stays above the requested ratio of
     * primary to backup requests.
     *
     * Note that this implementation favors simplicity over accuracy and has
     * many drawbacks:
     * 1. Backup requests are per request and not tied to any time window
     * 2. I have yet to determine an equation that selects the proper window
     *    for the requested ratio so that the updated exponential moving average
     *    allows the ratio number of requests.
     *
     * @param ratio  threshold the moving average must exceed for a backup
     *               request to be allowed
     * @param window sample window passed to the moving average
     * @return function taking isPrimary and returning whether the request may
     *         proceed (primary requests always may)
     */
    public static Func1<Boolean, Boolean> exponential(final double ratio, final int window) {
        return new Func1<Boolean, Boolean>() {
            // Moving average of recent samples: primaries push it toward 1,
            // backups toward 0. (Assumes ExponentialAverage(window, initial)
            // semantics — TODO confirm against its implementation.)
            private ExponentialAverage exp = new ExponentialAverage(window, 0);

            @Override
            public Boolean call(Boolean isPrimary) {
                if (isPrimary) {
                    // Primary requests are always allowed and recorded as 1.
                    exp.add(1L);
                    return true;
                }
                // Allow a backup only while the average exceeds the ratio;
                // recording it as 0 drives the average down, throttling
                // subsequent backups.
                if (exp.get() > ratio) {
                    exp.add(0L);
                    return true;
                }
                return false;
            }
        };
    }
}
6,233
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/functions/Delays.java
package netflix.ocelli.functions; import java.util.concurrent.TimeUnit; import netflix.ocelli.DelayStrategy; public abstract class Delays { public static DelayStrategy fixed(final long delay, final TimeUnit units) { return new DelayStrategy() { @Override public long get(int count) { return TimeUnit.MILLISECONDS.convert(delay, units); } }; } public static DelayStrategy linear(final long delay, final TimeUnit units) { return new DelayStrategy() { @Override public long get(int count) { return count * TimeUnit.MILLISECONDS.convert(delay, units); } }; } public static DelayStrategy exp(final long step, final TimeUnit units) { return new DelayStrategy() { @Override public long get(int count) { if (count < 0) count = 0; else if (count > 30) count = 30; return (1 << count) * TimeUnit.MILLISECONDS.convert(step, units); } }; } public static DelayStrategy boundedExp(final long step, final long max, final TimeUnit units) { return new DelayStrategy() { @Override public long get(int count) { if (count < 0) count = 0; else if (count > 30) count = 30; long delay = (1 << count) * TimeUnit.MILLISECONDS.convert(step, units); if (delay > max) { return max; } return delay; } }; } public static DelayStrategy immediate() { return new DelayStrategy() { @Override public long get(int t1) { return 0L; } }; } }
6,234
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/functions/Failures.java
package netflix.ocelli.functions; import rx.Observable; import rx.functions.Func1; public abstract class Failures { public static <C> Func1<C, Observable<Throwable>> never() { return new Func1<C, Observable<Throwable>>() { @Override public Observable<Throwable> call(C client) { return Observable.never(); } }; } public static <C> Func1<C, Observable<Throwable>> always(final Throwable t) { return new Func1<C, Observable<Throwable>>() { @Override public Observable<Throwable> call(C client) { return Observable.error(t); } }; } }
6,235
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/functions/Metrics.java
package netflix.ocelli.functions;

import rx.functions.Func0;
import netflix.ocelli.stats.CKMSQuantiles;
import netflix.ocelli.stats.Quantiles;
import netflix.ocelli.util.SingleMetric;

/**
 * Utility class for creating common strategies for tracking specific types of metrics
 *
 * @author elandau
 */
public class Metrics {
    /**
     * @return factory producing constant metrics for {@code value}
     *         (see {@link #memoize(Object)})
     */
    public static <T> Func0<SingleMetric<T>> memoizeFactory(final T value) {
        return new Func0<SingleMetric<T>>() {
            @Override
            public SingleMetric<T> call() {
                return memoize(value);
            }
        };
    }

    /**
     * Return a predetermine constant value regardless of samples added.
     * @param value the constant to report
     * @return metric that always reports {@code value}
     */
    public static <T> SingleMetric<T> memoize(final T value) {
        return new SingleMetric<T>() {
            @Override
            public void add(T sample) {
                // Samples are intentionally ignored; the metric is constant.
            }

            @Override
            public void reset() {
            }

            @Override
            public T get() {
                return value;
            }
        };
    }

    /**
     * @return factory producing quantile-tracking metrics for the given
     *         percentile (see {@link #quantile(double)})
     */
    public static Func0<SingleMetric<Long>> quantileFactory(final double percentile) {
        return new Func0<SingleMetric<Long>>() {
            @Override
            public SingleMetric<Long> call() {
                return quantile(percentile);
            }
        };
    }

    /**
     * Use the default CKMSQuantiles algorithm to track a specific percentile
     * @param percentile target quantile in [0, 1]
     */
    public static SingleMetric<Long> quantile(final double percentile) {
        return quantile(new CKMSQuantiles(new CKMSQuantiles.Quantile[]{new CKMSQuantiles.Quantile(percentile, 1)}), percentile);
    }

    /**
     * Use an externally provided Quantiles algorithm to track a single percentile. Note that
     * quantiles may be shared and should track homogeneous operations.
     *
     * @param quantiles shared quantile estimator to feed samples into
     * @param percentile target quantile in [0, 1]
     */
    public static SingleMetric<Long> quantile(final Quantiles quantiles, final double percentile) {
        return new SingleMetric<Long>() {
            @Override
            public void add(Long sample) {
                // Note: the long sample is truncated to int by the Quantiles API.
                quantiles.insert(sample.intValue());
            }

            @Override
            public void reset() {
                // Intentionally a no-op; the underlying estimator may be shared.
            }

            @Override
            public Long get() {
                return (long)quantiles.get(percentile);
            }
        };
    }
}
6,236
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/retrys/BackupRequestRetryStrategy.java
package netflix.ocelli.retrys;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import netflix.ocelli.functions.Metrics;
import netflix.ocelli.functions.Retrys;
import netflix.ocelli.functions.Stopwatches;
import netflix.ocelli.util.SingleMetric;
import netflix.ocelli.util.Stopwatch;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Observable.Transformer;
import rx.Scheduler;
import rx.Subscriber;
import rx.functions.Action1;
import rx.functions.Func0;
import rx.functions.Func1;
import rx.schedulers.Schedulers;

/**
 * Retry strategy that kicks off a second request if the first request does not
 * respond within an expected amount of time. The original request remains in
 * flight until either one responds. The strategy tracks response latencies and
 * feeds them into a SingleMetric that is used to determine the backup request
 * timeout. A common metric to use is the 90th percentile response time.
 *
 * Note that the same BackupRequestRetryStrategy instance is stateful and should
 * be used for all requests. Multiple BackupRequestRetryStrategy instances may be
 * used for different request types known to have varying response latency
 * distributions.
 *
 * Usage,
 *
 * {@code
 * <pre>
 *
 * BackupRequestRetryStrategy strategy = BackupRequestRetryStrategy.builder()
 *      .withTimeoutMetric(Metrics.quantile(0.90))
 *      .withIsRetriablePolicy(somePolicyThatReturnsTrueOnRetriableErrors)
 *      .build();
 *
 * loadBalancer
 *      .flatMap(operation)
 *      .compose(strategy)
 *      .subscribe(responseHandler)
 * </pre>
 * code}
 *
 * @author elandau
 *
 * @param <T>
 */
public class BackupRequestRetryStrategy<T> implements Transformer<T, T> {
    public static Func0<Stopwatch> DEFAULT_CLOCK = Stopwatches.systemNano();

    private final Func0<Stopwatch> sw;
    private final SingleMetric<Long> metric;
    private final Func1<Throwable, Boolean> retriableError;
    private final Scheduler scheduler;

    public static class Builder<T> {
        private Func0<Stopwatch> sw = DEFAULT_CLOCK;
        // Default: fixed 10ms backup timeout until a real metric is supplied.
        private SingleMetric<Long> metric = Metrics.memoize(10L);
        private Func1<Throwable, Boolean> retriableError = Retrys.ALWAYS;
        private Scheduler scheduler = Schedulers.computation();

        /**
         * Function to determine if an exception is retriable or not. A non
         * retriable exception will result in an immediate error being returned
         * while the first retriable exception on either the primary or secondary
         * request will be ignored to allow the other request to complete.
         * @param retriableError
         */
        public Builder<T> withIsRetriablePolicy(Func1<Throwable, Boolean> retriableError) {
            this.retriableError = retriableError;
            return this;
        }

        /**
         * Function to determine the backup request timeout for each operation.
         * @param metric
         */
        public Builder<T> withTimeoutMetric(SingleMetric<Long> metric) {
            this.metric = metric;
            return this;
        }

        /**
         * Provide an external scheduler to drive the backup timeout. Use this
         * to test with a TestScheduler
         *
         * @param scheduler
         */
        public Builder<T> withScheduler(Scheduler scheduler) {
            this.scheduler = scheduler;
            return this;
        }

        /**
         * Factory for creating stopwatches. A new stopwatch is created per operation.
         * @param sw
         */
        public Builder<T> withStopwatch(Func0<Stopwatch> sw) {
            this.sw = sw;
            return this;
        }

        public BackupRequestRetryStrategy<T> build() {
            return new BackupRequestRetryStrategy<T>(this);
        }
    }

    public static <T> Builder<T> builder() {
        return new Builder<T>();
    }

    private BackupRequestRetryStrategy(Builder<T> builder) {
        this.metric = builder.metric;
        this.retriableError = builder.retriableError;
        this.scheduler = builder.scheduler;
        this.sw = builder.sw;
    }

    @Override
    public Observable<T> call(final Observable<T> o) {
        // Wrap the source so that every subscription starts its own stopwatch
        // and feeds the observed latency back into the metric, which in turn
        // drives future backup-request timeouts.
        Observable<T> timedO = Observable.create(new OnSubscribe<T>() {
            @Override
            public void call(Subscriber<? super T> s) {
                final Stopwatch timer = sw.call();
                o.doOnNext(new Action1<T>() {
                    @Override
                    public void call(T t1) {
                        metric.add(timer.elapsed(TimeUnit.MILLISECONDS));
                    }
                }).subscribe(s);
            }
        });

        // Race the primary subscription against a backup subscription that is
        // delayed by the current timeout metric; take(1) lets the first
        // responder win and unsubscribes the other.
        return Observable
            .just(timedO, timedO.delaySubscription(metric.get(), TimeUnit.MILLISECONDS, scheduler))
            .flatMap(new Func1<Observable<T>, Observable<T>>() {
                // Counts errors across the two racing requests.
                // NOTE(review): this counter lives in the composed Observable,
                // so it is shared across repeated subscriptions of the same
                // composed stream — confirm single-subscription usage.
                final AtomicInteger counter = new AtomicInteger();

                @Override
                public Observable<T> call(Observable<T> t1) {
                    return t1.onErrorResumeNext(new Func1<Throwable, Observable<T>>() {
                        @Override
                        public Observable<T> call(Throwable e) {
                            // Propagate when both requests have failed or the
                            // error is not retriable; otherwise swallow the
                            // first retriable error (never()) so the other
                            // in-flight request can still win the race.
                            if (counter.incrementAndGet() == 2 || !retriableError.call(e)) {
                                return Observable.error(e);
                            }
                            return Observable.never();
                        }
                    });
                }
            })
            .take(1);
    }
}
6,237
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/retrys/ExponentialBackoff.java
package netflix.ocelli.retrys; import java.util.Random; import java.util.concurrent.TimeUnit; import rx.Observable; import rx.functions.Func1; /** * Func1 to be passed to retryWhen which implements a robust random exponential backoff. The * random part of the exponential backoff ensures that some randomness is inserted so that multiple * clients blocked on a non-responsive resource spread out the retries to mitigate a thundering * herd. * * This class maintains retry count state and should be instantiated for entire top level request. * * @author elandau */ public class ExponentialBackoff implements Func1<Observable<? extends Throwable>, Observable<?>> { private static final int MAX_SHIFT = 30; private final int maxRetrys; private final long maxDelay; private final long slice; private final TimeUnit units; private final Func1<Throwable, Boolean> retryable; private static final Random rand = new Random(); private int tryCount; /** * Construct an exponential backoff * * @param maxRetrys Maximum number of retires to attempt * @param slice - Time interval multiplied by backoff amount * @param maxDelay - Upper bound allowable backoff delay * @param units - Time unit for slice and maxDelay * @param retryable - Function that returns true if the error is retryable or false if not. */ public ExponentialBackoff(int maxRetrys, long slice, long maxDelay, TimeUnit units, Func1<Throwable, Boolean> retryable) { this.maxDelay = maxDelay; this.maxRetrys = maxRetrys; this.slice = slice; this.units = units; this.retryable = retryable; this.tryCount = 0; } @Override public Observable<?> call(Observable<? 
extends Throwable> error) { return error.flatMap(new Func1<Throwable, Observable<?>>() { @Override public Observable<?> call(Throwable e) { // First make sure the error is actually retryable if (!retryable.call(e)) { return Observable.error(e); } if (tryCount >= maxRetrys) { return Observable.error(new Exception("Failed with " + tryCount + " retries", e)); } // Calculate the number of slices to wait int slices = (1 << Math.min(MAX_SHIFT, tryCount)); slices = (slices + rand.nextInt(slices+1)) / 2; long delay = slices * slice; if (maxDelay > 0 && delay > maxDelay) { delay = maxDelay; } tryCount++; return Observable.timer(delay, units); } }); } }
6,238
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/stats/ExponentialAverage.java
package netflix.ocelli.stats; import netflix.ocelli.util.AtomicDouble; import netflix.ocelli.util.SingleMetric; import rx.functions.Func0; public class ExponentialAverage implements SingleMetric<Long> { private final double k; private final AtomicDouble ema; private final double initial; public static Func0<SingleMetric<Long>> factory(final int N, final double initial) { return new Func0<SingleMetric<Long>>() { @Override public SingleMetric<Long> call() { return new ExponentialAverage(N, initial); } }; } public ExponentialAverage(int N, double initial) { this.initial = initial; this.k = 2.0/(double)(N+1); this.ema = new AtomicDouble(initial); } @Override public void add(Long sample) { double next; double current; do { current = ema.get(); next = sample * k + current * (1-k); } while(!ema.compareAndSet(current, next)); } @Override public Long get() { return (long)ema.get(); } @Override public void reset() { ema.set(initial); } }
6,239
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/stats/Frugal2UQuantiles.java
package netflix.ocelli.stats; import java.util.Random; /** * Implementation of the Frugal2U Algorithm. * * Reference: * Ma, Qiang, S. Muthukrishnan, and Mark Sandler. "Frugal Streaming for * Estimating Quantiles." Space-Efficient Data Structures, Streams, and * Algorithms. Springer Berlin Heidelberg, 2013. 77-96. * * Original code: <https://github.com/dgryski/go-frugal> * More info: http://blog.aggregateknowledge.com/2013/09/16/sketch-of-the-day-frugal-streaming/ * * @author Maycon Viana Bordin <mayconbordin@gmail.com> */ public class Frugal2UQuantiles implements Quantiles { private final Quantile quantiles[]; public Frugal2UQuantiles(Quantile[] quantiles) { this.quantiles = quantiles; } public Frugal2UQuantiles(double[] quantiles, int initialEstimate) { this.quantiles = new Quantile[quantiles.length]; for (int i=0; i<quantiles.length; i++) { this.quantiles[i] = new Quantile(initialEstimate, quantiles[i]); } } @Override public synchronized void insert(int value) { for (Quantile q : quantiles) { q.insert(value); } } public int get(double q) { for (Quantile quantile : quantiles) { if (quantile.q == q) return quantile.m; } return 0; } public class Quantile { int m; double q; int step = 1; int sign = 0; Random r = new Random(new Random().nextInt()); Quantile(int estimate, double quantile) { m = estimate; q = quantile; } void insert(int s) { if (sign == 0) { m = s; sign = 1; return; } if (s > m && r.nextDouble() > 1-q) { step += sign * f(step); if (step > 0) { m += step; } else { m += 1; } if (m > s) { step += (s - m); m = s; } if (sign < 0) { step = 1; } sign = 1; } else if (s < m && r.nextDouble() > q) { step += -sign * f(step); if (step > 0) { m -= step; } else { m--; } if (m < s) { step += (m - s); m = s; } if (sign > 0) { step = 1; } sign = -1; } } int f(int step) { return 1; } } }
6,240
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/stats/CKMSQuantiles.java
package netflix.ocelli.stats;

/*
 Copyright 2012 Andrew Wang (andrew@umbrant.com)

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */

import java.util.Arrays;
import java.util.LinkedList;
import java.util.ListIterator;

/**
 * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm
 * for streaming calculation of targeted high-percentile epsilon-approximate
 * quantiles.
 *
 * This is a generalization of the earlier work by Greenwald and Khanna (GK),
 * which essentially allows different error bounds on the targeted quantiles,
 * which allows for far more efficient calculation of high-percentiles.
 *
 * See: Cormode, Korn, Muthukrishnan, and Srivastava
 * "Effective Computation of Biased Quantiles over Data Streams" in ICDE 2005
 *
 * Greenwald and Khanna,
 * "Space-efficient online computation of quantile summaries" in SIGMOD 2001
 *
 * NOTE(review): insert() and get() are synchronized, but the fields are also
 * touched (unsynchronized) only through those two methods in this class.
 */
public class CKMSQuantiles implements Quantiles {
    /**
     * Total number of items in stream.
     */
    private int count = 0;

    /**
     * Used for tracking incremental compression.
     * NOTE(review): never read anywhere in this class — looks like a leftover
     * from an incremental-compression variant; confirm before removing.
     */
    private int compressIdx = 0;

    /**
     * Current list of sampled items, maintained in sorted order with error
     * bounds.
     */
    protected LinkedList<Item> sample;

    /**
     * Buffers incoming items to be inserted in batch.
     */
    private long[] buffer = new long[500];

    private int bufferCount = 0;

    /**
     * Array of Quantiles that we care about, along with desired error.
     */
    private final Quantile quantiles[];

    public CKMSQuantiles(Quantile[] quantiles) {
        this.quantiles = quantiles;
        this.sample = new LinkedList<Item>();
    }

    /**
     * Add a new value from the stream. Values are buffered and folded into the
     * sample (with compression) once the buffer fills.
     *
     * @param value the sampled value
     */
    @Override
    public synchronized void insert(int value) {
        buffer[bufferCount] = value;
        bufferCount++;

        if (bufferCount == buffer.length) {
            insertBatch();
            compress();
        }
    }

    /**
     * Get the estimated value at the specified quantile.
     *
     * @param q
     *            Queried quantile, e.g. 0.50 or 0.99.
     * @return Estimated value at that quantile; 0 if no samples yet.
     */
    @Override
    public synchronized int get(double q) {
        // clear the buffer so pending values are included in the estimate
        insertBatch();
        compress();

        if (sample.size() == 0) {
            return 0;
        }

        int rankMin = 0;
        int desired = (int) (q * count);

        // Walk the sample until the error window around the desired rank is
        // exceeded; the previous item is then the estimate.
        ListIterator<Item> it = sample.listIterator();
        Item prev, cur;
        cur = it.next();
        while (it.hasNext()) {
            prev = cur;
            cur = it.next();

            rankMin += prev.g;

            if (rankMin + cur.g + cur.delta > desired + (allowableError(desired) / 2)) {
                return (int) prev.value;
            }
        }

        // edge case of wanting max value
        return (int) sample.getLast().value;
    }

    /**
     * Specifies the allowable error for this rank, depending on which quantiles
     * are being targeted.
     *
     * This is the f(r_i, n) function from the CKMS paper. It's basically how
     * wide the range of this rank can be.
     *
     * @param rank
     *            the index in the list of samples
     */
    private double allowableError(int rank) {
        // NOTE: according to CKMS, this should be count, not size, but this
        // leads to error larger than the error bounds. Leaving it like this is
        // essentially a HACK, and blows up memory, but does "work".
        // int size = count;
        int size = sample.size();
        double minError = size + 1;

        for (Quantile q : quantiles) {
            double error;
            if (rank <= q.quantile * size) {
                error = q.u * (size - rank);
            } else {
                error = q.v * rank;
            }
            if (error < minError) {
                minError = error;
            }
        }

        return minError;
    }

    /**
     * Merge the sorted buffer contents into the (sorted) sample list.
     *
     * @return false if the buffer was empty and nothing was done
     */
    private boolean insertBatch() {
        if (bufferCount == 0) {
            return false;
        }

        Arrays.sort(buffer, 0, bufferCount);

        // Base case: no samples
        int start = 0;
        if (sample.size() == 0) {
            Item newItem = new Item(buffer[0], 1, 0);
            sample.add(newItem);
            start++;
            count++;
        }

        ListIterator<Item> it = sample.listIterator();
        Item item = it.next();
        for (int i = start; i < bufferCount; i++) {
            long v = buffer[i];
            // Advance to the first sampled item >= v (buffer is sorted, so the
            // iterator never has to move backwards between buffer values).
            while (it.nextIndex() < sample.size() && item.value < v) {
                item = it.next();
            }

            // If we found that bigger item, back up so we insert ourselves
            // before it
            if (item.value > v) {
                it.previous();
            }

            // We use different indexes for the edge comparisons, because of the
            // above
            // if statement that adjusts the iterator
            int delta;
            if (it.previousIndex() == 0 || it.nextIndex() == sample.size()) {
                delta = 0;
            } else {
                delta = ((int) Math.floor(allowableError(it.nextIndex()))) - 1;
            }

            Item newItem = new Item(v, 1, delta);
            it.add(newItem);
            count++;
            item = newItem;
        }

        bufferCount = 0;
        return true;
    }

    /**
     * Try to remove extraneous items from the set of sampled items. This checks
     * if an item is unnecessary based on the desired error bounds, and merges
     * it with the adjacent item if it is.
     */
    private void compress() {
        if (sample.size() < 2) {
            return;
        }

        ListIterator<Item> it = sample.listIterator();
        // NOTE(review): 'removed' is only incremented, never read.
        int removed = 0;

        Item prev = null;
        Item next = it.next();
        while (it.hasNext()) {
            prev = next;
            next = it.next();

            if (prev.g + next.g + next.delta <= allowableError(it.previousIndex())) {
                next.g += prev.g;
                // Remove prev. it.remove() kills the last thing returned.
                it.previous();
                it.previous();
                it.remove();
                // it.next() is now equal to next, skip it back forward again
                it.next();
                removed++;
            }
        }
    }

    /**
     * One entry of the sample: a value with its GK-style rank bookkeeping
     * (g = gap in min-rank to the previous item, delta = rank uncertainty).
     */
    private class Item {
        public final long value;
        public int g;
        public final int delta;

        public Item(long value, int lower_delta, int delta) {
            this.value = value;
            this.g = lower_delta;
            this.delta = delta;
        }

        @Override
        public String toString() {
            return String.format("%d, %d, %d", value, g, delta);
        }
    }

    /**
     * A targeted quantile with its desired error bound; u and v are the
     * precomputed error coefficients used by allowableError().
     */
    public static class Quantile {
        public final double quantile;
        public final double error;
        public final double u;
        public final double v;

        public Quantile(double quantile, double error) {
            this.quantile = quantile;
            this.error = error;
            u = 2.0 * error / (1.0 - quantile);
            v = 2.0 * error / quantile;
        }

        @Override
        public String toString() {
            return String.format("Q{q=%.3f, eps=%.3f})", quantile, error);
        }
    }
}
6,241
0
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/stats/Quantiles.java
package netflix.ocelli.stats; /** * Contract for tracking percentiles in a dataset * * @author elandau */ public interface Quantiles { /** * Add a sample * @param value */ public void insert(int value); /** * @param get (0 .. 1.0) * @return Get the Nth percentile */ public int get(double percentile); }
6,242
0
Create_ds/ocelli/ocelli-rxnetty/src/test/java/netflix/ocelli/rxnetty
Create_ds/ocelli/ocelli-rxnetty/src/test/java/netflix/ocelli/rxnetty/internal/LoadBalancingProviderTest.java
package netflix.ocelli.rxnetty.internal;

import io.reactivex.netty.channel.Connection;
import io.reactivex.netty.client.ConnectionFactory;
import io.reactivex.netty.client.ConnectionObservable;
import io.reactivex.netty.client.ConnectionProvider;
import io.reactivex.netty.protocol.tcp.client.events.TcpClientEventListener;
import netflix.ocelli.Instance;
import netflix.ocelli.rxnetty.FailureListener;
import netflix.ocelli.rxnetty.internal.AbstractLoadBalancer.LoadBalancingProvider;
import org.junit.Rule;
import org.junit.Test;
import org.mockito.verification.VerificationMode;
import rx.Observable;
import rx.functions.Func1;

import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.hamcrest.MatcherAssert.*;
import static org.hamcrest.Matchers.*;
import static org.mockito.Mockito.*;

/**
 * Tests for {@link AbstractLoadBalancer.LoadBalancingProvider}: round-robin
 * host selection and propagation of the failure-listener subscription.
 */
public class LoadBalancingProviderTest {

    @Rule
    public final LoadBalancerRule loadBalancerRule = new LoadBalancerRule();

    /**
     * Repeated subscriptions to the same ConnectionObservable should cycle
     * through the hosts in round-robin order.
     */
    @Test(timeout = 60000)
    public void testRoundRobin() throws Exception {
        List<Instance<SocketAddress>> hosts = loadBalancerRule.setupDefault();
        assertThat("Unexpected hosts found.", hosts, hasSize(2));

        AbstractLoadBalancer<String, String> loadBalancer = loadBalancerRule.getLoadBalancer();
        ConnectionFactory<String, String> cfMock = loadBalancerRule.newConnectionFactoryMock();
        Observable<Instance<ConnectionProvider<String, String>>> providers =
                loadBalancerRule.getHostsAsConnectionProviders(cfMock);

        LoadBalancingProvider lbProvider = newLoadBalancingProvider(loadBalancer, cfMock, providers);

        @SuppressWarnings("unchecked")
        ConnectionObservable<String, String> connectionObservable = lbProvider.nextConnection();

        // Each connect() subscribes again, which should pick the next host.
        assertNextConnection(hosts.get(0).getValue(), cfMock, connectionObservable, times(1));
        assertNextConnection(hosts.get(1).getValue(), cfMock, connectionObservable, times(1));
        assertNextConnection(hosts.get(0).getValue(), cfMock, connectionObservable, times(2) /*Invoked once above with same host*/);
    }

    /**
     * The event listener produced by the factory must be subscribed to the
     * connection's events (verified here via the close-start callback).
     */
    @Test(timeout = 60000)
    public void testListenerSubscription() throws Exception {
        final AtomicBoolean listenerCalled = new AtomicBoolean();
        final TcpClientEventListener listener = new TcpClientEventListener() {
            @Override
            public void onConnectionCloseStart() {
                listenerCalled.set(true);
            }
        };

        InetSocketAddress host = new InetSocketAddress(0);
        loadBalancerRule.setup(new Func1<FailureListener, TcpClientEventListener>() {
            @Override
            public TcpClientEventListener call(FailureListener failureListener) {
                return listener;
            }
        }, host);

        AbstractLoadBalancer<String, String> loadBalancer = loadBalancerRule.getLoadBalancer();
        ConnectionFactory<String, String> cfMock = loadBalancerRule.newConnectionFactoryMock();
        Observable<Instance<ConnectionProvider<String, String>>> providers =
                loadBalancerRule.getHostsAsConnectionProviders(cfMock);

        LoadBalancingProvider lbProvider = newLoadBalancingProvider(loadBalancer, cfMock, providers);

        @SuppressWarnings("unchecked")
        ConnectionObservable<String, String> connectionObservable = lbProvider.nextConnection();

        Connection<String, String> c = assertNextConnection(host, cfMock, connectionObservable, times(1));

        // Closing the connection should fire the registered listener.
        c.closeNow();

        assertThat("Listener not called.", listenerCalled.get(), is(true));
    }

    /**
     * Subscribe to the observable, then verify the factory was asked for a
     * connection to the expected host the expected number of times.
     */
    protected Connection<String, String> assertNextConnection(SocketAddress host,
                                                              ConnectionFactory<String, String> cfMock,
                                                              ConnectionObservable<String, String> connectionObservable,
                                                              VerificationMode verificationMode) {
        Connection<String, String> c = loadBalancerRule.connect(connectionObservable);
        verify(cfMock, verificationMode).newConnection(host);
        return c;
    }

    /** Instantiate the (inner-class) provider under test. */
    protected LoadBalancingProvider newLoadBalancingProvider(AbstractLoadBalancer<String, String> loadBalancer,
                                                             ConnectionFactory<String, String> cfMock,
                                                             Observable<Instance<ConnectionProvider<String, String>>> providers) {
        LoadBalancingProvider lbProvider = loadBalancer.new LoadBalancingProvider(cfMock, providers);
        return lbProvider;
    }
}
6,243
0
Create_ds/ocelli/ocelli-rxnetty/src/test/java/netflix/ocelli/rxnetty
Create_ds/ocelli/ocelli-rxnetty/src/test/java/netflix/ocelli/rxnetty/internal/LoadBalancerRule.java
package netflix.ocelli.rxnetty.internal;

import io.netty.channel.embedded.EmbeddedChannel;
import io.reactivex.netty.channel.Connection;
import io.reactivex.netty.channel.ConnectionImpl;
import io.reactivex.netty.client.ConnectionFactory;
import io.reactivex.netty.client.ConnectionObservable;
import io.reactivex.netty.client.ConnectionObservable.OnSubcribeFunc;
import io.reactivex.netty.client.ConnectionProvider;
import io.reactivex.netty.client.events.ClientEventListener;
import io.reactivex.netty.protocol.tcp.client.events.TcpClientEventListener;
import io.reactivex.netty.protocol.tcp.client.events.TcpClientEventPublisher;
import netflix.ocelli.Instance;
import netflix.ocelli.LoadBalancerStrategy;
import netflix.ocelli.loadbalancer.RoundRobinLoadBalancer;
import netflix.ocelli.rxnetty.FailureListener;
import org.junit.rules.ExternalResource;
import org.junit.runner.Description;
import org.junit.runners.model.Statement;
import org.mockito.Mockito;
import rx.Observable;
import rx.Subscriber;
import rx.Subscription;
import rx.functions.Func1;
import rx.functions.Func3;
import rx.observers.TestSubscriber;

import java.net.SocketAddress;
import java.util.ArrayList;
import java.util.List;

/**
 * JUnit rule that builds an {@link AbstractLoadBalancer} over a configurable
 * set of hosts and provides helpers (mock connection factories, blocking
 * connect, host-to-provider mapping) for load-balancer tests.
 *
 * NOTE(review): the public getter getLoadBalancingStratgey() and related
 * fields carry a typo ("Stratgey"); the getter is public so renaming it would
 * break callers.
 */
public class LoadBalancerRule extends ExternalResource {

    private Observable<Instance<SocketAddress>> hosts;
    private Func1<FailureListener, ? extends TcpClientEventListener> eventListenerFactory;
    private LoadBalancerStrategy<HostConnectionProvider<String, String>> loadBalancingStratgey;
    private AbstractLoadBalancer<String, String> loadBalancer;
    // Optional factory for building the load balancer; when null an anonymous
    // AbstractLoadBalancer subclass is created in setup().
    private Func3<Observable<Instance<SocketAddress>>, Func1<FailureListener, ? extends TcpClientEventListener>,
            LoadBalancerStrategy<HostConnectionProvider<String, String>>,
            AbstractLoadBalancer<String, String>> lbFactory;

    public LoadBalancerRule() {
    }

    public LoadBalancerRule(Func3<Observable<Instance<SocketAddress>>, Func1<FailureListener, ? extends TcpClientEventListener>,
            LoadBalancerStrategy<HostConnectionProvider<String, String>>,
            AbstractLoadBalancer<String, String>> lbFactory) {
        this.lbFactory = lbFactory;
    }

    @Override
    public Statement apply(final Statement base, Description description) {
        // NOTE(review): this just evaluates the base statement, bypassing
        // ExternalResource's before()/after() hooks — presumably intentional
        // since setup is done explicitly per test; confirm.
        return new Statement() {
            @Override
            public void evaluate() throws Throwable {
                base.evaluate();
            }
        };
    }

    public AbstractLoadBalancer<String, String> getLoadBalancer() {
        return loadBalancer;
    }

    /**
     * Configure the rule with two dummy hosts and a round-robin strategy.
     *
     * @return the configured host instances (materialized from the stream)
     */
    public List<Instance<SocketAddress>> setupDefault() {
        final List<Instance<SocketAddress>> instances = new ArrayList<>();
        instances.add(new DummyInstance());
        instances.add(new DummyInstance());

        setup(instances.get(0).getValue(), instances.get(1).getValue());

        return hosts.toList().toBlocking().single();
    }

    /** Setup with the given hosts and a no-op (null-returning) listener factory. */
    public AbstractLoadBalancer<String, String> setup(SocketAddress... hosts) {
        return setup(new Func1<FailureListener, TcpClientEventListener>() {
            @Override
            public TcpClientEventListener call(FailureListener failureListener) {
                return null;
            }
        }, hosts);
    }

    /** Setup with the given listener factory and a round-robin strategy. */
    public AbstractLoadBalancer<String, String> setup(
            Func1<FailureListener, ? extends TcpClientEventListener> eventListenerFactory,
            SocketAddress... hosts) {
        return setup(eventListenerFactory,
                     new RoundRobinLoadBalancer<HostConnectionProvider<String, String>>(-1), hosts);
    }

    /** Wrap each raw address in a DummyInstance and delegate to the list overload. */
    public AbstractLoadBalancer<String, String> setup(
            Func1<FailureListener, ? extends TcpClientEventListener> eventListenerFactory,
            LoadBalancerStrategy<HostConnectionProvider<String, String>> loadBalancingStratgey,
            SocketAddress... hosts) {
        List<Instance<SocketAddress>> instances = new ArrayList<>(hosts.length);
        for (SocketAddress host : hosts) {
            instances.add(new DummyInstance(host));
        }
        return setup(eventListenerFactory, loadBalancingStratgey, instances);
    }

    /**
     * Primary setup: records the configuration and builds the load balancer,
     * either via the injected lbFactory or an anonymous subclass whose
     * per-host provider simply calls the connection factory with that host.
     */
    public AbstractLoadBalancer<String, String> setup(
            Func1<FailureListener, ? extends TcpClientEventListener> eventListenerFactory,
            LoadBalancerStrategy<HostConnectionProvider<String, String>> loadBalancingStratgey,
            List<Instance<SocketAddress>> hosts) {
        this.hosts = Observable.from(hosts);
        this.eventListenerFactory = eventListenerFactory;
        this.loadBalancingStratgey = loadBalancingStratgey;

        if (null != lbFactory) {
            loadBalancer = lbFactory.call(this.hosts, eventListenerFactory, loadBalancingStratgey);
            return loadBalancer;
        }

        loadBalancer = new AbstractLoadBalancer<String, String>(this.hosts, eventListenerFactory,
                                                                loadBalancingStratgey) {
            @Override
            protected ConnectionProvider<String, String> newConnectionProviderForHost(final Instance<SocketAddress> host,
                                                                                      final ConnectionFactory<String, String> connectionFactory) {
                return new ConnectionProvider<String, String>(connectionFactory) {
                    @Override
                    public ConnectionObservable<String, String> nextConnection() {
                        return connectionFactory.newConnection(host.getValue());
                    }
                };
            }
        };

        return getLoadBalancer();
    }

    public Func1<FailureListener, ? extends TcpClientEventListener> getEventListenerFactory() {
        return eventListenerFactory;
    }

    public Observable<Instance<SocketAddress>> getHosts() {
        return hosts;
    }

    /**
     * Map each configured host instance to a ConnectionProvider instance that
     * asks the supplied (mock) factory for a connection to that host, keeping
     * the host's lifecycle.
     */
    public Observable<Instance<ConnectionProvider<String, String>>> getHostsAsConnectionProviders(
            final ConnectionFactory<String, String> cfMock) {
        return hosts.map(new Func1<Instance<SocketAddress>, Instance<ConnectionProvider<String, String>>>() {
            @Override
            public Instance<ConnectionProvider<String, String>> call(final Instance<SocketAddress> i) {
                final ConnectionProvider<String, String> cp = new ConnectionProvider<String, String>(cfMock) {
                    @Override
                    public ConnectionObservable<String, String> nextConnection() {
                        return cfMock.newConnection(i.getValue());
                    }
                };
                return new Instance<ConnectionProvider<String, String>>() {
                    @Override
                    public Observable<Void> getLifecycle() {
                        return i.getLifecycle();
                    }

                    @Override
                    public ConnectionProvider<String, String> getValue() {
                        return cp;
                    }
                };
            }
        });
    }

    public LoadBalancerStrategy<HostConnectionProvider<String, String>> getLoadBalancingStratgey() {
        return loadBalancingStratgey;
    }

    /**
     * Subscribe to the connection observable and block until it completes,
     * asserting exactly one connection was emitted without errors.
     */
    public Connection<String, String> connect(ConnectionObservable<String, String> connectionObservable) {
        TestSubscriber<Connection<String, String>> testSub = new TestSubscriber<>();
        connectionObservable.subscribe(testSub);
        testSub.awaitTerminalEvent();
        testSub.assertNoErrors();
        testSub.assertValueCount(1);
        return testSub.getOnNextEvents().get(0);
    }

    /**
     * Build a Mockito ConnectionFactory mock that, for every configured host,
     * returns a ConnectionObservable backed by an EmbeddedChannel connection
     * and wires event-listener subscriptions to a real event publisher.
     */
    public ConnectionFactory<String, String> newConnectionFactoryMock() {
        @SuppressWarnings("unchecked")
        final ConnectionFactory<String, String> cfMock = Mockito.mock(ConnectionFactory.class);

        List<Instance<SocketAddress>> instances = hosts.toList().toBlocking().single();

        for (Instance<SocketAddress> instance : instances) {
            EmbeddedChannel channel = new EmbeddedChannel();
            final TcpClientEventPublisher eventPublisher = new TcpClientEventPublisher();
            final Connection<String, String> mockConnection = ConnectionImpl.create(channel, eventPublisher,
                                                                                    eventPublisher);
            Mockito.when(cfMock.newConnection(instance.getValue()))
                   .thenReturn(ConnectionObservable.createNew(new OnSubcribeFunc<String, String>() {
                       @Override
                       public Subscription subscribeForEvents(ClientEventListener eventListener) {
                           return eventPublisher.subscribe((TcpClientEventListener) eventListener);
                       }

                       @Override
                       public void call(Subscriber<? super Connection<String, String>> subscriber) {
                           subscriber.onNext(mockConnection);
                           subscriber.onCompleted();
                       }
                   }));
        }

        return cfMock;
    }

    /**
     * Host instance with a never-completing lifecycle; defaults to a fresh
     * synthetic SocketAddress when none is supplied.
     */
    private static class DummyInstance extends Instance<SocketAddress> {

        private final SocketAddress socketAddress;

        private DummyInstance() {
            socketAddress = new SocketAddress() {
                private static final long serialVersionUID = 711795406919943230L;

                @Override
                public String toString() {
                    return "Dummy socket address: " + hashCode();
                }
            };
        }

        private DummyInstance(SocketAddress socketAddress) {
            this.socketAddress = socketAddress;
        }

        @Override
        public Observable<Void> getLifecycle() {
            return Observable.never();
        }

        @Override
        public SocketAddress getValue() {
            return socketAddress;
        }
    }
}
6,244
0
Create_ds/ocelli/ocelli-rxnetty/src/test/java/netflix/ocelli/rxnetty
Create_ds/ocelli/ocelli-rxnetty/src/test/java/netflix/ocelli/rxnetty/internal/AbstractLoadBalancerTest.java
package netflix.ocelli.rxnetty.internal; import io.reactivex.netty.client.ConnectionFactory; import io.reactivex.netty.client.ConnectionObservable; import io.reactivex.netty.client.ConnectionProvider; import netflix.ocelli.Instance; import org.junit.Rule; import org.junit.Test; import org.mockito.Mockito; import java.net.SocketAddress; import java.util.List; public class AbstractLoadBalancerTest { @Rule public final LoadBalancerRule lbRule = new LoadBalancerRule(); @Test(timeout = 60000) public void testRoundRobin() throws Exception { List<Instance<SocketAddress>> hosts = lbRule.setupDefault(); AbstractLoadBalancer<String, String> loadBalancer = lbRule.getLoadBalancer(); ConnectionFactory<String, String> cfMock = lbRule.newConnectionFactoryMock(); ConnectionProvider<String, String> cp = loadBalancer.toConnectionProvider(cfMock); ConnectionObservable<String, String> co = cp.nextConnection(); lbRule.connect(co); Mockito.verify(cfMock).newConnection(hosts.get(0).getValue()); Mockito.verifyNoMoreInteractions(cfMock); cp = loadBalancer.toConnectionProvider(cfMock); co = cp.nextConnection(); lbRule.connect(co); Mockito.verify(cfMock).newConnection(hosts.get(1).getValue()); Mockito.verifyNoMoreInteractions(cfMock); } }
6,245
0
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/FailureListener.java
package netflix.ocelli.rxnetty; import rx.Scheduler; import java.util.concurrent.TimeUnit; /** * A contract for taking actions upon detecting an unhealthy host. A failure listener instance is always associated with * a unique host, so any action taken on this listener will directly be applied to the associated host. */ public interface FailureListener { /** * This action will remove the host associated with this listener from the load balancing pool. */ void remove(); /** * This action quarantines the host associated with this listener from the load balancing pool, for the passed * {@code quarantineDuration}. The host will be added back to the load balancing pool after the quarantine duration * is elapsed. * * @param quarantineDuration Duration for keeping the host quarantined. * @param timeUnit Time unit for the duration. */ void quarantine(long quarantineDuration, TimeUnit timeUnit); /** * This action quarantines the host associated with this listener from the load balancing pool, for the passed * {@code quarantineDuration}. The host will be added back to the load balancing pool after the quarantine duration * is elapsed. * * @param quarantineDuration Duration for keeping the host quarantined. * @param timeUnit Time unit for the duration. * @param timerScheduler Scheduler to be used for the quarantine duration timer. */ void quarantine(long quarantineDuration, TimeUnit timeUnit, Scheduler timerScheduler); }
6,246
0
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/internal/AbstractLoadBalancer.java
package netflix.ocelli.rxnetty.internal;

import io.reactivex.netty.channel.Connection;
import io.reactivex.netty.client.ConnectionFactory;
import io.reactivex.netty.client.ConnectionObservable;
import io.reactivex.netty.client.ConnectionObservable.AbstractOnSubscribeFunc;
import io.reactivex.netty.client.ConnectionProvider;
import io.reactivex.netty.client.pool.PooledConnectionProvider;
import io.reactivex.netty.protocol.tcp.client.events.TcpClientEventListener;
import netflix.ocelli.Instance;
import netflix.ocelli.LoadBalancerStrategy;
import netflix.ocelli.rxnetty.FailureListener;
import rx.Observable;
import rx.Subscriber;
import rx.functions.Action1;
import rx.functions.Func1;

import java.net.SocketAddress;
import java.util.List;
import java.util.NoSuchElementException;

/**
 * An abstract load balancer for all TCP based protocols.
 *
 * <h2>Failure detection</h2>
 *
 * For every host that this load balancer connects, it provides a way to register a {@link TcpClientEventListener}
 * instance that can detect failures based on the various events received. Upon detecting the failure, an appropriate
 * action can be taken for the host, using the provided {@link FailureListener}.
 *
 * <h2>Use with RxNetty clients</h2>
 *
 * In order to use this load balancer with RxNetty clients, one has to convert it to an instance of
 * {@link ConnectionProvider} by calling {@link #toConnectionProvider()}
 *
 * @param <W> Type of Objects written on the connections created by this load balancer.
 * @param <R> Type of Objects read from the connections created by this load balancer.
 */
public abstract class AbstractLoadBalancer<W, R> {

    /** Stream of host instances; each instance's lifecycle governs its presence in the pool. */
    protected final Observable<Instance<SocketAddress>> hosts;
    /** Strategy used to pick one host-bound provider out of the active set per connection request. */
    protected final LoadBalancerStrategy<HostConnectionProvider<W, R>> loadBalancer;
    /** Per-host factory for failure-detecting event listeners; may produce {@code null} listeners. */
    protected final Func1<FailureListener, ? extends TcpClientEventListener> eventListenerFactory;

    protected AbstractLoadBalancer(Observable<Instance<SocketAddress>> hosts,
                                   Func1<FailureListener, ? extends TcpClientEventListener> eventListenerFactory,
                                   LoadBalancerStrategy<HostConnectionProvider<W, R>> loadBalancer) {
        this.hosts = hosts;
        this.eventListenerFactory = eventListenerFactory;
        this.loadBalancer = loadBalancer;
    }

    /**
     * Converts this load balancer to a {@link ConnectionProvider} to be used with RxNetty clients.
     *
     * @return {@link ConnectionProvider} for this load balancer.
     */
    public ConnectionProvider<W, R> toConnectionProvider() {
        return ConnectionProvider.create(new Func1<ConnectionFactory<W, R>, ConnectionProvider<W, R>>() {
            @Override
            public ConnectionProvider<W, R> call(final ConnectionFactory<W, R> connectionFactory) {
                return toConnectionProvider(connectionFactory);
            }
        });
    }

    /*Visible for testing*/ConnectionProvider<W, R> toConnectionProvider(final ConnectionFactory<W, R> factory) {
        // Wrap each raw host address into a pooled, host-bound ConnectionProvider, preserving the
        // host's lifecycle so removal of the host removes the provider.
        final Observable<Instance<ConnectionProvider<W, R>>> providerStream =
                hosts.map(new Func1<Instance<SocketAddress>, Instance<ConnectionProvider<W, R>>>() {
                    @Override
                    public Instance<ConnectionProvider<W, R>> call(final Instance<SocketAddress> host) {
                        final ConnectionProvider<W, R> pcp = newConnectionProviderForHost(host, factory);
                        return new Instance<ConnectionProvider<W, R>>() {
                            @Override
                            public Observable<Void> getLifecycle() {
                                return host.getLifecycle();
                            }

                            @Override
                            public ConnectionProvider<W, R> getValue() {
                                return pcp;
                            }
                        };
                    }
                });
        return new LoadBalancingProvider(factory, providerStream);
    }

    /**
     * Creates the per-host connection provider. Subclasses may override to change pooling behavior.
     */
    protected ConnectionProvider<W, R> newConnectionProviderForHost(Instance<SocketAddress> host,
                                                                    ConnectionFactory<W, R> connectionFactory) {
        /*
         * Bounds on the concurrency (concurrent connections) should be enforced at the request
         * processing level, providing a bound on number of connections is a difficult number
         * to determine.
         */
        return PooledConnectionProvider.createUnbounded(connectionFactory, host.getValue());
    }

    /*Visible for testing*/class LoadBalancingProvider extends ConnectionProvider<W, R> {

        private final HostHolder<W, R> hostHolder;

        public LoadBalancingProvider(ConnectionFactory<W, R> connectionFactory,
                                     Observable<Instance<ConnectionProvider<W, R>>> providerStream) {
            super(connectionFactory);
            hostHolder = new HostHolder<>(providerStream, eventListenerFactory);
        }

        @Override
        public ConnectionObservable<R, W> nextConnection() {
            return ConnectionObservable.createNew(new AbstractOnSubscribeFunc<R, W>() {
                @Override
                protected void doSubscribe(Subscriber<? super Connection<R, W>> sub,
                                           Action1<ConnectionObservable<R, W>> subscribeAllListenersAction) {
                    final List<HostConnectionProvider<W, R>> providers = hostHolder.getProviders();
                    if (null == providers || providers.isEmpty()) {
                        sub.onError(new NoSuchElementException("No hosts available."));
                        // Fix: without this return, choose() below was still invoked on an
                        // empty/null list after the subscriber was already terminated.
                        return;
                    }
                    HostConnectionProvider<W, R> hcp = loadBalancer.choose(providers);
                    ConnectionObservable<R, W> nextConnection = hcp.getProvider().nextConnection();
                    if (hcp.getEventsListener() != null) {
                        nextConnection.subscribeForEvents(hcp.getEventsListener());
                    }
                    subscribeAllListenersAction.call(nextConnection);
                    nextConnection.unsafeSubscribe(sub);
                }
            });
        }

        @Override
        protected Observable<Void> doShutdown() {
            return hostHolder.shutdown();
        }
    }
}
6,247
0
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/internal/HostHolder.java
package netflix.ocelli.rxnetty.internal;

import io.reactivex.netty.client.ConnectionProvider;
import io.reactivex.netty.protocol.tcp.client.events.TcpClientEventListener;
import netflix.ocelli.Instance;
import netflix.ocelli.rxnetty.FailureListener;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Subscriber;
import rx.Subscription;
import rx.functions.Action1;
import rx.functions.Func1;

import java.util.Collections;
import java.util.List;

/**
 * Maintains the latest snapshot of host-bound connection providers, kept current by subscribing
 * to the provider stream collected via {@link HostCollector}.
 */
class HostHolder<W, R> {

    // Latest snapshot; volatile so readers on any thread see the most recent list.
    private volatile List<HostConnectionProvider<W, R>> current = Collections.emptyList();
    private final Subscription snapshotSubscription;

    HostHolder(Observable<Instance<ConnectionProvider<W, R>>> providerStream,
               final Func1<FailureListener, ? extends TcpClientEventListener> eventListenerFactory) {
        // HostCollector emits concurrently, hence the serialize().
        final Observable<List<HostConnectionProvider<W, R>>> snapshots =
                providerStream.lift(new HostCollector<W, R>(eventListenerFactory)).serialize();
        snapshotSubscription = snapshots.subscribe(new Action1<List<HostConnectionProvider<W, R>>>() {
            @Override
            public void call(List<HostConnectionProvider<W, R>> latest) {
                current = latest;
            }
        });
    }

    /** Returns the most recently published provider list. */
    List<HostConnectionProvider<W, R>> getProviders() {
        return current;
    }

    /** Stops tracking the provider stream; completes once unsubscription is done. */
    public Observable<Void> shutdown() {
        return Observable.create(new OnSubscribe<Void>() {
            @Override
            public void call(Subscriber<? super Void> subscriber) {
                if (null != snapshotSubscription) {
                    snapshotSubscription.unsubscribe();
                }
                subscriber.onCompleted();
            }
        });
    }
}
6,248
0
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/internal/HostConnectionProvider.java
package netflix.ocelli.rxnetty.internal;

import io.reactivex.netty.client.ConnectionProvider;
import io.reactivex.netty.protocol.tcp.client.events.TcpClientEventListener;

import java.util.Collection;
import java.util.Objects;

/**
 * A pairing of a host-bound {@link ConnectionProvider} with the (optional) failure-detecting
 * {@link TcpClientEventListener} created for that host.
 *
 * <p>Equality and hash code are defined solely by the provider, which lets callers locate an
 * entry in a collection without knowing the listener (see {@link #removeFrom(Collection, ConnectionProvider)}).
 */
public class HostConnectionProvider<W, R> {

    private final ConnectionProvider<W, R> provider;
    private final TcpClientEventListener eventsListener;

    private HostConnectionProvider(ConnectionProvider<W, R> provider) {
        this(provider, null);
    }

    HostConnectionProvider(ConnectionProvider<W, R> provider, TcpClientEventListener eventsListener) {
        this.provider = provider;
        this.eventsListener = eventsListener;
    }

    /**
     * Removes the entry wrapping {@code toRemove} from {@code c}, relying on provider-only equality.
     *
     * @return {@code true} if an entry was removed.
     */
    public static <W, R> boolean removeFrom(Collection<HostConnectionProvider<W, R>> c,
                                            ConnectionProvider<W, R> toRemove) {
        return c.remove(new HostConnectionProvider<W, R>(toRemove));
    }

    public ConnectionProvider<W, R> getProvider() {
        return provider;
    }

    public TcpClientEventListener getEventsListener() {
        return eventsListener;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof HostConnectionProvider)) {
            return false;
        }
        // Wildcard cast avoids the unchecked warning; equality depends only on the provider.
        HostConnectionProvider<?, ?> that = (HostConnectionProvider<?, ?>) o;
        return Objects.equals(provider, that.provider);
    }

    @Override
    public int hashCode() {
        return Objects.hashCode(provider);
    }
}
6,249
0
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/internal/HostCollector.java
package netflix.ocelli.rxnetty.internal;

import io.reactivex.netty.client.ConnectionProvider;
import io.reactivex.netty.protocol.tcp.client.events.TcpClientEventListener;
import netflix.ocelli.Instance;
import netflix.ocelli.rxnetty.FailureListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observable.Operator;
import rx.Scheduler;
import rx.Subscriber;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.functions.Actions;
import rx.functions.Func1;
import rx.schedulers.Schedulers;

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;

/**
 * An Rx {@link Operator} that folds a stream of per-host {@link ConnectionProvider} instances
 * into a stream of host-list snapshots. Each incoming instance is wrapped into a
 * {@link HostConnectionProvider} (paired with a listener from {@code eventListenerFactory}),
 * added to the shared list, and removed again when its lifecycle ends or a
 * {@link FailureListener} action removes/quarantines it. After every mutation the (same,
 * copy-on-write) list is re-emitted downstream.
 *
 * <p>NOTE(review): mutations can arrive from lifecycle and quarantine-timer threads
 * concurrently; downstream is expected to {@code serialize()} (see {@link HostHolder}).
 */
class HostCollector<W, R> implements Operator<List<HostConnectionProvider<W, R>>, Instance<ConnectionProvider<W, R>>> {

    private static final Logger logger = LoggerFactory.getLogger(HostCollector.class);

    // Shared live host list; copy-on-write so readers never see a partially mutated list.
    protected final CopyOnWriteArrayList<HostConnectionProvider<W, R>> currentHosts = new CopyOnWriteArrayList<>();

    // Creates one failure-detecting listener per host; may return null (no failure detection).
    private final Func1<FailureListener, ? extends TcpClientEventListener> eventListenerFactory;

    HostCollector(Func1<FailureListener, ? extends TcpClientEventListener> eventListenerFactory) {
        this.eventListenerFactory = eventListenerFactory;
    }

    @Override
    public Subscriber<? super Instance<ConnectionProvider<W, R>>> call(final Subscriber<? super List<HostConnectionProvider<W, R>>> o) {
        return new Subscriber<Instance<ConnectionProvider<W, R>>>() {
            @Override
            public void onCompleted() {
                o.onCompleted();
            }

            @Override
            public void onError(Throwable e) {
                o.onError(e);
            }

            @Override
            public void onNext(Instance<ConnectionProvider<W, R>> i) {
                // Wrap the new host's provider with its failure listener, publish the updated
                // list, and arrange removal when the host's lifecycle terminates.
                final ConnectionProvider<W, R> provider = i.getValue();
                final TcpClientEventListener listener = eventListenerFactory.call(newFailureListener(provider, o));
                final HostConnectionProvider<W, R> hcp = new HostConnectionProvider<>(i.getValue(), listener);
                addHost(hcp, o);
                bindToInstanceLifecycle(i, hcp, o);
            }
        };
    }

    /**
     * Removes {@code toRemove} from the live list and re-emits the list downstream.
     */
    protected void removeHost(HostConnectionProvider<W, R> toRemove,
                              Subscriber<? super List<HostConnectionProvider<W, R>>> hostListListener) {
        /*It's a copy-on-write list, so removal makes a copy with no interference to reads*/
        currentHosts.remove(toRemove);
        hostListListener.onNext(currentHosts);
    }

    /**
     * Adds {@code toAdd} to the live list and re-emits the list downstream.
     */
    protected void addHost(HostConnectionProvider<W, R> toAdd,
                           Subscriber<? super List<HostConnectionProvider<W, R>>> hostListListener) {
        /*It's a copy-on-write list, so addition makes a copy with no interference to reads*/
        currentHosts.add(toAdd);
        hostListListener.onNext(currentHosts);
    }

    /**
     * Creates the {@link FailureListener} handed to {@code eventListenerFactory} for this host.
     * remove() drops the host permanently; quarantine() drops it and schedules a timed re-add
     * (with a freshly created event listener).
     */
    protected FailureListener newFailureListener(final ConnectionProvider<W, R> provider,
                                                 final Subscriber<? super List<HostConnectionProvider<W, R>>> hostListListener) {
        return new FailureListener() {
            @Override
            public void remove() {
                // Removal by provider identity; the listener paired with it is irrelevant for equality.
                HostConnectionProvider.removeFrom(currentHosts, provider);
                hostListListener.onNext(currentHosts);
            }

            @Override
            public void quarantine(long quarantineDuration, TimeUnit timeUnit) {
                quarantine(quarantineDuration, timeUnit, Schedulers.computation());
            }

            @Override
            public void quarantine(long quarantineDuration, TimeUnit timeUnit, Scheduler timerScheduler) {
                final FailureListener fl = this;
                remove();
                // Re-add the host after the quarantine window, with a brand-new event listener
                // so stale failure state is not carried over.
                Observable.timer(quarantineDuration, timeUnit, timerScheduler)
                          .subscribe(new Action1<Long>() {
                              @Override
                              public void call(Long aLong) {
                                  TcpClientEventListener listener = eventListenerFactory.call(fl);
                                  addHost(new HostConnectionProvider<W, R>(provider, listener), hostListListener);
                              }
                          }, new Action1<Throwable>() {
                              @Override
                              public void call(Throwable throwable) {
                                  logger.error("Error while adding back a quarantine instance to the load balancer.", throwable);
                              }
                          });
            }
        };
    }

    /**
     * Removes the host from the live list once its lifecycle Observable terminates,
     * regardless of whether it completes or errors.
     */
    protected void bindToInstanceLifecycle(Instance<ConnectionProvider<W, R>> i,
                                           final HostConnectionProvider<W, R> hcp,
                                           final Subscriber<? super List<HostConnectionProvider<W, R>>> o) {
        i.getLifecycle()
         .finallyDo(new Action0() {
             @Override
             public void call() {
                 removeHost(hcp, o);
             }
         })
         .subscribe(Actions.empty(), new Action1<Throwable>() {
             @Override
             public void call(Throwable throwable) {
                 // Do nothing as finallyDo takes care of both complete and error.
             }
         });
    }
}
6,250
0
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/protocol/WeightComparator.java
package netflix.ocelli.rxnetty.protocol;

import netflix.ocelli.rxnetty.internal.HostConnectionProvider;

import java.util.Comparator;

/**
 * A comparator ordering hosts by the weight reported by their {@link WeightAware} event listeners.
 *
 * <p>Both providers' event listeners must implement {@link WeightAware}; a non-conforming
 * listener results in a {@link ClassCastException}.
 */
public class WeightComparator<W, R> implements Comparator<HostConnectionProvider<W, R>> {

    @Override
    public int compare(HostConnectionProvider<W, R> cp1, HostConnectionProvider<W, R> cp2) {
        WeightAware wa1 = (WeightAware) cp1.getEventsListener();
        WeightAware wa2 = (WeightAware) cp2.getEventsListener();
        // Fix: the original `w1 > w2 ? 1 : -1` never returned 0 and returned -1 for BOTH
        // orderings of equal weights, violating the Comparator contract
        // (sgn(compare(a, b)) must equal -sgn(compare(b, a))).
        return Integer.compare(wa1.getWeight(), wa2.getWeight());
    }
}
6,251
0
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/protocol/WeightAware.java
package netflix.ocelli.rxnetty.protocol;

import io.reactivex.netty.client.ConnectionProvider;
import netflix.ocelli.loadbalancer.RandomWeightedLoadBalancer;

/**
 * A property for RxNetty listeners that also supply a weight for their associated host,
 * typically consumed by a {@link RandomWeightedLoadBalancer}.
 */
public interface WeightAware {

    /**
     * Returns the current weight of the host associated with this object.
     *
     * <b>This method is invoked on every {@link ConnectionProvider#nextConnection()} call, for
     * every active host; avoid any costly computation here — it should typically be a lookup of
     * an already calculated value.</b>
     *
     * @return The current weight.
     */
    int getWeight();
}
6,252
0
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/protocol
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/protocol/tcp/WeightedTcpClientListener.java
package netflix.ocelli.rxnetty.protocol.tcp;

import io.reactivex.netty.protocol.tcp.client.events.TcpClientEventListener;
import netflix.ocelli.rxnetty.protocol.WeightAware;

/**
 * A {@link TcpClientEventListener} contract with an additional property defined by {@link WeightAware}.
 */
// Fix: this TCP listener previously extended HttpClientEventsListener (copy-paste from the HTTP
// variant), contradicting its javadoc, package, and the (otherwise unused) TcpClientEventListener
// import. It now extends TcpClientEventListener as documented.
public abstract class WeightedTcpClientListener extends TcpClientEventListener implements WeightAware {
}
6,253
0
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/protocol
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/protocol/tcp/TcpLoadBalancer.java
package netflix.ocelli.rxnetty.protocol.tcp;

import io.reactivex.netty.client.ConnectionProvider;
import io.reactivex.netty.protocol.tcp.client.TcpClient;
import io.reactivex.netty.protocol.tcp.client.events.TcpClientEventListener;
import netflix.ocelli.Instance;
import netflix.ocelli.LoadBalancerStrategy;
import netflix.ocelli.loadbalancer.ChoiceOfTwoLoadBalancer;
import netflix.ocelli.loadbalancer.RandomWeightedLoadBalancer;
import netflix.ocelli.loadbalancer.RoundRobinLoadBalancer;
import netflix.ocelli.loadbalancer.weighting.LinearWeightingStrategy;
import netflix.ocelli.rxnetty.FailureListener;
import netflix.ocelli.rxnetty.internal.AbstractLoadBalancer;
import netflix.ocelli.rxnetty.internal.HostConnectionProvider;
import netflix.ocelli.rxnetty.protocol.WeightAware;
import netflix.ocelli.rxnetty.protocol.WeightComparator;
import rx.Observable;
import rx.functions.Func1;

import java.net.SocketAddress;

/**
 * A TCP load balancer to be used with {@link TcpClient}.
 *
 * <h2>Failure detection</h2>
 *
 * For every host that this load balancer connects, it provides a way to register a {@link TcpClientEventListener}
 * instance that can detect failures based on the various events received. Upon detecting the failure, an appropriate
 * action can be taken for the host, using the provided {@link FailureListener}.
 *
 * <h2>Use with {@link TcpClient}</h2>
 *
 * In order to use this load balancer with RxNetty clients, one has to convert it to an instance of
 * {@link ConnectionProvider} by calling {@link #toConnectionProvider()}
 *
 * @param <W> Type of Objects written on the connections created by this load balancer.
 * @param <R> Type of Objects read from the connections created by this load balancer.
 */
public class TcpLoadBalancer<W, R> extends AbstractLoadBalancer<W, R> {

    // Shared no-op factory used when no failure detection is requested.
    private static final Func1<FailureListener, TcpClientEventListener> NO_LISTENER_FACTORY =
            new Func1<FailureListener, TcpClientEventListener>() {
                @Override
                public TcpClientEventListener call(FailureListener failureListener) {
                    return null;
                }
            };

    private TcpLoadBalancer(Observable<Instance<SocketAddress>> hosts,
                            LoadBalancerStrategy<HostConnectionProvider<W, R>> loadBalancer) {
        this(hosts, loadBalancer, NO_LISTENER_FACTORY);
    }

    /**
     * Typically, static methods in this class would be used to create new instances of the load balancer with known
     * load balancing strategies. However, for any custom load balancing schemes, one can use this constructor directly.
     *
     * @param hosts Stream of hosts to use for load balancing.
     * @param loadBalancer The load balancing strategy.
     * @param eventListenerFactory A factory for creating new {@link TcpClientEventListener} per host.
     */
    public TcpLoadBalancer(Observable<Instance<SocketAddress>> hosts,
                           LoadBalancerStrategy<HostConnectionProvider<W, R>> loadBalancer,
                           Func1<FailureListener, ? extends TcpClientEventListener> eventListenerFactory) {
        super(hosts, eventListenerFactory, loadBalancer);
    }

    /**
     * Creates a new load balancer using a round-robin load balancing strategy ({@link RoundRobinLoadBalancer}) over the
     * passed stream of hosts. The hosts ({@link SocketAddress}) emitted by the passed stream are used till their
     * lifecycle ends ({@code Observable} returned by {@link Instance#getLifecycle()} terminates).
     *
     * For using any failure detection schemes for the hosts, use {@link #roundRobin(Observable, Func1)} instead.
     *
     * @param hosts Stream of hosts to use for load balancing.
     *
     * @param <W> Type of Objects written on the connections created by this load balancer.
     * @param <R> Type of Objects read from the connections created by this load balancer.
     *
     * @return New load balancer instance.
     */
    public static <W, R> TcpLoadBalancer<W, R> roundRobin(Observable<Instance<SocketAddress>> hosts) {
        return new TcpLoadBalancer<>(hosts, new RoundRobinLoadBalancer<HostConnectionProvider<W, R>>());
    }

    /**
     * Creates a new load balancer using a round-robin load balancing strategy ({@link RoundRobinLoadBalancer}) over the
     * passed stream of hosts. The hosts ({@link SocketAddress}) emitted by the passed stream are used till their
     * lifecycle ends ({@code Observable} returned by {@link Instance#getLifecycle()} terminates) or are explicitly
     * removed by the passed failure detector.
     *
     * @param hosts Stream of hosts to use for load balancing.
     * @param failureDetector A factory for creating a {@link TcpClientEventListener} per host. The listeners based on
     * any criterion can then remove the host from the load balancing pool.
     *
     * @param <W> Type of Objects written on the connections created by this load balancer.
     * @param <R> Type of Objects read from the connections created by this load balancer.
     *
     * @return New load balancer instance.
     */
    public static <W, R> TcpLoadBalancer<W, R> roundRobin(Observable<Instance<SocketAddress>> hosts,
                                                          Func1<FailureListener, TcpClientEventListener> failureDetector) {
        return new TcpLoadBalancer<>(hosts, new RoundRobinLoadBalancer<HostConnectionProvider<W, R>>(),
                                     failureDetector);
    }

    /**
     * Creates a new load balancer using a weighted random load balancing strategy ({@link RandomWeightedLoadBalancer})
     * over the passed stream of hosts.
     *
     * @param hosts Stream of hosts to use for load balancing.
     * @param listenerFactory A factory for creating {@link WeightedTcpClientListener} per active host.
     *
     * @param <W> Type of Objects written on the connections created by this load balancer.
     * @param <R> Type of Objects read from the connections created by this load balancer.
     *
     * @return New load balancer instance.
     *
     * @deprecated Use {@link #weightedRandom(Observable, Func1)}; this method name contains a typo and is
     * retained only for backwards compatibility.
     */
    @Deprecated
    public static <W, R> TcpLoadBalancer<W, R> weigthedRandom(Observable<Instance<SocketAddress>> hosts,
                                                              Func1<FailureListener, WeightedTcpClientListener> listenerFactory) {
        return weightedRandom(hosts, listenerFactory);
    }

    /**
     * Creates a new load balancer using a weighted random load balancing strategy ({@link RandomWeightedLoadBalancer})
     * over the passed stream of hosts.
     *
     * @param hosts Stream of hosts to use for load balancing.
     * @param listenerFactory A factory for creating {@link WeightedTcpClientListener} per active host.
     *
     * @param <W> Type of Objects written on the connections created by this load balancer.
     * @param <R> Type of Objects read from the connections created by this load balancer.
     *
     * @return New load balancer instance.
     */
    public static <W, R> TcpLoadBalancer<W, R> weightedRandom(Observable<Instance<SocketAddress>> hosts,
                                                              Func1<FailureListener, WeightedTcpClientListener> listenerFactory) {
        LinearWeightingStrategy<HostConnectionProvider<W, R>> ws = new LinearWeightingStrategy<>(
                new Func1<HostConnectionProvider<W, R>, Integer>() {
                    @Override
                    public Integer call(HostConnectionProvider<W, R> cp) {
                        // Fix: cast to the WeightAware contract instead of WeightedHttpClientListener.
                        // The factory supplies WeightedTcpClientListener instances, so the previous
                        // HTTP-typed cast always failed with a ClassCastException at runtime.
                        WeightAware el = (WeightAware) cp.getEventsListener();
                        return el.getWeight();
                    }
                });
        return new TcpLoadBalancer<W, R>(hosts, new RandomWeightedLoadBalancer<HostConnectionProvider<W, R>>(ws),
                                         listenerFactory);
    }

    /**
     * Creates a new load balancer using a power of two choices load balancing strategy ({@link ChoiceOfTwoLoadBalancer})
     * over the passed stream of hosts.
     *
     * @param hosts Stream of hosts to use for load balancing.
     * @param listenerFactory A factory for creating {@link WeightedTcpClientListener} per active host.
     *
     * @param <W> Type of Objects written on the connections created by this load balancer.
     * @param <R> Type of Objects read from the connections created by this load balancer.
     *
     * @return New load balancer instance.
     */
    public static <W, R> TcpLoadBalancer<W, R> choiceOfTwo(Observable<Instance<SocketAddress>> hosts,
                                                           Func1<FailureListener, WeightedTcpClientListener> listenerFactory) {
        return new TcpLoadBalancer<W, R>(hosts, new ChoiceOfTwoLoadBalancer<>(new WeightComparator<W, R>()),
                                         listenerFactory);
    }
}
6,254
0
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/protocol
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/protocol/tcp/ComparableTcpClientListener.java
package netflix.ocelli.rxnetty.protocol.tcp;

import io.reactivex.netty.protocol.tcp.client.events.TcpClientEventListener;
import netflix.ocelli.loadbalancer.ChoiceOfTwoLoadBalancer;

/**
 * A {@link TcpClientEventListener} contract that is also {@link Comparable}, so listeners can be
 * ranked against one another. Typically used with load balancers that pick the better of two
 * candidate servers, e.g. {@link ChoiceOfTwoLoadBalancer}.
 */
public abstract class ComparableTcpClientListener extends TcpClientEventListener
        implements Comparable<ComparableTcpClientListener> {
}
6,255
0
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/protocol
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/protocol/http/WeightedHttpClientListener.java
package netflix.ocelli.rxnetty.protocol.http;

import io.reactivex.netty.protocol.http.client.events.HttpClientEventsListener;
import netflix.ocelli.rxnetty.protocol.WeightAware;

/**
 * An {@link HttpClientEventsListener} that additionally reports a host weight via the
 * {@link WeightAware} property.
 */
public abstract class WeightedHttpClientListener extends HttpClientEventsListener implements WeightAware {
}
6,256
0
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/protocol
Create_ds/ocelli/ocelli-rxnetty/src/main/java/netflix/ocelli/rxnetty/protocol/http/HttpLoadBalancer.java
package netflix.ocelli.rxnetty.protocol.http;

import io.reactivex.netty.client.ConnectionProvider;
import io.reactivex.netty.protocol.http.client.HttpClient;
import io.reactivex.netty.protocol.http.client.events.HttpClientEventsListener;
import netflix.ocelli.Instance;
import netflix.ocelli.LoadBalancerStrategy;
import netflix.ocelli.loadbalancer.ChoiceOfTwoLoadBalancer;
import netflix.ocelli.loadbalancer.RandomWeightedLoadBalancer;
import netflix.ocelli.loadbalancer.RoundRobinLoadBalancer;
import netflix.ocelli.loadbalancer.weighting.LinearWeightingStrategy;
import netflix.ocelli.rxnetty.FailureListener;
import netflix.ocelli.rxnetty.internal.AbstractLoadBalancer;
import netflix.ocelli.rxnetty.internal.HostConnectionProvider;
import netflix.ocelli.rxnetty.protocol.WeightComparator;
import rx.Observable;
import rx.functions.Func1;

import java.net.SocketAddress;

/**
 * An HTTP load balancer to be used with {@link HttpClient}.
 *
 * <h2>Failure detection</h2>
 *
 * For every host that this load balancer connects, it provides a way to register a {@link HttpClientEventsListener}
 * instance that can detect failures based on the various events received. Upon detecting the failure, an appropriate
 * action can be taken for the host, using the provided {@link FailureListener}.
 *
 * <h2>Use with {@link HttpClient}</h2>
 *
 * In order to use this load balancer with RxNetty clients, one has to convert it to an instance of
 * {@link ConnectionProvider} by calling {@link #toConnectionProvider()}
 *
 * @param <W> Type of Objects written on the connections created by this load balancer.
 * @param <R> Type of Objects read from the connections created by this load balancer.
 */
public class HttpLoadBalancer<W, R> extends AbstractLoadBalancer<W, R> {

    // Shared no-op factory used when no failure detection is requested.
    private static final Func1<FailureListener, HttpClientEventsListener> NO_LISTENER_FACTORY =
            new Func1<FailureListener, HttpClientEventsListener>() {
                @Override
                public HttpClientEventsListener call(FailureListener failureListener) {
                    return null;
                }
            };

    private HttpLoadBalancer(Observable<Instance<SocketAddress>> hosts,
                             LoadBalancerStrategy<HostConnectionProvider<W, R>> loadBalancer) {
        this(hosts, loadBalancer, NO_LISTENER_FACTORY);
    }

    /**
     * Typically, static methods in this class would be used to create new instances of the load balancer with known
     * load balancing strategies. However, for any custom load balancing schemes, one can use this constructor directly.
     *
     * @param hosts Stream of hosts to use for load balancing.
     * @param loadBalancer The load balancing strategy.
     * @param eventListenerFactory A factory for creating new {@link HttpClientEventsListener} per host.
     */
    public HttpLoadBalancer(Observable<Instance<SocketAddress>> hosts,
                            LoadBalancerStrategy<HostConnectionProvider<W, R>> loadBalancer,
                            Func1<FailureListener, ? extends HttpClientEventsListener> eventListenerFactory) {
        super(hosts, eventListenerFactory, loadBalancer);
    }

    /**
     * Creates a new load balancer using a round-robin load balancing strategy ({@link RoundRobinLoadBalancer}) over the
     * passed stream of hosts. The hosts ({@link SocketAddress}) emitted by the passed stream are used till their
     * lifecycle ends ({@code Observable} returned by {@link Instance#getLifecycle()} terminates).
     *
     * For using any failure detection schemes for the hosts, use {@link #roundRobin(Observable, Func1)} instead.
     *
     * @param hosts Stream of hosts to use for load balancing.
     *
     * @param <W> Type of Objects written on the connections created by this load balancer.
     * @param <R> Type of Objects read from the connections created by this load balancer.
     *
     * @return New load balancer instance.
     */
    public static <W, R> HttpLoadBalancer<W, R> roundRobin(Observable<Instance<SocketAddress>> hosts) {
        return new HttpLoadBalancer<W, R>(hosts, new RoundRobinLoadBalancer<HostConnectionProvider<W, R>>());
    }

    /**
     * Creates a new load balancer using a round-robin load balancing strategy ({@link RoundRobinLoadBalancer}) over the
     * passed stream of hosts. The hosts ({@link SocketAddress}) emitted by the passed stream are used till their
     * lifecycle ends ({@code Observable} returned by {@link Instance#getLifecycle()} terminates) or are explicitly
     * removed by the passed failure detector.
     *
     * @param hosts Stream of hosts to use for load balancing.
     * @param failureDetector A factory for creating a {@link HttpClientEventsListener} per host. The listeners based on
     * any criterion can then remove the host from the load balancing pool.
     *
     * @param <W> Type of Objects written on the connections created by this load balancer.
     * @param <R> Type of Objects read from the connections created by this load balancer.
     *
     * @return New load balancer instance.
     */
    public static <W, R> HttpLoadBalancer<W, R> roundRobin(Observable<Instance<SocketAddress>> hosts,
                                                           Func1<FailureListener, HttpClientEventsListener> failureDetector) {
        return new HttpLoadBalancer<>(hosts, new RoundRobinLoadBalancer<HostConnectionProvider<W, R>>(),
                                      failureDetector);
    }

    /**
     * Creates a new load balancer using a weighted random load balancing strategy ({@link RandomWeightedLoadBalancer})
     * over the passed stream of hosts.
     *
     * @param hosts Stream of hosts to use for load balancing.
     * @param listenerFactory A factory for creating {@link WeightedHttpClientListener} per active host.
     *
     * @param <W> Type of Objects written on the connections created by this load balancer.
     * @param <R> Type of Objects read from the connections created by this load balancer.
     *
     * @return New load balancer instance.
     *
     * @deprecated Use {@link #weightedRandom(Observable, Func1)}; this method name contains a typo and is
     * retained only for backwards compatibility.
     */
    @Deprecated
    public static <W, R> HttpLoadBalancer<W, R> weigthedRandom(Observable<Instance<SocketAddress>> hosts,
                                                               Func1<FailureListener, WeightedHttpClientListener> listenerFactory) {
        return weightedRandom(hosts, listenerFactory);
    }

    /**
     * Creates a new load balancer using a weighted random load balancing strategy ({@link RandomWeightedLoadBalancer})
     * over the passed stream of hosts.
     *
     * @param hosts Stream of hosts to use for load balancing.
     * @param listenerFactory A factory for creating {@link WeightedHttpClientListener} per active host.
     *
     * @param <W> Type of Objects written on the connections created by this load balancer.
     * @param <R> Type of Objects read from the connections created by this load balancer.
     *
     * @return New load balancer instance.
     */
    public static <W, R> HttpLoadBalancer<W, R> weightedRandom(Observable<Instance<SocketAddress>> hosts,
                                                               Func1<FailureListener, WeightedHttpClientListener> listenerFactory) {
        LinearWeightingStrategy<HostConnectionProvider<W, R>> ws = new LinearWeightingStrategy<>(
                new Func1<HostConnectionProvider<W, R>, Integer>() {
                    @Override
                    public Integer call(HostConnectionProvider<W, R> cp) {
                        WeightedHttpClientListener el = (WeightedHttpClientListener) cp.getEventsListener();
                        return el.getWeight();
                    }
                });
        return new HttpLoadBalancer<W, R>(hosts, new RandomWeightedLoadBalancer<HostConnectionProvider<W, R>>(ws),
                                          listenerFactory);
    }

    /**
     * Creates a new load balancer using a power of two choices load balancing strategy ({@link ChoiceOfTwoLoadBalancer})
     * over the passed stream of hosts.
     *
     * @param hosts Stream of hosts to use for load balancing.
     * @param listenerFactory A factory for creating {@link WeightedHttpClientListener} per active host.
     *
     * @param <W> Type of Objects written on the connections created by this load balancer.
     * @param <R> Type of Objects read from the connections created by this load balancer.
     *
     * @return New load balancer instance.
     */
    public static <W, R> HttpLoadBalancer<W, R> choiceOfTwo(Observable<Instance<SocketAddress>> hosts,
                                                            Func1<FailureListener, WeightedHttpClientListener> listenerFactory) {
        return new HttpLoadBalancer<W, R>(hosts, new ChoiceOfTwoLoadBalancer<>(new WeightComparator<W, R>()),
                                          listenerFactory);
    }
}
6,257
0
Create_ds/ocelli/ocelli-eureka2/src/test/java/netflix/ocelli
Create_ds/ocelli/ocelli-eureka2/src/test/java/netflix/ocelli/eureka2/Eureka2InterestManagerTest.java
package netflix.ocelli.eureka2;

import com.netflix.eureka2.client.EurekaInterestClient;
import com.netflix.eureka2.interests.ChangeNotification;
import com.netflix.eureka2.interests.Interest;
import com.netflix.eureka2.interests.Interests;
import com.netflix.eureka2.registry.datacenter.BasicDataCenterInfo;
import com.netflix.eureka2.registry.instance.InstanceInfo;
import com.netflix.eureka2.registry.instance.ServicePort;
import netflix.ocelli.Instance;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.runners.MockitoJUnitRunner;
import rx.Observable;

import java.net.SocketAddress;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.TimeUnit;

/**
 * Verifies that {@link Eureka2InterestManager} converts Eureka change notifications into
 * Ocelli {@link Instance} objects for a given interest.
 */
@RunWith(MockitoJUnitRunner.class)
public class Eureka2InterestManagerTest {

    @Mock
    private EurekaInterestClient clientMock;

    private Eureka2InterestManager membershipSource;

    public static final InstanceInfo INSTANCE_1 = new InstanceInfo.Builder()
            .withId("id_serviceA")
            .withApp("ServiceA")
            .withAppGroup("ServiceA_1")
            .withStatus(InstanceInfo.Status.UP)
            .withPorts(new HashSet<ServicePort>(Collections.singletonList(new ServicePort(8000, false))))
            .withDataCenterInfo(BasicDataCenterInfo.fromSystemData())
            .build();

    public static final InstanceInfo INSTANCE_2 = new InstanceInfo.Builder()
            .withId("id_serviceA_2")
            .withApp("ServiceA")
            .withAppGroup("ServiceA_1")
            .withStatus(InstanceInfo.Status.UP)
            .withPorts(new HashSet<ServicePort>(Collections.singletonList(new ServicePort(8001, false))))
            .withDataCenterInfo(BasicDataCenterInfo.fromSystemData())
            .build();

    public static final ChangeNotification<InstanceInfo> ADD_INSTANCE_1 =
            new ChangeNotification<InstanceInfo>(ChangeNotification.Kind.Add, INSTANCE_1);
    public static final ChangeNotification<InstanceInfo> ADD_INSTANCE_2 =
            new ChangeNotification<InstanceInfo>(ChangeNotification.Kind.Add, INSTANCE_2);

    @Before
    public void setUp() throws Exception {
        membershipSource = new Eureka2InterestManager(clientMock);
    }

    @Test
    public void testVipBasedInterest() throws Exception {
        Interest<InstanceInfo> interest = Interests.forVips("test-vip");
        Mockito.when(clientMock.forInterest(interest)).thenReturn(Observable.just(ADD_INSTANCE_1, ADD_INSTANCE_2));

        List<Instance<SocketAddress>> instances = membershipSource
                .forInterest(interest)
                .take(2)
                .toList().toBlocking()
                .toFuture()
                .get(1, TimeUnit.SECONDS);

        // Fix: replaced the leftover debug System.out.println with an assertion on the content.
        Assert.assertEquals(2, instances.size());
        Assert.assertNotNull(instances.get(0));
        Assert.assertNotNull(instances.get(1));
    }
}
6,258
0
Create_ds/ocelli/ocelli-eureka2/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-eureka2/src/main/java/netflix/ocelli/eureka2/Eureka2InterestManager.java
package netflix.ocelli.eureka2;

import com.netflix.eureka2.client.EurekaInterestClient;
import com.netflix.eureka2.client.Eurekas;
import com.netflix.eureka2.client.resolver.ServerResolver;
import com.netflix.eureka2.interests.ChangeNotification;
import com.netflix.eureka2.interests.Interest;
import com.netflix.eureka2.interests.Interests;
import com.netflix.eureka2.registry.instance.InstanceInfo;
import com.netflix.eureka2.registry.instance.ServicePort;
import netflix.ocelli.Instance;
import netflix.ocelli.InstanceManager;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Subscriber;
import rx.functions.Action1;
import rx.functions.Func1;
import javax.inject.Inject;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.HashSet;

/**
 * Bridges a Eureka2 {@link EurekaInterestClient} interest stream into an ocelli
 * {@link Instance} stream of socket addresses, suitable for feeding a load balancer.
 *
 * @author Nitesh Kant
 */
public class Eureka2InterestManager {

    // Client used to subscribe to registry change notifications.
    private final EurekaInterestClient client;

    // Shared stateless mapper from InstanceInfo to SocketAddress; safe to reuse across calls.
    private static final DefaultMapper defaultMapper = new DefaultMapper();

    /**
     * Builds a dedicated interest client that resolves Eureka servers via the given resolver.
     *
     * @param eurekaResolver resolver locating the Eureka read servers
     */
    public Eureka2InterestManager(ServerResolver eurekaResolver) {
        client = Eurekas.newInterestClientBuilder().withServerResolver(eurekaResolver).build();
    }

    /**
     * Uses an externally supplied (typically injected) interest client.
     *
     * @param client interest client to subscribe through
     */
    @Inject
    public Eureka2InterestManager(EurekaInterestClient client) {
        this.client = client;
    }

    /**
     * Convenience overload: instance stream for one or more vip addresses.
     *
     * @param vips vip names to watch
     * @return hot stream of {@link Instance} lifecycle events
     */
    public Observable<Instance<SocketAddress>> forVip(String... vips) {
        return forInterest(Interests.forVips(vips));
    }

    /**
     * Instance stream for an arbitrary interest using the default InstanceInfo-to-address mapping.
     *
     * @param interest eureka interest to watch
     * @return hot stream of {@link Instance} lifecycle events
     */
    public Observable<Instance<SocketAddress>> forInterest(Interest<InstanceInfo> interest) {
        return forInterest(interest, defaultMapper);
    }

    /**
     * Instance stream for an arbitrary interest with a caller-supplied mapping from
     * {@link InstanceInfo} to {@link SocketAddress}. Each subscription opens its own
     * upstream eureka subscription and its own {@link InstanceManager}.
     *
     * @param interest           eureka interest to watch
     * @param instanceInfoToHost mapping from registry record to routable address
     * @return hot stream of {@link Instance} lifecycle events
     */
    public Observable<Instance<SocketAddress>> forInterest(final Interest<InstanceInfo> interest,
                                                           final Func1<InstanceInfo, SocketAddress> instanceInfoToHost) {
        return Observable.create(new OnSubscribe<Instance<SocketAddress>>() {
            @Override
            public void call(Subscriber<? super Instance<SocketAddress>> s) {
                final InstanceManager<SocketAddress> subject = InstanceManager.create();
                // Tie the upstream eureka subscription to the subscriber's lifecycle so it is
                // torn down when the subscriber unsubscribes.
                s.add(client
                        .forInterest(interest)
                        .subscribe(new Action1<ChangeNotification<InstanceInfo>>() {
                            @Override
                            public void call(ChangeNotification<InstanceInfo> notification) {
                                SocketAddress host = instanceInfoToHost.call(notification.getData());
                                switch (notification.getKind()) {
                                case Add:
                                    subject.add(host);
                                    break;
                                case Delete:
                                    subject.remove(host);
                                    break;
                                case Modify:
                                    // NOTE(review): both calls use the address derived from the NEW
                                    // InstanceInfo. If a Modify changed the address, the entry for
                                    // the OLD address is never removed -- confirm upstream semantics.
                                    subject.remove(host);
                                    subject.add(host);
                                    break;
                                default:
                                    // BufferSentinel and other non-membership kinds are ignored.
                                    break;
                                }
                            }
                        }));
                subject.subscribe(s);
            }
        });
    }

    /**
     * Default mapping: pairs the data center's default IP address with the first
     * service port reported by the instance.
     */
    protected static class DefaultMapper implements Func1<InstanceInfo, SocketAddress> {
        @Override
        public SocketAddress call(InstanceInfo instanceInfo) {
            String ipAddress = instanceInfo.getDataCenterInfo().getDefaultAddress().getIpAddress();
            HashSet<ServicePort> servicePorts = instanceInfo.getPorts();
            // NOTE(review): assumes at least one port is registered; iterator().next() throws
            // NoSuchElementException otherwise -- confirm instances always publish a port.
            ServicePort portToUse = servicePorts.iterator().next();
            return new InetSocketAddress(ipAddress, portToUse.getPort());
        }
    }
}
6,259
0
Create_ds/ocelli/ocelli-eureka/src/test/java/netflix/ocelli
Create_ds/ocelli/ocelli-eureka/src/test/java/netflix/ocelli/eureka/EurekaInterestManagerTest.java
package netflix.ocelli.eureka; import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import junit.framework.Assert; import netflix.ocelli.InstanceCollector; import netflix.ocelli.util.RxUtil; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.runners.MockitoJUnitRunner; import rx.schedulers.TestScheduler; import com.google.common.collect.Sets; import com.netflix.appinfo.InstanceInfo; import com.netflix.appinfo.InstanceInfo.InstanceStatus; import com.netflix.discovery.DiscoveryClient; import com.netflix.discovery.shared.Application; @RunWith(MockitoJUnitRunner.class) public class EurekaInterestManagerTest { @Mock private DiscoveryClient client; @Mock private Application application; @Test public void testAddRemoveInstances() { InstanceInfo i1 = createInstance(1); InstanceInfo i2 = createInstance(2); InstanceInfo i3 = createInstance(3); InstanceInfo i4 = createInstance(4); Mockito.when(client.getApplication("foo")).thenReturn(application); AtomicReference<List<InstanceInfo>> result = new AtomicReference<List<InstanceInfo>>(); TestScheduler scheduler = new TestScheduler(); EurekaInterestManager eureka = new EurekaInterestManager(client); eureka.newInterest() .forApplication("foo") .withRefreshInterval(1, TimeUnit.SECONDS) .withScheduler(scheduler) .asObservable() .compose(InstanceCollector.<InstanceInfo>create()) .subscribe(RxUtil.set(result)); Mockito.when(application.getInstances()).thenReturn(Arrays.asList(i1, i2)); scheduler.advanceTimeBy(10, TimeUnit.SECONDS); Assert.assertEquals(Sets.newHashSet(i2, i1), Sets.newHashSet(result.get())); Mockito.when(application.getInstances()).thenReturn(Arrays.asList(i1, i2, i3)); scheduler.advanceTimeBy(10, TimeUnit.SECONDS); Assert.assertEquals(Sets.newHashSet(i3, i2, i1), Sets.newHashSet(result.get())); 
Mockito.when(application.getInstances()).thenReturn(Arrays.asList(i3, i4)); scheduler.advanceTimeBy(10, TimeUnit.SECONDS); Assert.assertEquals(Sets.newHashSet(i3, i4), Sets.newHashSet(result.get())); Mockito.when(application.getInstances()).thenReturn(Arrays.<InstanceInfo>asList()); scheduler.advanceTimeBy(10, TimeUnit.SECONDS); Assert.assertEquals(Sets.newHashSet(), Sets.newHashSet(result.get())); } InstanceInfo createInstance(int id) { return InstanceInfo.Builder.newBuilder() .setHostName("localhost:800" + id) .setAppName("foo") .setStatus(InstanceStatus.UP) .build(); } }
6,260
0
Create_ds/ocelli/ocelli-eureka/src/main/java/netflix/ocelli
Create_ds/ocelli/ocelli-eureka/src/main/java/netflix/ocelli/eureka/EurekaInterestManager.java
package netflix.ocelli.eureka; import com.netflix.appinfo.InstanceInfo; import com.netflix.discovery.DiscoveryClient; import netflix.ocelli.Instance; import netflix.ocelli.SnapshotToInstance; import rx.Observable; import rx.Scheduler; import rx.functions.Func0; import rx.functions.Func1; import rx.schedulers.Schedulers; import javax.inject.Inject; import java.util.List; import java.util.concurrent.TimeUnit; /** * Wrapper for v1 DisoveryClient which offers a convenient DSL to express interests * in host streams. * * {@code * <pre> * RoundRobinLoadBalancer lb = RoundRobinLoadBalancer.create(); * * EurekaInterestManager manager = new EurekaInterestMangaer(discoveryClient); * Subscription sub = manager * .newInterest() * .forApplication("applicationName") * .withRefreshInterval(30, TimeUnit.SECONDS) * .withScheduler(scheduler) * .asObservable() * .compose(InstanceCollector.<InstanceInfo>create()) * .subscribe(lb); * * lb.flatMap(operation); * </pre> * } * * @author elandau */ public class EurekaInterestManager { private static final int DEFAULT_REFRESH_RATE = 30; private final DiscoveryClient client; @Inject public EurekaInterestManager(DiscoveryClient client) { this.client = client; } public InterestDsl newInterest() { return new InterestDsl(client); } /** * DSL to simplify specifying the interest * * @author elandau */ public static class InterestDsl { private final DiscoveryClient client; private String appName; private String vip; private boolean secure = false; private String region; private long interval = DEFAULT_REFRESH_RATE; private TimeUnit intervalUnits = TimeUnit.SECONDS; private Scheduler scheduler = Schedulers.computation(); private InterestDsl(DiscoveryClient client) { this.client = client; } public InterestDsl withScheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } public InterestDsl withRefreshInterval(long interval, TimeUnit units) { this.interval = interval; this.intervalUnits = units; return this; } public InterestDsl 
forApplication(String appName) { this.appName = appName; return this; } public InterestDsl forVip(String vip) { this.vip = vip; return this; } public InterestDsl forRegion(String region) { this.region = region; return this; } public InterestDsl isSecure(boolean secure) { this.secure = secure; return this; } public Observable<Instance<InstanceInfo>> asObservable() { return create(createLister()); } private Observable<Instance<InstanceInfo>> create(final Func0<List<InstanceInfo>> lister) { return Observable .interval(interval, intervalUnits, scheduler) .onBackpressureDrop() .flatMap(new Func1<Long, Observable<List<InstanceInfo>>>() { @Override public Observable<List<InstanceInfo>> call(Long t1) { try { return Observable.just(lister.call()); } catch (Exception e) { return Observable.empty(); } } }, 1) .serialize() .compose(new SnapshotToInstance<InstanceInfo>()); } private Func0<List<InstanceInfo>> createLister() { if (appName != null) { if (vip != null) { return _forVipAndApplication(vip, appName, secure); } else { if (region != null) { return _forApplicationAndRegion(appName, region); } else { return _forApplication(appName); } } } else if (vip != null) { if (region != null) { return _forVip(vip, secure); } else { return _forVipAndRegion(vip, secure, region); } } throw new IllegalArgumentException("Interest combination not supported"); } private Func0<List<InstanceInfo>> _forApplication(final String appName) { return new Func0<List<InstanceInfo>>() { @Override public List<InstanceInfo> call() { return client.getApplication(appName).getInstances(); } }; } private Func0<List<InstanceInfo>> _forApplicationAndRegion(final String appName, final String region) { return new Func0<List<InstanceInfo>>() { @Override public List<InstanceInfo> call() { return client.getApplicationsForARegion(region).getRegisteredApplications(appName).getInstances(); } }; } private Func0<List<InstanceInfo>> _forVip(final String vip, final boolean secure) { return new Func0<List<InstanceInfo>>() 
{ @Override public List<InstanceInfo> call() { return client.getInstancesByVipAddress(vip, secure); } }; } private Func0<List<InstanceInfo>> _forVipAndRegion(final String vip, final boolean secure, final String region) { return new Func0<List<InstanceInfo>>() { @Override public List<InstanceInfo> call() { return client.getInstancesByVipAddress(vip, secure, region); } }; } private Func0<List<InstanceInfo>> _forVipAndApplication(final String vip, final String appName, final boolean secure) { return new Func0<List<InstanceInfo>>() { @Override public List<InstanceInfo> call() { return client.getInstancesByVipAddressAndAppName(vip, appName, secure); } }; } } }
6,261
0
Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples
Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples/utils/FileTestUtility.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.amazonaws.c3r.examples.utils;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

/**
 * Test helper producing temporary paths that remove themselves when the JVM exits.
 */
public abstract class FileTestUtility {
    /**
     * Creates a temporary directory with the prefix "temp" marked with deleteOnExit.
     *
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempDir() throws IOException {
        final Path directory = Files.createTempDirectory("temp");
        directory.toFile().deleteOnExit();
        return directory;
    }

    /**
     * Creates a temporary file with the prefix "testFile" and suffix ".tmp" marked with deleteOnExit.
     *
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempFile() throws IOException {
        return createTempFile("testFile", ".tmp");
    }

    /**
     * Creates a temporary file with the prefix and suffix provided marked with deleteOnExit.
     * The file is placed inside its own fresh temporary directory, which is also
     * removed on exit.
     *
     * @param prefix The prefix of the Path to create
     * @param suffix The suffix of the Path to create
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempFile(final String prefix, final String suffix) throws IOException {
        final Path parent = createTempDir();
        final Path file = Files.createTempFile(parent, prefix, suffix);
        file.toFile().deleteOnExit();
        return file;
    }

    /**
     * Resolves a temporary file with the file name provided marked with deleteOnExit.
     * The returned path is not created on disk; it merely points into a fresh
     * temporary directory.
     *
     * @param fileName The name of the Path to resolve
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be resolved
     */
    public static Path resolve(final String fileName) throws IOException {
        final Path file = createTempDir().resolve(fileName);
        file.toFile().deleteOnExit();
        return file;
    }
}
6,262
0
Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples
Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples/csv/CsvExampleTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.examples.csv; import com.amazonaws.c3r.examples.utils.FileTestUtility; import org.junit.jupiter.api.Test; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import static org.junit.jupiter.api.Assertions.assertTrue; public class CsvExampleTest { @Test public void roundTripTest() throws IOException { final Path inputCsv = Path.of("../samples/csv/data_sample_without_quotes.csv"); final Path encryptedCsv = FileTestUtility.createTempFile("encrypted", ".csv"); final Path decryptedCsv = FileTestUtility.createTempFile("decrypted", ".csv"); CsvExample.encrypt(inputCsv.toString(), encryptedCsv.toString()); assertTrue(Files.size(encryptedCsv) > 0); CsvExample.decrypt(encryptedCsv.toString(), decryptedCsv.toString()); assertTrue(Files.size(decryptedCsv) > 0); } }
6,263
0
Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples
Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples/csv/CsvNoHeaderExampleTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.examples.csv; import com.amazonaws.c3r.examples.utils.FileTestUtility; import org.junit.jupiter.api.Test; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import static org.junit.jupiter.api.Assertions.assertTrue; public class CsvNoHeaderExampleTest { @Test public void roundTripTest() throws IOException { final Path inputCsv = Path.of("../samples/csv/data_sample_no_headers.csv"); final Path encryptedCsv = FileTestUtility.createTempFile("encrypted", ".csv"); final Path decryptedCsv = FileTestUtility.createTempFile("decrypted", ".csv"); CsvNoHeaderExample.encrypt(inputCsv.toString(), encryptedCsv.toString()); assertTrue(Files.size(encryptedCsv) > 0); CsvNoHeaderExample.decrypt(encryptedCsv.toString(), decryptedCsv.toString()); assertTrue(Files.size(decryptedCsv) > 0); } }
6,264
0
Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples
Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples/parquet/ParquetExampleTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.examples.parquet; import com.amazonaws.c3r.examples.utils.FileTestUtility; import org.junit.jupiter.api.Test; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import static org.junit.jupiter.api.Assertions.assertTrue; public class ParquetExampleTest { @Test public void roundTripTest() throws IOException { final Path inputParquet = Path.of("../samples/parquet/data_sample.parquet"); final Path encryptedParquet = FileTestUtility.createTempFile("encrypted", ".parquet"); final Path decryptedParquet = FileTestUtility.createTempFile("decrypted", ".parquet"); ParquetExample.encrypt(inputParquet.toString(), encryptedParquet.toString()); assertTrue(Files.size(encryptedParquet) > 0); ParquetExample.decrypt(encryptedParquet.toString(), decryptedParquet.toString()); assertTrue(Files.size(decryptedParquet) > 0); } }
6,265
0
Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples
Create_ds/c3r/c3r-sdk-examples/src/test/java/com/amazonaws/c3r/examples/spark/SparkExampleTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.examples.spark; import com.amazonaws.c3r.config.TableSchema; import com.amazonaws.c3r.examples.utils.FileTestUtility; import com.amazonaws.c3r.json.GsonUtil; import org.junit.jupiter.api.Test; import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; public class SparkExampleTest { @Test public void roundTripTest() throws IOException { final Path source = Path.of("../samples/csv/data_sample_with_quotes.csv"); final Path encryptTarget = FileTestUtility.createTempDir(); final Path schemaFile = Path.of("../samples/schema/config_sample.json"); final TableSchema schema = GsonUtil.fromJson(Files.readString(schemaFile), TableSchema.class); SparkExample.encrypt(source.toString(), encryptTarget.toString(), schema); final List<File> encryptedCsvs = Arrays.stream(Objects.requireNonNull(encryptTarget.toFile().listFiles())) .filter(file -> file.getAbsolutePath().endsWith(".csv")) .collect(Collectors.toList()); for (File encryptedCsv : encryptedCsvs) { assertNotNull(encryptedCsv); assertTrue(encryptedCsv.exists()); assertTrue(Files.size(encryptedCsv.toPath()) > 0); } final Path decryptTarget = FileTestUtility.createTempDir(); for (File encryptedCsv : encryptedCsvs) { SparkExample.decrypt(encryptedCsv.getAbsolutePath(), decryptTarget.toString()); assertNotNull(encryptedCsv); assertTrue(encryptedCsv.exists()); assertTrue(Files.size(encryptedCsv.toPath()) > 0); } final List<File> decryptedCsvs = Arrays.stream(Objects.requireNonNull(decryptTarget.toFile().listFiles())) .filter(file -> file.getAbsolutePath().endsWith(".csv")) .collect(Collectors.toList()); for (File decryptedCsv 
: decryptedCsvs) { assertNotNull(decryptedCsv); assertTrue(decryptedCsv.exists()); assertTrue(Files.size(decryptedCsv.toPath()) > 0); } } }
6,266
0
Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples
Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples/csv/CsvExample.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.examples.csv; import com.amazonaws.c3r.action.CsvRowMarshaller; import com.amazonaws.c3r.action.CsvRowUnmarshaller; import com.amazonaws.c3r.action.RowMarshaller; import com.amazonaws.c3r.action.RowUnmarshaller; import com.amazonaws.c3r.config.ClientSettings; import com.amazonaws.c3r.config.ColumnHeader; import com.amazonaws.c3r.config.ColumnSchema; import com.amazonaws.c3r.config.ColumnType; import com.amazonaws.c3r.config.DecryptConfig; import com.amazonaws.c3r.config.EncryptConfig; import com.amazonaws.c3r.config.MappedTableSchema; import com.amazonaws.c3r.config.Pad; import com.amazonaws.c3r.config.PadType; import com.amazonaws.c3r.config.TableSchema; import com.amazonaws.c3r.data.CsvValue; import com.amazonaws.c3r.encryption.keys.KeyUtil; import com.amazonaws.c3r.io.FileFormat; import java.util.List; /** * Examples of encrypting and decrypting CSV files. */ public final class CsvExample { /** * An example 32-byte key used for testing. */ private static final String EXAMPLE_SHARED_SECRET_KEY = "AAECAwQFBgcICQoLDA0ODxAREhMUFrEXAMPLESECRET="; /** * Example collaboration ID, i.e., the value used by all participating parties as a salt for encryption. */ private static final String EXAMPLE_SALT = "00000000-1111-2222-3333-444444444444"; /** * Table schema for an input file with a header row which contains (at least) the following columns * (case-insensitive, leading and trailing whitespace are ignored). 
* <ul> * <li>firstname</li> * <li>lastname</li> * <li>address</li> * <li>city</li> * <li>state</li> * <li>phonenumber</li> * <li>title</li> * <li>level</li> * <li>notes</li> * </ul> */ private static final TableSchema EXAMPLE_TABLE_SCHEMA = new MappedTableSchema(List.of( ColumnSchema.builder() .sourceHeader(new ColumnHeader("firstname")) .targetHeader(new ColumnHeader("fname")) .type(ColumnType.CLEARTEXT) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("lastname")) .targetHeader(new ColumnHeader("lname")) .type(ColumnType.CLEARTEXT) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("address")) .targetHeader(new ColumnHeader("address")) .pad(Pad.builder().type(PadType.MAX).length(32).build()) .type(ColumnType.SEALED) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("city")) .targetHeader(new ColumnHeader("city")) .pad(Pad.builder().type(PadType.MAX).length(16).build()) .type(ColumnType.SEALED) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("state")) .targetHeader(new ColumnHeader("state")) .type(ColumnType.FINGERPRINT) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("phonenumber")) .targetHeader(new ColumnHeader("phonenumber_cleartext")) .pad(null) .type(ColumnType.CLEARTEXT) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("phonenumber")) .targetHeader(new ColumnHeader("phonenumber_sealed")) .pad(Pad.DEFAULT) .type(ColumnType.SEALED) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("phonenumber")) .targetHeader(new ColumnHeader("phonenumber_fingerprint")) .type(ColumnType.FINGERPRINT) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("title")) .targetHeader(new ColumnHeader("title")) .pad(Pad.builder().type(PadType.FIXED).length(128).build()) .type(ColumnType.SEALED) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("level")) .targetHeader(new ColumnHeader("level")) .pad(null) .type(ColumnType.CLEARTEXT) .build(), 
ColumnSchema.builder() .sourceHeader(new ColumnHeader("notes")) .targetHeader(new ColumnHeader("notes")) .pad(Pad.builder().type(PadType.MAX).length(100).build()) .type(ColumnType.SEALED) .build() )); /** * Hidden demo class constructor. */ private CsvExample() { } /** * Encrypt a file with the following columns with a predetermined schema, shared secret key, and collaboration ID. * <ul> * <li>firstname</li> * <li>lastname</li> * <li>address</li> * <li>city</li> * <li>state</li> * <li>phonenumber</li> * <li>title</li> * <li>level</li> * <li>notes</li> * </ul> * * @param sourceFile Source CSV file matching aforementioned schema * @param targetFile Destination for encrypted table */ public static void encrypt(final String sourceFile, final String targetFile) { final var encryptionConfig = EncryptConfig.builder() .sourceFile(sourceFile) .targetFile(targetFile) .fileFormat(FileFormat.CSV) .secretKey(KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY)) .salt(EXAMPLE_SALT) .tempDir(".") .settings(ClientSettings.lowAssuranceMode()) .tableSchema(EXAMPLE_TABLE_SCHEMA) .overwrite(true) .build(); final RowMarshaller<CsvValue> csvRowMarshaller = CsvRowMarshaller.newInstance(encryptionConfig); csvRowMarshaller.marshal(); csvRowMarshaller.close(); } /** * Decrypt an encrypted table for a predetermined shared secret key, and salt. * * @param sourceFile Encrypted table to decrypt * @param targetFile Where to store decrypted results */ public static void decrypt(final String sourceFile, final String targetFile) { final var decryptConfig = DecryptConfig.builder() .sourceFile(sourceFile) .targetFile(targetFile) .fileFormat(FileFormat.CSV) .secretKey(KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY)) .salt(EXAMPLE_SALT) .overwrite(true) .build(); final RowUnmarshaller<CsvValue> csvRowUnmarshaller = CsvRowUnmarshaller.newInstance(decryptConfig); csvRowUnmarshaller.unmarshal(); csvRowUnmarshaller.close(); } }
6,267
0
Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples
Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples/csv/CsvNoHeaderExample.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.examples.csv; import com.amazonaws.c3r.action.CsvRowMarshaller; import com.amazonaws.c3r.action.CsvRowUnmarshaller; import com.amazonaws.c3r.action.RowMarshaller; import com.amazonaws.c3r.action.RowUnmarshaller; import com.amazonaws.c3r.config.ClientSettings; import com.amazonaws.c3r.config.ColumnHeader; import com.amazonaws.c3r.config.ColumnSchema; import com.amazonaws.c3r.config.ColumnType; import com.amazonaws.c3r.config.DecryptConfig; import com.amazonaws.c3r.config.EncryptConfig; import com.amazonaws.c3r.config.Pad; import com.amazonaws.c3r.config.PadType; import com.amazonaws.c3r.config.PositionalTableSchema; import com.amazonaws.c3r.config.TableSchema; import com.amazonaws.c3r.data.CsvValue; import com.amazonaws.c3r.encryption.keys.KeyUtil; import com.amazonaws.c3r.io.FileFormat; import java.util.List; /** * Examples of encrypting and decrypting a CSV file with no headers in the input files. */ public final class CsvNoHeaderExample { /** * An example 32-byte key used for testing. */ private static final String EXAMPLE_SHARED_SECRET_KEY = "AAECAwQFBgcICQoLDA0ODxAREhMUFrEXAMPLESECRET="; /** * Example collaboration ID, i.e., the value used by all participating parties as a salt for encryption. */ private static final String EXAMPLE_SALT = "00000000-1111-2222-3333-444444444444"; /** * Table schema for an input CSV file with no header row and exactly 9 columns. Each List of * ColumnSchema indicates how many output columns that positional input column should be mapped to. 
*/ private static final TableSchema EXAMPLE_TABLE_SCHEMA = new PositionalTableSchema(List.of( List.of(ColumnSchema.builder() .targetHeader(new ColumnHeader("fname")) .type(ColumnType.CLEARTEXT) .build()), List.of(ColumnSchema.builder() .targetHeader(new ColumnHeader("lname")) .type(ColumnType.CLEARTEXT) .build()), List.of(ColumnSchema.builder() .targetHeader(new ColumnHeader("address")) .pad(Pad.builder().type(PadType.MAX).length(32).build()) .type(ColumnType.SEALED) .build()), List.of(ColumnSchema.builder() .targetHeader(new ColumnHeader("city")) .pad(Pad.builder().type(PadType.MAX).length(16).build()) .type(ColumnType.SEALED) .build()), List.of(ColumnSchema.builder() .targetHeader(new ColumnHeader("state")) .type(ColumnType.FINGERPRINT) .build()), // We map a single input column to multiple output columns by providing a list with // the desired number of ColumnSchema in that column's position. List.of(ColumnSchema.builder() .targetHeader(new ColumnHeader("phonenumber_cleartext")) .pad(null) .type(ColumnType.CLEARTEXT) .build(), ColumnSchema.builder() .targetHeader(new ColumnHeader("phonenumber_sealed")) .pad(Pad.DEFAULT) .type(ColumnType.SEALED) .build(), ColumnSchema.builder() .targetHeader(new ColumnHeader("phonenumber_fingerprint")) .type(ColumnType.FINGERPRINT) .build()), List.of(ColumnSchema.builder() .targetHeader(new ColumnHeader("title")) .pad(Pad.builder().type(PadType.FIXED).length(128).build()) .type(ColumnType.SEALED) .build()), List.of(ColumnSchema.builder() .targetHeader(new ColumnHeader("level")) .pad(null) .type(ColumnType.CLEARTEXT) .build()), // We omit the last column from our encrypted table by providing an // empty list of ColumnSchema at that position. List.of() )); /** * Hidden demo class constructor. */ private CsvNoHeaderExample() { } /** * Encrypts a CSV file with no header row and exactly 9 columns according to a predetermined schema. 
* * @param sourceFile Source CSV file with no header row and exactly 9 columns * @param targetFile Destination for encrypted table */ public static void encrypt(final String sourceFile, final String targetFile) { final var encryptionConfig = EncryptConfig.builder() .sourceFile(sourceFile) .targetFile(targetFile) .fileFormat(FileFormat.CSV) .secretKey(KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY)) .salt(EXAMPLE_SALT) .tempDir(".") .settings(ClientSettings.lowAssuranceMode()) .tableSchema(EXAMPLE_TABLE_SCHEMA) .overwrite(true) .build(); final RowMarshaller<CsvValue> csvRowMarshaller = CsvRowMarshaller.newInstance(encryptionConfig); csvRowMarshaller.marshal(); csvRowMarshaller.close(); } /** * Decrypt an encrypted table for a predetermined shared secret key, and salt. * * @param sourceFile Encrypted table to decrypt * @param targetFile Where to store decrypted results */ public static void decrypt(final String sourceFile, final String targetFile) { final var decryptConfig = DecryptConfig.builder() .sourceFile(sourceFile) .targetFile(targetFile) .fileFormat(FileFormat.CSV) .secretKey(KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY)) .salt(EXAMPLE_SALT) .overwrite(true) .build(); final RowUnmarshaller<CsvValue> csvRowUnmarshaller = CsvRowUnmarshaller.newInstance(decryptConfig); csvRowUnmarshaller.unmarshal(); csvRowUnmarshaller.close(); } }
6,268
0
Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples
Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples/csv/package-info.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 /** * Sample code showing how to use the SDK with CSV data. * * <p> * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0 */ package com.amazonaws.c3r.examples.csv;
6,269
0
Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples
Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples/parquet/ParquetExample.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.examples.parquet; import com.amazonaws.c3r.action.ParquetRowMarshaller; import com.amazonaws.c3r.action.ParquetRowUnmarshaller; import com.amazonaws.c3r.action.RowMarshaller; import com.amazonaws.c3r.action.RowUnmarshaller; import com.amazonaws.c3r.config.ClientSettings; import com.amazonaws.c3r.config.ColumnHeader; import com.amazonaws.c3r.config.ColumnSchema; import com.amazonaws.c3r.config.ColumnType; import com.amazonaws.c3r.config.DecryptConfig; import com.amazonaws.c3r.config.EncryptConfig; import com.amazonaws.c3r.config.MappedTableSchema; import com.amazonaws.c3r.config.Pad; import com.amazonaws.c3r.config.PadType; import com.amazonaws.c3r.config.ParquetConfig; import com.amazonaws.c3r.config.TableSchema; import com.amazonaws.c3r.data.ParquetValue; import com.amazonaws.c3r.encryption.keys.KeyUtil; import com.amazonaws.c3r.io.FileFormat; import java.util.List; /** * Example code for creating a schema, encrypting and decrypting Parquet data. */ public final class ParquetExample { /** * An example 32-byte key used for testing. */ private static final String EXAMPLE_SHARED_SECRET_KEY = "AAECAwQFBgcICQoLDA0ODxAREhMUFrEXAMPLESECRET="; /** * An example salt for testing. */ private static final String EXAMPLE_SALT = "00000000-1111-2222-3333-444444444444"; /** * Generates a table schema. 
The input file has a header row which contains (at least) the following columns * (case-insensitive, leading and trailing whitespace are ignored): * <ul> * <li>firstname</li> * <li>lastname</li> * <li>address</li> * <li>city</li> * <li>state</li> * <li>phonenumber</li> * <li>title</li> * <li>level</li> * <li>notes</li> * </ul> */ private static final TableSchema EXAMPLE_TABLE_SCHEMA = new MappedTableSchema(List.of( ColumnSchema.builder() .sourceHeader(new ColumnHeader("firstname")) .targetHeader(new ColumnHeader("fname")) .type(ColumnType.CLEARTEXT) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("lastname")) .targetHeader(new ColumnHeader("lname")) .type(ColumnType.CLEARTEXT) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("address")) .targetHeader(new ColumnHeader("address")) .pad(Pad.builder().type(PadType.MAX).length(32).build()) .type(ColumnType.SEALED) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("city")) .targetHeader(new ColumnHeader("city")) .pad(Pad.builder().type(PadType.MAX).length(16).build()) .type(ColumnType.SEALED) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("state")) .targetHeader(new ColumnHeader("state")) .type(ColumnType.FINGERPRINT) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("phonenumber")) .targetHeader(new ColumnHeader("phonenumber_cleartext")) .pad(null) .type(ColumnType.CLEARTEXT) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("phonenumber")) .targetHeader(new ColumnHeader("phonenumber_sealed")) .pad(Pad.DEFAULT) .type(ColumnType.SEALED) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("phonenumber")) .targetHeader(new ColumnHeader("phonenumber_fingerprint")) .type(ColumnType.FINGERPRINT) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("title")) .targetHeader(new ColumnHeader("title")) .pad(Pad.builder().type(PadType.FIXED).length(128).build()) .type(ColumnType.SEALED) .build(), ColumnSchema.builder() 
.sourceHeader(new ColumnHeader("level")) .targetHeader(new ColumnHeader("level")) .pad(null) .type(ColumnType.CLEARTEXT) .build(), ColumnSchema.builder() .sourceHeader(new ColumnHeader("notes")) .targetHeader(new ColumnHeader("notes")) .pad(Pad.builder().type(PadType.MAX).length(100).build()) .type(ColumnType.SEALED) .build() )); /** * Hidden example class constructor. */ private ParquetExample() { } /** * Demonstrates encrypting a file. Uses the following columns with a predetermined schema, shared secret key, and collaboration ID: * <ul> * <li>firstname</li> * <li>lastname</li> * <li>address</li> * <li>city</li> * <li>state</li> * <li>phonenumber</li> * <li>title</li> * <li>level</li> * <li>notes</li> * </ul> * * @param sourceFile Source CSV file matching aforementioned schema * @param targetFile Destination for encrypted table */ public static void encrypt(final String sourceFile, final String targetFile) { final var encryptionConfig = EncryptConfig.builder() .sourceFile(sourceFile) .targetFile(targetFile) .fileFormat(FileFormat.PARQUET) .secretKey(KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY)) .salt(EXAMPLE_SALT) .tempDir(".") .settings(ClientSettings.lowAssuranceMode()) .tableSchema(EXAMPLE_TABLE_SCHEMA) .overwrite(true) .build(); final RowMarshaller<ParquetValue> parquetRowMarshaller = ParquetRowMarshaller.newInstance(encryptionConfig, ParquetConfig.DEFAULT); parquetRowMarshaller.marshal(); parquetRowMarshaller.close(); } /** * Decrypt an encrypted table for a predetermined shared secret key, and collaboration ID. 
* * @param sourceFile Encrypted table to decrypt * @param targetFile Where to store decrypted results */ public static void decrypt(final String sourceFile, final String targetFile) { final var decryptConfig = DecryptConfig.builder() .sourceFile(sourceFile) .targetFile(targetFile) .fileFormat(FileFormat.PARQUET) .secretKey(KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY)) .salt(EXAMPLE_SALT) .overwrite(true) .build(); final RowUnmarshaller<ParquetValue> parquetRowUnmarshaller = ParquetRowUnmarshaller.newInstance(decryptConfig); parquetRowUnmarshaller.unmarshal(); parquetRowUnmarshaller.close(); } }
6,270
0
Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples
Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples/parquet/package-info.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 /** * Sample code showing how to use the SDK with Parquet data. * * <p> * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0 */ package com.amazonaws.c3r.examples.parquet;
6,271
0
Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples
Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples/spark/SparkExample.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.examples.spark; import com.amazonaws.c3r.Transformer; import com.amazonaws.c3r.config.ClientSettings; import com.amazonaws.c3r.config.ColumnHeader; import com.amazonaws.c3r.config.ColumnInsight; import com.amazonaws.c3r.config.ColumnSchema; import com.amazonaws.c3r.config.ColumnType; import com.amazonaws.c3r.config.TableSchema; import com.amazonaws.c3r.data.ClientDataType; import com.amazonaws.c3r.encryption.EncryptionContext; import com.amazonaws.c3r.encryption.keys.KeyUtil; import com.amazonaws.c3r.exception.C3rRuntimeException; import com.amazonaws.c3r.internal.Nonce; import org.apache.spark.SparkConf; import org.apache.spark.api.java.function.MapFunction; import org.apache.spark.sql.Column; import org.apache.spark.sql.Dataset; import org.apache.spark.sql.Row; import org.apache.spark.sql.SaveMode; import org.apache.spark.sql.SparkSession; import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder; import org.apache.spark.sql.catalyst.encoders.RowEncoder; import org.apache.spark.sql.functions; import scala.jdk.CollectionConverters; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; /** * Example code for running Spark. * * <p> * Note that there are a few differences between C3R's pre-packaged offerings and orchestrating with Spark. * * <p> * The most important difference is the change in trust boundaries. When using the C3R normally, files exist on the same machine running * C3R. C3R never writes any data to disk unencrypted unless it is meant to be unencrypted in the output. With Spark, as an input file is * read, Spark is partitioning that data in memory and/or on disk before C3R ever gets an opportunity to encrypt it. 
This means that * cleartext forms of data that will eventually be encrypted may be written to disk and/or distributed to Spark Workers before it is * encrypted. Further, Spark Workers may exist on other machines or networks. If a Spark job fails, there could be * cleartext copies of the input file leftover across your Spark infrastructure. It is up to you to understand if this is permissible * for your threat model and to configure your Spark server according to your needs. * * <p> * Second, this Spark example is not managing file permissions for the output file. C3R normally sets this file to be RW by the Owner * only. Files written by Spark will inherit the permissions of where they are written. * * <p> * Third, Spark partitions and distributes the cleartext data before C3R drops columns that will not be included in the output. When * using the C3R SDK or CLI, these columns are dropped during the data load step before they're ever written to disk. If these columns * should never leave the initial location, they should be removed from the data before it is handed to this Spark example. * * <p> * Fourth, Spark may partition the data and thus the output files. You may need to take additional steps to merge the data if downstream * steps require it be one file. Note that when using S3 and Glue with AWS Clean Rooms, this should not be necessary. * * <p> * Finally, certain functionality like shuffling rows, dropping columns, finding max length of values in a column, and finding duplicate * values in a column are all revised in this example to take advantage of Spark. These are normally handled by C3R's * {@link com.amazonaws.c3r.action.RowMarshaller}. All of these functions will behave the same as they do with C3R except shuffling rows. * Instead of sorting on Nonces created using Java's {@code SecureRandom}, Spark is using its own {@code rand()} function for the shuffle. */ public final class SparkExample { /** * An example 32-byte key used for testing. 
*/ private static final String EXAMPLE_SHARED_SECRET_KEY = "AAECAwQFBgcICQoLDA0ODxAREhMUFrEXAMPLESECRET="; /** * Example collaboration ID, i.e., the value used by all participating parties as a salt for encryption. */ private static final String EXAMPLE_SALT = "00000000-1111-2222-3333-444444444444"; /** * Insights for all the target columns that will be written. */ private static Collection<ColumnInsight> columnInsights; /** Hidden utility constructor. */ private SparkExample() { } /** * Create a Spark session for running the encrypt/decrypt methods. * * <p> * This method will by default create a local Spark Driver. Modify the URL of the Spark Driver within this function to run * this example on another Spark Driver. * * @return A spark session */ private static SparkSession initSparkSession() { // CHECKSTYLE:OFF final SparkConf conf = new SparkConf() .setAppName("C3RSparkDemo") // Update this to point to your own Spark Driver if not running this locally. .setMaster("local[*]"); // CHECKSTYLE:ON return SparkSession .builder() .config(conf) .getOrCreate(); } /** * Sample of Spark orchestrating the C3R SDK for encryption. * * <p> * This function is currently setup to only process CSV files. It can be modified to instead take a {@code Dataset<Row>}. There is no * functionality specific to a CSV after the initial data load. * * <p> * Please note that only {@code String} data types are currently supported. 
* * @param source input file * @param target output file * @param schema schema file */ public static void encrypt(final String source, final String target, final TableSchema schema) { final SparkSession spark = initSparkSession(); final ClientSettings clientSettings = ClientSettings.lowAssuranceMode(); columnInsights = schema.getColumns().stream().map(ColumnInsight::new) .collect(Collectors.toList()); Dataset<Row> rawInputData = readInput(source, spark); rawInputData = filterSourceColumnsBySchema(rawInputData); updateMaxValuesPerColumn(spark, rawInputData); validateDuplicates(clientSettings, rawInputData); rawInputData = shuffleData(rawInputData); rawInputData = mapSourceToTargetColumns(rawInputData); populateColumnPositions(rawInputData); rawInputData = marshalData(rawInputData); rawInputData.write().mode(SaveMode.Append).option("header", true).csv(target); closeSparkSession(spark); } /** * Sample of Spark orchestrating the C3R SDK for decryption. * * <p> * This function is currently setup to only process CSV files. It can be modified to instead take a {@code Dataset<Row>}. There is no * functionality specific to a CSV after the initial data load. * * <p> * Please note that only {@code String} data types are currently supported. * * @param source input file * @param target output file */ public static void decrypt(final String source, final String target) { final SparkSession spark = initSparkSession(); Dataset<Row> rawInputData = readInput(source, spark); rawInputData = unmarshalData(rawInputData); rawInputData.write().mode(SaveMode.Append).option("header", true).csv(target); closeSparkSession(spark); } /** * Reads the input file for processing. * * <p> * NOTE: Empty values in CSVs are treated as null by default when Spark parses them. To configure nulls with * Spark, see the <a href="https://spark.apache.org/docs/latest/sql-data-sources-csv.html">Spark documentation on CSVs</a>. 
* * @param source input file * @param spark the SparkSession to read with * @return The source data to be processed */ private static Dataset<Row> readInput(final String source, final SparkSession spark) { return spark.read() .option("header", "true") // Filter out the header row .option("inferSchema", "false") // Treat all fields as Strings .option("nullValue", null) .option("emptyValue", null) .csv(source); } /** * Filter source columns not in the schema. * * <p> * This is normally handled by C3R's {@link com.amazonaws.c3r.action.RowMarshaller} by dropping columns that won't be in the output * during the data load. * * @param rawInputData the Dataset to filter * @return A Dataset containing only source columns defined in the schema */ static Dataset<Row> filterSourceColumnsBySchema(final Dataset<Row> rawInputData) { final Set<ColumnHeader> schemaSourceColumns = columnInsights.stream() .map(ColumnSchema::getSourceHeader) .collect(Collectors.toSet()); final Set<ColumnHeader> inputColumns = Arrays.stream(rawInputData.columns()) .map(ColumnHeader::new) .collect(Collectors.toSet()); inputColumns.removeAll(schemaSourceColumns); Dataset<Row> toReturn = rawInputData; for (ColumnHeader columnHeader : inputColumns) { toReturn = toReturn.drop(columnHeader.toString()); } return toReturn; } /** * Updates {@link #columnInsights} with the max value length of their columns. These values are used during encryption whenever * {@link com.amazonaws.c3r.config.PadType#MAX} is configured for a sealed column. * * <p> * This is normally handled by C3R's {@link com.amazonaws.c3r.action.RowMarshaller} tracking the size of each value being read in * during the data load. 
* * @param spark The SparkSession to run the queries in * @param rawInputData The Dataset to run the queries against */ static void updateMaxValuesPerColumn(final SparkSession spark, final Dataset<Row> rawInputData) { rawInputData.createOrReplaceTempView("rawData"); final Map<ColumnHeader, List<ColumnInsight>> sourceMappedColumnInsights = columnInsights.stream() .collect(Collectors.groupingBy(ColumnInsight::getSourceHeader)); Arrays.stream(rawInputData.columns()).forEach(col -> { final int maxValue = spark.sql("SELECT max(length(" + col + ")) FROM rawData").first().getInt(0); final ColumnHeader columnHeader = new ColumnHeader(col); for (ColumnInsight insight : sourceMappedColumnInsights.get(columnHeader)) { insight.setMaxValueLength(maxValue); } }); } /** * Validates whether the input data meets the encryption settings for `allowDuplicates`. * * <p> * This is normally handled by C3R's {@link com.amazonaws.c3r.action.RowMarshaller} querying the temporary SQL table data is loaded * to. * * @param clientSettings The encryption settings to validate with * @param rawInputData The Dataset to be validated * @throws C3rRuntimeException If input data is invalid */ static void validateDuplicates(final ClientSettings clientSettings, final Dataset<Row> rawInputData) { if (clientSettings.isAllowDuplicates()) { return; } // Check for duplicates when `allowDuplicates` is false final String[] fingerprintColumns = columnInsights.stream() .filter(columnSchema -> columnSchema.getType() == ColumnType.FINGERPRINT) // enforced on fingerprint columns only .map(ColumnSchema::getSourceHeader) .map(ColumnHeader::toString) .distinct() .toArray(String[]::new); // Check for duplicate non-null values for (String col : fingerprintColumns) { final Dataset<Row> filteredData = rawInputData.groupBy(col).count().filter("count > 1"); if (!filteredData.isEmpty()) { throw new C3rRuntimeException("Duplicates were found in column `" + col + "`, but `allowDuplicates` is false."); } } // Check for duplicate 
null values when `preserveNulls` is false if (!clientSettings.isPreserveNulls()) { for (String col : fingerprintColumns) { final Column column = new Column(col); final Dataset<Row> filteredData = rawInputData.select(column) .groupBy(column) .count() .filter(column.isNull()) .filter("count > 1"); if (!filteredData.isEmpty()) { throw new C3rRuntimeException("Duplicates NULLs were found in column `" + col + "`, but `allowDuplicates` and " + "`preserveNulls` are false."); } } } } /** * Map the source columns to their respective target columns. * * <p> * This is normally handled by C3R's {@link com.amazonaws.c3r.action.RowMarshaller} by writing input columns of data to the intended * target columns during the data load. * * @param rawInputData the Dataset to map * @return A Dataset containing each target column */ static Dataset<Row> mapSourceToTargetColumns(final Dataset<Row> rawInputData) { final List<Column> targetColumns = new ArrayList<>(); columnInsights.forEach(target -> targetColumns.add(functions.col(target.getSourceHeader().toString()) .as(target.getTargetHeader().toString()))); return rawInputData.select(CollectionConverters.IteratorHasAsScala(targetColumns.iterator()).asScala().toSeq()); } /** * Encrypt source data. * * @param rawInputData The source data to be encrypted * @return The encrypted data */ static Dataset<Row> marshalData(final Dataset<Row> rawInputData) { final ExpressionEncoder<Row> rowEncoder = RowEncoder.apply(rawInputData.schema()); return rawInputData.map((MapFunction<Row, Row>) row -> { // Grab a nonce for the row final Nonce nonce = Nonce.nextNonce(); // Build a list of transformers for each row, limiting state to keys/salts/settings POJOs final Map<ColumnType, Transformer> transformers = Transformer.initTransformers( KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY), EXAMPLE_SALT, ClientSettings.lowAssuranceMode(), false); // Defaulting to false. 
// For each column in the row, transform the data return Row.fromSeq( CollectionConverters.IteratorHasAsScala(columnInsights.stream().map(column -> { if (column.getType() == ColumnType.CLEARTEXT) { return row.get(column.getSourceColumnPosition()); } final Transformer transformer = transformers.get(column.getType()); final String data = row.getString(column.getSourceColumnPosition()); final byte[] dataBytes = data == null ? null : data.getBytes(StandardCharsets.UTF_8); final EncryptionContext encryptionContext = new EncryptionContext(column, nonce, ClientDataType.STRING); final byte[] marshalledBytes = transformer.marshal(dataBytes, encryptionContext); return (marshalledBytes == null ? null : new String(marshalledBytes, StandardCharsets.UTF_8)); }).iterator()).asScala().toSeq()); }, rowEncoder); } /** * Decrypt source data. * * @param rawInputData The source data to be decrypted * @return The cleartext data */ static Dataset<Row> unmarshalData(final Dataset<Row> rawInputData) { final ExpressionEncoder<Row> rowEncoder = RowEncoder.apply(rawInputData.schema()); return rawInputData.map((MapFunction<Row, Row>) row -> { // Build a list of transformers for each row, limiting state to keys/salts/settings POJOs final Map<ColumnType, Transformer> transformers = Transformer.initTransformers( KeyUtil.sharedSecretKeyFromString(EXAMPLE_SHARED_SECRET_KEY), EXAMPLE_SALT, ClientSettings.lowAssuranceMode(), false); // Defaulting to false. // For each column in the row, transform the data final List<Object> unmarshalledValues = new ArrayList<>(); for (int i = 0; i < row.size(); i++) { final String data = row.getString(i); final byte[] dataBytes = data == null ? 
null : data.getBytes(StandardCharsets.UTF_8); Transformer transformer = transformers.get(ColumnType.CLEARTEXT); // Default to pass through if (Transformer.hasDescriptor(transformers.get(ColumnType.SEALED), dataBytes)) { transformer = transformers.get(ColumnType.SEALED); } else if (Transformer.hasDescriptor(transformers.get(ColumnType.FINGERPRINT), dataBytes)) { transformer = transformers.get(ColumnType.FINGERPRINT); } final byte[] unmarshalledBytes = transformer.unmarshal(dataBytes); unmarshalledValues.add(unmarshalledBytes == null ? null : new String(unmarshalledBytes, StandardCharsets.UTF_8)); } return Row.fromSeq( CollectionConverters.IteratorHasAsScala(unmarshalledValues.iterator()).asScala().toSeq()); }, rowEncoder); } /** * Find the positions for each column. * * @param rawInputData The source data to map the columns with */ static void populateColumnPositions(final Dataset<Row> rawInputData) { // Gather the positions of all the columns final String[] columns = rawInputData.columns(); final Map<ColumnHeader, Integer> columnPositions = new HashMap<>(); for (int i = 0; i < columns.length; i++) { columnPositions.put(new ColumnHeader(columns[i]), i); } for (ColumnInsight column : columnInsights) { final int position = columnPositions.get(column.getTargetHeader()); column.setSourceColumnPosition(position); } } /** * Shuffles the input data to hide ordering. * * <p> * This is normally handled by C3R's {@link com.amazonaws.c3r.action.RowMarshaller} by appending the Nonces used for each row to the * data on load and then sorting on those nonces before writing out the data. Instead of sorting on Nonces created using Java's * {@code SecureRandom}, Spark is using its own {@code rand()} function for the shuffle. * * @param rawInputData The Dataset to shuffle * @return The shuffled Dataset */ static Dataset<Row> shuffleData(final Dataset<Row> rawInputData) { return rawInputData.orderBy(functions.rand()); } /** * Shut down the Spark session. 
* * @param spark the SparkSession to close */ private static void closeSparkSession(final SparkSession spark) { spark.stop(); } }
6,272
0
Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples
Create_ds/c3r/c3r-sdk-examples/src/main/java/com/amazonaws/c3r/examples/spark/package-info.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 /** * Sample code showing how to use the SDK with Spark. * * <p> * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0 */ package com.amazonaws.c3r.examples.spark;
6,273
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/config/SparkDecryptConfigTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.config; import com.amazonaws.c3r.exception.C3rIllegalArgumentException; import com.amazonaws.c3r.io.FileFormat; import com.amazonaws.c3r.spark.utils.FileTestUtility; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.io.IOException; import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrowsExactly; public class SparkDecryptConfigTest { private String output; private SparkDecryptConfig.SparkDecryptConfigBuilder minimalConfigBuilder(final String sourceFile) { return SparkDecryptConfig.builder() .secretKey(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getKey()) .source(sourceFile) .targetDir(output) .salt(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getSalt()); } @BeforeEach public void setup() throws IOException { output = FileTestUtility.createTempDir().resolve("outputDir").toString(); } @Test public void minimumViableConstructionTest() { assertDoesNotThrow(() -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput()).build()); } @Test public void validateInputEmptyTest() { assertThrows(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput()) .source("").build()); } @Test public void validateOutputEmptyTest() { assertThrows(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput()) .targetDir("").build()); } @Test public void validateNoOverwriteTest() throws IOException { output = FileTestUtility.createTempFile().toString(); assertThrows(C3rIllegalArgumentException.class, () -> 
minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput()) .overwrite(false).build()); } @Test public void validateOverwriteTest() throws IOException { output = FileTestUtility.createTempDir().toString(); assertDoesNotThrow(() -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput()) .overwrite(true).build()); } @Test public void validateEmptySaltTest() { assertThrows(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput()) .salt("").build()); } @Test public void validateFileExtensionWhenInputIsDirectoryTest() { assertDoesNotThrow(() -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput()) .source(FileTestUtility.createTempDir().toString()) .overwrite(true) .fileFormat(FileFormat.PARQUET) .build()); } @Test public void validateNoFileExtensionWhenInputIsDirectoryTest() { assertThrows(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput()) .source(FileTestUtility.createTempDir().toString()) .overwrite(true) .build()); } @Test public void unknownFileExtensionTest() throws IOException { final String pathWithUnknownExtension = FileTestUtility.createTempFile("input", ".unknown").toString(); // unknown extensions cause failure if no FileFormat is specified assertThrows(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(pathWithUnknownExtension).build()); // specifying a FileFormat makes it work assertDoesNotThrow(() -> minimalConfigBuilder(pathWithUnknownExtension) .fileFormat(FileFormat.CSV) .build()); } @Test public void csvOptionsNonCsvFileFormatForFileTest() throws IOException { final String parquetPath = FileTestUtility.createTempFile("input", ".parquet").toString(); // parquet file is fine assertDoesNotThrow(() -> minimalConfigBuilder(parquetPath).build()); // parquet file with csvInputNullValue errors assertThrows(C3rIllegalArgumentException.class, () -> 
minimalConfigBuilder(parquetPath) .csvInputNullValue("") .build()); // parquet file with csvOutputNullValue errors assertThrows(C3rIllegalArgumentException.class, () -> minimalConfigBuilder(parquetPath) .csvOutputNullValue("") .build()); } @Test public void csvOptionNonCsvFileFormatForDirectoryTest() throws IOException { // Use an input directory final var config = minimalConfigBuilder(FileTestUtility.createTempDir().toString()) .overwrite(true) .fileFormat(FileFormat.PARQUET); // Parquet file format by itself is fine assertDoesNotThrow(() -> config.build()); // Parquet format with an input CSV null value specified is not accepted assertThrowsExactly(C3rIllegalArgumentException.class, () -> config.csvInputNullValue("NULL").build()); // Parquet format with an output CSV null value specified is not accepted assertThrowsExactly(C3rIllegalArgumentException.class, () -> config.csvOutputNullValue("NULL").build()); } }
6,274
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/config/SparkEncryptConfigTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.amazonaws.c3r.spark.config;

import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.PositionalTableSchema;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.spark.action.SparkMarshaller;
import com.amazonaws.c3r.spark.io.CsvTestUtility;
import com.amazonaws.c3r.spark.io.csv.SparkCsvReader;
import com.amazonaws.c3r.spark.io.csv.SparkCsvWriter;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.TEST_CONFIG_DATA_SAMPLE;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.cleartextColumn;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertThrowsExactly;
import static org.junit.jupiter.api.Assertions.assertTrue;

/**
 * Tests for {@link SparkEncryptConfig}: builder validation (source/target/salt/file format
 * combinations) plus a few end-to-end encryption runs over the CSV sample files.
 */
public class SparkEncryptConfigTest {
    // Target directory for encrypted output; recreated fresh before each test in setup().
    private String output;

    /**
     * Builds a config pre-populated with the minimum required settings for the
     * one-row-with-null cleartext sample, so each test only overrides what it exercises.
     *
     * @param sourceFile Input data file for the config
     * @return Partially-built config builder (callers finish with {@code build()})
     */
    private SparkEncryptConfig.SparkEncryptConfigBuilder minimalConfigBuilder(final String sourceFile) {
        return SparkEncryptConfig.builder()
                .secretKey(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getKey())
                .source(sourceFile)
                .targetDir(output)
                .salt(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getSalt())
                .settings(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getSettings())
                .tableSchema(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getSchema());
    }

    // Helper function for calling row marshaller on settings: reads the config's CSV input,
    // encrypts it with SparkMarshaller, and writes the result to the config's target file.
    private void runConfig(final SparkEncryptConfig config) {
        final SparkSession session = SparkSessionTestUtility.initSparkSession();
        final Dataset<Row> dataset = SparkCsvReader.readInput(session,
                config.getSourceFile(),
                config.getCsvInputNullValue(),
                config.getTableSchema().getPositionalColumnHeaders());
        final Dataset<Row> marshalledDataset = SparkMarshaller.encrypt(dataset, config);
        SparkCsvWriter.writeOutput(marshalledDataset, config.getTargetFile(), config.getCsvOutputNullValue());
    }

    // Fresh, not-yet-existing output directory for every test.
    @BeforeEach
    public void setup() throws IOException {
        output = FileTestUtility.createTempDir().resolve("outputDir").toString();
    }

    // A builder with only the required fields set should succeed.
    @Test
    public void minimumViableConstructionTest() {
        assertDoesNotThrow(() ->
                minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                        .build());
    }

    // Make sure input file must be specified.
    @Test
    public void validateInputBlankTest() {
        assertThrowsExactly(C3rIllegalArgumentException.class, () ->
                minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                        .source("").build());
    }

    // Make sure a target directory must be specified.
    @Test
    public void validateOutputEmptyTest() {
        assertThrows(C3rIllegalArgumentException.class, () ->
                minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                        .targetDir("").build());
    }

    // An already-existing target directory must be rejected when overwrite is off.
    @Test
    public void validateNoOverwriteTest() throws IOException {
        output = FileTestUtility.createTempDir().toString();
        assertThrows(C3rIllegalArgumentException.class, () ->
                minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                        .overwrite(false).build());
    }

    // ... and accepted when overwrite is on.
    @Test
    public void validateOverwriteTest() throws IOException {
        output = FileTestUtility.createTempDir().toString();
        assertDoesNotThrow(() ->
                minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                        .overwrite(true).build());
    }

    // An empty salt is invalid.
    @Test
    public void validateEmptySaltTest() {
        assertThrows(C3rIllegalArgumentException.class, () ->
                minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                        .salt("").build());
    }

    // A directory source is fine when an explicit FileFormat is provided.
    @Test
    public void validateFileExtensionWhenInputIsDirectoryTest() {
        assertDoesNotThrow(() ->
                minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                        .source(FileTestUtility.createTempDir().toString())
                        .overwrite(true)
                        .fileFormat(FileFormat.PARQUET)
                        .build());
    }

    // A directory source with no FileFormat can't infer a format and must fail.
    @Test
    public void validateNoFileExtensionWhenInputIsDirectoryTest() {
        assertThrows(C3rIllegalArgumentException.class, () ->
                minimalConfigBuilder(TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT.getInput())
                        .source(FileTestUtility.createTempDir().toString())
                        .overwrite(true)
                        .build());
    }

    // Unrecognized file extensions require an explicit FileFormat.
    @Test
    public void unknownFileExtensionTest() throws IOException {
        final String pathWithUnknownExtension = FileTestUtility.createTempFile("input", ".unknown").toString();
        // unknown extensions cause failure if no FileFormat is specified
        assertThrowsExactly(C3rIllegalArgumentException.class, () ->
                minimalConfigBuilder(pathWithUnknownExtension).build());
        // specifying a FileFormat makes it work
        assertDoesNotThrow(() ->
                minimalConfigBuilder(pathWithUnknownExtension)
                        .fileFormat(FileFormat.CSV)
                        .build());
    }

    // CSV-only options must be rejected when the (file) input is Parquet.
    @Test
    public void csvOptionsNonCsvFileFormatForFileTest() throws IOException {
        final String parquetPath = FileTestUtility.createTempFile("input", ".parquet").toString();
        // parquet file is fine
        assertDoesNotThrow(() -> minimalConfigBuilder(parquetPath).build());
        // parquet file with csvInputNullValue errors
        assertThrowsExactly(C3rIllegalArgumentException.class, () ->
                minimalConfigBuilder(parquetPath)
                        .csvInputNullValue("")
                        .build());
        // parquet file with csvOutputNullValue errors
        assertThrowsExactly(C3rIllegalArgumentException.class, () ->
                minimalConfigBuilder(parquetPath)
                        .csvOutputNullValue("")
                        .build());
    }

    // CSV-only options must be rejected when the (directory) input is Parquet.
    @Test
    public void csvOptionNonCsvFileFormatForDirectoryTest() throws IOException {
        // Use an input directory
        final var config = minimalConfigBuilder(FileTestUtility.createTempDir().toString())
                .overwrite(true)
                .fileFormat(FileFormat.PARQUET);
        // Parquet file format by itself is fine
        assertDoesNotThrow(() -> config.build());
        // Parquet format with an input CSV null value specified is not accepted
        assertThrowsExactly(C3rIllegalArgumentException.class, () -> config.csvInputNullValue("NULL").build());
        // Parquet format with an output CSV null value specified is not accepted
        assertThrowsExactly(C3rIllegalArgumentException.class, () -> config.csvOutputNullValue("NULL").build());
    }

    // Make sure positional schema and file produce results equivalent to file and schema with headers.
    @Test
    public void noHeaderFileProducesCorrectResultsTest() throws IOException {
        final String noHeadersFile = "../samples/csv/data_sample_no_headers.csv";
        final TableSchema noHeadersSchema = new PositionalTableSchema(List.of(
                List.of(cleartextColumn(null, "FirstName")),
                List.of(cleartextColumn(null, "LastName")),
                List.of(cleartextColumn(null, "Address")),
                List.of(cleartextColumn(null, "City")),
                List.of(cleartextColumn(null, "State")),
                List.of(cleartextColumn(null, "PhoneNumber")),
                List.of(cleartextColumn(null, "Title")),
                List.of(cleartextColumn(null, "Level")),
                List.of(cleartextColumn(null, "Notes"))
        ));
        final String headersFile = "../samples/csv/data_sample_without_quotes.csv";
        final TableSchema headersSchema = new MappedTableSchema(List.of(
                cleartextColumn("FirstName"),
                cleartextColumn("LastName"),
                cleartextColumn("Address"),
                cleartextColumn("City"),
                cleartextColumn("State"),
                cleartextColumn("PhoneNumber"),
                cleartextColumn("Title"),
                cleartextColumn("Level"),
                cleartextColumn("Notes")
        ));
        final SparkEncryptConfig noHeadersConfig = SparkEncryptConfig.builder()
                .source(noHeadersFile)
                .targetDir(FileTestUtility.createTempDir().resolve("encryptedNoHeaders").toString())
                .overwrite(true)
                .csvInputNullValue(null)
                .csvOutputNullValue(null)
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .settings(TEST_CONFIG_DATA_SAMPLE.getSettings())
                .tableSchema(noHeadersSchema)
                .build();
        runConfig(noHeadersConfig);
        final Path mergedNoHeadersOutput = CsvTestUtility.mergeOutput(Path.of(noHeadersConfig.getTargetFile()));
        final SparkEncryptConfig headersConfig = SparkEncryptConfig.builder()
                .source(headersFile)
                .targetDir(FileTestUtility.createTempDir().resolve("encryptedHeaders").toString())
                .overwrite(true)
                .csvInputNullValue(null)
                .csvOutputNullValue(null)
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .settings(TEST_CONFIG_DATA_SAMPLE.getSettings())
                .tableSchema(headersSchema)
                .build();
        runConfig(headersConfig);
        final Path mergedHeadersOutput = CsvTestUtility.mergeOutput(Path.of(headersConfig.getTargetFile()));
        final List<String> noHeaderLines = Files.readAllLines(mergedNoHeadersOutput);
        final List<String> headerLines = Files.readAllLines(mergedHeadersOutput);
        assertEquals(headerLines.size(), noHeaderLines.size());
        // Spark part files come back in no fixed order, so compare rows after sorting both sides.
        noHeaderLines.sort(String::compareTo);
        headerLines.sort(String::compareTo);
        for (int i = 0; i < headerLines.size(); i++) {
            assertEquals(0, headerLines.get(i).compareTo(noHeaderLines.get(i)));
        }
    }

    // Make sure custom null values work with positional schemas.
    @Test
    public void customNullValueWithPositionalSchemaTest() throws IOException {
        final String noHeadersFile = "../samples/csv/data_sample_no_headers.csv";
        final TableSchema noHeadersSchema = new PositionalTableSchema(List.of(
                List.of(cleartextColumn(null, "FirstName")),
                List.of(cleartextColumn(null, "LastName")),
                List.of(cleartextColumn(null, "Address")),
                List.of(cleartextColumn(null, "City")),
                List.of(cleartextColumn(null, "State")),
                List.of(cleartextColumn(null, "PhoneNumber")),
                List.of(cleartextColumn(null, "Title")),
                List.of(cleartextColumn(null, "Level")),
                List.of(cleartextColumn(null, "Notes"))
        ));
        // "John" values are read as null on input and written back as "NULLJOHNNULL" on output.
        final SparkEncryptConfig noHeadersConfig = SparkEncryptConfig.builder()
                .source(noHeadersFile)
                .targetDir(output)
                .overwrite(true)
                .csvInputNullValue("John")
                .csvOutputNullValue("NULLJOHNNULL")
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .settings(TEST_CONFIG_DATA_SAMPLE.getSettings())
                .tableSchema(noHeadersSchema)
                .build();
        runConfig(noHeadersConfig);
        final Path mergedNoHeadersOutput = CsvTestUtility.mergeOutput(Path.of(noHeadersConfig.getTargetFile()));
        final List<String> noHeaderLines = Files.readAllLines(mergedNoHeadersOutput);
        boolean foundNull = false;
        for (String row : noHeaderLines) {
            foundNull |= row.startsWith("NULLJOHNNULL,Smith");
        }
        assertTrue(foundNull);
    }

    // Check that validation fails because cleartext columns aren't allowed but cleartext columns
    // are in the schema.
    @Test
    void checkAllowCleartextValidationTest() {
        final String noHeadersFile = "../samples/csv/data_sample_no_headers.csv";
        final TableSchema schema = new MappedTableSchema(List.of(cleartextColumn("cleartext")));
        final var config = SparkEncryptConfig.builder()
                .source(noHeadersFile)
                .targetDir(output)
                .overwrite(true)
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .tableSchema(schema);
        final Exception e = assertThrowsExactly(C3rIllegalArgumentException.class,
                () -> config.settings(ClientSettings.highAssuranceMode()).build());
        assertEquals("Cleartext columns found in the schema, but allowCleartext is false. Target column names: [`cleartext`]",
                e.getMessage());
        assertDoesNotThrow(() -> config.settings(ClientSettings.lowAssuranceMode()).build());
    }
}
6,275
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/ParquetTestUtility.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.io; import com.amazonaws.c3r.config.ColumnHeader; import com.amazonaws.c3r.data.ParquetSchema; import com.amazonaws.c3r.data.ParquetValue; import com.amazonaws.c3r.data.Row; import com.amazonaws.c3r.io.ParquetRowReader; import com.amazonaws.c3r.io.ParquetRowWriter; import com.amazonaws.c3r.spark.utils.FileTestUtility; import java.io.File; import java.io.IOException; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.function.Function; import java.util.stream.Collectors; /** * Utility functions for reading Parquet data out of files. */ public final class ParquetTestUtility { /** * Hidden utility class constructor. */ private ParquetTestUtility() { } /** * Takes a row of Parquet values and returns them as an array of string values ordered by column indices. * * @param row Parquet values looked up by name * @param indices Mapping of column index to name * @return Ordered Parquet values converted to strings */ private static String[] rowToStringArray(final Row<ParquetValue> row, final Map<Integer, ColumnHeader> indices) { final String[] strings = new String[row.size()]; for (int i = 0; i < row.size(); i++) { strings[i] = Objects.requireNonNullElse(row.getValue(indices.get(i)).toString(), ""); } return strings; } /** * Reads a Parquet file into a list of ordered string values. 
* * @param filePath Location of the file to read * @return Contents of the file as a list of rows and the rows are string values */ public static List<String[]> readContentAsStringArrays(final String filePath) { final ParquetRowReader reader = ParquetRowReader.builder().sourceName(filePath).build(); final ParquetSchema parquetSchema = reader.getParquetSchema(); final Map<Integer, ColumnHeader> columnIndices = parquetSchema.getHeaders().stream() .collect(Collectors.toMap( parquetSchema::getColumnIndex, Function.identity() )); final var mapRows = readAllRows(reader); return mapRows.stream().map(row -> rowToStringArray(row, columnIndices)).collect(Collectors.toList()); } /** * Reads all the rows from a Parquet file to their Parquet type. * * @param reader Reads a particular Parquet file * @return Contents of the file as a list of rows with Parquet values */ public static List<Row<ParquetValue>> readAllRows(final ParquetRowReader reader) { final var rows = new ArrayList<Row<ParquetValue>>(); while (reader.hasNext()) { final var row = reader.next(); rows.add(row); } return rows; } private static List<Path> getOutputPaths(final Path output) { return Arrays.stream(Objects.requireNonNull(output.toFile().listFiles())) .filter(file -> file.getAbsolutePath().endsWith(".parquet")) .map(File::toPath) .collect(Collectors.toList()); } public static Path mergeOutput(final Path output) throws IOException { final Path mergedOutput = FileTestUtility.createTempFile("test", ".parquet"); final List<Path> paths = getOutputPaths(output); final List<Row<ParquetValue>> mergedLines = new ArrayList<>(); ParquetRowReader reader; ParquetSchema parquetSchema = null; for (Path p : paths) { reader = ParquetRowReader.builder().sourceName(p.toString()).build(); if (parquetSchema == null) { parquetSchema = reader.getParquetSchema(); } mergedLines.addAll(readAllRows(reader)); } final ParquetRowWriter writer = ParquetRowWriter.builder() .parquetSchema(parquetSchema) 
.targetName(mergedOutput.toString()).build(); for (Row<ParquetValue> row : mergedLines) { writer.writeRow(row); } writer.flush(); writer.close(); return mergedOutput; } }
6,276
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/CsvTestUtility.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.io; import com.amazonaws.c3r.exception.C3rIllegalArgumentException; import com.amazonaws.c3r.spark.utils.FileTestUtility; import com.univocity.parsers.csv.CsvParser; import com.univocity.parsers.csv.CsvParserSettings; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.stream.Collectors; /** * Utility functions for common CSV data manipulation needed during testing. */ public final class CsvTestUtility { /** * Hidden utility class constructor. */ private CsvTestUtility() { } /** * Creates a simple CSV parser for the specified columns that will read out {@code maxColumns}. * * @param fileName Location of the file to read * @param maxColumns Maximum number of columns expected from file * @return Parser for getting file contents * @throws RuntimeException If the CSV file is not found */ public static CsvParser getCsvParser(final String fileName, final Integer maxColumns) { try { final CsvParserSettings settings = getBasicParserSettings(maxColumns, false); // creates a CSV parser final CsvParser parser = new CsvParser(settings); final InputStreamReader reader = new InputStreamReader(new FileInputStream(fileName), StandardCharsets.UTF_8); parser.beginParsing(reader); return parser; } catch (FileNotFoundException e) { throw new RuntimeException(e); } } /** * Create basic parser settings that don't modify/NULL any values * aside from the default whitespace trimming. 
* * @param maxColumns Most columns allowed in the CSV file * @param keepQuotes If quotes should be kept as part of the string read in or not * @return Settings to bring up a simple CSV parser */ private static CsvParserSettings getBasicParserSettings(final Integer maxColumns, final boolean keepQuotes) { final CsvParserSettings settings = new CsvParserSettings(); settings.setLineSeparatorDetectionEnabled(true); settings.setNullValue(""); settings.setEmptyValue("\"\""); settings.setKeepQuotes(keepQuotes); if (maxColumns != null) { settings.setMaxColumns(maxColumns); } return settings; } /** * Read the contents of the CSV file as rows, mapping column names to content. * * <p> * The column names are normalized per the C3R's normalizing (lower-cased and whitespace trimmed). * * @param fileName File to read * @return Rows read in the order they appear * @throws C3rIllegalArgumentException If the file does not have the same number of entries in each row */ public static List<Map<String, String>> readRows(final String fileName) { final CsvParserSettings settings = getBasicParserSettings(null, true); settings.setHeaderExtractionEnabled(true); final CsvParser parser = new CsvParser(settings); return parser.parseAllRecords(new File(fileName)).stream().map(r -> r.toFieldMap()).collect(Collectors.toList()); } /** * Read the file content with rows as arrays. There is no mapping to column headers, if any, in the file. 
* * @param fileName Location of file to read * @param keepQuotes If quotes should be kept as part of the string read in or not * @return List of rows where each row is an array of values * @throws RuntimeException If the file is not found */ public static List<String[]> readContentAsArrays(final String fileName, final boolean keepQuotes) { final CsvParserSettings settings = getBasicParserSettings(null, keepQuotes); return new CsvParser(settings).parseAll(new File(fileName), StandardCharsets.UTF_8); } private static List<Path> getOutputPaths(final Path output) { return Arrays.stream(Objects.requireNonNull(output.toFile().listFiles())) .filter(file -> file.getAbsolutePath().endsWith(".csv")) .map(File::toPath) .collect(Collectors.toList()); } public static Path mergeOutput(final Path output) throws IOException { final Path mergedOutput = FileTestUtility.createTempFile("test", ".csv"); final List<Path> paths = getOutputPaths(output); final List<String> mergedLines = new ArrayList<>(); for (Path p : paths) { final List<String> lines = Files.readAllLines(p, StandardCharsets.UTF_8); if (!lines.isEmpty()) { if (mergedLines.isEmpty()) { mergedLines.add(lines.get(0)); //add header only once } mergedLines.addAll(new ArrayList<>(lines.subList(1, lines.size()))); // Negative limit allows trailing empty space } } Files.write(mergedOutput, mergedLines); return mergedOutput; } }
6,277
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/schema/ParquetSchemaGeneratorTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.io.schema; import com.amazonaws.c3r.data.ClientDataType; import com.amazonaws.c3r.spark.utils.FileTestUtility; import com.amazonaws.c3r.spark.utils.GeneralTestUtility; import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility; import org.apache.spark.SparkException; import org.apache.spark.sql.SparkSession; import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.Collections; import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; public class ParquetSchemaGeneratorTest { private final SparkSession sparkSession = SparkSessionTestUtility.initSparkSession(); private ParquetSchemaGenerator getTestSchemaGenerator(final String file) throws IOException { final String output = FileTestUtility.resolve("schema.json").toString(); return ParquetSchemaGenerator.builder() .inputParquetFile(file) .targetJsonFile(output) .overwrite(true) .sparkSession(sparkSession) .build(); } @Test public void getSourceHeadersTest() throws IOException { assertEquals( GeneralTestUtility.DATA_SAMPLE_HEADERS, getTestSchemaGenerator("../samples/parquet/data_sample.parquet").getSourceHeaders()); } @Test public void getSourceColumnCountTest() throws IOException { assertEquals( Collections.nCopies(GeneralTestUtility.DATA_SAMPLE_HEADERS.size(), ClientDataType.STRING), getTestSchemaGenerator("../samples/parquet/data_sample.parquet").getSourceColumnTypes()); } @Test public void getSourceColumnTypesTest() throws IOException { assertEquals( List.of(ClientDataType.UNKNOWN, ClientDataType.STRING, ClientDataType.UNKNOWN, ClientDataType.UNKNOWN, ClientDataType.UNKNOWN, ClientDataType.UNKNOWN, ClientDataType.UNKNOWN, ClientDataType.UNKNOWN, ClientDataType.UNKNOWN), 
getTestSchemaGenerator("../samples/parquet/rows_100_groups_10_prim_data.parquet").getSourceColumnTypes()); } @Test public void emptyFileTest() throws IOException { final String emptyParquetFile = FileTestUtility.createTempFile("empty", ".parquet").toString(); assertThrows(SparkException.class, () -> getTestSchemaGenerator(emptyParquetFile)); } }
6,278
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/schema/InteractiveSchemaGeneratorTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.io.schema; import com.amazonaws.c3r.config.ClientSettings; import com.amazonaws.c3r.config.ColumnHeader; import com.amazonaws.c3r.config.ColumnSchema; import com.amazonaws.c3r.config.ColumnType; import com.amazonaws.c3r.config.Pad; import com.amazonaws.c3r.config.PadType; import com.amazonaws.c3r.config.TableSchema; import com.amazonaws.c3r.data.ClientDataType; import com.amazonaws.c3r.exception.C3rIllegalArgumentException; import com.amazonaws.c3r.exception.C3rRuntimeException; import com.amazonaws.c3r.internal.Limits; import com.amazonaws.c3r.json.GsonUtil; import com.amazonaws.c3r.spark.utils.FileTestUtility; import com.amazonaws.c3r.utils.FileUtil; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.io.BufferedReader; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; import java.io.StringReader; import java.nio.charset.StandardCharsets; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.Stream; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; public class InteractiveSchemaGeneratorTest { private final List<ColumnHeader> headers = Stream.of( "header1", "header2", "header3" ).map(ColumnHeader::new) .collect(Collectors.toList()); private final List<ClientDataType> stringColumnTypes = 
Collections.nCopies(headers.size(), ClientDataType.STRING);

    // One UNKNOWN client data type per source header; UNKNOWN columns may only be cleartext.
    private final List<ClientDataType> unknownColumnTypes =
            Collections.nCopies(headers.size(), ClientDataType.UNKNOWN);

    // Expected schema when source headers are present and all column types are allowed.
    private final String exampleMappedSchemaString = String.join("\n",
            "{",
            " \"headerRow\": true,",
            " \"columns\": [",
            " {",
            " \"sourceHeader\": \"header2\",",
            " \"targetHeader\": \"targetheader2_sealed\",",
            " \"type\": \"sealed\",",
            " \"pad\": {",
            " \"type\": \"NONE\"",
            " }",
            " },",
            " {",
            " \"sourceHeader\": \"header2\",",
            " \"targetHeader\": \"targetheader2_fingerprint\",",
            " \"type\": \"fingerprint\"",
            " },",
            " {",
            " \"sourceHeader\": \"header2\",",
            " \"targetHeader\": \"targetheader2\",",
            " \"type\": \"cleartext\"",
            " },",
            " {",
            " \"sourceHeader\": \"header3\",",
            " \"targetHeader\": \"header3\",",
            " \"type\": \"sealed\",",
            " \"pad\": {",
            " \"type\": \"MAX\",",
            " \"length\": \"0\"",
            " }",
            " }",
            " ]",
            "}");

    // Expected schema when restrictive settings forbid cleartext columns.
    private final String exampleMappedSchemaNoCleartextString = String.join("\n",
            "{",
            " \"headerRow\": true,",
            " \"columns\": [",
            " {",
            " \"sourceHeader\": \"header2\",",
            " \"targetHeader\": \"targetheader2_sealed\",",
            " \"type\": \"SEALED\",",
            " \"pad\": {",
            " \"type\": \"NONE\"",
            " }",
            " },",
            " {",
            " \"sourceHeader\": \"header2\",",
            " \"targetHeader\": \"targetheader2_fingerprint\",",
            " \"type\": \"FINGERPRINT\"",
            " },",
            " {",
            " \"sourceHeader\": \"header2\",",
            " \"targetHeader\": \"targetheader2\",",
            " \"type\": \"FINGERPRINT\"",
            " },",
            " {",
            " \"sourceHeader\": \"header3\",",
            " \"targetHeader\": \"header3\",",
            " \"type\": \"SEALED\",",
            " \"pad\": {",
            " \"type\": \"MAX\",",
            " \"length\": \"0\"",
            " }",
            " }",
            " ]",
            "}");

    // Expected schema when the source has no header row (positional columns).
    private final String examplePositionalSchemaString = String.join("\n",
            "{",
            " \"headerRow\": false,",
            " \"columns\": [",
            " [],",
            " [",
            " {",
            " \"type\": \"sealed\",",
            " \"pad\": {",
            " \"type\": \"NONE\"",
            " },",
            " \"targetHeader\": \"targetheader2_sealed\"",
            " },",
            " {",
            " \"type\": \"fingerprint\",",
            " \"targetHeader\": \"targetheader2_fingerprint\"",
            " },",
            " {",
            " \"type\": \"cleartext\",",
            " \"targetHeader\": \"targetheader2\"",
            " }",
            " ],",
            " [",
            " {",
            " \"type\": \"sealed\",",
            " \"pad\": {",
            " \"type\": \"MAX\",",
            " \"length\": 0",
            " },",
            " \"targetHeader\": \"targetheader3\"",
            " }",
            " ]",
            " ]",
            "}");

    // Expected schema when every column falls back to cleartext (UNKNOWN source types, with headers).
    private final String exampleMappedSchemaAllCleartextString = String.join("\n",
            "{",
            " \"headerRow\": true,",
            " \"columns\": [",
            " {",
            " \"sourceHeader\": \"header2\",",
            " \"targetHeader\": \"targetheader2_1\",",
            " \"type\": \"cleartext\"",
            " },",
            " {",
            " \"sourceHeader\": \"header2\",",
            " \"targetHeader\": \"targetheader2_2\",",
            " \"type\": \"cleartext\"",
            " },",
            " {",
            " \"sourceHeader\": \"header2\",",
            " \"targetHeader\": \"targetheader2_3\",",
            " \"type\": \"cleartext\"",
            " },",
            " {",
            " \"sourceHeader\": \"header3\",",
            " \"targetHeader\": \"header3\",",
            " \"type\": \"cleartext\"",
            " }",
            " ]",
            "}");

    // Expected schema when every column falls back to cleartext (UNKNOWN source types, positional).
    private final String examplePositionalSchemaAllCleartextString = String.join("\n",
            "{",
            " \"headerRow\": false,",
            " \"columns\": [",
            " [],",
            " [",
            " {",
            " \"type\": \"cleartext\",",
            " \"targetHeader\": \"targetheader2_1\"",
            " },",
            " {",
            " \"type\": \"cleartext\",",
            " \"targetHeader\": \"targetheader2_2\"",
            " },",
            " {",
            " \"type\": \"cleartext\",",
            " \"targetHeader\": \"targetheader2_3\"",
            " }",
            " ],",
            " [",
            " {",
            " \"type\": \"cleartext\",",
            " \"targetHeader\": \"targetheader3\"",
            " }",
            " ]",
            " ]",
            "}");

    // Generator under test, rebuilt per scenario by createInteractiveSchemaGenerator.
    private InteractiveSchemaGenerator schemaGen;

    // Scratch path the generated schema JSON is written to.
    private Path targetSchema;

    // Captures console output so tests can assert on prompt/error text.
    private ByteArrayOutputStream consoleOutput;

    @BeforeEach
    public void setup() throws IOException {
        targetSchema = FileTestUtility.resolve("schema.json");
    }

    // Set up the interactive generator with the given simulated console input,
    // source headers/types, and (possibly null) client settings.
    private void createInteractiveSchemaGenerator(final String simulatedUserInput,
            final List<ColumnHeader> headers,
            final List<ClientDataType> types,
            final ClientSettings clientSettings) {
        // Trailing newline guarantees the last simulated entry is terminated.
        final var userInput = new BufferedReader(new StringReader(simulatedUserInput + "\n"));
        consoleOutput = new ByteArrayOutputStream();
        schemaGen = InteractiveSchemaGenerator.builder()
                .sourceHeaders(headers)
                .sourceColumnTypes(types)
                .targetJsonFile(targetSchema.toString())
                .consoleInput(userInput)
                .consoleOutput(new PrintStream(consoleOutput, true, StandardCharsets.UTF_8))
                .clientSettings(clientSettings)
                .build();
    }

    @Test
    public void validateErrorWithMismatchedColumnCounts() {
        assertThrows(C3rIllegalArgumentException.class,
                () -> createInteractiveSchemaGenerator("", headers, List.of(), null));
    }

    @Test
    public void validateUnexpectedUserInputEndError() {
        // Inputs that end before the generator has gathered a count for every source column.
        final List<String> incompleteUserInputs = List.of("", "0", "0\n", "0\n0", "0\n0\n");
        final Consumer<String> schemaGenRunner = (userInput) -> InteractiveSchemaGenerator.builder()
                .sourceHeaders(headers)
                .sourceColumnTypes(stringColumnTypes)
                .targetJsonFile(targetSchema.toString())
                .consoleInput(new BufferedReader(new StringReader(userInput)))
                .consoleOutput(new PrintStream(new ByteArrayOutputStream(), true, StandardCharsets.UTF_8))
                .clientSettings(null)
                .build()
                .run();
        for (var input : incompleteUserInputs) {
            assertThrows(C3rRuntimeException.class, () -> schemaGenRunner.accept(input));
        }
        assertDoesNotThrow(() -> schemaGenRunner.accept("0\n0\n0"));
    }

    @Test
    public void promptNonnegativeIntValidTest() {
        final List<String> validInputs = List.of("42", "0", "100");
        for (var input : validInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
            assertEquals(
                    Integer.valueOf(input),
                    schemaGen.promptNonNegativeInt("", null, 100));
            assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        }
    }

    @Test
    public void promptNonnegativeIntInvalidTest() {
        // Out-of-range and non-numeric entries are all rejected.
        final List<String> validInputs = List.of("", "NotANumber", "-1", "101");
        for (var input : validInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
            assertNull(schemaGen.promptNonNegativeInt("", null, 100));
            assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        }
    }

    @Test
    public void promptNonNegativeIntValidDefaultTest() {
        final List<String> validInputs = List.of("1", "", "3");
        for (var input : validInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
            // Blank input falls back to the supplied default of 2.
            assertEquals(
                    input.isBlank() ? 2 : Integer.parseInt(input),
                    schemaGen.promptNonNegativeInt("", 2, 100));
            assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        }
    }

    @Test
    public void promptYesOrNoValidTest() {
        final List<Boolean> defaultBooleanAnswers = Arrays.asList(null, true, false);
        final List<String> validYesStrings = List.of("y", "yes", "Y", "YES");
        for (var input : validYesStrings) {
            for (var answer : defaultBooleanAnswers) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
                assertTrue(schemaGen.promptYesOrNo("", answer));
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
        }
        final List<String> validNoStrings = List.of("n", "no", "N", "NO");
        for (var input : validNoStrings) {
            for (var answer : defaultBooleanAnswers) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
                assertFalse(schemaGen.promptYesOrNo("", answer));
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
        }
        // Blank input returns the default answer; with no default it is an error.
        for (var answer : defaultBooleanAnswers) {
            createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
            assertEquals(answer, schemaGen.promptYesOrNo("", answer));
            if (answer == null) {
                assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
            } else {
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
        }
    }

    @Test
    public void promptYesOrNoInvalidTest() {
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptYesOrNo("", null));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("ja", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptYesOrNo("", null));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("nein", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptYesOrNo("", null));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
    }

    @Test
    public void promptColumnTypeValidTest() {
        final List<String> validCleartextInputs = List.of("c", "C", "cleartext", "CLEARTEXT");
        final List<ClientSettings> permissiveSettings = new ArrayList<>();
        permissiveSettings.add(null);
        permissiveSettings.add(ClientSettings.lowAssuranceMode());
        for (var settings : permissiveSettings) {
            for (var input : validCleartextInputs) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, settings);
                assertEquals(ColumnType.CLEARTEXT, schemaGen.promptColumnType());
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
            final List<String> validFingerprintInputs = List.of("f", "F", "fingerprint", "FINGERPRINT");
            for (var input : validFingerprintInputs) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, settings);
                assertEquals(ColumnType.FINGERPRINT, schemaGen.promptColumnType());
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
            final List<String> validSealedInputs = List.of("s", "S", "sealed", "SEALED");
            for (var input : validSealedInputs) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, settings);
                assertEquals(ColumnType.SEALED, schemaGen.promptColumnType());
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
        }
    }

    @Test
    public void promptColumnTypeRestrictiveSettingsTest() {
        // Under high-assurance settings, cleartext is rejected at the prompt.
        final List<String> validCleartextInputs = List.of("c", "C", "cleartext", "CLEARTEXT");
        for (var input : validCleartextInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, ClientSettings.highAssuranceMode());
            assertNull(schemaGen.promptColumnType());
            assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        }
        final List<String> validFingerprintInputs = List.of("f", "F", "fingerprint", "FINGERPRINT");
        for (var input : validFingerprintInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, ClientSettings.highAssuranceMode());
            assertEquals(ColumnType.FINGERPRINT, schemaGen.promptColumnType());
            assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        }
        final List<String> validSealedInputs = List.of("s", "S", "sealed", "SEALED");
        for (var input : validSealedInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, ClientSettings.highAssuranceMode());
            assertEquals(ColumnType.SEALED, schemaGen.promptColumnType());
            assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        }
    }

    @Test
    public void promptColumnTypeInvalidTest() {
        final List<String> validCleartextInputs = List.of("", "a", "unrostricted", "solekt", "joyn");
        for (var input : validCleartextInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
            assertNull(schemaGen.promptColumnType());
            assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        }
    }

    @Test
    public void promptTargetHeaderSuffixTest() {
        // Cleartext columns never get a suffix prompt.
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeaderSuffix(ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("y", headers, stringColumnTypes, null);
        assertEquals("_sealed", schemaGen.promptTargetHeaderSuffix(ColumnType.SEALED));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("n", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeaderSuffix(ColumnType.SEALED));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        // Blank input defaults to "yes" for fingerprint columns.
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertEquals("_fingerprint", schemaGen.promptTargetHeaderSuffix(ColumnType.FINGERPRINT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("n", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeaderSuffix(ColumnType.FINGERPRINT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }

    @Test
    public void promptTargetHeaderTest() {
        // Blank input keeps the source header as the target.
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("a"),
                schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("b", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("b"),
                schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        assertFalse(consoleOutput.toString().toLowerCase().contains("normalized"));
        // Upper-case input is normalized and the user is told so.
        createInteractiveSchemaGenerator("B", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("b"),
                schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        assertTrue(consoleOutput.toString().toLowerCase().contains("normalized"));
        // Over-long header names are rejected.
        createInteractiveSchemaGenerator("b".repeat(Limits.GLUE_MAX_HEADER_UTF8_BYTE_LENGTH) + 1,
                headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.CLEARTEXT));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
    }

    @Test
    public void promptTargetHeaderWithoutSourceHeadersTest() {
        // empty input does _not_ give you a default target header when no source headers exist
        createInteractiveSchemaGenerator("", null, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeader(null, ColumnType.CLEARTEXT));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        // providing input for a target header when source headers are null remains unchanged
        createInteractiveSchemaGenerator("b", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("b"), schemaGen.promptTargetHeader(null, ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("B", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("b"), schemaGen.promptTargetHeader(null, ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        assertTrue(consoleOutput.toString().toLowerCase().contains("normalized"));
    }

    @Test
    public void promptTargetHeaderAlreadyUsedHeaderTest() {
        createInteractiveSchemaGenerator("\n", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("header"),
                schemaGen.promptTargetHeader(new ColumnHeader("header"), ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        // Re-using the same target header a second time is rejected.
        assertNull(schemaGen.promptTargetHeader(new ColumnHeader("header"), ColumnType.CLEARTEXT));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
    }

    @Test
    public void promptTargetHeaderWithSuffixTest() {
        final String suffix = ColumnHeader.DEFAULT_FINGERPRINT_SUFFIX;
        createInteractiveSchemaGenerator("\n", headers, stringColumnTypes, null);
        assertEquals(
                new ColumnHeader("a_fingerprint"),
                schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.FINGERPRINT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        // A header whose length leaves exactly enough room for the suffix is accepted.
        createInteractiveSchemaGenerator(
                "b".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH - suffix.length()) + "\n",
                headers, stringColumnTypes, null);
        assertEquals(
                new ColumnHeader(
                        "b".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH - suffix.length()) + suffix),
                schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.FINGERPRINT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }

    @Test
    public void promptTargetHeaderCannotAddSuffixTest() {
        // A maximum-length header leaves no room for the fingerprint suffix.
        createInteractiveSchemaGenerator("a".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH) + "\n",
                headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.FINGERPRINT));
        assertTrue(consoleOutput.toString().toLowerCase().contains("unable to add header suffix"));
    }

    @Test
    public void promptPadTypeTest() {
        final var header = new ColumnHeader("a");
        final PadType nullDefaultType = null;
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptPadType(header, nullDefaultType));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        // Blank input returns the supplied default pad type.
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertEquals(PadType.MAX, schemaGen.promptPadType(header, PadType.MAX));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("n", headers, stringColumnTypes, null);
        assertEquals(PadType.NONE, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("none", headers, stringColumnTypes, null);
        assertEquals(PadType.NONE, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("f", headers, stringColumnTypes, null);
        assertEquals(PadType.FIXED, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("fixed", headers, stringColumnTypes, null);
        assertEquals(PadType.FIXED, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("m", headers, stringColumnTypes, null);
        assertEquals(PadType.MAX, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("max", headers, stringColumnTypes, null);
        assertEquals(PadType.MAX, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("unknown", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptPadType(header, nullDefaultType));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
    }

    @Test
    public void promptPadTest() {
        final var header = new ColumnHeader("a");
        createInteractiveSchemaGenerator("n", headers, stringColumnTypes, null);
        assertEquals(
                Pad.DEFAULT,
                schemaGen.promptPad(header));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        // FIXED and MAX pads also prompt for a length.
        createInteractiveSchemaGenerator("f\n42", headers, stringColumnTypes, null);
        assertEquals(
                Pad.builder().type(PadType.FIXED).length(42).build(),
                schemaGen.promptPad(header));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("m\n42", headers, stringColumnTypes, null);
        assertEquals(
                Pad.builder().type(PadType.MAX).length(42).build(),
                schemaGen.promptPad(header));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }

    @Test
    public void promptColumnInfoWithSourceHeadersTest() {
        final String columnType = "sealed";
        final String targetName = "target";
        final String useSuffix = "no";
        final String paddingType = "none";
        createInteractiveSchemaGenerator(String.join("\n", columnType, targetName, useSuffix, paddingType),
                headers, stringColumnTypes, null);
        assertEquals(
                ColumnSchema.builder()
                        .sourceHeader(new ColumnHeader("source"))
                        .targetHeader(new ColumnHeader("target"))
                        .type(ColumnType.SEALED)
                        .pad(Pad.DEFAULT)
                        .build(),
                schemaGen.promptColumnInfo(new ColumnHeader("source"), 1, 2));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }

    @Test
    public void promptColumnInfoWithSourceHeadersAndUnknownTypeTest() {
        // UNKNOWN source type forces cleartext; only a target header is prompted for.
        createInteractiveSchemaGenerator("target", headers, unknownColumnTypes, null);
        assertEquals(
                ColumnSchema.builder()
                        .sourceHeader(new ColumnHeader("source"))
                        .targetHeader(new ColumnHeader("target"))
                        .type(ColumnType.CLEARTEXT)
                        .build(),
                schemaGen.promptColumnInfo(new ColumnHeader("source"), 1, 2));
        assertTrue(consoleOutput.toString().toLowerCase().contains("cryptographic computing is not supported"));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }

    @Test
    public void promptColumnInfoWithoutSourceHeadersTest() {
        createInteractiveSchemaGenerator("", null, stringColumnTypes, null);
        final String columnType = "sealed";
        final String targetName = "target";
        final String useSuffix = "no";
        final String paddingType = "none";
        createInteractiveSchemaGenerator(String.join("\n", columnType, targetName, useSuffix, paddingType),
                headers, stringColumnTypes, null);
        assertEquals(
                ColumnSchema.builder()
                        .sourceHeader(null)
                        .targetHeader(new ColumnHeader("target"))
                        .type(ColumnType.SEALED)
                        .pad(Pad.builder().type(PadType.NONE).build())
                        .build(),
                schemaGen.promptColumnInfo(null, 1, 2));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }

    @Test
    public void promptColumnInfoWithoutSourceHeadersAndUnknownTypeTest() {
        createInteractiveSchemaGenerator("target", null, unknownColumnTypes, null);
        assertEquals(
                ColumnSchema.builder()
                        .targetHeader(new ColumnHeader("target"))
                        .type(ColumnType.CLEARTEXT)
                        .build(),
                schemaGen.promptColumnInfo(null, 1, 2));
        assertTrue(consoleOutput.toString().toLowerCase().contains("cryptographic computing is not supported"));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }

    @Test
    public void runGenerateNoSchemaTest() {
        // 0 target columns to generate for each source column
        createInteractiveSchemaGenerator("0\n".repeat(headers.size()), headers, stringColumnTypes, null);
        schemaGen.run();
        assertTrue(consoleOutput.toString().contains("No target columns were specified."));
        assertEquals(0, targetSchema.toFile().length());
    }

    @Test
    public void runGenerateSchemaWithSourceHeadersTest() {
        final String userInput = String.join("\n",
                // source header1
                "0", // number of columns for header1
                // source header2
                "3", // number of columns for header2
                // header2, column 1
                "sealed", // header2, column 1 type
                "targetHeader2", // header2, column 1 target header
                "yes", // header2, column 1 use suffix
                "none", // header2, column 1 padding type
                // header2, column 2
                "fingerprint", // header2, column 2 type
                "targetHeader2", // header2, column 2 target header
                "yes", // header2, column 2 use suffix
                // header2, column 3
                "cleartext", // header2, column 3 type
                "targetHeader2", // header2, column 3 target header
                // source header3
                "", // number of columns for header3 (default to 1)
                "sealed",
                "", // header3, column 1 target header (default)
                "n", // header3, column 1 use suffix
                "max", // header3, column 1 padding type
                "" // header3, column 1 padding length (default 0)
        );
        createInteractiveSchemaGenerator(userInput, headers, stringColumnTypes, null);
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }

    @Test
    public void runGenerateSchemaWithSourceHeadersUnknownTypesTest() {
        final String userInput = String.join("\n",
                // source header1
                "0", // number of columns for header1
                // source header2
                "3", // number of columns for header2
                // header2, column 1
                // type is cleartext due to unknown client type
                "targetHeader2_1", // header2, column 1 target header
                // header2, column 2
                // type is cleartext due to unknown client type
                "targetHeader2_2", // header2, column 2 target header
                // header2, column 3
                // type is cleartext due to unknown client type
                "targetHeader2_3", // header2, column 2 target header
                // source header3
                "", // number of columns for header3 (default to 1)
                // type is cleartext due to unknown client type
                "" // header3, column 1 target header (default)
        );
        createInteractiveSchemaGenerator(userInput, headers, unknownColumnTypes, null);
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaAllCleartextString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }

    @Test
    public void runGenerateSchemaWithoutSourceHeadersTest() {
        final String userInput = String.join("\n",
                // source header1
                "0", // number of columns for header1
                // source header2
                "3", // number of columns for header2
                // header2, column 1
                "sealed", // header2, column 1 type
                "targetHeader2", // header2, column 1 target header
                "yes", // header2, column 1 use suffix
                "none", // header2, column 1 padding type
                // header2, column 2
                "fingerprint", // header2, column 2 type
                "targetHeader2", // header2, column 2 target header
                "yes", // header2, column 2 use suffix
                // header2, column 3
                "cleartext", // header2, column 3 type
                "targetHeader2", // header2, column 3 target header
                // source header3
                "", // number of columns for header3 (default to 1)
                "sealed",
                "targetHeader3", // header3, column 1 target header (default)
                "n", // header3, column 1 use suffix
                "max", // header3, column 1 padding type
                "" // header3, column 1 padding length (default 0)
        );
        createInteractiveSchemaGenerator(userInput, null, stringColumnTypes, null);
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final var expectedSchema = GsonUtil.fromJson(examplePositionalSchemaString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }

    @Test
    public void runGenerateSchemaWithoutSourceHeadersUnknownTypesTest() {
        final String userInput = String.join("\n",
                // source header1
                "0", // number of columns for header1
                // source header2
                "3", // number of columns for header2
                // header2, column 1
                // type is cleartext due to unknown client type
                "targetHeader2_1", // header2, column 1 target header
                // header2, column 2
                // type is cleartext due to unknown client type
                "targetHeader2_2", // header2, column 2 target header
                // header2, column 3
                // type is cleartext due to unknown client type
                "targetHeader2_3", // header2, column 3 target header
                // source header3
                "", // number of columns for header3 (default to 1)
                // type is cleartext due to unknown client type
                "targetHeader3" // header3, column 1 target header (default)
        );
        createInteractiveSchemaGenerator(userInput, null, unknownColumnTypes, null);
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final var expectedSchema = GsonUtil.fromJson(examplePositionalSchemaAllCleartextString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }

    @Test
    public void runTestWithBadInputsMixedIn() {
        // Each bad entry is followed by the valid entry the generator should eventually accept.
        final String userInput = String.join("\n",
                // source header1
                "zero", // bad number of columns for header1
                "0", // number of columns for header1
                // source header2
                "three", // bad number of columns
                "3", // number of columns
                // header 2, column 1
                "special", // bad column type
                "sealed", // header 2, column 1 type
                "long_name".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH), // header 2, column 1 bad target header
                "targetHeader2", // header 2, column 1 target header
                "maybe", // header 2, column 1 bad use suffix
                "yes", // header 2, column 1 use suffix
                "super", // header 2, column 1 bad padding type
                "none", // header 2, column 1 padding type
                // header 2, column 2
                "goin", // header 2, column 2 bad type
                "fingerprint", // header 2, column 2 type
                "long_name".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH), // header 2, column 2 bad target header
                "targetHeader2", // header 2, column 2 target header
                "I can't decide", // header 2, column 2 bad use suffix
                "yes", // header 2, column 2 use suffix
                // header 2, column 3
                "plaintext", // header 2, column 3 bad type
                "cleartext", // header 2, column 3 type
                "long_name".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH), // header 2, column 3 bad target header
                "targetHeader2", // header 2, column 3 target header
                // source header3
                "one", // bad number of columns for header3
                "", // number of columns for header3 (default to 1)
                "sealed",
                "", // header3, column 1 target header (default)
                "what", // bad header3, column 1 use suffix
                "n", // header3, column 1 use suffix
                "mux", // bad header3, column 1 padding type
                "max", // header3, column 1 padding type
                "zero", // header3, column 1 padding length (default 0)
                "" // header3, column 1 padding length (default 0)
        );
        createInteractiveSchemaGenerator(userInput, headers, stringColumnTypes, null);
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final TableSchema expectedSchema = GsonUtil.fromJson(exampleMappedSchemaString, TableSchema.class);
        final TableSchema actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson(actualSchema));
    }

    @Test
    public void nullValueCsvSchemaGeneratorTest() {
        // no headers
        assertThrows(NullPointerException.class, () -> CsvSchemaGenerator.builder()
                .inputCsvFile("../samples/csv/data_sample_without_quotes.csv")
                .targetJsonFile(targetSchema.toString())
                .overwrite(true).build());
        // no target
        assertThrows(NullPointerException.class, () -> CsvSchemaGenerator.builder()
                .inputCsvFile("../samples/csv/data_sample_without_quotes.csv")
                .overwrite(true)
                .hasHeaders(true).build());
        // no input
        assertThrows(NullPointerException.class, () -> CsvSchemaGenerator.builder()
                .targetJsonFile(targetSchema.toString())
                .overwrite(true)
                .hasHeaders(true).build());
        // no overwrite
        assertThrows(NullPointerException.class, () -> CsvSchemaGenerator.builder()
                .inputCsvFile("../samples/csv/data_sample_without_quotes.csv")
                .targetJsonFile(targetSchema.toString())
                .hasHeaders(true).build());
    }

    @Test
    public void runGenerateSchemaWithSourceHeadersPermissiveSettingsTest() {
        final String userInput = String.join("\n",
                // source header1
                "0", // number of columns for header1
                // source header2
                "3", // number of columns for header2
                // header2, column 1
                "sealed", // header2, column 1 type
                "targetHeader2", // header2, column 1 target header
                "yes", // header2, column 1 use suffix
                "none", // header2, column 1 padding type
                // header2, column 2
                "fingerprint", // header2, column 2 type
                "targetHeader2", // header2, column 2 target header
                "yes", // header2, column 2 use suffix
                // header2, column 3
                "cleartext", // header2, column 3 type
                "targetHeader2", // header2, column 3 target header
                // source header3
                "", // number of columns for header3 (default to 1)
                "sealed",
                "", // header3, column 1 target header (default)
                "n", // header3, column 1 use suffix
                "max", // header3, column 1 padding type
                "" // header3, column 1 padding length (default 0)
        );
        createInteractiveSchemaGenerator(userInput, headers, stringColumnTypes, ClientSettings.lowAssuranceMode());
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }

    @Test
    public void runGenerateSchemaWithSourceHeadersRestrictiveSettingsTest() {
        final String userInput = String.join("\n",
                // source header1
                "0", // number of columns for header1
                // source header2
                "3", // number of columns for header2
                // header2, column 1
                "sealed", // header2, column 1 type
                "targetHeader2", // header2, column 1 target header
                "yes", // header2, column 1 use suffix
                "none", // header2, column 1 padding type
                // header2, column 2
                "fingerprint", // header2, column 2 type
                "targetHeader2", // header2, column 2 target header
                "yes", // header2, column 2 use suffix
                // header2, column 3
                "cleartext", // header2, column 3 type, NOT ALLOWED
                "fingerprint",
                "targetHeader2", // header2, column 3 target header
                "n", // header2, column 3 use suffix
                // source header3
                "", // number of columns for header3 (default to 1)
                "sealed",
                "", // header3, column 1 target header (default)
                "n", // header3, column 1 use suffix
                "max", // header3, column 1 padding type
                "" // header3, column 1 padding length (default 0)
        );
        createInteractiveSchemaGenerator(userInput, headers, stringColumnTypes, ClientSettings.highAssuranceMode());
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaNoCleartextString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }

    @Test
    public void runGenerateSchemaWithSourceHeadersUnknownTypesPermissiveSettingsTest() {
        final String userInput = String.join("\n",
                // source header1
                "0", // number of columns for header1
                // source header2
                "3", // number of columns for header2
                // header2, column 1
                // type is cleartext due to unknown client type
                "targetHeader2_1", // header2, column 1 target header
                // header2, column 2
                // type is cleartext due to unknown client type
                "targetHeader2_2", // header2, column 2 target header
                // header2, column 3
                // type is cleartext due to unknown client type
                "targetHeader2_3", // header2, column 2 target header
                // source header3
                "", // number of columns for header3 (default to 1)
                // type is cleartext due to unknown client type
                "" // header3, column 1 target header (default)
        );
        createInteractiveSchemaGenerator(userInput, headers, unknownColumnTypes, ClientSettings.lowAssuranceMode());
        schemaGen.run();
        assertNotEquals(0, targetSchema.toFile().length());
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaAllCleartextString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }

    @Test
    public void runGenerateSchemaWithSourceHeadersUnknownTypesRestrictiveSettingsTest() {
        final String userInput = String.join("\n",
                // source header1
                "0", // number of columns for header1
                // source header2
                "3", // number of columns for header2
                // header2, column 1
                // type is cleartext due to unknown client type
                "targetHeader2_1", // header2, column 1 target header
                // header2, column 2
                // type is cleartext due to unknown client type
                "targetHeader2_2", // header2, column 2 target header
                // header2, column 3
                // type is cleartext due to unknown client type
                "targetHeader2_3", // header2, column 2 target header
                // source header3
                "", // number of columns for header3 (default to 1)
                // type is cleartext due to unknown client type
                "" // header3, column 1 target header (default)
        );
        createInteractiveSchemaGenerator(userInput, headers, unknownColumnTypes, ClientSettings.highAssuranceMode());
        schemaGen.run();
        // Restrictive settings forbid cleartext, so UNKNOWN-typed columns leave nothing to output.
        assertTrue(consoleOutput.toString().contains("No source columns could be considered for output"));
        assertEquals(0, targetSchema.toFile().length());
    }
}
6,279
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/schema/CsvSchemaGeneratorTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.io.schema; import com.amazonaws.c3r.exception.C3rRuntimeException; import com.amazonaws.c3r.spark.utils.FileTestUtility; import com.amazonaws.c3r.utils.FileUtil; import org.junit.jupiter.api.Test; import java.io.IOException; import java.nio.file.Path; import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.DATA_SAMPLE_HEADERS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; public class CsvSchemaGeneratorTest { private CsvSchemaGenerator getTestSchemaGenerator(final String file) throws IOException { final String output = FileTestUtility.resolve("schema.json").toString(); return CsvSchemaGenerator.builder() .inputCsvFile(file) .hasHeaders(true) .targetJsonFile(output) .overwrite(true) .build(); } @Test public void getSourceHeadersTest() throws IOException { assertEquals( DATA_SAMPLE_HEADERS, getTestSchemaGenerator(FileUtil.CURRENT_DIR + "/../samples/csv/data_sample_without_quotes.csv").getSourceHeaders()); } @Test public void getSourceColumnCountTest() throws IOException { assertEquals( DATA_SAMPLE_HEADERS.size(), getTestSchemaGenerator(FileUtil.CURRENT_DIR + "/../samples/csv/data_sample_without_quotes.csv").getSourceColumnCount()); } @Test public void emptyFileTest() throws IOException { final Path emptyCsvFile = FileTestUtility.createTempFile("empty", ".csv"); assertThrows(C3rRuntimeException.class, () -> getTestSchemaGenerator(emptyCsvFile.toString())); } }
6,280
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/schema/TemplateSchemaGeneratorTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.io.schema; import com.amazonaws.c3r.config.ClientSettings; import com.amazonaws.c3r.config.ColumnHeader; import com.amazonaws.c3r.data.ClientDataType; import com.amazonaws.c3r.exception.C3rIllegalArgumentException; import com.amazonaws.c3r.spark.utils.FileTestUtility; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; public class TemplateSchemaGeneratorTest { private Path tempSchema; @BeforeEach public void setup() throws IOException { tempSchema = FileTestUtility.resolve("schema.json"); } @Test public void validateErrorWithMismatchedColumnCounts() { assertThrows(C3rIllegalArgumentException.class, () -> TemplateSchemaGenerator.builder() .sourceHeaders(List.of(new ColumnHeader("_c0"))) .sourceColumnTypes(List.of()) .targetJsonFile(tempSchema.toString()) .build()); } @Test public void testTemplateWithSourceHeadersNoSettingsGeneration() throws IOException { final var expectedContent = String.join("\n", "{", " \"headerRow\": true,", " \"columns\": [", " {", " \"sourceHeader\": \"header1\",", " \"targetHeader\": \"header1\",", " \"type\": \"[sealed|fingerprint|cleartext]\",", " \"pad\": {", " \"COMMENT\": \"omit this pad entry unless column type is sealed\",", " \"type\": \"[none|fixed|max]\",", " \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"", " }", " },", " {", " \"sourceHeader\": \"header2\",", " \"targetHeader\": \"header2\",", " \"type\": \"cleartext\"", " }", " ]", "}" ); final var headers = List.of( new ColumnHeader("header1"), new ColumnHeader("header2") ); final List<ClientDataType> types 
= List.of(ClientDataType.STRING, ClientDataType.UNKNOWN); final var generator = TemplateSchemaGenerator.builder() .sourceHeaders(headers) .sourceColumnTypes(types) .targetJsonFile(tempSchema.toString()) .build(); generator.run(); final String content = Files.readString(tempSchema, StandardCharsets.UTF_8); assertEquals(expectedContent, content); } @Test public void testTemplateWithoutSourceHeadersNoSettingsGeneration() throws IOException { final String expectedPositionalSchemaOutput = String.join("\n", "{", " \"headerRow\": false,", " \"columns\": [", " [", " {", " \"targetHeader\": \"_c0\",", " \"type\": \"[sealed|fingerprint|cleartext]\",", " \"pad\": {", " \"COMMENT\": \"omit this pad entry unless column type is sealed\",", " \"type\": \"[none|fixed|max]\",", " \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"", " }", " }", " ],", " [", " {", " \"targetHeader\": \"_c1\",", " \"type\": \"cleartext\"", " }", " ]", " ]", "}"); final List<ClientDataType> types = List.of(ClientDataType.STRING, ClientDataType.UNKNOWN); TemplateSchemaGenerator.builder() .sourceHeaders(null) .sourceColumnTypes(types) .targetJsonFile(tempSchema.toString()) .build() .run(); final String content = Files.readString(tempSchema); assertEquals(expectedPositionalSchemaOutput, content); } @Test public void testTemplateWithSourceHeadersPermissiveSettingsGeneration() throws IOException { final var expectedContent = String.join("\n", "{", " \"headerRow\": true,", " \"columns\": [", " {", " \"sourceHeader\": \"header1\",", " \"targetHeader\": \"header1\",", " \"type\": \"[sealed|fingerprint|cleartext]\",", " \"pad\": {", " \"COMMENT\": \"omit this pad entry unless column type is sealed\",", " \"type\": \"[none|fixed|max]\",", " \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"", " }", " },", " {", " \"sourceHeader\": \"header2\",", " \"targetHeader\": \"header2\",", " \"type\": \"cleartext\"", " }", " ]", "}" ); final var 
headers = List.of( new ColumnHeader("header1"), new ColumnHeader("header2") ); final List<ClientDataType> types = List.of(ClientDataType.STRING, ClientDataType.UNKNOWN); final var generator = TemplateSchemaGenerator.builder() .sourceHeaders(headers) .sourceColumnTypes(types) .targetJsonFile(tempSchema.toString()) .clientSettings(ClientSettings.lowAssuranceMode()) .build(); generator.run(); final String content = Files.readString(tempSchema, StandardCharsets.UTF_8); assertEquals(expectedContent, content); } @Test public void testTemplateWithoutSourceHeadersPermissiveSettingsGeneration() throws IOException { final String expectedPositionalSchemaOutput = String.join("\n", "{", " \"headerRow\": false,", " \"columns\": [", " [", " {", " \"targetHeader\": \"_c0\",", " \"type\": \"[sealed|fingerprint|cleartext]\",", " \"pad\": {", " \"COMMENT\": \"omit this pad entry unless column type is sealed\",", " \"type\": \"[none|fixed|max]\",", " \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"", " }", " }", " ],", " [", " {", " \"targetHeader\": \"_c1\",", " \"type\": \"cleartext\"", " }", " ]", " ]", "}"); final List<ClientDataType> types = List.of(ClientDataType.STRING, ClientDataType.UNKNOWN); TemplateSchemaGenerator.builder() .sourceHeaders(null) .sourceColumnTypes(types) .targetJsonFile(tempSchema.toString()) .clientSettings(ClientSettings.lowAssuranceMode()) .build() .run(); final String content = Files.readString(tempSchema); assertEquals(expectedPositionalSchemaOutput, content); } @Test public void testTemplateWithSourceHeadersRestrictiveSettingsGeneration() throws IOException { final var expectedContent = String.join("\n", "{", " \"headerRow\": true,", " \"columns\": [", " {", " \"sourceHeader\": \"header1\",", " \"targetHeader\": \"header1\",", " \"type\": \"[sealed|fingerprint]\",", " \"pad\": {", " \"COMMENT\": \"omit this pad entry unless column type is sealed\",", " \"type\": \"[none|fixed|max]\",", " \"length\": \"omit 
length property for type none, otherwise specify value in [0, 10000]\"", " }", " }", " ]", "}" ); final var headers = List.of( new ColumnHeader("header1"), new ColumnHeader("header2") ); final List<ClientDataType> types = List.of(ClientDataType.STRING, ClientDataType.UNKNOWN); final var generator = TemplateSchemaGenerator.builder() .sourceHeaders(headers) .sourceColumnTypes(types) .targetJsonFile(tempSchema.toString()) .clientSettings(ClientSettings.highAssuranceMode()) .build(); generator.run(); final String content = Files.readString(tempSchema, StandardCharsets.UTF_8); assertEquals(expectedContent, content); } @Test public void testTemplateWithoutSourceHeadersRestrictiveSettingsGeneration() throws IOException { final String expectedPositionalSchemaOutput = String.join("\n", "{", " \"headerRow\": false,", " \"columns\": [", " [", " {", " \"targetHeader\": \"_c0\",", " \"type\": \"[sealed|fingerprint]\",", " \"pad\": {", " \"COMMENT\": \"omit this pad entry unless column type is sealed\",", " \"type\": \"[none|fixed|max]\",", " \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"", " }", " }", " ],", " []", " ]", "}"); final List<ClientDataType> types = List.of(ClientDataType.STRING, ClientDataType.UNKNOWN); TemplateSchemaGenerator.builder() .sourceHeaders(null) .sourceColumnTypes(types) .targetJsonFile(tempSchema.toString()) .clientSettings(ClientSettings.highAssuranceMode()) .build() .run(); final String content = Files.readString(tempSchema); assertEquals(expectedPositionalSchemaOutput, content); } }
6,281
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/csv/SparkCsvWriterTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.io.csv; import com.amazonaws.c3r.config.ColumnHeader; import com.amazonaws.c3r.exception.C3rRuntimeException; import com.amazonaws.c3r.io.CsvRowWriter; import com.amazonaws.c3r.spark.utils.FileTestUtility; import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility; import org.apache.spark.sql.Dataset; import org.apache.spark.sql.Row; import org.apache.spark.sql.SparkSession; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; import java.util.stream.Collectors; import java.util.stream.Stream; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; public class SparkCsvWriterTest { private final List<ColumnHeader> dataSampleHeaders = Stream.of("FirstName", "LastName", "Address", "City", "State", "PhoneNumber", "Title", "Level", "Notes" ) .map(ColumnHeader::new) .collect(Collectors.toList()); private final SparkSession session = SparkSessionTestUtility.initSparkSession(); private Path tempInputFile; private Path tempOutputDir; @BeforeEach public void setup() throws IOException { tempInputFile = FileTestUtility.createTempFile("temp", ".csv"); tempOutputDir = FileTestUtility.createTempDir(); } @Test public void initWriterHeadersTest() { final Map<String, String> properties = new HashMap<>(); final String headers = dataSampleHeaders.stream().map(ColumnHeader::toString).collect(Collectors.joining(",")); properties.put("headers", headers); properties.put("path", tempOutputDir.toString()); final CsvRowWriter writer = SparkCsvWriter.initWriter(0, 
properties); assertEquals(dataSampleHeaders.size(), writer.getHeaders().size()); assertTrue(dataSampleHeaders.containsAll(writer.getHeaders())); } @Test public void initWriterNoPathTest() { final Map<String, String> properties = new HashMap<>(); final String headers = dataSampleHeaders.stream().map(ColumnHeader::toString).collect(Collectors.joining(",")); properties.put("headers", headers); assertThrows(C3rRuntimeException.class, () -> SparkCsvWriter.initWriter(0, properties)); } @Test public void initWriterTargetTest() { final Map<String, String> properties = new HashMap<>(); final String headers = dataSampleHeaders.stream().map(ColumnHeader::toString).collect(Collectors.joining(",")); properties.put("headers", headers); properties.put("path", tempOutputDir.toString()); properties.put("sessionUuid", UUID.randomUUID().toString()); final CsvRowWriter writer = SparkCsvWriter.initWriter(1, properties); String target = writer.getTargetName(); target = target.substring(tempOutputDir.toString().length() + 1); // Remove dir path final String[] split = target.split("-"); assertEquals(7, split.length); // UUID hyphens plus the initial. 
assertEquals("part", split[0]); assertEquals("00001", split[1]); } @Test public void quotedSpaceTest() throws IOException { final String singleRowQuotedSpace = "column\n\" \""; Files.writeString(tempInputFile, singleRowQuotedSpace); final Dataset<Row> originalDataset = SparkCsvReader.readInput(session, tempInputFile.toString(), null, null); SparkCsvWriter.writeOutput(originalDataset, tempOutputDir.toString(), null); final Dataset<Row> writtenDataset = SparkCsvReader.readInput(session, tempOutputDir.toString(), null, null); final List<Row> originalData = originalDataset.collectAsList(); final List<Row> writtenData = writtenDataset.collectAsList(); // ensure data read in doesn't change when written out assertEquals(originalData.get(0).getString(0), writtenData.get(0).get(0)); assertEquals(" ", writtenData.get(0).get(0)); } @Test public void unquotedBlankTest() throws IOException { final String singleRowQuotedSpace = "column, column2\n ,"; Files.writeString(tempInputFile, singleRowQuotedSpace); final Dataset<Row> originalDataset = SparkCsvReader.readInput(session, tempInputFile.toString(), null, null); SparkCsvWriter.writeOutput(originalDataset, tempOutputDir.toString(), null); final Dataset<Row> writtenDataset = SparkCsvReader.readInput(session, tempOutputDir.toString(), null, null); final List<Row> originalData = originalDataset.collectAsList(); final List<Row> writtenData = writtenDataset.collectAsList(); // ensure data read in doesn't change when written out assertNull(originalData.get(0).get(0)); assertNull(originalData.get(0).get(1)); assertNull(writtenData.get(0).get(0)); assertNull(writtenData.get(0).get(1)); } @Test public void customNullTest() throws IOException { final String singleRowQuotedSpace = "column, column2\ncolumn,"; Files.writeString(tempInputFile, singleRowQuotedSpace); final Dataset<Row> originalDataset = SparkCsvReader.readInput(session, tempInputFile.toString(), "column", null); SparkCsvWriter.writeOutput(originalDataset, 
tempOutputDir.toString(), "column"); final Dataset<Row> writtenDataset = SparkCsvReader.readInput(session, tempOutputDir.toString(), null, null); final List<Row> originalData = originalDataset.collectAsList(); final List<Row> writtenData = writtenDataset.collectAsList(); // ensure a column with a header that equals the custom null value is not dropped. assertEquals("column", originalDataset.columns()[0]); assertEquals("column", writtenDataset.columns()[0]); // ensure custom null respected assertNull(originalData.get(0).get(0)); assertEquals("column", writtenData.get(0).get(0)); } }
6,282
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/csv/SparkCsvReaderTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.io.csv; import com.amazonaws.c3r.config.ColumnHeader; import com.amazonaws.c3r.exception.C3rIllegalArgumentException; import com.amazonaws.c3r.exception.C3rRuntimeException; import com.amazonaws.c3r.io.CsvRowReader; import com.amazonaws.c3r.spark.config.SparkConfig; import com.amazonaws.c3r.spark.utils.FileTestUtility; import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility; import org.apache.spark.sql.Dataset; import org.apache.spark.sql.Row; import org.apache.spark.sql.SparkSession; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; import java.util.stream.Stream; import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.DATA_SAMPLE_HEADERS; import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.DATA_SAMPLE_HEADERS_NO_NORMALIZATION; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; public class SparkCsvReaderTest { private final SparkSession session = SparkSessionTestUtility.initSparkSession(); private Path tempFile; @BeforeEach public void setup() throws IOException { tempFile = FileTestUtility.createTempFile(); } @Test public void initReaderHeadersTest() { final Map<String, String> properties = new HashMap<>(); properties.put("path", "../samples/csv/data_sample_with_quotes.csv"); final CsvRowReader reader = SparkCsvReader.initReader(properties); assertEquals( 
DATA_SAMPLE_HEADERS.stream().map(ColumnHeader::toString).sorted().collect(Collectors.toList()), reader.getHeaders().stream().map(ColumnHeader::toString).sorted().collect(Collectors.toList())); } @Test public void initReaderHeadersNoNormalizationTest() { final Map<String, String> properties = new HashMap<>(); properties.put("path", "../samples/csv/data_sample_with_quotes.csv"); properties.put(SparkConfig.PROPERTY_KEY_SKIP_HEADER_NORMALIZATION, "true"); final CsvRowReader reader = SparkCsvReader.initReader(properties); assertEquals( DATA_SAMPLE_HEADERS_NO_NORMALIZATION.stream().map(ColumnHeader::toString).sorted().collect(Collectors.toList()), reader.getHeaders().stream().map(ColumnHeader::toString).sorted().collect(Collectors.toList())); } @Test public void initReaderNoHeadersTest() { final List<ColumnHeader> customDataSampleHeaders = Stream.of("FirstNameCustom", "LastNameCustom", "AddressCustom", "CityCustom", "StateCustom", "PhoneNumberCustom", "TitleCustom", "LevelCustom", "NotesCustom" ) .map(ColumnHeader::new) .collect(Collectors.toList()); final Map<String, String> properties = new HashMap<>(); properties.put("path", "../samples/csv/data_sample_no_headers.csv"); final String customHeader = customDataSampleHeaders.stream().map(ColumnHeader::toString).collect(Collectors.joining(",")); properties.put("headers", customHeader); final CsvRowReader reader = SparkCsvReader.initReader(properties); assertEquals(customDataSampleHeaders.size(), reader.getHeaders().size()); assertTrue(customDataSampleHeaders.containsAll(reader.getHeaders())); } @Test public void initReaderNoPathTest() { final Map<String, String> properties = new HashMap<>(); assertThrows(C3rRuntimeException.class, () -> SparkCsvReader.initReader(properties)); } @Test public void inputDirectoryTest() throws IOException { final Path tempDir = FileTestUtility.createTempDir(); final Path file1 = tempDir.resolve("file1.csv"); Files.writeString(file1, "column,column2\nfoo,bar"); final Path file2 = 
tempDir.resolve("file2.csv"); Files.writeString(file2, "column,column2\nbaz,buzz"); final List<Row> fullDataset = SparkCsvReader.readInput(session, tempDir.toString(), null, null) .collectAsList(); final List<Row> dataset1 = SparkCsvReader.readInput(session, file1.toString(), null, null) .collectAsList(); final List<Row> dataset2 = SparkCsvReader.readInput(session, file2.toString(), null, null) .collectAsList(); assertTrue(fullDataset.containsAll(dataset1)); assertTrue(fullDataset.containsAll(dataset2)); } @Test public void inputNestedDirectoryTest() throws IOException { final Path tempDir = FileTestUtility.createTempDir(); final Path file1 = tempDir.resolve("file1.csv"); Files.writeString(file1, "column,column2\nfoo,bar"); final Path nestedTempDir = tempDir.resolve("nested"); Files.createDirectory(nestedTempDir); final Path file2 = nestedTempDir.resolve("file2.csv"); Files.writeString(file2, "column,column2\nbaz,buzz"); final List<Row> fullDataset = SparkCsvReader.readInput(session, tempDir.toString(), null, null) .collectAsList(); final List<Row> dataset1 = SparkCsvReader.readInput(session, file1.toString(), null, null) .collectAsList(); final List<Row> dataset2 = SparkCsvReader.readInput(session, file2.toString(), null, null) .collectAsList(); assertTrue(fullDataset.containsAll(dataset1)); // recursion currently not supported assertFalse(fullDataset.containsAll(dataset2)); } @Test public void inputDirectoryDuplicatesTest() throws IOException { final Path tempDir = FileTestUtility.createTempDir(); final Path file1 = tempDir.resolve("file1.csv"); final String duplicateFileContents = "column,column2\nfoo,bar"; Files.writeString(file1, duplicateFileContents); final Path file2 = tempDir.resolve("file2.csv"); Files.writeString(file2, duplicateFileContents); final List<Row> fullDataset = SparkCsvReader.readInput(session, tempDir.toString(), null, null) .collectAsList(); final List<Row> dataset1 = SparkCsvReader.readInput(session, file1.toString(), null, null) 
.collectAsList(); final List<Row> dataset2 = SparkCsvReader.readInput(session, file2.toString(), null, null) .collectAsList(); assertTrue(fullDataset.containsAll(dataset1)); assertTrue(fullDataset.containsAll(dataset2)); assertEquals(2, fullDataset.size()); } @Test public void inputDirectoryUnrelatedDatasetsTest() throws IOException { final Path tempDir = FileTestUtility.createTempDir(); final Path file1 = tempDir.resolve("file1.csv"); Files.writeString(file1, "columnFoo,columnBar\nfoo,bar"); final Path file2 = tempDir.resolve("file2.csv"); Files.writeString(file2, "columnBaz,columnBuzz\nbaz,buzz"); assertThrows(C3rRuntimeException.class, () -> SparkCsvReader.readInput(session, tempDir.toString(), null, null) .collectAsList()); } @Test public void quotedSpaceTest() throws IOException { final String singleRowQuotedSpace = "column\n\" \""; Files.writeString(tempFile, singleRowQuotedSpace); final List<Row> dataset = SparkCsvReader.readInput(session, tempFile.toString(), null, null) .collectAsList(); assertEquals(" ", dataset.get(0).getString(0)); } @Test public void unquotedBlankTest() throws IOException { final String singleRowQuotedSpace = "column, column2\n ,"; Files.writeString(tempFile, singleRowQuotedSpace); final List<Row> dataset = SparkCsvReader.readInput(session, tempFile.toString(), null, null) .collectAsList(); assertNull(dataset.get(0).get(0)); assertNull(dataset.get(0).get(1)); } @Test public void customNullTest() throws IOException { final String singleRowQuotedSpace = "column, column2\ncolumn,"; Files.writeString(tempFile, singleRowQuotedSpace); final Dataset<Row> dataset = SparkCsvReader.readInput(session, tempFile.toString(), "column", null); // ensure a column with a header that equals the custom null value is not dropped. 
assertEquals("column", dataset.columns()[0]); final List<Row> data = dataset.collectAsList(); // ensure custom null respected assertNull(data.get(0).get(0)); // ensure empty value respected assertNotNull(data.get(0).get(1)); assertEquals("", data.get(0).getString(1)); } @Test public void maliciousColumnHeaderTest() throws IOException { final String singleRowQuotedSpace = "'; DROP ALL TABLES;"; Files.writeString(tempFile, singleRowQuotedSpace); // Assert a malicious column header can't be read assertThrows(C3rIllegalArgumentException.class, () -> SparkCsvReader.readInput(session, tempFile.toString(), null, null)); } }
6,283
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/parquet/SparkParquetWriterTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.io.parquet; import com.amazonaws.c3r.config.ClientSettings; import com.amazonaws.c3r.config.TableSchema; import com.amazonaws.c3r.encryption.keys.KeyUtil; import com.amazonaws.c3r.json.GsonUtil; import com.amazonaws.c3r.spark.config.SparkEncryptConfig; import com.amazonaws.c3r.spark.utils.FileTestUtility; import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility; import com.amazonaws.c3r.utils.FileUtil; import org.apache.spark.sql.Dataset; import org.apache.spark.sql.Row; import org.apache.spark.sql.SparkSession; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.EXAMPLE_SALT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; public class SparkParquetWriterTest { private static SparkSession session; private static SparkEncryptConfig config; /** * Initial setup done only once because the data is immutable and starting Spark sessions each time is expensive. * * @throws IOException if Schema can't be read. 
*/ @BeforeAll public static void setup() throws IOException { final TableSchema schema = GsonUtil.fromJson(FileUtil.readBytes("../samples/schema/config_sample.json"), TableSchema.class); session = SparkSessionTestUtility.initSparkSession(); config = SparkEncryptConfig.builder() .source("../samples/parquet/data_sample.parquet") .targetDir(FileTestUtility.createTempDir().resolve("output").toString()) .overwrite(true) .secretKey(KeyUtil.sharedSecretKeyFromString(System.getenv(KeyUtil.KEY_ENV_VAR))) .salt(EXAMPLE_SALT.toString()) .tableSchema(schema) .settings(ClientSettings.lowAssuranceMode()) .build(); } @Test public void writeOutputTest() { final Dataset<Row> originalDataset = SparkParquetReader.readInput( session, config.getSourceFile()); final List<String> originalColumns = Arrays.stream(originalDataset.columns()) .map(String::toLowerCase) .sorted() .collect(Collectors.toList()); SparkParquetWriter.writeOutput(originalDataset, config.getTargetFile()); final Dataset<Row> newDataset = SparkParquetReader.readInput( session, config.getTargetFile()); final List<String> newColumns = Arrays.stream(originalDataset.columns()) .map(String::toLowerCase) .sorted() .collect(Collectors.toList()); assertEquals(originalColumns.size(), newColumns.size()); assertTrue(originalColumns.containsAll(newColumns)); // Confirm after writing and reading back that no data was lost final List<Row> originalDatasetRows = originalDataset.collectAsList(); final List<Row> newDatasetRows = newDataset.collectAsList(); assertEquals(originalDatasetRows.size(), newDatasetRows.size()); assertTrue(originalDatasetRows.containsAll(newDatasetRows)); } }
6,284
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/parquet/SparkParquetReaderTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.amazonaws.c3r.spark.io.parquet;

import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ParquetConfig;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Limits;
import com.amazonaws.c3r.json.GsonUtil;
import com.amazonaws.c3r.spark.config.SparkEncryptConfig;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import scala.collection.Iterable;
import scala.collection.immutable.Seq;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.DATA_SAMPLE_HEADERS;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.DATA_SAMPLE_HEADERS_NO_NORMALIZATION;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.EXAMPLE_SALT;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

/**
 * Tests for {@link SparkParquetReader}: header normalization behavior, reading a directory of
 * Parquet files, row/column size-limit validation, and handling of hostile column names.
 */
public class SparkParquetReaderTest {
    // Spark session shared across tests; created once in setup() because startup is expensive.
    private static SparkSession session;

    // Table schema parsed from ../samples/schema/config_sample.json.
    private static TableSchema schema;

    // Encrypt config whose source file is the sample Parquet input used by the read tests.
    private static SparkEncryptConfig config;

    /**
     * Initial setup done only once because the data is immutable and starting Spark sessions each time is expensive.
     *
     * @throws IOException if Schema can't be read.
     */
    @BeforeAll
    public static void setup() throws IOException {
        schema = GsonUtil.fromJson(FileUtil.readBytes("../samples/schema/config_sample.json"), TableSchema.class);
        session = SparkSessionTestUtility.initSparkSession();
        config = SparkEncryptConfig.builder()
                .source("../samples/parquet/data_sample.parquet")
                .targetDir(FileTestUtility.createTempDir().resolve("output").toString())
                .overwrite(true)
                .secretKey(KeyUtil.sharedSecretKeyFromString(System.getenv(KeyUtil.KEY_ENV_VAR)))
                .salt(EXAMPLE_SALT.toString())
                .tableSchema(schema)
                .settings(ClientSettings.lowAssuranceMode())
                .build();
    }

    /**
     * By default headers are normalized, so the columns read back should match the
     * normalized sample headers.
     */
    @Test
    public void readInputColumnsTest() {
        final Dataset<Row> dataset = SparkParquetReader.readInput(session, config.getSourceFile());
        final List<String> columns = Arrays.stream(dataset.columns())
                .sorted()
                .collect(Collectors.toList());
        assertEquals(
                DATA_SAMPLE_HEADERS.stream().map(ColumnHeader::toString).sorted().collect(Collectors.toList()),
                columns);
    }

    /**
     * When header normalization is skipped, the raw (un-normalized) sample headers should
     * come back unchanged.
     */
    @Test
    public void readInputColumnsNoNormalizationTest() {
        final Dataset<Row> dataset = SparkParquetReader.readInput(session,
                config.getSourceFile(),
                /* skipHeaderNormalization */ true,
                ParquetConfig.DEFAULT);
        final List<String> columns = Arrays.stream(dataset.columns())
                .sorted()
                .collect(Collectors.toList());
        assertEquals(
                DATA_SAMPLE_HEADERS_NO_NORMALIZATION.stream().map(ColumnHeader::toString).sorted().collect(Collectors.toList()),
                columns);
    }

    /**
     * Reading a directory (rather than a single file) should pick up the Parquet file(s)
     * inside it and yield the columns declared by the schema.
     *
     * @throws IOException if the sample file cannot be copied into the temp directory
     */
    @Test
    public void readInputDirectoryTest() throws IOException {
        final Path tempDir = FileTestUtility.createTempDir();
        final Path copiedFile = tempDir.resolve("copied.parquet");
        Files.copy(Path.of("../samples/parquet/data_sample.parquet"), copiedFile);
        final Dataset<Row> dataset = SparkParquetReader.readInput(session, tempDir.toString());
        final List<String> columns = Arrays.stream(dataset.columns())
                .map(String::toLowerCase)
                .sorted()
                .collect(Collectors.toList());
        final List<String> expectedColumns = schema.getColumns().stream()
                .map(columnSchema -> columnSchema.getSourceHeader().toString())
                .distinct()
                .sorted()
                .collect(Collectors.toList());
        assertEquals(expectedColumns.size(), columns.size());
        assertTrue(expectedColumns.containsAll(columns));
    }

    /**
     * A dataset exceeding the maximum allowed column count must be rejected by validation.
     */
    @Test
    public void maxColumnCountTest() {
        final Dataset<Row> dataset = mock(Dataset.class);
        when(dataset.columns()).thenReturn(new String[SparkParquetReader.MAX_COLUMN_COUNT + 1]);
        when(dataset.count()).thenReturn(0L); // in range row size
        assertThrows(C3rRuntimeException.class, () -> SparkParquetReader.validate(dataset));
    }

    /**
     * A dataset exceeding the maximum allowed row count must be rejected by validation.
     */
    @Test
    public void maxRowCountTest() {
        final Dataset<Row> dataset = mock(Dataset.class);
        when(dataset.columns()).thenReturn(new String[0]); // in range column size
        when(dataset.count()).thenReturn(Limits.ROW_COUNT_MAX + 1L);
        assertThrows(C3rRuntimeException.class, () -> SparkParquetReader.validate(dataset));
    }

    /**
     * With normalization skipped, a column name containing special characters is read
     * verbatim — later ColumnHeader parsing is what rejects it.
     *
     * @throws IOException if the temp directory cannot be created
     */
    @Test
    public void maliciousColumnHeaderWithoutNormalizationTest() throws IOException {
        final StructField maliciousColumn = DataTypes.createStructField("; DROP ALL TABLES;", DataTypes.StringType, true);
        final StructType maliciousSchema = DataTypes.createStructType(new StructField[]{maliciousColumn});
        final ArrayList<Row> data = new ArrayList<>();
        data.add(Row.fromSeq(Seq.from(Iterable.single("value"))));
        final Dataset<Row> maliciousDataset = session.createDataFrame(data, maliciousSchema);
        final Path tempDir = FileTestUtility.createTempDir();
        SparkParquetWriter.writeOutput(maliciousDataset, tempDir.toString());
        final Dataset<Row> dataset = SparkParquetReader.readInput(session, tempDir.toString(), true, ParquetConfig.DEFAULT);
        /* Assert the malicious header is like any other. While the standard Spark Parquet reader
        will allow special chars, since a ColumnHeader will not, we can assume any fields like this
        will be dropped later before any further parsing. */
        assertEquals(maliciousColumn.name(), dataset.columns()[0]);
        // Assert values still exist
        assertFalse(dataset.isEmpty());
    }

    /**
     * With normalization enabled, a column name containing special characters must be
     * rejected at read time.
     *
     * @throws IOException if the temp directory cannot be created
     */
    @Test
    public void maliciousColumnHeaderWithNormalizationTest() throws IOException {
        final StructField maliciousColumn = DataTypes.createStructField("; DROP ALL TABLES;", DataTypes.StringType, true);
        final StructType maliciousSchema = DataTypes.createStructType(new StructField[]{maliciousColumn});
        final ArrayList<Row> data = new ArrayList<>();
        data.add(Row.fromSeq(Seq.from(Iterable.single("value"))));
        final Dataset<Row> maliciousDataset = session.createDataFrame(data, maliciousSchema);
        final Path tempDir = FileTestUtility.createTempDir();
        SparkParquetWriter.writeOutput(maliciousDataset, tempDir.toString());
        // Assert a malicious column header can't be read
        assertThrows(C3rIllegalArgumentException.class,
                () -> SparkParquetReader.readInput(session, tempDir.toString(), false, ParquetConfig.DEFAULT));
    }
}
6,285
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/DecryptSdkConfigTestUtility.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.utils; import lombok.Builder; import lombok.Getter; import javax.crypto.spec.SecretKeySpec; /** * Basic Decryption settings. */ @Builder @Getter public class DecryptSdkConfigTestUtility { /** * Key to use for decryption. */ @Builder.Default private SecretKeySpec key = null; /** * Salt for key generation. */ @Builder.Default private String salt = null; /** * Input file. */ @Builder.Default private String input = null; /** * Column header names. */ @Builder.Default private String[] columnHeaders = null; }
6,286
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/SparkSessionTestUtility.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.utils; import org.apache.spark.SparkConf; import org.apache.spark.sql.SparkSession; public abstract class SparkSessionTestUtility { /** * Initializes a SparkSession object with the passed Spark Drive URL. * * @return A SparkSession connected to the Spark Driver */ public static SparkSession initSparkSession() { // CHECKSTYLE:OFF final SparkConf conf = new SparkConf() .setAppName("C3R") .setMaster("local[*]"); // CHECKSTYLE:ON return SparkSession .builder() .config(conf) .getOrCreate(); } /** * Shut down the Spark session. * * @param spark the SparkSession to close */ public static void closeSparkSession(final SparkSession spark) { spark.stop(); } }
6,287
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/TimingResultTestUtility.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.utils; import com.amazonaws.c3r.config.ColumnType; import lombok.Builder; /** * Used to store performance testing metrics. */ @Builder public class TimingResultTestUtility { /** * Header names for timing results. */ public static final String[] HEADERS = { "Columns", "Rows", "Marshal Time (s)", "Unmarshal Time (s)", "Input Size (MB)", "Marshalled Size (MB)", "Unmarshalled Size (MB)", "Cleartext Columns", "Sealed Columns", "Fingerprint Columns", "Chars/Entry" }; /** * How many column types we are supporting. */ private static final int NUM_COL_TYPES = ColumnType.values().length; /** * Conversion factor for bytes to megabytes. */ private static final double MB = Math.pow(2, 20); /** * How many characters per entry in the input file. */ private Integer charsPerEntry; /** * Number of columns in the files. */ private Integer columnCount; /** * Number of rows in the files. */ private Long rowCount; /** * Size of original input file. */ private Long inputSizeBytes; /** * Time spent marshalling data. */ private Long marshalTimeSec; /** * Size of marshalled file. */ private Long marshalledSizeBytes; /** * Time spent unmarshalling data. */ private Long unmarshalTimeSec; /** * Size of the unmarshalled file. */ private Long unmarshalledSizeBytes; }
6,288
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/TableGeneratorTestUtility.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.utils; import com.amazonaws.c3r.config.ColumnType; import com.google.gson.JsonArray; import com.google.gson.JsonObject; import com.google.gson.JsonPrimitive; import lombok.Builder; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; import java.util.Random; import java.util.stream.Collectors; import java.util.stream.IntStream; /** * Used to generate CSV files with random data and an associated schema for testing purposes. */ @Builder public final class TableGeneratorTestUtility { /** * Number of column types currently supported. */ private static final int COL_TYPES = ColumnType.values().length; /** * Hidden utility class constructor. */ private TableGeneratorTestUtility() { } /** * Generates unique column header names based on type. * * @param columnIndex Which column to create a header for * @return Column type name followed by column number */ private static String headerName(final int columnIndex) { switch (columnIndex % COL_TYPES) { case 0: return "cleartext" + columnIndex; case 1: return "sealed" + columnIndex; default: return "fingerprint" + columnIndex; } } /** * Generates the JSON output for a column schema. 
During data generation the column types are evenly rotated between: * <ul> * <li>Cleartext</li> * <li>Sealed with a Max Pad of Length 0</li> * <li>Fingerprint</li> * </ul> * * @param columnIndex Which column to generate a schema for (determines types) * @return JSON object representing the column's schema */ private static JsonObject columnSchema(final int columnIndex) { final JsonObject obj = new JsonObject(); final JsonObject pad = new JsonObject(); obj.addProperty("sourceHeader", headerName(columnIndex)); switch (columnIndex % COL_TYPES) { case 0: obj.addProperty("type", "cleartext"); break; case 1: obj.addProperty("type", "sealed"); pad.addProperty("type", "max"); pad.addProperty("length", 0); obj.add("pad", pad); break; default: obj.addProperty("type", "fingerprint"); break; } return obj; } /** * Generates a prefix for the CSV and schema files. * * @param columnCount Number of columns in generated file * @param rowCount Number of rows in generated file * @return String value {@code misc<columnCount>by<rowCount>-} for start of file name */ public static String filePrefix(final int columnCount, final long rowCount) { return "misc" + columnCount + "by" + rowCount + "-"; } /** * Generates a schema to match the generated CSV file. Column types rotate as specified in {@link #columnSchema(int)}. 
* * @param columnCount Number of columns in generated file * @param rowCount Number of rows in generated file (used for naming file only) * @return Path to schema file * @throws IOException If there was an error writing the schema to disk */ public static Path generateSchema(final int columnCount, final long rowCount) throws IOException { final JsonArray columns = new JsonArray(columnCount); for (int i = 0; i < columnCount; i++) { columns.add(columnSchema(i)); } final JsonObject content = new JsonObject(); content.add("headerRow", new JsonPrimitive(true)); content.add("columns", columns); final Path path = FileTestUtility.resolve(filePrefix(columnCount, rowCount) + ".json"); final var writer = Files.newBufferedWriter(path, StandardCharsets.UTF_8); writer.write(content.toString()); writer.close(); return path; } /** * Generate a random alphanumeric string of the specified size. * * @param size Number of characters in the string * @return Random alphanumeric string */ private static String randomString(final int size) { final String chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; final Random random = new Random(); final StringBuilder sb = new StringBuilder(); for (int i = 0; i < size; i++) { sb.append(chars.charAt(random.nextInt(chars.length()))); } return sb.toString(); } /** * Creates a CSV file of the specified size filled with random alphanumeric strings. 
* * @param entrySize Number of characters in each entry * @param columnCount Number of columns in the output file * @param rowCount Number of rows in te output file * @return Path to the generated file * @throws IOException If an error occurred while writing the file */ public static Path generateCsv(final int entrySize, final int columnCount, final long rowCount) throws IOException { final Path path = FileTestUtility.resolve(filePrefix(columnCount, rowCount) + ".csv"); final var writer = Files.newBufferedWriter(path, StandardCharsets.UTF_8); final var headers = IntStream.range(0, columnCount).boxed().map(TableGeneratorTestUtility::headerName) .collect(Collectors.joining(",")); writer.write(headers); writer.write(System.lineSeparator()); for (int i = 0; i < rowCount; i++) { final String entry = randomString(entrySize); final var entries = new String[columnCount]; Arrays.fill(entries, entry); writer.write(String.join(",", entries)); writer.write(System.lineSeparator()); } writer.close(); return path; } }
6,289
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/GeneralTestUtility.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.amazonaws.c3r.spark.utils;

import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.Pad;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.encryption.keys.KeyUtil;

import javax.crypto.spec.SecretKeySpec;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static org.junit.jupiter.api.Assertions.assertTrue;

/**
 * Set of Utilities used for Testing. A combination of file settings and helper functions.
 */
public abstract class GeneralTestUtility {
    /**
     * A 32-byte key used for testing.
     */
    public static final byte[] EXAMPLE_KEY_BYTES = new byte[]{
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
            16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
    };

    /**
     * Example salt for testing.
     */
    public static final UUID EXAMPLE_SALT = UUID.fromString("00000000-1111-2222-3333-444444444444");

    /**
     * List of headers from the golden test file (data_sample.csv).
     */
    public static final List<String> DATA_SAMPLE_HEADER_STRINGS = List.of("FirstName",
            "LastName",
            "Address",
            "City",
            "State",
            "PhoneNumber",
            "Title",
            "Level",
            "Notes"
    );

    /**
     * Sample headers wrapped as raw (un-normalized) column headers — original casing preserved.
     */
    public static final List<ColumnHeader> DATA_SAMPLE_HEADERS_NO_NORMALIZATION = DATA_SAMPLE_HEADER_STRINGS.stream()
            .map(ColumnHeader::ofRaw)
            .collect(Collectors.toList());

    /**
     * Sample headers wrapped as normalized column headers.
     */
    public static final List<ColumnHeader> DATA_SAMPLE_HEADERS = DATA_SAMPLE_HEADER_STRINGS.stream()
            .map(ColumnHeader::new)
            .collect(Collectors.toList());

    /**
     * Schema for data_sample.csv.
     */
    public static final TableSchema CONFIG_SAMPLE = new MappedTableSchema(List.of(
            cleartextColumn("firstname"),
            cleartextColumn("lastname"),
            sealedColumn("address", PadType.MAX, 32),
            sealedColumn("city", PadType.MAX, 16),
            fingerprintColumn("state"),
            cleartextColumn("phonenumber", "phonenumber_cleartext"),
            sealedColumn("phonenumber", "phonenumber_sealed"),
            fingerprintColumn("phonenumber", "phonenumber_fingerprint"),
            sealedColumn("title", PadType.FIXED, 128),
            cleartextColumn("level"),
            sealedColumn("notes", PadType.MAX, 100)
    ));

    /**
     * Encryption configuration used for data_sample.csv (matches decryption configuration for marshalled_data_sample.csv).
     */
    public static final EncryptSdkConfigTestUtility TEST_CONFIG_DATA_SAMPLE = EncryptSdkConfigTestUtility.builder()
            .input("../samples/csv/data_sample_with_quotes.csv")
            .inputColumnHeaders(CONFIG_SAMPLE.getColumns().stream().map(ColumnSchema::getSourceHeader).map(ColumnHeader::toString)
                    .collect(Collectors.toList()))
            .outputColumnHeaders(CONFIG_SAMPLE.getColumns().stream().map(ColumnSchema::getTargetHeader).map(ColumnHeader::toString)
                    .collect(Collectors.toList()))
            .salt("saltybytes")
            .key(new SecretKeySpec(EXAMPLE_KEY_BYTES, KeyUtil.KEY_ALG))
            .schema(CONFIG_SAMPLE)
            .build();

    /**
     * Encryption configuration used for one_row_null_sample.csv with only cleartext columns.
     */
    public static final EncryptSdkConfigTestUtility TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT = EncryptSdkConfigTestUtility.builder()
            .input("../samples/csv/one_row_null_sample.csv")
            .inputColumnHeaders(List.of("firstname", "lastname", "address", "city"))
            .outputColumnHeaders(List.of("firstname", "lastname", "address", "city"))
            .salt("saltybytes")
            .key(new SecretKeySpec(EXAMPLE_KEY_BYTES, KeyUtil.KEY_ALG))
            .schema(new MappedTableSchema(Stream.of("firstname", "lastname", "address", "city").map(GeneralTestUtility::cleartextColumn)
                    .collect(Collectors.toList())))
            .build();

    /**
     * Create a ColumnHeader if name isn't null.
     *
     * <p>
     * This helper function is to support testing positional schemas. Those schemas need to have {@code null} as the value
     * for the sourceHeader. However, {@code new ColumnHeader(null)} fails validation. Instead of using the ternary operator
     * everywhere we assign the source value, we can call this function instead which is a bit cleaner. By having this helper,
     * we don't need to make another full set of helper functions for schema creation, we can just pass {@code null} in to the
     * existing helpers. {@link com.amazonaws.c3r.config.PositionalTableSchema} uses this functionality in the creation of all its
     * test variables at the top of the file if you want to see an example usage of why we need to pass null through.
     *
     * @param name Name of the column or {@code null} if there isn't one
     * @return Input string transformed into {@link ColumnHeader} or {@code null} if {@code name} was {@code null}
     */
    private static ColumnHeader nameHelper(final String name) {
        if (name == null) {
            return null;
        }
        return new ColumnHeader(name);
    }

    /**
     * Helper function that handles cleartext column boilerplate.
     *
     * @param name Name to be used for input and output row
     * @return A cleartext column schema
     */
    public static ColumnSchema cleartextColumn(final String name) {
        return ColumnSchema.builder()
                .sourceHeader(nameHelper(name))
                .targetHeader(nameHelper(name))
                .pad(null)
                .type(ColumnType.CLEARTEXT)
                .build();
    }

    /**
     * Helper function that handles cleartext column boilerplate.
     *
     * @param nameIn  Source column header name
     * @param nameOut Target column header name
     * @return A cleartext column schema
     */
    public static ColumnSchema cleartextColumn(final String nameIn, final String nameOut) {
        return ColumnSchema.builder()
                .sourceHeader(nameHelper(nameIn))
                .targetHeader(nameHelper(nameOut))
                .pad(null)
                .type(ColumnType.CLEARTEXT)
                .build();
    }

    /**
     * Helper function for a sealed column with no pad (uses the default pad).
     *
     * @param nameIn  Source header name
     * @param nameOut Target header name
     * @return A sealed column schema
     */
    public static ColumnSchema sealedColumn(final String nameIn, final String nameOut) {
        return ColumnSchema.builder()
                .sourceHeader(nameHelper(nameIn))
                .targetHeader(nameHelper(nameOut))
                .pad(Pad.DEFAULT)
                .type(ColumnType.SEALED)
                .build();
    }

    /**
     * Helper function for a sealed column with specified padding.
     *
     * @param name   Name for source and target column headers
     * @param type   What pad type to use
     * @param length How long the pad should be
     * @return A sealed column schema
     */
    public static ColumnSchema sealedColumn(final String name, final PadType type, final Integer length) {
        return ColumnSchema.builder()
                .sourceHeader(nameHelper(name))
                .targetHeader(nameHelper(name))
                .pad(Pad.builder().type(type).length(length).build())
                .type(ColumnType.SEALED)
                .build();
    }

    /**
     * Helper function for creating a fingerprint column.
     *
     * @param name The name to use for both the source and target header
     * @return A fingerprint column schema
     */
    public static ColumnSchema fingerprintColumn(final String name) {
        return ColumnSchema.builder()
                .sourceHeader(nameHelper(name))
                .targetHeader(nameHelper(name))
                .type(ColumnType.FINGERPRINT)
                .build();
    }

    /**
     * Helper function for creating a fingerprint column.
     *
     * @param nameIn  The name to use for the source header
     * @param nameOut The name to use for the target header
     * @return A fingerprint column schema
     */
    public static ColumnSchema fingerprintColumn(final String nameIn, final String nameOut) {
        return ColumnSchema.builder()
                .sourceHeader(nameHelper(nameIn))
                .targetHeader(nameHelper(nameOut))
                .type(ColumnType.FINGERPRINT)
                .build();
    }

    /**
     * Build a simple Row from strings for testing; string values are used verbatim.
     *
     * @param rowEntries CSV row entries given in key, value, key, value, etc... order a la `Map.of(..)`
     * @return A row with the given key/value pairs
     */
    public static Map<String, String> row(final String... rowEntries) {
        final var row = new HashMap<String, String>();
        // Consume the varargs pairwise: even indices are keys, odd indices are values.
        for (int i = 0; i < rowEntries.length; i += 2) {
            row.put(rowEntries[i], rowEntries[i + 1]);
        }
        return row;
    }

    /**
     * Takes a mapping of column headers to values along with a set of map entries for a column header to a test function.
     * This class creates the map of predicate functions by column header and calls {@link #assertRowEntryPredicates(Map, Map)}.
     *
     * @param content    A map of column headers to row content
     * @param predicates A variable length list of arguments that are map entries for testing row data
     * @see #assertRowEntryPredicates(Map, Map)
     */
    @SafeVarargs
    public static void assertRowEntryPredicates(final Map<String, String> content,
                                                final Map.Entry<String, Predicate<String>>... predicates) {
        assertRowEntryPredicates(content, Map.ofEntries(predicates));
    }

    /**
     * Using a mapping of headers to values and headers to test functions, verify each value in a row.
     *
     * @param content      Map of column headers to row content
     * @param predicateMap Map of column headers to a predicate function to check the column's value
     * @throws RuntimeException If the number of tests don't match the number of entries in the row
     */
    public static void assertRowEntryPredicates(final Map<String, String> content,
                                                final Map<String, Predicate<String>> predicateMap) {
        // Fail loudly if the test itself is malformed (headers don't line up).
        if (!content.keySet().equals(predicateMap.keySet())) {
            throw new RuntimeException(
                    String.join("\n",
                            "Bad test! Content keys and predicate keys don't match!",
                            "  Content headers: " + String.join(",", content.keySet()),
                            "Predicate headers: " + String.join(",", predicateMap.keySet())));
        }
        content.forEach((header, value) ->
                assertTrue(predicateMap.get(header).test(value),
                        "Row entry predicate failure: `" + header + "` -> `" + value + "`"));
    }
}
6,290
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/StringTestUtilityTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.utils; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; public class StringTestUtilityTest { @Test public void countMatchesTest() { assertEquals(0, StringTestUtility.countMatches("a", "")); assertEquals(0, StringTestUtility.countMatches("a", "b")); assertEquals(1, StringTestUtility.countMatches("a", "a")); assertEquals(1, StringTestUtility.countMatches("a", "abcd")); assertEquals(3, StringTestUtility.countMatches("a", "abcdabcdabcd")); assertEquals(3, StringTestUtility.countMatches("aa", "aaabcdaaabcdaaabcd")); } }
6,291
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/EncryptSdkConfigTestUtility.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.utils; import com.amazonaws.c3r.config.ClientSettings; import com.amazonaws.c3r.config.TableSchema; import lombok.Builder; import lombok.Getter; import javax.crypto.spec.SecretKeySpec; import java.util.List; /** * Basic configuration settings for encryption. */ @Builder @Getter public class EncryptSdkConfigTestUtility { /** * Schema specification. */ @Builder.Default private TableSchema schema = null; /** * Key to use for encryption. */ @Builder.Default private SecretKeySpec key = null; /** * Salt to use for key generation. */ @Builder.Default private String salt = null; /** * Security related parameters. */ @Builder.Default private ClientSettings settings = ClientSettings.lowAssuranceMode(); /** * Input file. */ @Builder.Default private String input = null; /** * Column headers in the input file. */ @Builder.Default private List<String> inputColumnHeaders = null; /** * Column headers to use in the output file. */ @Builder.Default private List<String> outputColumnHeaders = null; }
6,292
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/StringTestUtility.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.utils; import java.util.regex.Pattern; public final class StringTestUtility { private StringTestUtility() { } /** * Counts how many times a search string occurs (non-overlapping) in given string content. * * @param searchString String to search for * @param content Content to search in * @return The number of occurrences of the search string in the content. */ public static int countMatches(final String searchString, final String content) { return content.split(Pattern.quote(searchString), -1).length - 1; } }
6,293
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/FileTestUtility.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.amazonaws.c3r.spark.utils;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

/**
 * A test utility for creating temporary Path resources for tests that will clean themselves up after execution.
 *
 * <p>Cleanup relies on {@code File.deleteOnExit}, which deletes in reverse registration order:
 * a directory is registered before the files created inside it, so the files are removed first
 * and the (then-empty) directory last.
 */
public abstract class FileTestUtility {
    /**
     * Creates a temporary directory with the prefix "temp" marked with deleteOnExit.
     *
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempDir() throws IOException {
        final Path tempDir = Files.createTempDirectory("temp");
        tempDir.toFile().deleteOnExit();
        return tempDir;
    }

    /**
     * Creates a temporary file with the prefix "testFile" and suffix ".tmp" marked with deleteOnExit.
     *
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempFile() throws IOException {
        return createTempFile("testFile", ".tmp");
    }

    /**
     * Creates a temporary file with the prefix and suffix provided marked with deleteOnExit.
     * The file is placed in its own fresh temporary directory.
     *
     * @param prefix The prefix of the Path to create
     * @param suffix The suffix of the Path to create
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempFile(final String prefix, final String suffix) throws IOException {
        final Path tempDir = createTempDir();
        final Path tempFile = Files.createTempFile(tempDir, prefix, suffix);
        tempFile.toFile().deleteOnExit();
        return tempFile;
    }

    /**
     * Resolves a temporary file with the file name provided marked with deleteOnExit.
     * The path is resolved against a fresh temporary directory; the file itself is not created.
     *
     * @param fileName The name of the Path to resolve
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be resolved
     */
    public static Path resolve(final String fileName) throws IOException {
        return resolve(fileName, createTempDir());
    }

    /**
     * Resolves the given file name against the provided temporary directory and marks the
     * result with deleteOnExit. (The file itself is not created here.)
     *
     * @param fileName The name of the Path to resolve
     * @param tempDir  The Path to use to resolve the temporary file
     * @return A temporary Path
     */
    private static Path resolve(final String fileName, final Path tempDir) {
        final Path tempFile = tempDir.resolve(fileName);
        tempFile.toFile().deleteOnExit();
        return tempFile;
    }
}
6,294
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/MainEnvVarKeyInvalidTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.cli; import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility; import org.apache.spark.sql.SparkSession; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import picocli.CommandLine; import static org.junit.jupiter.api.Assertions.assertNotEquals; /* * Tests specifically needing an invalid key in the environment * variable for the shared secret key. */ public class MainEnvVarKeyInvalidTest { private static final String ENC_INPUT_PATH = "../samples/csv/data_sample_without_quotes.csv"; private static final String SCHEMA_PATH = "../samples/schema/config_sample.json"; private static final String DEC_INPUT_PATH = "../samples/csv/marshalled_data_sample.csv"; private DecryptCliConfigTestUtility decArgs; private CommandLine decMain; private EncryptCliConfigTestUtility encArgs; private CommandLine encMain; public int runEncryptMainWithCliArgs() { return encMain.execute(encArgs.toArrayWithoutMode()); } public int runDecryptMainWithCliArgs() { return decMain.execute(decArgs.toArrayWithoutMode()); } @BeforeEach public void setup() { final SparkSession sparkSession = SparkSessionTestUtility.initSparkSession(); encArgs = EncryptCliConfigTestUtility.defaultDryRunTestArgs(ENC_INPUT_PATH, SCHEMA_PATH); encMain = EncryptMode.getApp(null, sparkSession); decArgs = DecryptCliConfigTestUtility.defaultDryRunTestArgs(DEC_INPUT_PATH); decMain = DecryptMode.getApp(sparkSession); } @Test public void validateEncryptSecretKeyInvalidTest() { assertNotEquals(0, runEncryptMainWithCliArgs()); } @Test public void validateDecryptSecretKeyInvalidTest() { assertNotEquals(0, runDecryptMainWithCliArgs()); } }
6,295
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/CliTestUtility.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.spark.cli; import com.amazonaws.c3r.cleanrooms.CleanRoomsDao; import com.amazonaws.c3r.spark.cleanrooms.CleanRoomsDaoTestUtility; import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.when; /** * Utilities to interface with the CLI interface as if you were calling from the command line. */ public final class CliTestUtility { /** * Hidden utility class constructor. */ private CliTestUtility() { } /** * Runs the cli with a mock to replace an actual connection to AWS Clean Rooms. * * @param args Command line parameters for encrypt mode * @return {@value Main#SUCCESS} if no errors are encountered or {@value Main#FAILURE} */ public static int runWithoutCleanRooms(final EncryptCliConfigTestUtility args) { final CleanRoomsDao cleanRoomsDao; cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao(); when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(args.getClientSettings()); return EncryptMode.getApp(cleanRoomsDao, SparkSessionTestUtility.initSparkSession()) .execute(args.toArrayWithoutMode()); } }
6,296
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/MainArgParseTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.amazonaws.c3r.spark.cli;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import picocli.CommandLine;

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

/**
 * Class for testing CLI argument parsing from the top-level which intentionally
 * does not execute any C3R business logic. I.e., only testing CLI parsing
 * configurations are correct with respect to which arguments are required,
 * which are exclusive, how certain common behaviors are triggered, etc.
 */
public class MainArgParseTest {
    @Test
    public void noArgsTest() {
        // No arguments: neither help nor version requested, no subcommand selected.
        final CommandLine.ParseResult result = Main.getApp().parseArgs();
        assertFalse(result.isVersionHelpRequested());
        assertFalse(result.isUsageHelpRequested());
        assertEquals(0, result.subcommands().size());
    }

    @ParameterizedTest
    @ValueSource(strings = {"-V", "--version"})
    public void mainVersionTest(final String versionFlag) {
        // Both version-flag spellings should request version help only.
        final CommandLine.ParseResult result = Main.getApp().parseArgs(versionFlag);
        assertTrue(result.isVersionHelpRequested());
        assertFalse(result.isUsageHelpRequested());
        assertEquals(0, result.subcommands().size());
    }

    @ParameterizedTest
    @ValueSource(strings = {"-h", "--help"})
    public void mainHelpTest(final String helpFlag) {
        // Both help-flag spellings should request usage help only.
        final CommandLine.ParseResult result = Main.getApp().parseArgs(helpFlag);
        assertFalse(result.isVersionHelpRequested());
        assertTrue(result.isUsageHelpRequested());
        assertEquals(0, result.subcommands().size());
    }

    /**
     * Check help parses as expected for a certain mode.
     *
     * @param mode CLI mode
     * @param help Help flag
     */
    private void checkModeHelpFlag(final String mode, final String help) {
        final CommandLine.ParseResult mainResult = Main.getApp().parseArgs(mode, help);
        assertEquals(1, mainResult.subcommands().size());
        final CommandLine.ParseResult modeResult = mainResult.subcommand();
        assertEquals(mode, modeResult.commandSpec().name());
        assertEquals(1, modeResult.expandedArgs().size());
        assertEquals(help, modeResult.expandedArgs().get(0));
        assertFalse(modeResult.isVersionHelpRequested());
        assertTrue(modeResult.isUsageHelpRequested());
    }

    @ParameterizedTest
    @ValueSource(strings = {"encrypt", "decrypt", "schema"})
    public void modeHelpFlagTest(final String mode) {
        checkModeHelpFlag(mode, "-h");
        checkModeHelpFlag(mode, "--help");
    }

    /**
     * Check version parses as expected for a certain mode.
     *
     * @param mode    CLI mode
     * @param version Version flag
     */
    private void checkModeVersionFlag(final String mode, final String version) {
        final CommandLine.ParseResult mainResult = Main.getApp().parseArgs(mode, version);
        assertEquals(1, mainResult.subcommands().size());
        final CommandLine.ParseResult modeResult = mainResult.subcommand();
        assertEquals(mode, modeResult.commandSpec().name());
        assertEquals(1, modeResult.expandedArgs().size());
        assertEquals(version, modeResult.expandedArgs().get(0));
        assertTrue(modeResult.isVersionHelpRequested());
        assertFalse(modeResult.isUsageHelpRequested());
    }

    @ParameterizedTest
    @ValueSource(strings = {"encrypt", "decrypt", "schema"})
    public void modeVersionFlagTest(final String mode) {
        checkModeVersionFlag(mode, "-V");
        checkModeVersionFlag(mode, "--version");
    }

    @ParameterizedTest
    @ValueSource(strings = {"encrypt", "decrypt", "schema"})
    public void subcommandsWithNoArgsTest(final String mode) {
        // NOTE: This assumes the above listed modes have _some_ required arguments
        assertThrows(CommandLine.MissingParameterException.class, () ->
                Main.getApp().parseArgs(mode));
    }

    @Test
    public void invalidSubcommandTest() {
        // A mode name that does not exist should be rejected as an unmatched argument.
        assertThrows(CommandLine.UnmatchedArgumentException.class, () ->
                Main.getApp().parseArgs("not-a-real-mode"));
    }

    /**
     * Asserts that no errors occur when using the given minimal args,
     * and then asserts that removing any of the arguments after the
     * first (i.e., the mode name itself) raises a missing-parameter error.
     *
     * @param minimalArgs Minimal argument list - first element is mode name, remaining are arguments
     *                    for that mode.
     */
    public void checkMinimalRequiredModeArgs(final String[] minimalArgs) {
        // NOTE: This assumes the above listed modes have _some_ required arguments
        assertDoesNotThrow(() -> Main.getApp().parseArgs(minimalArgs));
        // check that for this mode (element 0), removing any argument causes a CLI parse error
        for (int pos = 1; pos < minimalArgs.length; pos++) {
            final List<String> invalidParameters = Arrays.stream(minimalArgs).collect(Collectors.toList());
            invalidParameters.remove(pos);
            assertThrows(CommandLine.MissingParameterException.class, () ->
                    Main.getApp().parseArgs(invalidParameters.toArray(String[]::new)));
        }
    }

    @Test
    public void encryptWithRequiredArgs() {
        final String[] parameters = {"encrypt", "input", "--id=00000000-1111-2222-3333-444444444444",
                "--schema=schema"};
        checkMinimalRequiredModeArgs(parameters);
    }

    @Test
    public void decryptWithRequiredArgs() {
        final String[] parameters = {"decrypt", "input", "--id=00000000-1111-2222-3333-444444444444"};
        checkMinimalRequiredModeArgs(parameters);
    }

    @ParameterizedTest
    @ValueSource(strings = {"-t", "--template", "-i", "--interactive"})
    public void schemaWithRequiredArgs(final String modeFlag) {
        final String[] parameters = {"schema", "input", modeFlag};
        checkMinimalRequiredModeArgs(parameters);
    }

    @Test
    public void schemaGenModesExclusiveArgs() {
        final String[] parameters = {"schema", "input", "-i", "-t"};
        // parsing with both -i and -t errors due to those being mutually exclusive
        assertThrows(CommandLine.MutuallyExclusiveArgsException.class, () ->
                Main.getApp().parseArgs(parameters));
        // and simply dropping one fixes things
        assertDoesNotThrow(() ->
                Main.getApp().parseArgs(Arrays.copyOfRange(parameters, 0, parameters.length - 1)));
    }
}
6,297
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/EncryptCliConfigTestUtility.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.amazonaws.c3r.spark.cli;

import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.spark.utils.GeneralTestUtility;
import lombok.Getter;
import lombok.Setter;

import java.util.ArrayList;
import java.util.List;

/**
 * Class for conveniently generating various command line argument
 * combinations for the `encrypt` command.
 *
 * <p>NOTE: the {@link ClientSettings}-related fields (allowCleartext, allowDuplicates,
 * allowJoinsOnColumnsWithDifferentNames, preserveNulls) are intentionally NOT emitted
 * as CLI flags by {@link #getCliArgs()}; they are only surfaced through
 * {@link #getClientSettings()} for mocking the Clean Rooms collaboration settings.
 */
@Setter
public final class EncryptCliConfigTestUtility {
    /**
     * Schema file location.
     */
    private String schema;

    /**
     * Collaboration ID to use for computing shared secret keys.
     */
    private String collaborationId;

    /**
     * Input file location.
     */
    @Getter
    private String input;

    /**
     * Value used in the input file to represent {@code null} in the CSV data.
     */
    private String csvInputNullValue;

    /**
     * Value to use in the output file to represent {@code null} in the CSV data.
     */
    private String csvOutputNullValue;

    /**
     * Location to write the output file.
     */
    @Getter
    private String output;

    /**
     * Whether the output file should be overwritten if it already exists.
     */
    private boolean overwrite;

    /**
     * Whether encryption will actually be run or only the configuration will be validated.
     */
    private boolean dryRun;

    /**
     * Whether plaintext values are allowed.
     */
    private boolean allowCleartext;

    /**
     * Whether duplicate values are allowed in fingerprint columns.
     */
    private boolean allowDuplicates;

    /**
     * Whether columns with different names should be allowed in a join statement.
     */
    private boolean allowJoinsOnColumnsWithDifferentNames;

    /**
     * Whether {@code null} values should be preserved during encryption.
     */
    private boolean preserveNulls;

    /**
     * Whether a stacktrace should be printed.
     */
    private boolean enableStackTraces;

    /**
     * Input file data type.
     */
    private FileFormat fileFormat;

    /**
     * AWS CLI profile.
     */
    private String profile;

    /**
     * AWS region.
     */
    private String region;

    /**
     * Hidden default constructor so static instance creators are used.
     */
    private EncryptCliConfigTestUtility() {
    }

    /**
     * Default test values for encryption args to use with tests.
     * Not a dry run: data processing will execute.
     *
     * @return Default test values
     */
    public static EncryptCliConfigTestUtility defaultTestArgs() {
        final var args = new EncryptCliConfigTestUtility();
        args.enableStackTraces = true;
        args.allowCleartext = true;
        args.overwrite = true;
        args.schema = "mySchema";
        args.collaborationId = GeneralTestUtility.EXAMPLE_SALT.toString();
        args.input = "mySourceFile";
        return args;
    }

    /**
     * Creates a test configuration for a dry run. Skips all data processing and validates settings.
     * {@code null} file/schema arguments fall back to placeholder names.
     *
     * @param file   Input file to use for the dry run
     * @param schema Schema file to use for the dry run
     * @return Default dry run configuration with specified files
     */
    public static EncryptCliConfigTestUtility defaultDryRunTestArgs(final String file, final String schema) {
        final var args = new EncryptCliConfigTestUtility();
        args.schema = (schema == null) ? "mySchema" : schema;
        args.collaborationId = GeneralTestUtility.EXAMPLE_SALT.toString();
        args.input = (file == null) ? "mySourceFile" : file;
        args.overwrite = true;
        args.dryRun = true;
        args.allowCleartext = true;
        args.enableStackTraces = true;
        return args;
    }

    /**
     * Empty CLI configuration.
     *
     * @return Configuration instance with no set values
     */
    public static EncryptCliConfigTestUtility blankTestArgs() {
        return new EncryptCliConfigTestUtility();
    }

    /**
     * Create an instance of {@code ClientSettings} using the specified values.
     *
     * @return {@link ClientSettings} using values stored in this instance
     */
    public ClientSettings getClientSettings() {
        return ClientSettings.builder()
                .allowCleartext(allowCleartext)
                .allowDuplicates(allowDuplicates)
                .allowJoinsOnColumnsWithDifferentNames(allowJoinsOnColumnsWithDifferentNames)
                .preserveNulls(preserveNulls).build();
    }

    /**
     * Converts the specified command line parameters to a list.
     * Only non-null/set values produce flags; the mode name {@code "encrypt"} is element 0.
     *
     * @return List of command line parameters
     * @see EncryptCliConfigTestUtility#getCliArgsWithoutMode
     */
    public List<String> getCliArgs() {
        final List<String> args = new ArrayList<>();
        args.add("encrypt");
        if (input != null) {
            args.add(input);
        }
        if (schema != null) {
            args.add("--schema=" + schema);
        }
        if (collaborationId != null) {
            args.add("--id=" + collaborationId);
        }
        if (csvInputNullValue != null) {
            args.add("--csvInputNULLValue=" + csvInputNullValue);
        }
        if (csvOutputNullValue != null) {
            args.add("--csvOutputNULLValue=" + csvOutputNullValue);
        }
        if (output != null) {
            args.add("--output=" + output);
        }
        if (overwrite) {
            args.add("--overwrite");
        }
        if (dryRun) {
            args.add("--dryRun");
        }
        if (enableStackTraces) {
            args.add("--enableStackTraces");
        }
        if (fileFormat != null) {
            args.add("--fileFormat=" + fileFormat);
        }
        if (profile != null) {
            args.add("--profile=" + profile);
        }
        if (region != null) {
            args.add("--region=" + region);
        }
        return args;
    }

    /**
     * Converts the specified command line parameters to a list without including the CLI mode parameter.
     *
     * @return List of command line parameters.
     * @see EncryptCliConfigTestUtility#getCliArgs
     */
    public List<String> getCliArgsWithoutMode() {
        final List<String> args = getCliArgs();
        args.remove(0);
        return args;
    }

    /**
     * Converts the specified command line parameters to an array without including the CLI mode parameter.
     *
     * @return Array of command line parameters
     */
    public String[] toArrayWithoutMode() {
        return getCliArgsWithoutMode().toArray(String[]::new);
    }
}
6,298
0
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark
Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/SchemaModeTest.java
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.amazonaws.c3r.spark.cli;

import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.spark.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.GeneralTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import com.amazonaws.c3r.spark.utils.StringTestUtility;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.stream.Collectors;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;

/**
 * Tests for schema generation mode (both {@code --template} and {@code --interactive})
 * across CSV/Parquet inputs and across no/permissive/restrictive collaboration settings.
 */
public class SchemaModeTest {
    /** Number of columns in the standard sample data files. */
    private static final int SAMPLE_DATA_COLUMN_COUNT = 9;

    /** Regex alternation matching any column type name. */
    private static final String ALL_COLUMN_TYPES = "[" + Arrays.stream(ColumnType.values())
            .map(ColumnType::toString)
            .collect(Collectors.joining("|")) + "]";

    /** Regex alternation matching any column type name except cleartext. */
    private static final String ALL_COLUMN_TYPES_SANS_CLEARTEXT = "[" + Arrays.stream(ColumnType.values())
            .filter(c -> c != ColumnType.CLEARTEXT)
            .map(ColumnType::toString)
            .collect(Collectors.joining("|")) + "]";

    /** Spark session shared by all tests in this class. */
    private final SparkSession sparkSession = SparkSessionTestUtility.initSparkSession();

    /** Output location for the generated schema (fresh temp path per test). */
    private Path schemaPath;

    @BeforeEach
    public void setup() throws IOException {
        schemaPath = FileTestUtility.resolve("schema.json");
    }

    // Generate a template without settings and shallowly check content contains expected entries
    private void runTemplateGeneratorNoSettings(final String inputFile, final boolean hasHeaderRow) throws IOException {
        final var args = SchemaCliConfigTestUtility.builder()
                .input(inputFile)
                .output(schemaPath.toString())
                .subMode("--template")
                .noHeaders(!hasHeaderRow)
                .overwrite(true)
                .build();
        assertEquals(0, SchemaMode.getApp(null, sparkSession).execute(args.toArrayWithoutMode()));
        assertTrue(Files.exists(schemaPath));
        assertTrue(Files.size(schemaPath) > 0);
        final String contents = Files.readString(schemaPath);
        assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
        // sourceHeader entries only appear when the input has a header row
        assertEquals(hasHeaderRow ? SAMPLE_DATA_COLUMN_COUNT : 0,
                StringTestUtility.countMatches("sourceHeader", contents));
        assertEquals(SAMPLE_DATA_COLUMN_COUNT, StringTestUtility.countMatches(ALL_COLUMN_TYPES, contents));
    }

    // Generate a template with permissive settings and shallowly check content contains expected entries
    private void runTemplateGeneratorPermissiveSettings(final String inputFile, final boolean hasHeaderRow)
            throws IOException {
        final var args = SchemaCliConfigTestUtility.builder()
                .input(inputFile)
                .output(schemaPath.toString())
                .subMode("--template")
                .noHeaders(!hasHeaderRow)
                .overwrite(true)
                .collaborationId(GeneralTestUtility.EXAMPLE_SALT.toString())
                .build();
        final var cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
        when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(ClientSettings.lowAssuranceMode());
        assertEquals(0, SchemaMode.getApp(cleanRoomsDao, sparkSession).execute(args.toArrayWithoutMode()));
        assertTrue(Files.exists(schemaPath));
        assertTrue(Files.size(schemaPath) > 0);
        final String contents = Files.readString(schemaPath);
        assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
        assertEquals(hasHeaderRow ? SAMPLE_DATA_COLUMN_COUNT : 0,
                StringTestUtility.countMatches("sourceHeader", contents));
        assertEquals(SAMPLE_DATA_COLUMN_COUNT, StringTestUtility.countMatches(ALL_COLUMN_TYPES, contents));
    }

    // Generate a template with restrictive settings and shallowly check content contains expected entries
    private void runTemplateGeneratorRestrictiveSettings(final String inputFile,
                                                         final int expectedTargetColumnCount,
                                                         final boolean hasHeaderRow) throws IOException {
        final var args = SchemaCliConfigTestUtility.builder()
                .input(inputFile)
                .output(schemaPath.toString())
                .subMode("--template")
                .noHeaders(!hasHeaderRow)
                .overwrite(true)
                .collaborationId(GeneralTestUtility.EXAMPLE_SALT.toString())
                .build();
        final var cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
        when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(ClientSettings.highAssuranceMode());
        assertEquals(0, SchemaMode.getApp(cleanRoomsDao, sparkSession).execute(args.toArrayWithoutMode()));
        assertTrue(Files.exists(schemaPath));
        assertTrue(Files.size(schemaPath) > 0);
        final String contents = Files.readString(schemaPath);
        assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
        assertEquals(hasHeaderRow ? expectedTargetColumnCount : 0,
                StringTestUtility.countMatches("sourceHeader", contents));
        assertEquals(expectedTargetColumnCount, StringTestUtility.countMatches("targetHeader", contents));
        // restrictive settings forbid cleartext columns entirely
        assertEquals(expectedTargetColumnCount,
                StringTestUtility.countMatches(ALL_COLUMN_TYPES_SANS_CLEARTEXT, contents));
    }

    // Run interactive schema gen without settings and check it returns results
    // and shallowly check content contains expected entries
    private void runInteractiveGeneratorNoSettings(final String inputFile, final boolean hasHeaderRow)
            throws IOException {
        final var args = SchemaCliConfigTestUtility.builder()
                .input(inputFile)
                .output(schemaPath.toString())
                .subMode("--interactive")
                .noHeaders(!hasHeaderRow)
                .overwrite(true)
                .build();
        // number greater than test file column counts (test will fail if too low, so no incorrectness risk in
        // picking a number)
        final int columnCountUpperBound = 100;
        // user input which repeatedly says the source column in question should generate one cleartext column
        // with a trivial name
        final StringBuilder inputBuilder = new StringBuilder();
        for (int i = 0; i < columnCountUpperBound; i++) {
            // 1 target column
            inputBuilder.append("1\n");
            // target column type
            inputBuilder.append("cleartext\n");
            // target column name
            inputBuilder.append("column").append(i).append('\n');
        }
        final var userInput = new ByteArrayInputStream(inputBuilder.toString().getBytes(StandardCharsets.UTF_8));
        System.setIn(new BufferedInputStream(userInput));
        assertEquals(0, Main.getApp().execute(args.toArray()));
        assertTrue(schemaPath.toFile().exists());
        assertTrue(schemaPath.toFile().length() > 0);
        final String contents = Files.readString(schemaPath);
        assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
        assertEquals(hasHeaderRow ? SAMPLE_DATA_COLUMN_COUNT : 0,
                StringTestUtility.countMatches("sourceHeader", contents));
        assertEquals(SAMPLE_DATA_COLUMN_COUNT,
                StringTestUtility.countMatches("\"" + ColumnType.CLEARTEXT + "\"", contents));
    }

    // Run interactive schema gen with permissive settings and check it returns results
    // and shallowly check content contains expected entries
    private void runInteractiveGeneratorPermissiveSettings(final String inputFile, final boolean hasHeaderRow)
            throws IOException {
        final var args = SchemaCliConfigTestUtility.builder()
                .input(inputFile)
                .output(schemaPath.toString())
                .subMode("--interactive")
                .noHeaders(!hasHeaderRow)
                .overwrite(true)
                .collaborationId(GeneralTestUtility.EXAMPLE_SALT.toString())
                .build();
        // number greater than test file column counts (test will fail if too low, so no incorrectness risk in
        // picking a number)
        final int columnCountUpperBound = 100;
        // user input which repeatedly says the source column in question should generate one cleartext column
        // with a trivial name
        final StringBuilder inputBuilder = new StringBuilder();
        for (int i = 0; i < columnCountUpperBound; i++) {
            // 1 target column
            inputBuilder.append("1\n");
            // target column type
            inputBuilder.append("cleartext\n");
            // target column name
            inputBuilder.append("column").append(i).append('\n');
        }
        final var userInput = new ByteArrayInputStream(inputBuilder.toString().getBytes(StandardCharsets.UTF_8));
        System.setIn(new BufferedInputStream(userInput));
        final var cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
        when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(ClientSettings.lowAssuranceMode());
        assertEquals(0, SchemaMode.getApp(cleanRoomsDao, sparkSession).execute(args.toArrayWithoutMode()));
        assertTrue(schemaPath.toFile().exists());
        assertTrue(schemaPath.toFile().length() > 0);
        final String contents = Files.readString(schemaPath);
        assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
        assertEquals(hasHeaderRow ? SAMPLE_DATA_COLUMN_COUNT : 0,
                StringTestUtility.countMatches("sourceHeader", contents));
        assertEquals(SAMPLE_DATA_COLUMN_COUNT,
                StringTestUtility.countMatches("\"" + ColumnType.CLEARTEXT + "\"", contents));
    }

    // Run interactive schema gen with restrictive settings and check it returns results
    // and shallowly check content contains expected entries
    private void runInteractiveGeneratorRestrictiveSettings(final String inputFile,
                                                            final int expectedTargetColumnCount,
                                                            final boolean hasHeaderRow) throws IOException {
        final var args = SchemaCliConfigTestUtility.builder()
                .input(inputFile)
                .output(schemaPath.toString())
                .subMode("--interactive")
                .noHeaders(!hasHeaderRow)
                .overwrite(true)
                .collaborationId(GeneralTestUtility.EXAMPLE_SALT.toString())
                .build();
        // number greater than test file column counts (test will fail if too low, so no incorrectness risk in
        // picking a number)
        final int columnCountUpperBound = 100;
        // user input which repeatedly says the source column in question should generate one cleartext column
        // with a trivial name
        final StringBuilder inputBuilder = new StringBuilder();
        for (int i = 0; i < columnCountUpperBound; i++) {
            // 1 target column
            inputBuilder.append("1\n");
            // target column type, will fail due to restrictive settings
            inputBuilder.append("cleartext\n");
            // target column type, will succeed
            inputBuilder.append("fingerprint\n");
            // target column name
            inputBuilder.append("column").append(i).append('\n');
            // skip suffix
            inputBuilder.append("\n");
        }
        final var userInput = new ByteArrayInputStream(inputBuilder.toString().getBytes(StandardCharsets.UTF_8));
        System.setIn(new BufferedInputStream(userInput));
        final var cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
        when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(ClientSettings.highAssuranceMode());
        assertEquals(0, SchemaMode.getApp(cleanRoomsDao, sparkSession).execute(args.toArrayWithoutMode()));
        assertTrue(schemaPath.toFile().exists());
        assertTrue(schemaPath.toFile().length() > 0);
        final String contents = Files.readString(schemaPath);
        assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
        assertEquals(hasHeaderRow ? expectedTargetColumnCount : 0,
                StringTestUtility.countMatches("sourceHeader", contents));
        assertEquals(expectedTargetColumnCount, StringTestUtility.countMatches("targetHeader", contents));
        assertEquals(0, StringTestUtility.countMatches(ColumnType.CLEARTEXT.toString(), contents));
        assertEquals(expectedTargetColumnCount,
                StringTestUtility.countMatches("\"" + ColumnType.FINGERPRINT + "\"", contents));
    }

    @Test
    public void schemaTemplateCsvTest() throws IOException {
        runTemplateGeneratorNoSettings("../samples/csv/data_sample_without_quotes.csv", true);
    }

    @Test
    public void schemaTemplateCsvNoHeadersTest() throws IOException {
        runTemplateGeneratorNoSettings("../samples/csv/data_sample_no_headers.csv", false);
    }

    @Test
    public void schemaTemplateWithPermissiveSettingsCsvTest() throws IOException {
        runTemplateGeneratorPermissiveSettings("../samples/csv/data_sample_without_quotes.csv", true);
    }

    @Test
    public void schemaTemplateWithPermissiveSettingsCsvNoHeadersTest() throws IOException {
        runTemplateGeneratorPermissiveSettings("../samples/csv/data_sample_no_headers.csv", false);
    }

    @Test
    public void schemaTemplateWithRestrictiveSettingsCsvTest() throws IOException {
        runTemplateGeneratorRestrictiveSettings("../samples/csv/data_sample_without_quotes.csv",
                SAMPLE_DATA_COLUMN_COUNT, true);
    }

    @Test
    public void schemaTemplateWithRestrictiveSettingsCsvNoHeadersTest() throws IOException {
        runTemplateGeneratorRestrictiveSettings("../samples/csv/data_sample_no_headers.csv",
                SAMPLE_DATA_COLUMN_COUNT, false);
    }

    @Test
    public void schemaTemplateParquetTest() throws IOException {
        runTemplateGeneratorNoSettings("../samples/parquet/data_sample.parquet", true);
    }

    @Test
    public void schemaTemplateWithPermissiveSettingsParquetTest() throws IOException {
        runTemplateGeneratorPermissiveSettings("../samples/parquet/data_sample.parquet", true);
    }

    @Test
    public void schemaTemplateWithRestrictiveSettingsParquetTest() throws IOException {
        runTemplateGeneratorRestrictiveSettings("../samples/parquet/data_sample.parquet",
                SAMPLE_DATA_COLUMN_COUNT, true);
    }

    @Test
    public void schemaTemplateWithRestrictiveSettingsParquetMixedDataTest() throws IOException {
        // only 1 column is a string, so we only expect 1 target columns
        runTemplateGeneratorRestrictiveSettings("../samples/parquet/rows_100_groups_10_prim_data.parquet", 1, true);
    }

    @Test
    public void schemaInteractiveCsvTest() throws IOException {
        runInteractiveGeneratorNoSettings("../samples/csv/data_sample_without_quotes.csv", true);
    }

    // Check that interactive schema command returns results and shallowly check content contains expected entries
    @Test
    public void schemaInteractiveCsvNoHeadersTest() throws IOException {
        runInteractiveGeneratorNoSettings("../samples/csv/data_sample_no_headers.csv", false);
    }

    @Test
    public void schemaInteractiveParquetTest() throws IOException {
        runInteractiveGeneratorNoSettings("../samples/parquet/data_sample.parquet", true);
    }

    @Test
    public void schemaInteractivePermissiveSettingsCsvTest() throws IOException {
        runInteractiveGeneratorPermissiveSettings("../samples/csv/data_sample_without_quotes.csv", true);
    }

    @Test
    public void schemaInteractivePermissiveSettingsCsvNoHeadersTest() throws IOException {
        runInteractiveGeneratorPermissiveSettings("../samples/csv/data_sample_no_headers.csv", false);
    }

    @Test
    public void schemaInteractivePermissiveSettingsParquetTest() throws IOException {
        // Fixed: previously called runInteractiveGeneratorNoSettings, so the permissive-settings
        // path was never exercised for parquet input (copy-paste defect; compare with
        // schemaTemplateWithPermissiveSettingsParquetTest which uses the permissive runner).
        runInteractiveGeneratorPermissiveSettings("../samples/parquet/data_sample.parquet", true);
    }

    @Test
    public void schemaInteractiveRestrictiveSettingsCsvTest() throws IOException {
        runInteractiveGeneratorRestrictiveSettings("../samples/csv/data_sample_without_quotes.csv",
                SAMPLE_DATA_COLUMN_COUNT, true);
    }

    @Test
    public void schemaInteractiveRestrictiveSettingsCsvNoHeadersTest() throws IOException {
        runInteractiveGeneratorRestrictiveSettings("../samples/csv/data_sample_no_headers.csv",
                SAMPLE_DATA_COLUMN_COUNT, false);
    }

    @Test
    public void schemaInteractiveRestrictiveSettingsParquetTest() throws IOException {
        runInteractiveGeneratorRestrictiveSettings("../samples/parquet/data_sample.parquet",
                SAMPLE_DATA_COLUMN_COUNT, true);
    }

    @Test
    public void schemaInteractiveRestrictiveSettingsParquetMixedDataTest() throws IOException {
        // Only 1 column is of type string, so we expect 1 target column only
        runInteractiveGeneratorRestrictiveSettings("../samples/parquet/rows_100_groups_10_prim_data.parquet", 1, true);
    }
}
6,299