(column stats from the dataset viewer: code 25–201k chars; docstring 19–96.2k chars; func_name 0–235 chars; language 1 class; repo 8–51 chars; path 11–314 chars; url 62–377 chars; license 7 classes)

| code | docstring | func_name | language | repo | path | url | license |
|---|---|---|---|---|---|---|---|
public static <T> CompletableFuture<T> supplyAsync(
SupplierWithException<T, ?> supplier, Executor executor) {
return CompletableFuture.supplyAsync(
() -> {
try {
return supplier.get();
} catch (Throwable e) {
throw new CompletionException(e);
}
},
executor);
}
|
Returns a future which is completed with the result of the {@link SupplierWithException}.
@param supplier to provide the future's value
@param executor to execute the supplier
@param <T> type of the result
@return Future which is completed with the value of the supplier
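A minimal usage sketch (not part of the Flink sources; assumes flink-core is on the classpath):

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.flink.util.concurrent.FutureUtils;

public class SupplyAsyncExample {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            // The supplier may throw a checked exception; supplyAsync wraps any
            // Throwable in a CompletionException, so the future completes
            // exceptionally instead of the exception being swallowed.
            CompletableFuture<String> future =
                    FutureUtils.supplyAsync(
                            () -> "computed on " + Thread.currentThread().getName(), executor);
            System.out.println(future.join());
        } finally {
            executor.shutdown();
        }
    }
}
```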
|
supplyAsync
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
public static CompletableFuture<Void> runAsync(
RunnableWithException runnable, Executor executor) {
return CompletableFuture.runAsync(
() -> {
try {
runnable.run();
} catch (Throwable e) {
throw new CompletionException(e);
}
},
executor);
}
|
Returns a future which is completed when the {@link RunnableWithException} is finished.
@param runnable represents the task
@param executor to execute the runnable
@return Future which is completed when runnable is finished
|
runAsync
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
public static <IN, OUT> CompletableFuture<OUT> thenApplyAsyncIfNotDone(
CompletableFuture<IN> completableFuture,
Executor executor,
Function<? super IN, ? extends OUT> applyFun) {
return completableFuture.isDone()
? completableFuture.thenApply(applyFun)
: completableFuture.thenApplyAsync(applyFun, executor);
}
|
This function takes a {@link CompletableFuture} and a function to apply to this future. If
the input future is already done, this function returns {@link
CompletableFuture#thenApply(Function)}. Otherwise, the return value is {@link
CompletableFuture#thenApplyAsync(Function, Executor)} with the given executor.
@param completableFuture the completable future to apply the function to.
@param executor the executor to run the apply function if the future is not yet done.
@param applyFun the function to apply.
@param <IN> type of the input future.
@param <OUT> type of the output future.
@return a completable future that results from applying the given function to the input future.
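A short sketch of the dispatch behaviour (fragment only, with hypothetical futures; `Runnable::run` serves as a direct, calling-thread executor):

```java
// `source` is already done, so String::length runs synchronously on the calling
// thread; for a pending future it would be scheduled on the executor instead.
CompletableFuture<String> source = CompletableFuture.completedFuture("done");
CompletableFuture<Integer> length =
        FutureUtils.thenApplyAsyncIfNotDone(source, Runnable::run, String::length);
```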
|
thenApplyAsyncIfNotDone
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
public static <IN, OUT> CompletableFuture<OUT> thenComposeAsyncIfNotDone(
CompletableFuture<IN> completableFuture,
Executor executor,
Function<? super IN, ? extends CompletionStage<OUT>> composeFun) {
return completableFuture.isDone()
? completableFuture.thenCompose(composeFun)
: completableFuture.thenComposeAsync(composeFun, executor);
}
|
This function takes a {@link CompletableFuture} and a function to compose with this future.
If the input future is already done, this function returns {@link
CompletableFuture#thenCompose(Function)}. Otherwise, the return value is {@link
CompletableFuture#thenComposeAsync(Function, Executor)} with the given executor.
@param completableFuture the completable future to compose the function with.
@param executor the executor to run the compose function if the future is not yet done.
@param composeFun the function to compose.
@param <IN> type of the input future.
@param <OUT> type of the output future.
@return a completable future that is a composition of the input future and the function.
|
thenComposeAsyncIfNotDone
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
public static <IN> CompletableFuture<IN> whenCompleteAsyncIfNotDone(
CompletableFuture<IN> completableFuture,
Executor executor,
BiConsumer<? super IN, ? super Throwable> whenCompleteFun) {
return completableFuture.isDone()
? completableFuture.whenComplete(whenCompleteFun)
: completableFuture.whenCompleteAsync(whenCompleteFun, executor);
}
|
This function takes a {@link CompletableFuture} and a bi-consumer to call on completion of
this future. If the input future is already done, this function returns {@link
CompletableFuture#whenComplete(BiConsumer)}. Otherwise, the return value is {@link
CompletableFuture#whenCompleteAsync(BiConsumer, Executor)} with the given executor.
@param completableFuture the completable future for which we want to call #whenComplete.
@param executor the executor to run the whenComplete function if the future is not yet done.
@param whenCompleteFun the bi-consumer function to call when the future is completed.
@param <IN> type of the input future.
@return the new completion stage.
|
whenCompleteAsyncIfNotDone
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
public static <IN> CompletableFuture<Void> thenAcceptAsyncIfNotDone(
CompletableFuture<IN> completableFuture,
Executor executor,
Consumer<? super IN> consumer) {
return completableFuture.isDone()
? completableFuture.thenAccept(consumer)
: completableFuture.thenAcceptAsync(consumer, executor);
}
|
This function takes a {@link CompletableFuture} and a consumer to accept the result of this
future. If the input future is already done, this function returns {@link
CompletableFuture#thenAccept(Consumer)}. Otherwise, the return value is {@link
CompletableFuture#thenAcceptAsync(Consumer, Executor)} with the given executor.
@param completableFuture the completable future for which we want to call #thenAccept.
@param executor the executor to run the thenAccept function if the future is not yet done.
@param consumer the consumer function to call when the future is completed.
@param <IN> type of the input future.
@return the new completion stage.
|
thenAcceptAsyncIfNotDone
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
public static <IN, OUT> CompletableFuture<OUT> handleAsyncIfNotDone(
CompletableFuture<IN> completableFuture,
Executor executor,
BiFunction<? super IN, Throwable, ? extends OUT> handler) {
return completableFuture.isDone()
? completableFuture.handle(handler)
: completableFuture.handleAsync(handler, executor);
}
|
This function takes a {@link CompletableFuture} and a handler function for the result of this
future. If the input future is already done, this function returns {@link
CompletableFuture#handle(BiFunction)}. Otherwise, the return value is {@link
CompletableFuture#handleAsync(BiFunction, Executor)} with the given executor.
@param completableFuture the completable future for which we want to call #handle.
@param executor the executor to run the handle function if the future is not yet done.
@param handler the handler function to call when the future is completed.
@param <IN> type of the handler input argument.
@param <OUT> type of the handler return value.
@return the new completion stage.
|
handleAsyncIfNotDone
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
public static boolean isCompletedNormally(CompletableFuture<?> future) {
return future.isDone() && !future.isCompletedExceptionally();
}
|
@return true if the future has completed normally, false otherwise.
|
isCompletedNormally
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
public static <T> T checkStateAndGet(CompletableFuture<T> future) {
checkCompletedNormally(future);
return getWithoutException(future);
}
|
Checks that the future has completed normally and returns the result.
@return the result of the completable future.
@throws IllegalStateException Thrown if the future has not completed or has completed
exceptionally.
|
checkStateAndGet
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
@Nullable
public static <T> T getWithoutException(CompletableFuture<T> future) {
if (isCompletedNormally(future)) {
try {
return future.get();
} catch (InterruptedException | ExecutionException ignored) {
}
}
return null;
}
|
Gets the result of a completable future without any exception thrown.
@param future the completable future specified.
@param <T> the type of result
@return the result of the completable future, or null if it is unfinished or finished
exceptionally
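An illustrative fragment (hypothetical values; `getOrDefault` is the sibling utility shown a few rows below):

```java
CompletableFuture<Integer> pending = new CompletableFuture<>();
Integer value = FutureUtils.getWithoutException(pending);      // null: not yet done
int orZero = FutureUtils.getOrDefault(pending, 0);             // falls back to 0
pending.complete(42);
System.out.println(FutureUtils.getWithoutException(pending));  // prints 42
```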
|
getWithoutException
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
public static <T> T getOrDefault(CompletableFuture<T> future, T defaultValue) {
T value = getWithoutException(future);
return value == null ? defaultValue : value;
}
|
@return the result of the completable future, or the defaultValue if it has not yet completed normally.
|
getOrDefault
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
@Override
public void run() {
future.completeExceptionally(new TimeoutException(timeoutMsg));
}
|
Runnable to complete the given future with a {@link TimeoutException}.
|
run
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
private static ScheduledFuture<?> delay(Runnable runnable, long delay, TimeUnit timeUnit) {
checkNotNull(runnable);
checkNotNull(timeUnit);
return DELAYER.schedule(runnable, delay, timeUnit);
}
|
Delay the given action by the given delay.
@param runnable to execute after the given delay
@param delay after which to execute the runnable
@param timeUnit time unit of the delay
@return Future of the scheduled action
|
delay
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
public static void assertNoException(CompletableFuture<?> completableFuture) {
handleUncaughtException(completableFuture, FatalExitExceptionHandler.INSTANCE);
}
|
Asserts that the given {@link CompletableFuture} is not completed exceptionally. If the
future is completed exceptionally, then it will call the {@link FatalExitExceptionHandler}.
@param completableFuture to assert for no exceptions
|
assertNoException
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
public static <T, E extends Throwable> CompletableFuture<T> handleException(
CompletableFuture<? extends T> completableFuture,
Class<E> exceptionClass,
Function<? super E, ? extends T> exceptionHandler) {
final CompletableFuture<T> handledFuture = new CompletableFuture<>();
checkNotNull(completableFuture)
.whenComplete(
(result, throwable) -> {
if (throwable == null) {
handledFuture.complete(result);
} else if (exceptionClass.isAssignableFrom(throwable.getClass())) {
final E exception = exceptionClass.cast(throwable);
try {
handledFuture.complete(exceptionHandler.apply(exception));
} catch (Throwable t) {
handledFuture.completeExceptionally(t);
}
} else {
handledFuture.completeExceptionally(throwable);
}
});
return handledFuture;
}
|
Checks that the given {@link CompletableFuture} is not completed exceptionally with the
specified class. If the future is completed exceptionally with the specific class, then try
to recover using a given exception handler. If the exception does not match the specified
class, just pass it through to later stages.
@param completableFuture to assert for a given exception
@param exceptionClass exception class to assert for
@param exceptionHandler to call if the future is completed exceptionally with the specific
exception
@return a completable future that can recover from the specified exception
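A runnable sketch (not from the Flink sources; the TimeoutException and the fallback value are made up for illustration). Note that if the failure arrives wrapped in a CompletionException from an intermediate stage, the class check sees the wrapper rather than the cause:

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeoutException;

import org.apache.flink.util.concurrent.FutureUtils;

public class HandleExceptionExample {
    public static void main(String[] args) {
        CompletableFuture<Long> raw = new CompletableFuture<>();
        raw.completeExceptionally(new TimeoutException("deadline exceeded"));

        // TimeoutException is mapped to a fallback value; any other failure
        // would be passed through to later stages unchanged.
        CompletableFuture<Long> safe =
                FutureUtils.handleException(raw, TimeoutException.class, e -> -1L);
        System.out.println(safe.join()); // prints -1
    }
}
```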
|
handleException
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
public static <T> void forwardAsync(
CompletableFuture<T> source, CompletableFuture<T> target, Executor executor) {
source.whenCompleteAsync(forwardTo(target), executor);
}
|
Forwards the value from the source future to the target future using the provided executor.
@param source future to forward the value from
@param target future to forward the value to
@param executor executor to forward the source value to the target future
@param <T> type of the value
|
forwardAsync
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
public static <T> void doForward(
@Nullable T value, @Nullable Throwable throwable, CompletableFuture<T> target) {
if (throwable != null) {
target.completeExceptionally(throwable);
} else {
target.complete(value);
}
}
|
Completes the given future with either the given value or throwable, depending on which
parameter is not null.
@param value value with which the future should be completed
@param throwable throwable with which the future should be completed exceptionally
@param target future to complete
@param <T> type of the value
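An illustrative fragment (hypothetical futures; this mirrors what forwardAsync does through its forwardTo helper):

```java
CompletableFuture<String> source = new CompletableFuture<>();
CompletableFuture<String> target = new CompletableFuture<>();
// Propagate either the value or the failure of `source` into `target`.
source.whenComplete((value, error) -> FutureUtils.doForward(value, error, target));
source.complete("ok"); // `target` now completes with "ok" as well
```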
|
doForward
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
public static <T> CompletableFuture<T> switchExecutor(
CompletableFuture<? extends T> source, Executor executor) {
return source.handleAsync(
(t, throwable) -> {
if (throwable != null) {
throw new CompletionException(throwable);
} else {
return t;
}
},
executor);
}
|
Switches the execution context of the given source future. This works for normally and
exceptionally completed futures.
@param source source to switch the execution context for
@param executor executor representing the new execution context
@param <T> type of the source
@return future which is executed by the given executor
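A brief fragment (hypothetical executor and future):

```java
ExecutorService executor = Executors.newSingleThreadExecutor();
CompletableFuture<Integer> source = CompletableFuture.completedFuture(1);
// Dependent stages of `switched` run via `executor`, for both normal and
// exceptional completion of `source`.
CompletableFuture<Integer> switched = FutureUtils.switchExecutor(source, executor);
switched.thenAccept(v -> System.out.println(Thread.currentThread().getName() + ": " + v));
```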
|
switchExecutor
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java
|
Apache-2.0
|
@Override
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
if (executor.isShutdown()) {
if (logger != null) {
logger.debug("Execution is rejected because shutdown is in progress");
}
} else {
throw new RejectedExecutionException();
}
}
|
Rejected executions are ignored, or logged at debug level, if the executor is {@link
java.util.concurrent.ThreadPoolExecutor#isShutdown shutdown}. Otherwise, a {@link
RejectedExecutionException} is thrown.
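A wiring sketch (fragment only; it assumes the handler exposes a public no-argument constructor, which this row does not show):

```java
ThreadPoolExecutor pool =
        new ThreadPoolExecutor(
                1, 1, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<>(),
                new IgnoreShutdownRejectedExecutionHandler());
pool.shutdown();
pool.execute(() -> {}); // ignored (or logged at debug) instead of throwing
```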
|
rejectedExecution
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/IgnoreShutdownRejectedExecutionHandler.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/IgnoreShutdownRejectedExecutionHandler.java
|
Apache-2.0
|
@Override
public void execute(@Nonnull Runnable command) {
threadFactory.newThread(command).start();
}
|
An {@link Executor} that runs every runnable in a separate thread.
|
execute
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/SeparateThreadExecutor.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/SeparateThreadExecutor.java
|
Apache-2.0
|
public static void errorLogThreadDump(Logger logger) {
final ThreadInfo[] perThreadInfo =
ManagementFactory.getThreadMXBean().dumpAllThreads(true, true);
logger.error(
"Thread dump: \n{}",
Arrays.stream(perThreadInfo).map(Object::toString).collect(Collectors.joining()));
}
|
{@code ThreadUtils} collects helper methods in the context of threading.
|
errorLogThreadDump
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/concurrent/ThreadUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/concurrent/ThreadUtils.java
|
Apache-2.0
|
@SuppressWarnings("unchecked")
public static <T> Function<T, Void> nullFn() {
return (Function<T, Void>) NULL_FN;
}
|
Function which returns {@code null} (type: Void).
@param <T> input type
@return Function which returns {@code null}.
|
nullFn
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/function/FunctionUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/function/FunctionUtils.java
|
Apache-2.0
|
@SuppressWarnings("unchecked")
public static <T> Consumer<T> ignoreFn() {
return (Consumer<T>) IGNORE_FN;
}
|
Consumer which ignores the input.
@param <T> type of the input
@return Ignoring {@link Consumer}
|
ignoreFn
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/function/FunctionUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/function/FunctionUtils.java
|
Apache-2.0
|
public static <A> Consumer<A> uncheckedConsumer(ThrowingConsumer<A, ?> throwingConsumer) {
return (A value) -> {
try {
throwingConsumer.accept(value);
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
}
};
}
|
Converts a {@link ThrowingConsumer} into a {@link Consumer} which throws checked exceptions
as unchecked.
@param throwingConsumer to convert into a {@link Consumer}
@param <A> input type
@return {@link Consumer} which throws all checked exceptions as unchecked
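An illustrative fragment (hypothetical file paths; `Files::delete` declares IOException, which a plain `Consumer` cannot):

```java
List<Path> paths = List.of(Path.of("a.tmp"), Path.of("b.tmp"));
// Any IOException from Files::delete is rethrown as an unchecked exception,
// so the resulting lambda fits java.util.function.Consumer.
paths.forEach(FunctionUtils.uncheckedConsumer(Files::delete));
```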
|
uncheckedConsumer
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/function/FunctionUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/function/FunctionUtils.java
|
Apache-2.0
|
public static <T> Supplier<T> uncheckedSupplier(
SupplierWithException<T, ?> supplierWithException) {
return () -> {
T result = null;
try {
result = supplierWithException.get();
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
}
return result;
};
}
|
Converts a {@link SupplierWithException} into a {@link Supplier} which throws all checked
exceptions as unchecked.
@param supplierWithException to convert into a {@link Supplier}
@return {@link Supplier} which throws all checked exceptions as unchecked.
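A small fragment (the file name is hypothetical; `Files.readString` requires Java 11+):

```java
Supplier<String> config =
        FunctionUtils.uncheckedSupplier(() -> Files.readString(Path.of("flink-conf.yaml")));
String contents = config.get(); // any IOException surfaces as an unchecked exception
```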
|
uncheckedSupplier
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/function/FunctionUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/function/FunctionUtils.java
|
Apache-2.0
|
public static <T> Callable<T> asCallable(RunnableWithException command, T result) {
return () -> {
command.run();
return result;
};
}
|
Converts {@link RunnableWithException} into a {@link Callable} that will return the {@code
result}.
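A short fragment showing the conversion (hypothetical side effect):

```java
// Useful where an API expects a Callable, e.g. ExecutorService#submit.
Callable<Boolean> task =
        FunctionUtils.asCallable(() -> System.out.println("side effect"), Boolean.TRUE);
Boolean ok = task.call(); // runs the runnable, then returns TRUE (call() may throw)
```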
|
asCallable
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/util/function/FunctionUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/function/FunctionUtils.java
|
Apache-2.0
|
private static WatermarkOutput createImmediateOutput(WatermarkOutputMultiplexer multiplexer) {
final String id = UUID.randomUUID().toString();
multiplexer.registerNewOutput(id);
return multiplexer.getImmediateOutput(id);
}
|
Convenience method so we don't have to go through the output ID dance when we only want an
immediate output for a given output ID.
|
createImmediateOutput
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/eventtime/WatermarkOutputMultiplexerTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/eventtime/WatermarkOutputMultiplexerTest.java
|
Apache-2.0
|
private File createBinaryInputFile(String fileName, int blockSize, int numBlocks)
throws IOException {
final File tempFile = TempDirUtils.newFile(tempDir, fileName);
try (FileOutputStream fileOutputStream = new FileOutputStream(tempFile)) {
for (int i = 0; i < blockSize * numBlocks; i++) {
fileOutputStream.write(new byte[] {1});
}
}
return tempFile;
}
|
Creates a temp file with a certain number of blocks of a certain size.
|
createBinaryInputFile
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/io/BinaryInputFormatTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/io/BinaryInputFormatTest.java
|
Apache-2.0
|
@Test
void testReadOverSplitBoundariesUnaligned() throws IOException {
final String myString = "value1\nvalue2\nvalue3";
final FileInputSplit split = createTempFile(myString);
FileInputSplit split1 =
new FileInputSplit(
0, split.getPath(), 0, split.getLength() / 2, split.getHostnames());
FileInputSplit split2 =
new FileInputSplit(
1,
split.getPath(),
split1.getLength(),
split.getLength(),
split.getHostnames());
final Configuration parameters = new Configuration();
format.configure(parameters);
format.open(split1);
assertThat(format.nextRecord(null)).isEqualTo("value1");
assertThat(format.nextRecord(null)).isEqualTo("value2");
assertThat(format.nextRecord(null)).isNull();
assertThat(format.reachedEnd()).isTrue();
format.close();
format.open(split2);
assertThat(format.nextRecord(null)).isEqualTo("value3");
assertThat(format.nextRecord(null)).isNull();
assertThat(format.reachedEnd()).isTrue();
format.close();
}
|
Tests that the records are read correctly when the split boundary is in the middle of a
record.
|
testReadOverSplitBoundariesUnaligned
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/io/DelimitedInputFormatTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/io/DelimitedInputFormatTest.java
|
Apache-2.0
|
@Test
void testNoNestedDirectoryTrue() throws IOException {
String filePath = TestFileUtils.createTempFile("foo");
this.format.setFilePath(new Path(filePath));
this.config.set(getBooleanConfigOption("recursive.file.enumeration"), true);
format.configure(this.config);
FileInputSplit[] splits = format.createInputSplits(1);
assertThat(splits).hasSize(1);
}
|
Test without nested directory and recursive.file.enumeration = true
|
testNoNestedDirectoryTrue
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/io/EnumerateNestedFilesTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/io/EnumerateNestedFilesTest.java
|
Apache-2.0
|
@Test
void testOneNestedDirectoryTrue() throws IOException {
String firstLevelDir = TestFileUtils.randomFileName();
String secondLevelDir = TestFileUtils.randomFileName();
File insideNestedDir = TempDirUtils.newFolder(tempDir, firstLevelDir, secondLevelDir);
File nestedDir = insideNestedDir.getParentFile();
// create a file in the first-level and two files in the nested dir
TestFileUtils.createTempFileInDirectory(nestedDir.getAbsolutePath(), "paella");
TestFileUtils.createTempFileInDirectory(insideNestedDir.getAbsolutePath(), "kalamari");
TestFileUtils.createTempFileInDirectory(insideNestedDir.getAbsolutePath(), "fideua");
this.format.setFilePath(new Path(nestedDir.toURI().toString()));
this.format.setNestedFileEnumeration(true);
format.configure(this.config);
FileInputSplit[] splits = format.createInputSplits(1);
assertThat(splits).hasSize(3);
}
|
Test with one nested directory and recursive.file.enumeration = true
|
testOneNestedDirectoryTrue
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/io/EnumerateNestedFilesTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/io/EnumerateNestedFilesTest.java
|
Apache-2.0
|
@Test
void testOneNestedDirectoryFalse() throws IOException {
String firstLevelDir = TestFileUtils.randomFileName();
String secondLevelDir = TestFileUtils.randomFileName();
File insideNestedDir = TempDirUtils.newFolder(tempDir, firstLevelDir, secondLevelDir);
File nestedDir = insideNestedDir.getParentFile();
// create a file in the first-level and two files in the nested dir
TestFileUtils.createTempFileInDirectory(nestedDir.getAbsolutePath(), "paella");
TestFileUtils.createTempFileInDirectory(insideNestedDir.getAbsolutePath(), "kalamari");
TestFileUtils.createTempFileInDirectory(insideNestedDir.getAbsolutePath(), "fideua");
this.format.setFilePath(new Path(nestedDir.toURI().toString()));
this.config.set(getBooleanConfigOption("recursive.file.enumeration"), false);
format.configure(this.config);
FileInputSplit[] splits = format.createInputSplits(1);
assertThat(splits).hasSize(1);
}
|
Test with one nested directory and recursive.file.enumeration = false
|
testOneNestedDirectoryFalse
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/io/EnumerateNestedFilesTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/io/EnumerateNestedFilesTest.java
|
Apache-2.0
|
@Test
void testOnlyLevel2NestedDirectories() throws IOException {
String rootDir = TestFileUtils.randomFileName();
String nestedDir = TestFileUtils.randomFileName();
String firstNestedNestedDir = TestFileUtils.randomFileName();
String secondNestedNestedDir = TestFileUtils.randomFileName();
File testDir = TempDirUtils.newFolder(tempDir, rootDir);
TempDirUtils.newFolder(tempDir, rootDir, nestedDir);
File nestedNestedDir1 =
TempDirUtils.newFolder(tempDir, rootDir, nestedDir, firstNestedNestedDir);
File nestedNestedDir2 =
TempDirUtils.newFolder(tempDir, rootDir, nestedDir, secondNestedNestedDir);
// create files in second level
TestFileUtils.createTempFileInDirectory(nestedNestedDir1.getAbsolutePath(), "paella");
TestFileUtils.createTempFileInDirectory(nestedNestedDir1.getAbsolutePath(), "kalamari");
TestFileUtils.createTempFileInDirectory(nestedNestedDir2.getAbsolutePath(), "fideua");
TestFileUtils.createTempFileInDirectory(nestedNestedDir2.getAbsolutePath(), "bravas");
this.format.setFilePath(new Path(testDir.getAbsolutePath()));
this.format.setNestedFileEnumeration(true);
format.configure(this.config);
FileInputSplit[] splits = format.createInputSplits(1);
assertThat(splits).hasSize(4);
}
|
Tests if the recursion is invoked correctly in nested directories.
|
testOnlyLevel2NestedDirectories
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/io/EnumerateNestedFilesTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/io/EnumerateNestedFilesTest.java
|
Apache-2.0
|
@Test
void testFileInputFormatWithCompression() throws IOException {
String tempFile =
TestFileUtils.createTempFileDirForProvidedFormats(
TempDirUtils.newFolder(temporaryFolder),
FileInputFormat.getSupportedCompressionFormats());
final DummyFileInputFormat format = new DummyFileInputFormat();
format.setFilePath(tempFile);
format.configure(new Configuration());
FileInputSplit[] splits = format.createInputSplits(2);
final Set<String> supportedCompressionFormats =
FileInputFormat.getSupportedCompressionFormats();
assertThat(splits).hasSameSizeAs(supportedCompressionFormats);
for (FileInputSplit split : splits) {
assertThat(split.getLength())
.isEqualTo(
FileInputFormat.READ_WHOLE_SPLIT_FLAG); // unsplittable compressed files
// have this size as a
// flag for "read whole file"
assertThat(split.getStart()).isZero(); // always read from the beginning.
}
// test if this also works for "mixed" directories
TestFileUtils.createTempFileInDirectory(
tempFile.replace("file:", ""),
"this creates a test file with a random extension (at least not .deflate)");
final DummyFileInputFormat formatMixed = new DummyFileInputFormat();
formatMixed.setFilePath(tempFile);
formatMixed.configure(new Configuration());
FileInputSplit[] splitsMixed = formatMixed.createInputSplits(2);
assertThat(splitsMixed).hasSize(supportedCompressionFormats.size() + 1);
for (FileInputSplit split : splitsMixed) {
final String extension =
FileInputFormat.extractFileExtension(split.getPath().getName());
if (supportedCompressionFormats.contains(extension)) {
assertThat(split.getLength())
.isEqualTo(
FileInputFormat.READ_WHOLE_SPLIT_FLAG); // unsplittable compressed
// files have this size as a
// flag for "read whole file"
assertThat(split.getStart()).isZero(); // always read from the beginning.
} else {
assertThat(split.getStart()).isEqualTo(0L);
assertThat(split.getLength() > 0).as("split size not correct").isTrue();
}
}
}
|
Creates a directory with compressed files and checks that a split is created for each file.
Each split has to start from the beginning.
|
testFileInputFormatWithCompression
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/io/FileInputFormatTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/io/FileInputFormatTest.java
|
Apache-2.0
|
@Test
void testFileInputFormatWithCompressionFromFileSource() throws IOException {
String tempFile =
TestFileUtils.createTempFileDirForProvidedFormats(
TempDirUtils.newFolder(temporaryFolder),
FileInputFormat.getSupportedCompressionFormats());
DummyFileInputFormat format = new DummyFileInputFormat();
format.setFilePath(tempFile);
format.configure(new Configuration());
// manually create a FileInputSplit per file as FileSource would do
// see org.apache.flink.connector.file.table.DeserializationSchemaAdapter.Reader()
List<FileInputSplit> splits = manuallyCreateSplits(tempFile);
final Set<String> supportedCompressionFormats =
FileInputFormat.getSupportedCompressionFormats();
// one file per compression format, one split per file
assertThat(splits).hasSameSizeAs(supportedCompressionFormats);
for (FileInputSplit split : splits) {
assertThat(split.getStart()).isZero(); // always read from the beginning.
format.open(split);
assertThat(format.compressedRead).isTrue();
assertThat(format.getSplitLength())
.isEqualTo(
FileInputFormat.READ_WHOLE_SPLIT_FLAG); // unsplittable compressed files
// have this size
// as flag for "read whole file"
}
}
|
Some FileInputFormats don't use FileInputFormat#createSplits (which would detect that the file
is non-splittable and deal with reading boundaries correctly); instead they create splits
manually from FileSourceSplit. If input files are compressed, ensure that the size of the
split is not the compressed file size and that the compression decorator is called.
|
testFileInputFormatWithCompressionFromFileSource
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/io/FileInputFormatTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/io/FileInputFormatTest.java
|
Apache-2.0
|
@Test
void testCheckRuntimeContextAccess() {
final SerializedInputFormat<Value> inputFormat = new SerializedInputFormat<>();
final TaskInfo taskInfo = new TaskInfoImpl("test name", 3, 1, 3, 0);
inputFormat.setRuntimeContext(
new RuntimeUDFContext(
taskInfo,
getClass().getClassLoader(),
new ExecutionConfig(),
new HashMap<>(),
new HashMap<>(),
UnregisteredMetricsGroup.createOperatorMetricGroup()));
assertThat(inputFormat.getRuntimeContext().getTaskInfo().getIndexOfThisSubtask()).isOne();
assertThat(inputFormat.getRuntimeContext().getTaskInfo().getNumberOfParallelSubtasks())
.isEqualTo(3);
}
|
Tests runtime context access from inside a RichInputFormat class.
|
testCheckRuntimeContextAccess
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/io/RichInputFormatTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/io/RichInputFormatTest.java
|
Apache-2.0
|
@Test
void testCheckRuntimeContextAccess() {
final SerializedOutputFormat<Value> inputFormat = new SerializedOutputFormat<>();
final TaskInfo taskInfo = new TaskInfoImpl("test name", 3, 1, 3, 0);
inputFormat.setRuntimeContext(
new RuntimeUDFContext(
taskInfo,
getClass().getClassLoader(),
new ExecutionConfig(),
new HashMap<>(),
new HashMap<>(),
UnregisteredMetricsGroup.createOperatorMetricGroup()));
assertThat(inputFormat.getRuntimeContext().getTaskInfo().getIndexOfThisSubtask()).isOne();
assertThat(inputFormat.getRuntimeContext().getTaskInfo().getNumberOfParallelSubtasks())
.isEqualTo(3);
}
|
Tests runtime context access from inside a RichOutputFormat class.
|
testCheckRuntimeContextAccess
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/io/RichOutputFormatTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/io/RichOutputFormatTest.java
|
Apache-2.0
|
protected Order[] getTestedOrder() {
return new Order[] {Order.ASCENDING, Order.DESCENDING};
}
|
Abstract test base for comparators.
@param <T> type of the values compared by the comparator under test
|
getTestedOrder
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/typeutils/ComparatorTestBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/typeutils/ComparatorTestBase.java
|
Apache-2.0
|
@Override
public void close() {
Thread.currentThread().setContextClassLoader(originalThreadContextClassLoader);
}
|
Utility class to temporarily use a different classloader as the thread context classloader.
|
close
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/typeutils/ThreadContextClassLoader.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/typeutils/ThreadContextClassLoader.java
|
Apache-2.0
|
@Override
public String toString() {
return name + " / " + flinkVersion;
}
|
Specification of one test scenario. This mainly needs a {@link PreUpgradeSetup} and {@link
UpgradeVerifier}.
|
toString
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/typeutils/TypeSerializerUpgradeTestBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/typeutils/TypeSerializerUpgradeTestBase.java
|
Apache-2.0
|
private <T> void assertSerializerIsValid(
TypeSerializer<T> serializer, DataInputView dataInput, Condition<T> testDataMatcher)
throws Exception {
DataInputView serializedData =
readAndThenWriteData(dataInput, serializer, serializer, testDataMatcher);
TypeSerializerSnapshot<T> snapshot = writeAndThenReadSerializerSnapshot(serializer);
TypeSerializer<T> restoreSerializer = snapshot.restoreSerializer();
serializedData =
readAndThenWriteData(
serializedData, restoreSerializer, restoreSerializer, testDataMatcher);
TypeSerializer<T> duplicateSerializer = snapshot.restoreSerializer().duplicate();
readAndThenWriteData(
serializedData, duplicateSerializer, duplicateSerializer, testDataMatcher);
}
|
Asserts that a given {@link TypeSerializer} is valid, given a {@link DataInputView} of
serialized data.
<p>A serializer is valid, iff:
<ul>
<li>1. The serializer can read and then write again the given serialized data.
<li>2. The serializer can produce a serializer snapshot which can be written and then read
back again.
<li>3. The serializer's produced snapshot is capable of creating a restore serializer.
<li>4. The restore serializer created from the serializer snapshot can read and then write
again data written by step 1, given that the serializer is not already a restore
serializer.
</ul>
|
assertSerializerIsValid
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/typeutils/TypeSerializerUpgradeTestBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/typeutils/TypeSerializerUpgradeTestBase.java
|
Apache-2.0
|
@Test
void checkIndenticalEnums() throws Exception {
assertThat(checkCompatibility(ENUM_A, ENUM_A).isCompatibleAsIs()).isTrue();
}
|
Check that identical enums don't require migration
|
checkIndenticalEnums
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/EnumSerializerCompatibilityTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/EnumSerializerCompatibilityTest.java
|
Apache-2.0
|
@Test
void checkAppendedField() throws Exception {
assertThat(checkCompatibility(ENUM_A, ENUM_B).isCompatibleWithReconfiguredSerializer())
.isTrue();
}
|
Check that appending fields to the enum does not require migration
|
checkAppendedField
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/EnumSerializerCompatibilityTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/EnumSerializerCompatibilityTest.java
|
Apache-2.0
|
@Test
void removingFieldShouldBeIncompatible() throws Exception {
assertThatThrownBy(() -> checkCompatibility(ENUM_B, ENUM_A))
.isInstanceOf(IllegalStateException.class);
}
|
Check that removing enum fields makes the snapshot incompatible
|
removingFieldShouldBeIncompatible
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/EnumSerializerCompatibilityTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/EnumSerializerCompatibilityTest.java
|
Apache-2.0
|
public Builder waitOnlyForInitialSplits() {
this.readerWaitingForSplitsBehaviour =
MockSourceReader.WaitingForSplits.WAIT_FOR_INITIAL;
return this;
}
|
Instructs the {@link MockSourceReader} not to finish if there have been no split
assignment messages yet.
@see #waitUntilAllSplitsAssigned()
@see #doNotWaitForSplitsAssignment()
|
waitOnlyForInitialSplits
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/connector/source/mocks/MockSource.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/connector/source/mocks/MockSource.java
|
Apache-2.0
|
public Builder waitUntilAllSplitsAssigned() {
this.readerWaitingForSplitsBehaviour =
MockSourceReader.WaitingForSplits.WAIT_UNTIL_ALL_SPLITS_ASSIGNED;
return this;
}
|
Instructs the {@link MockSourceReader} to finish only once the {@link MockSplitEnumerator}
has assigned all splits and no more splits will be distributed.
@see #waitUntilAllSplitsAssigned()
@see #doNotWaitForSplitsAssignment()
|
waitUntilAllSplitsAssigned
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/connector/source/mocks/MockSource.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/connector/source/mocks/MockSource.java
|
Apache-2.0
|
public Builder doNotWaitForSplitsAssignment() {
this.readerWaitingForSplitsBehaviour =
MockSourceReader.WaitingForSplits.DO_NOT_WAIT_FOR_SPLITS;
return this;
}
|
Instructs the {@link MockSourceReader} to finish irrespective of whether more splits may
be assigned in the future.
@see #waitUntilAllSplitsAssigned()
@see #doNotWaitForSplitsAssignment()
|
doNotWaitForSplitsAssignment
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/connector/source/mocks/MockSource.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/connector/source/mocks/MockSource.java
|
Apache-2.0
|
public int[] getNext(boolean blocking) throws InterruptedException {
Integer value = blocking ? records.take() : records.poll();
return value == null ? null : new int[] {value, index++};
}
|
Gets the next element, blocking if requested.
|
getNext
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/connector/source/mocks/MockSourceSplit.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/connector/source/mocks/MockSourceSplit.java
|
Apache-2.0
|
@Override
public ParameterizedParentImpl map(Integer value) throws Exception {
return null;
}
|
Representation of map function for type extraction.
|
map
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/java/typeutils/PojoParametrizedTypeExtractionTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/java/typeutils/PojoParametrizedTypeExtractionTest.java
|
Apache-2.0
|
@Override
public boolean equals(Object obj) {
if (obj instanceof PojoContainingTuple) {
PojoContainingTuple other = (PojoContainingTuple) obj;
return someInt == other.someInt && theTuple.equals(other.theTuple);
}
return false;
}
|
This class belongs to the PojoComparatorTest.
|
equals
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/PojoContainingTuple.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/PojoContainingTuple.java
|
Apache-2.0
|
@SuppressWarnings({"rawtypes", "unchecked"})
@Test
void testTuplePojoTestEquality() throws IncompatibleKeysException {
// test with a simple, string-key first.
PojoTypeInfo<TestUserClass> pType = (PojoTypeInfo<TestUserClass>) type;
List<FlatFieldDescriptor> result = new ArrayList<FlatFieldDescriptor>();
pType.getFlatFields("nestedClass.dumm2", 0, result);
int[] fields = new int[1]; // see below
fields[0] = result.get(0).getPosition();
TypeComparator<TestUserClass> pojoComp =
pType.createComparator(fields, new boolean[] {true}, 0, new ExecutionConfig());
TestUserClass pojoTestRecord =
new TestUserClass(
0,
"abc",
3d,
new int[] {1, 2, 3},
new Date(),
new NestedTestUserClass(1, "haha", 4d, new int[] {5, 4, 3}));
int pHash = pojoComp.hash(pojoTestRecord);
Tuple1<String> tupleTest = new Tuple1<String>("haha");
TupleTypeInfo<Tuple1<String>> tType =
(TupleTypeInfo<Tuple1<String>>) TypeExtractor.getForObject(tupleTest);
TypeComparator<Tuple1<String>> tupleComp =
tType.createComparator(
new int[] {0}, new boolean[] {true}, 0, new ExecutionConfig());
int tHash = tupleComp.hash(tupleTest);
assertThat(tHash)
.isEqualTo(pHash)
.withFailMessage(
"The hashing for tuples and pojos must be the same, so that they are mixable");
Tuple3<Integer, String, Double> multiTupleTest =
new Tuple3<Integer, String, Double>(
1, "haha", 4d); // its important here to use the same values.
TupleTypeInfo<Tuple3<Integer, String, Double>> multiTupleType =
(TupleTypeInfo<Tuple3<Integer, String, Double>>)
TypeExtractor.getForObject(multiTupleTest);
ExpressionKeys fieldKey = new ExpressionKeys(new int[] {1, 0, 2}, multiTupleType);
ExpressionKeys expressKey =
new ExpressionKeys(
new String[] {
"nestedClass.dumm2", "nestedClass.dumm1", "nestedClass.dumm3"
},
pType);
assertThat(fieldKey.areCompatible(expressKey))
.isTrue()
.withFailMessage("Expecting the keys to be compatible");
TypeComparator<TestUserClass> multiPojoComp =
pType.createComparator(
expressKey.computeLogicalKeyPositions(),
new boolean[] {true, true, true},
0,
new ExecutionConfig());
int multiPojoHash = multiPojoComp.hash(pojoTestRecord);
// pojo order is: dumm2 (str), dumm1 (int), dumm3 (double).
TypeComparator<Tuple3<Integer, String, Double>> multiTupleComp =
multiTupleType.createComparator(
fieldKey.computeLogicalKeyPositions(),
new boolean[] {true, true, true},
0,
new ExecutionConfig());
int multiTupleHash = multiTupleComp.hash(multiTupleTest);
assertThat(multiPojoHash)
.isEqualTo(multiTupleHash)
.withFailMessage(
"The hashing for tuples and pojos must be the same, so that they are mixable. Also for those with multiple key fields");
}
|
Tests whether the hashes returned by the pojo and tuple comparators are the same
|
testTuplePojoTestEquality
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerTest.java
|
Apache-2.0
|
@Test
void testReconfigureWithDifferentPojoType() throws Exception {
PojoSerializer<SubTestUserClassB> pojoSerializer1 =
(PojoSerializer<SubTestUserClassB>)
TypeExtractor.getForClass(SubTestUserClassB.class)
.createSerializer(new SerializerConfigImpl());
// snapshot configuration and serialize to bytes
TypeSerializerSnapshot pojoSerializerConfigSnapshot =
pojoSerializer1.snapshotConfiguration();
byte[] serializedConfig;
try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
TypeSerializerSnapshotSerializationUtil.writeSerializerSnapshot(
new DataOutputViewStreamWrapper(out), pojoSerializerConfigSnapshot);
serializedConfig = out.toByteArray();
}
PojoSerializer<SubTestUserClassA> pojoSerializer2 =
(PojoSerializer<SubTestUserClassA>)
TypeExtractor.getForClass(SubTestUserClassA.class)
.createSerializer(new SerializerConfigImpl());
// read configuration again from bytes
try (ByteArrayInputStream in = new ByteArrayInputStream(serializedConfig)) {
pojoSerializerConfigSnapshot =
TypeSerializerSnapshotSerializationUtil.readSerializerSnapshot(
new DataInputViewStreamWrapper(in),
Thread.currentThread().getContextClassLoader());
}
@SuppressWarnings("unchecked")
TypeSerializerSchemaCompatibility<SubTestUserClassA> compatResult =
pojoSerializer2
.snapshotConfiguration()
.resolveSchemaCompatibility(pojoSerializerConfigSnapshot);
assertThat(compatResult.isIncompatible()).isTrue();
}
|
Verifies that reconfiguring with a config snapshot of a preceding POJO serializer with a
different POJO type will result in INCOMPATIBLE.
|
testReconfigureWithDifferentPojoType
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerTest.java
|
Apache-2.0
|
@Test
void testReconfigureDifferentSubclassRegistrationOrder() throws Exception {
SerializerConfigImpl serializerConfig = new SerializerConfigImpl();
serializerConfig.registerPojoType(SubTestUserClassA.class);
serializerConfig.registerPojoType(SubTestUserClassB.class);
PojoSerializer<TestUserClass> pojoSerializer =
(PojoSerializer<TestUserClass>) type.createSerializer(serializerConfig);
// get original registration ids
int subClassATag = pojoSerializer.getRegisteredClasses().get(SubTestUserClassA.class);
int subClassBTag = pojoSerializer.getRegisteredClasses().get(SubTestUserClassB.class);
// snapshot configuration and serialize to bytes
TypeSerializerSnapshot pojoSerializerConfigSnapshot =
pojoSerializer.snapshotConfiguration();
byte[] serializedConfig;
try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
TypeSerializerSnapshotSerializationUtil.writeSerializerSnapshot(
new DataOutputViewStreamWrapper(out), pojoSerializerConfigSnapshot);
serializedConfig = out.toByteArray();
}
// use new config and instantiate new PojoSerializer
serializerConfig = new SerializerConfigImpl();
serializerConfig.registerPojoType(
SubTestUserClassB.class); // test with B registered before A
serializerConfig.registerPojoType(SubTestUserClassA.class);
pojoSerializer = (PojoSerializer<TestUserClass>) type.createSerializer(serializerConfig);
// read configuration from bytes
try (ByteArrayInputStream in = new ByteArrayInputStream(serializedConfig)) {
pojoSerializerConfigSnapshot =
TypeSerializerSnapshotSerializationUtil.readSerializerSnapshot(
new DataInputViewStreamWrapper(in),
Thread.currentThread().getContextClassLoader());
}
@SuppressWarnings("unchecked")
TypeSerializerSchemaCompatibility<TestUserClass> compatResult =
pojoSerializer
.snapshotConfiguration()
.resolveSchemaCompatibility(pojoSerializerConfigSnapshot);
assertThat(compatResult.isCompatibleWithReconfiguredSerializer()).isTrue();
assertThat(compatResult.getReconfiguredSerializer()).isInstanceOf(PojoSerializer.class);
// reconfigure - check reconfiguration result and that registration ids remains the same
// assertEquals(ReconfigureResult.COMPATIBLE,
// pojoSerializer.reconfigure(pojoSerializerConfigSnapshot));
PojoSerializer<TestUserClass> reconfiguredPojoSerializer =
(PojoSerializer<TestUserClass>) compatResult.getReconfiguredSerializer();
assertThat(subClassATag)
.isEqualTo(
reconfiguredPojoSerializer
.getRegisteredClasses()
.get(SubTestUserClassA.class)
.intValue());
assertThat(subClassBTag)
.isEqualTo(
reconfiguredPojoSerializer
.getRegisteredClasses()
.get(SubTestUserClassB.class)
.intValue());
}
|
Tests that reconfiguration correctly reorders subclass registrations to their previous order.
|
testReconfigureDifferentSubclassRegistrationOrder
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerTest.java
|
Apache-2.0
|
@Test
void testReconfigureRepopulateNonregisteredSubclassSerializerCache() throws Exception {
// don't register any subclasses
PojoSerializer<TestUserClass> pojoSerializer =
(PojoSerializer<TestUserClass>) type.createSerializer(new SerializerConfigImpl());
// create cached serializers for SubTestUserClassA and SubTestUserClassB
pojoSerializer.getSubclassSerializer(SubTestUserClassA.class);
pojoSerializer.getSubclassSerializer(SubTestUserClassB.class);
assertThat(pojoSerializer.getSubclassSerializerCache())
.containsOnlyKeys(SubTestUserClassA.class, SubTestUserClassB.class);
// snapshot configuration and serialize to bytes
TypeSerializerSnapshot pojoSerializerConfigSnapshot =
pojoSerializer.snapshotConfiguration();
byte[] serializedConfig;
try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
TypeSerializerSnapshotSerializationUtil.writeSerializerSnapshot(
new DataOutputViewStreamWrapper(out), pojoSerializerConfigSnapshot);
serializedConfig = out.toByteArray();
}
// instantiate new PojoSerializer
pojoSerializer =
(PojoSerializer<TestUserClass>) type.createSerializer(new SerializerConfigImpl());
// read configuration from bytes
try (ByteArrayInputStream in = new ByteArrayInputStream(serializedConfig)) {
pojoSerializerConfigSnapshot =
TypeSerializerSnapshotSerializationUtil.readSerializerSnapshot(
new DataInputViewStreamWrapper(in),
Thread.currentThread().getContextClassLoader());
}
// reconfigure - check reconfiguration result and that subclass serializer cache is
// repopulated
@SuppressWarnings("unchecked")
TypeSerializerSchemaCompatibility<TestUserClass> compatResult =
pojoSerializer
.snapshotConfiguration()
.resolveSchemaCompatibility(pojoSerializerConfigSnapshot);
assertThat(compatResult.isCompatibleWithReconfiguredSerializer()).isTrue();
assertThat(compatResult.getReconfiguredSerializer()).isInstanceOf(PojoSerializer.class);
PojoSerializer<TestUserClass> reconfiguredPojoSerializer =
(PojoSerializer<TestUserClass>) compatResult.getReconfiguredSerializer();
assertThat(reconfiguredPojoSerializer.getSubclassSerializerCache())
.containsOnlyKeys(SubTestUserClassA.class, SubTestUserClassB.class);
}
|
Tests that reconfiguration repopulates previously cached subclass serializers.
|
testReconfigureRepopulateNonregisteredSubclassSerializerCache
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerTest.java
|
Apache-2.0
|
@Test
void testReconfigureWithPreviouslyNonregisteredSubclasses() throws Exception {
// don't register any subclasses at first
PojoSerializer<TestUserClass> pojoSerializer =
(PojoSerializer<TestUserClass>) type.createSerializer(new SerializerConfigImpl());
// create cached serializers for SubTestUserClassA and SubTestUserClassB
pojoSerializer.getSubclassSerializer(SubTestUserClassA.class);
pojoSerializer.getSubclassSerializer(SubTestUserClassB.class);
// make sure serializers are in cache
assertThat(pojoSerializer.getSubclassSerializerCache())
.containsOnlyKeys(SubTestUserClassA.class, SubTestUserClassB.class);
// make sure that registrations are empty
assertThat(pojoSerializer.getRegisteredClasses()).isEmpty();
assertThat(pojoSerializer.getRegisteredSerializers()).isEmpty();
// snapshot configuration and serialize to bytes
TypeSerializerSnapshot pojoSerializerConfigSnapshot =
pojoSerializer.snapshotConfiguration();
byte[] serializedConfig;
try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
TypeSerializerSnapshotSerializationUtil.writeSerializerSnapshot(
new DataOutputViewStreamWrapper(out), pojoSerializerConfigSnapshot);
serializedConfig = out.toByteArray();
}
// instantiate new PojoSerializer, with new execution config that has the subclass
// registrations
SerializerConfigImpl serializerConfig = new SerializerConfigImpl();
serializerConfig.registerPojoType(SubTestUserClassA.class);
serializerConfig.registerPojoType(SubTestUserClassB.class);
pojoSerializer = (PojoSerializer<TestUserClass>) type.createSerializer(serializerConfig);
// read configuration from bytes
try (ByteArrayInputStream in = new ByteArrayInputStream(serializedConfig)) {
pojoSerializerConfigSnapshot =
TypeSerializerSnapshotSerializationUtil.readSerializerSnapshot(
new DataInputViewStreamWrapper(in),
Thread.currentThread().getContextClassLoader());
}
// reconfigure - check reconfiguration result and that
// 1) subclass serializer cache is repopulated
// 2) registrations also contain the now registered subclasses
@SuppressWarnings("unchecked")
TypeSerializerSchemaCompatibility<TestUserClass> compatResult =
pojoSerializer
.snapshotConfiguration()
.resolveSchemaCompatibility(pojoSerializerConfigSnapshot);
assertThat(compatResult.isCompatibleWithReconfiguredSerializer()).isTrue();
assertThat(compatResult.getReconfiguredSerializer()).isInstanceOf(PojoSerializer.class);
PojoSerializer<TestUserClass> reconfiguredPojoSerializer =
(PojoSerializer<TestUserClass>) compatResult.getReconfiguredSerializer();
assertThat(reconfiguredPojoSerializer.getSubclassSerializerCache())
.containsOnlyKeys(SubTestUserClassA.class, SubTestUserClassB.class);
assertThat(reconfiguredPojoSerializer.getRegisteredClasses())
.containsOnlyKeys(SubTestUserClassA.class, SubTestUserClassB.class);
}
|
Tests the following scenario:
<ul>
<li>the previous Pojo serializer did not have registrations, and created cached serializers
for its subclasses
<li>on restore, those subclasses were registered
</ul>
<p>In this case, after reconfiguration the cache should be repopulated, and registrations
should also exist for the subclasses.
<p>Note: the cache still needs to be repopulated because previous data of those subclasses
was written with the cached serializers. The repopulated cache has reconfigured serializers
for the subclasses so that previously written data can be read, but the registered
serializers for the subclasses do not necessarily need to be reconfigured, since they will
only be used to write new data.
|
testReconfigureWithPreviouslyNonregisteredSubclasses
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerTest.java
|
Apache-2.0
|
@Test
void testAllCorePatterns() {
assertThat(PARENT_FIRST_PACKAGES).contains("java.");
assertThat(PARENT_FIRST_PACKAGES).contains("org.apache.flink.");
assertThat(PARENT_FIRST_PACKAGES).contains("javax.annotation.");
}
|
All Java and Flink classes must be loaded parent-first.
|
testAllCorePatterns
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/configuration/ParentFirstPatternsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/configuration/ParentFirstPatternsTest.java
|
Apache-2.0
|
@Test
void testLoggersParentFirst() {
assertThat(PARENT_FIRST_PACKAGES).contains("org.slf4j");
assertThat(PARENT_FIRST_PACKAGES).contains("org.apache.log4j");
assertThat(PARENT_FIRST_PACKAGES).contains("org.apache.logging");
assertThat(PARENT_FIRST_PACKAGES).contains("org.apache.commons.logging");
assertThat(PARENT_FIRST_PACKAGES).contains("ch.qos.logback");
}
|
To avoid multiple binding problems and warnings for logger frameworks, we load them
parent-first.
|
testLoggersParentFirst
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/configuration/ParentFirstPatternsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/configuration/ParentFirstPatternsTest.java
|
Apache-2.0
|
@Test
void testScalaParentFirst() {
assertThat(PARENT_FIRST_PACKAGES).contains("scala.");
}
|
As long as Scala is not a pure user library, but is also used in the Flink runtime, we need
to load all Scala classes parent-first.
|
testScalaParentFirst
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/configuration/ParentFirstPatternsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/configuration/ParentFirstPatternsTest.java
|
Apache-2.0
|
@Test
void testHadoopParentFirst() {
assertThat(PARENT_FIRST_PACKAGES).contains("org.apache.hadoop.");
}
|
As long as we have Hadoop classes leaking through some of Flink's APIs (for example, the
bucketing sink), we need to make them parent-first.
|
testHadoopParentFirst
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/configuration/ParentFirstPatternsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/configuration/ParentFirstPatternsTest.java
|
Apache-2.0
|
@SuppressWarnings("deprecation")
@Test
void checkEnableSSL() {
// new options take precedence
Configuration newOptions = new Configuration();
newOptions.set(SecurityOptions.SSL_INTERNAL_ENABLED, true);
newOptions.set(SecurityOptions.SSL_REST_ENABLED, false);
assertThat(SecurityOptions.isInternalSSLEnabled(newOptions)).isTrue();
assertThat(SecurityOptions.isRestSSLEnabled(newOptions)).isFalse();
}
|
Tests whether activation of internal / REST SSL evaluates the config flags correctly.
|
checkEnableSSL
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/configuration/SecurityOptionsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/configuration/SecurityOptionsTest.java
|
Apache-2.0
|
@Test
void checkEnableRestSSLAuthentication() {
// SSL has to be enabled
Configuration noSSLOptions = new Configuration();
noSSLOptions.set(SecurityOptions.SSL_REST_ENABLED, false);
noSSLOptions.set(SecurityOptions.SSL_REST_AUTHENTICATION_ENABLED, true);
assertThat(SecurityOptions.isRestSSLAuthenticationEnabled(noSSLOptions)).isFalse();
// authentication is disabled by default
Configuration defaultOptions = new Configuration();
defaultOptions.set(SecurityOptions.SSL_REST_ENABLED, true);
assertThat(SecurityOptions.isRestSSLAuthenticationEnabled(defaultOptions)).isFalse();
Configuration options = new Configuration();
options.set(SecurityOptions.SSL_REST_ENABLED, true);
options.set(SecurityOptions.SSL_REST_AUTHENTICATION_ENABLED, true);
assertThat(SecurityOptions.isRestSSLAuthenticationEnabled(options)).isTrue();
}
|
Tests whether activation of REST mutual SSL authentication evaluates the config flags
correctly.
|
checkEnableRestSSLAuthentication
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/configuration/SecurityOptionsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/configuration/SecurityOptionsTest.java
|
Apache-2.0
|
@Override
public void close() throws IOException {
closeCalledLatch.countDown();
try {
blockCloseLatch.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
|
A no-op {@link Closeable} implementation that blocks inside {@link #close()}.
|
close
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/core/fs/AbstractAutoCloseableRegistryTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/core/fs/AbstractAutoCloseableRegistryTest.java
|
Apache-2.0
|
public void awaitClose(final long timeout, final TimeUnit timeUnit)
throws InterruptedException {
assertThat(closeCalledLatch.await(timeout, timeUnit)).isTrue();
}
|
Causes the current thread to wait until {@link #close()} is called.
|
awaitClose
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/core/fs/AbstractAutoCloseableRegistryTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/core/fs/AbstractAutoCloseableRegistryTest.java
|
Apache-2.0
|
public static void checkPathEventualExistence(
FileSystem fs, Path path, boolean expectedExists, long consistencyToleranceNS)
throws IOException, InterruptedException {
boolean dirExists;
long deadline = System.nanoTime() + consistencyToleranceNS;
while ((dirExists = fs.exists(path)) != expectedExists
&& System.nanoTime() - deadline < 0) {
Thread.sleep(10);
}
assertThat(dirExists).isEqualTo(expectedExists);
}
|
Verifies that the given path eventually appears on / disappears from <tt>fs</tt> within
<tt>consistencyToleranceNS</tt> nanoseconds.
|
checkPathEventualExistence
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/core/fs/FileSystemTestUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/core/fs/FileSystemTestUtils.java
|
Apache-2.0
|
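A minimal usage sketch of this polling helper, assuming the local file system and a freshly created file; the two-second tolerance is illustrative, not a value taken from the Flink sources.

import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.FileSystemTestUtils;
import org.apache.flink.core.fs.Path;

import java.io.File;

class EventualExistenceExample {
    public static void main(String[] args) throws Exception {
        // create a file, then wait up to 2 seconds (in nanoseconds) for it to become visible
        File file = File.createTempFile("junit", null);
        FileSystem fs = FileSystem.get(file.toURI());
        FileSystemTestUtils.checkPathEventualExistence(
                fs, new Path(file.toURI()), true, 2_000_000_000L);
    }
}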
@Test
void testConfiguration() throws Exception {
final String fsScheme = TestFileSystem.SCHEME;
// nothing configured, we should get a regular file system
FileSystem schemeFs = FileSystem.get(URI.create(fsScheme + ":///a/b/c"));
FileSystem localFs = FileSystem.get(File.createTempFile("junit", null, tempDir).toURI());
assertThat(schemeFs).isNotInstanceOf(LimitedConnectionsFileSystem.class);
assertThat(localFs).isNotInstanceOf(LimitedConnectionsFileSystem.class);
// configure some limits, which should cause "fsScheme" to be limited
final Configuration config = new Configuration();
config.set(getIntConfigOption("fs." + fsScheme + ".limit.total"), 42);
config.set(getIntConfigOption("fs." + fsScheme + ".limit.input"), 11);
config.set(getIntConfigOption("fs." + fsScheme + ".limit.output"), 40);
config.set(getIntConfigOption("fs." + fsScheme + ".limit.timeout"), 12345);
config.set(getIntConfigOption("fs." + fsScheme + ".limit.stream-timeout"), 98765);
try {
FileSystem.initialize(config);
schemeFs = FileSystem.get(URI.create(fsScheme + ":///a/b/c"));
localFs = FileSystem.get(File.createTempFile("junit", null, tempDir).toURI());
assertThat(schemeFs).isInstanceOf(LimitedConnectionsFileSystem.class);
assertThat(localFs).isNotInstanceOf(LimitedConnectionsFileSystem.class);
LimitedConnectionsFileSystem limitedFs = (LimitedConnectionsFileSystem) schemeFs;
assertThat(limitedFs.getMaxNumOpenStreamsTotal()).isEqualTo(42);
assertThat(limitedFs.getMaxNumOpenInputStreams()).isEqualTo(11);
assertThat(limitedFs.getMaxNumOpenOutputStreams()).isEqualTo(40);
assertThat(limitedFs.getStreamOpenTimeout()).isEqualTo(12345);
assertThat(limitedFs.getStreamInactivityTimeout()).isEqualTo(98765);
} finally {
// clear all settings
FileSystem.initialize(new Configuration());
}
}
|
This test validates that the File System is correctly wrapped by the file system factories
when the corresponding entries are in the configuration.
|
testConfiguration
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/core/fs/LimitedConnectionsConfigurationTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/core/fs/LimitedConnectionsConfigurationTest.java
|
Apache-2.0
|
@Test
void testSlowOutputStreamNotClosed() throws Exception {
final LimitedConnectionsFileSystem fs =
new LimitedConnectionsFileSystem(LocalFileSystem.getSharedInstance(), 1, 0L, 1000L);
// some competing threads
final Random rnd = new Random();
final ReaderThread[] threads = new ReaderThread[10];
for (int i = 0; i < threads.length; i++) {
File file = File.createTempFile("junit", null, tempFolder);
createRandomContents(file, rnd);
Path path = new Path(file.toURI());
threads[i] = new ReaderThread(fs, path, 1, Integer.MAX_VALUE);
}
// open the stream we test
try (FSDataOutputStream out =
fs.create(
new Path(File.createTempFile("junit", null, tempFolder).toURI()),
WriteMode.OVERWRITE)) {
// start the other threads that will try to shoot this stream down
for (ReaderThread t : threads) {
t.start();
}
// read the stream slowly.
Thread.sleep(5);
for (int bytesLeft = 50; bytesLeft > 0; bytesLeft--) {
out.write(bytesLeft);
Thread.sleep(5);
}
}
// wait for clean shutdown
for (ReaderThread t : threads) {
t.sync();
}
}
|
Tests that a slowly written output stream is not accidentally closed too aggressively, due to
a wrong initialization of the timestamps or bytes written that mark when the last progress
was checked.
|
testSlowOutputStreamNotClosed
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/core/fs/LimitedConnectionsFileSystemTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/core/fs/LimitedConnectionsFileSystemTest.java
|
Apache-2.0
|
@Test
void testSetPosition() throws Exception {
assertThat(stream.available()).isEqualTo(data.length);
assertThat(stream.read()).isEqualTo('0');
stream.setPosition(1);
assertThat(stream.available()).isEqualTo(data.length - 1);
assertThat(stream.read()).isEqualTo('1');
stream.setPosition(3);
assertThat(stream.available()).isEqualTo(data.length - 3);
assertThat(stream.read()).isEqualTo('3');
stream.setPosition(data.length);
assertThat(stream.available()).isZero();
assertThat(stream.read()).isEqualTo(-1);
}
|
Test setting position on a {@link ByteArrayInputStreamWithPos}.
|
testSetPosition
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/core/memory/ByteArrayInputStreamWithPosTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/core/memory/ByteArrayInputStreamWithPosTest.java
|
Apache-2.0
|
@Test
void testSetTooLargePosition() throws Exception {
assertThatThrownBy(() -> stream.setPosition(data.length + 1))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Position out of bounds.");
}
|
Tests that setting a position beyond the capacity of the byte array fails with an IllegalArgumentException.
|
testSetTooLargePosition
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/core/memory/ByteArrayInputStreamWithPosTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/core/memory/ByteArrayInputStreamWithPosTest.java
|
Apache-2.0
|
@Test
void testSetPositionWhenBufferIsFull() throws Exception {
stream.write(new byte[BUFFER_SIZE]);
// check whether the buffer is filled fully
assertThat(stream.getBuf()).hasSize(BUFFER_SIZE);
// check current position is the end of the buffer
assertThat(stream.getPosition()).isEqualTo(BUFFER_SIZE);
stream.setPosition(BUFFER_SIZE);
// confirm current position is at where we expect.
assertThat(stream.getPosition()).isEqualTo(BUFFER_SIZE);
}
|
Tests setting a position that is exactly equal to the buffer size.
|
testSetPositionWhenBufferIsFull
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/core/memory/ByteArrayOutputStreamWithPosTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/core/memory/ByteArrayOutputStreamWithPosTest.java
|
Apache-2.0
|
@Test
void testSetPositionLargerThanBufferSize() throws Exception {
// fully fill the buffer
stream.write(new byte[BUFFER_SIZE]);
assertThat(stream.getBuf()).hasSize(BUFFER_SIZE);
// expand the buffer by setting position beyond the buffer length
stream.setPosition(BUFFER_SIZE + 1);
assertThat(stream.getBuf()).hasSize(BUFFER_SIZE * 2);
assertThat(stream.getPosition()).isEqualTo(BUFFER_SIZE + 1);
}
|
Test setting position larger than buffer size.
|
testSetPositionLargerThanBufferSize
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/core/memory/ByteArrayOutputStreamWithPosTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/core/memory/ByteArrayOutputStreamWithPosTest.java
|
Apache-2.0
|
@Test
void testToString() throws IOException {
byte[] data = "1234567890".getBytes(ConfigConstants.DEFAULT_CHARSET);
try (ByteArrayOutputStreamWithPos stream = new ByteArrayOutputStreamWithPos(data.length)) {
stream.write(data);
assertThat(stream.toString().getBytes(ConfigConstants.DEFAULT_CHARSET))
.containsExactly(data);
for (int i = 0; i < data.length; i++) {
stream.setPosition(i);
assertThat(stream.toString().getBytes(ConfigConstants.DEFAULT_CHARSET))
.containsExactly(Arrays.copyOf(data, i));
}
// validate that the stored bytes are still tracked properly even when expanding array
stream.setPosition(data.length + 1);
assertThat(stream.toString().getBytes(ConfigConstants.DEFAULT_CHARSET))
.containsExactly(Arrays.copyOf(data, data.length + 1));
}
}
|
Tests that toString returns the substring of the buffer in the range [0, position).
|
testToString
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/core/memory/ByteArrayOutputStreamWithPosTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/core/memory/ByteArrayOutputStreamWithPosTest.java
|
Apache-2.0
|
@Test
void testJMXServiceRegisterMBean() throws Exception {
TestObject testObject = new TestObject();
ObjectName testObjectName = new ObjectName("org.apache.flink.management", "key", "value");
MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer();
try {
Optional<JMXServer> server = JMXService.getInstance();
assertThat(server).isPresent();
mBeanServer.registerMBean(testObject, testObjectName);
JMXServiceURL url =
new JMXServiceURL(
"service:jmx:rmi://localhost:"
+ server.get().getPort()
+ "/jndi/rmi://localhost:"
+ server.get().getPort()
+ "/jmxrmi");
JMXConnector jmxConn = JMXConnectorFactory.connect(url);
MBeanServerConnection mbeanConnConn = jmxConn.getMBeanServerConnection();
assertThat((int) mbeanConnConn.getAttribute(testObjectName, "Foo")).isOne();
mBeanServer.unregisterMBean(testObjectName);
assertThatThrownBy(() -> mbeanConnConn.getAttribute(testObjectName, "Foo"))
.isInstanceOf(InstanceNotFoundException.class);
} finally {
JMXService.stopInstance();
}
}
|
Verifies JMX server initialization, MBean registration, and attribute retrieval via a remote JMX connection.
|
testJMXServiceRegisterMBean
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/management/jmx/JMXServerTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/management/jmx/JMXServerTest.java
|
Apache-2.0
|
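The test reads the attribute "Foo" from a TestObject that is not shown in this excerpt. A plausible minimal shape of that fixture, following the standard JMX convention that the management interface is named after the class with an MBean suffix; this is an assumption, not the actual Flink test code.

// standard JMX convention: management interface named <ClassName>MBean
interface TestObjectMBean {
    int getFoo();
}

class TestObject implements TestObjectMBean {
    @Override
    public int getFoo() {
        return 1; // the test above asserts that attribute "Foo" is one
    }
}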
@Test
void testJMXServiceInitWithOccupiedPort() throws Exception {
try (ServerSocket socket = new ServerSocket(0)) {
JMXService.startInstance(String.valueOf(socket.getLocalPort()));
assertThat(JMXService.getInstance()).isNotPresent();
}
}
|
Verifies that initialization fails when the port is already occupied.
|
testJMXServiceInitWithOccupiedPort
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/management/jmx/JMXServiceTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/management/jmx/JMXServiceTest.java
|
Apache-2.0
|
@Override
protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
if (cnfThrowingClassnames.contains(name)) {
throw new ClassNotFoundException();
} else {
return super.loadClass(name, resolve);
}
}
|
Utility classloader used in tests that allows simulating {@link ClassNotFoundException}s for
specific classes.
|
loadClass
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/testutils/ArtificialCNFExceptionThrowingClassLoader.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/testutils/ArtificialCNFExceptionThrowingClassLoader.java
|
Apache-2.0
|
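A usage sketch, assuming the class offers a (parent, classNames) constructor matching the cnfThrowingClassnames field used above; the class name passed in is only an example.

import static org.assertj.core.api.Assertions.assertThatThrownBy;

import java.util.Collections;

@Test
void testSimulatedClassNotFound() {
    ClassLoader cl =
            new ArtificialCNFExceptionThrowingClassLoader(
                    getClass().getClassLoader(),
                    Collections.singleton("org.apache.flink.types.StringValue"));
    // loading the listed class fails, everything else resolves via the parent
    assertThatThrownBy(() -> Class.forName("org.apache.flink.types.StringValue", false, cl))
            .isInstanceOf(ClassNotFoundException.class);
}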
public static ObjectAndClassLoader<Serializable> createSerializableObjectFromNewClassLoader() {
final String classSource =
"import java.io.Serializable;"
+ "import java.util.Random;"
+ "public class TestSerializable implements Serializable {"
+ " private static final long serialVersionUID = -3L;"
+ " private final long random;"
+ " public TestSerializable() {"
+ " random = new Random().nextLong();"
+ " }"
+ " public boolean equals(Object o) {"
+ " if (this == o) { return true; }"
+ " if ((o == null) || (getClass() != o.getClass())) { return false; }"
+ " TestSerializable that = (TestSerializable) o;"
+ " return random == random;"
+ " }"
+ " public int hashCode() {"
+ " return (int)(random ^ random >>> 32);"
+ " }"
+ " public String toString() {"
+ " return \"TestSerializable{random=\" + random + '}';"
+ " }"
+ "}";
return createObjectFromNewClassLoader("TestSerializable", Serializable.class, classSource);
}
|
Creates a new ClassLoader and a new {@link Serializable} class inside that ClassLoader. This
is useful when unit testing the class loading behavior of code, and needing a class that is
outside the system class path.
<p>NOTE: Even though this method may throw IOExceptions, we do not declare those and rather
wrap them in Runtime Exceptions. While this is generally discouraged, we do this here because
it is merely a test utility and not production code, and it makes it easier to use this
method during the initialization of variables and especially static variables.
|
createSerializableObjectFromNewClassLoader
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/testutils/ClassLoaderUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/testutils/ClassLoaderUtils.java
|
Apache-2.0
|
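A usage sketch, assuming ObjectAndClassLoader exposes getObject() and getClassLoader() accessors; it shows why deserialization must go through the owning class loader.

import org.apache.flink.util.InstantiationUtil;

import java.io.Serializable;

static void roundTrip() throws Exception {
    ObjectAndClassLoader<Serializable> oacl =
            ClassLoaderUtils.createSerializableObjectFromNewClassLoader();
    byte[] bytes = InstantiationUtil.serializeObject(oacl.getObject());
    // TestSerializable is not on the system class path, so deserialization must use
    // the class loader that defined the class
    Serializable copy = InstantiationUtil.deserializeObject(bytes, oacl.getClassLoader());
}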
public static CustomEqualityMatcher deeplyEquals(Object item) {
return new CustomEqualityMatcher(item, new DeeplyEqualsChecker());
}
|
This matcher performs similar comparison to {@link org.hamcrest.core.IsEqual}, which
resembles {@link java.util.Objects#deepEquals(Object, Object)} logic. The only difference
here is that {@link Tuple}s are treated similarly to arrays.
<p>This means that if we compare two Tuples that contain arrays, those arrays will be
compared with {@link Arrays#deepEquals(Object[], Object[])} rather than with reference
comparison.
@param item expected value
|
deeplyEquals
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/testutils/CustomEqualityMatcher.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/testutils/CustomEqualityMatcher.java
|
Apache-2.0
|
@Test
void testDeserializeEmptyObject() {
EmptyObjectSerializer emptyObjectSerializer = new EmptyObjectSerializer();
Output output = new ByteBufferOutput(1000);
emptyObjectSerializer.write(null, output, new Object());
Input input = new NoFetchingInput(new ByteArrayInputStream(output.toBytes()));
final Object deserialized = emptyObjectSerializer.read(null, input, Object.class);
assertThat(deserialized).isExactlyInstanceOf(Object.class);
}
|
Tries deserializing an object whose serialization result is an empty byte array, and expects
no exceptions.
|
testDeserializeEmptyObject
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/testutils/runtime/NoFetchingInputTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/testutils/runtime/NoFetchingInputTest.java
|
Apache-2.0
|
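The EmptyObjectSerializer is not shown in this excerpt. A plausible minimal shape, assuming a Kryo Serializer that writes zero bytes and recreates a plain Object on read, matching the null Kryo arguments used in the test above.

import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;

class EmptyObjectSerializer extends Serializer<Object> {
    @Override
    public void write(Kryo kryo, Output output, Object object) {
        // intentionally writes nothing, producing an empty serialization result
    }

    @Override
    public Object read(Kryo kryo, Input input, Class<Object> type) {
        return new Object();
    }
}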
public static Collection<java.nio.file.Path> prepareTestFiles(final java.nio.file.Path dir)
throws IOException {
final java.nio.file.Path jobSubDir1 = Files.createDirectory(dir.resolve("_sub_dir1"));
final java.nio.file.Path jobSubDir2 = Files.createDirectory(dir.resolve("_sub_dir2"));
final java.nio.file.Path jarFile1 = Files.createFile(dir.resolve("file1.jar"));
final java.nio.file.Path jarFile2 = Files.createFile(dir.resolve("file2.jar"));
final java.nio.file.Path jarFile3 = Files.createFile(jobSubDir1.resolve("file3.jar"));
final java.nio.file.Path jarFile4 = Files.createFile(jobSubDir2.resolve("file4.jar"));
final Collection<java.nio.file.Path> jarFiles = new ArrayList<>();
Files.createFile(dir.resolve("file1.txt"));
Files.createFile(jobSubDir2.resolve("file2.txt"));
jarFiles.add(jarFile1);
jarFiles.add(jarFile2);
jarFiles.add(jarFile3);
jarFiles.add(jarFile4);
return jarFiles;
}
|
Generate some files in the directory {@code dir}.
@param dir the directory where the files are generated
@return The list of generated files
@throws IOException if an I/O error occurs while generating the files
|
prepareTestFiles
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/FileUtilsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/FileUtilsTest.java
|
Apache-2.0
|
private void verifyDirectoryCompression(
final java.nio.file.Path testDir, final java.nio.file.Path compressDir)
throws IOException {
final String testFileContent =
"Goethe - Faust: Der Tragoedie erster Teil\n"
+ "Prolog im Himmel.\n"
+ "Der Herr. Die himmlischen Heerscharen. Nachher Mephistopheles. Die drei\n"
+ "Erzengel treten vor.\n"
+ "RAPHAEL: Die Sonne toent, nach alter Weise, In Brudersphaeren Wettgesang,\n"
+ "Und ihre vorgeschriebne Reise Vollendet sie mit Donnergang. Ihr Anblick\n"
+ "gibt den Engeln Staerke, Wenn keiner Sie ergruenden mag; die unbegreiflich\n"
+ "hohen Werke Sind herrlich wie am ersten Tag.\n"
+ "GABRIEL: Und schnell und unbegreiflich schnelle Dreht sich umher der Erde\n"
+ "Pracht; Es wechselt Paradieseshelle Mit tiefer, schauervoller Nacht. Es\n"
+ "schaeumt das Meer in breiten Fluessen Am tiefen Grund der Felsen auf, Und\n"
+ "Fels und Meer wird fortgerissen Im ewig schnellem Sphaerenlauf.\n"
+ "MICHAEL: Und Stuerme brausen um die Wette Vom Meer aufs Land, vom Land\n"
+ "aufs Meer, und bilden wuetend eine Kette Der tiefsten Wirkung rings umher.\n"
+ "Da flammt ein blitzendes Verheeren Dem Pfade vor des Donnerschlags. Doch\n"
+ "deine Boten, Herr, verehren Das sanfte Wandeln deines Tags.";
final java.nio.file.Path extractDir =
TempDirUtils.newFolder(temporaryFolder, "extractDir").toPath();
final java.nio.file.Path originalDir = Paths.get("rootDir");
final java.nio.file.Path emptySubDir = originalDir.resolve("emptyDir");
final java.nio.file.Path fullSubDir = originalDir.resolve("fullDir");
final java.nio.file.Path file1 = originalDir.resolve("file1");
final java.nio.file.Path file2 = originalDir.resolve("file2");
final java.nio.file.Path file3 = fullSubDir.resolve("file3");
Files.createDirectory(testDir.resolve(originalDir));
Files.createDirectory(testDir.resolve(emptySubDir));
Files.createDirectory(testDir.resolve(fullSubDir));
Files.copy(
new ByteArrayInputStream(testFileContent.getBytes(StandardCharsets.UTF_8)),
testDir.resolve(file1));
Files.createFile(testDir.resolve(file2));
Files.copy(
new ByteArrayInputStream(testFileContent.getBytes(StandardCharsets.UTF_8)),
testDir.resolve(file3));
final Path zip =
FileUtils.compressDirectory(
new Path(compressDir.resolve(originalDir).toString()),
new Path(compressDir.resolve(originalDir) + ".zip"));
FileUtils.expandDirectory(zip, new Path(extractDir.toAbsolutePath().toString()));
assertDirEquals(compressDir.resolve(originalDir), extractDir.resolve(originalDir));
}
|
Generates some test directories and files under {@code testDir}, compresses the original
directory under {@code compressDir}, expands the archive again, and verifies that the
contents match.
@param testDir the path of the directory where the test directories are generated
@param compressDir the path of the directory to be compressed and verified
@throws IOException if an I/O error occurs while generating the directories
|
verifyDirectoryCompression
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/FileUtilsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/FileUtilsTest.java
|
Apache-2.0
|
public static <D, W> void testMethodForwarding(
Class<D> delegateClass, Function<D, W> wrapperFactory)
throws NoSuchMethodException, IllegalAccessException, InvocationTargetException {
testMethodForwarding(
delegateClass, wrapperFactory, () -> spy(delegateClass), Collections.emptySet());
}
|
This is a best effort automatic test for method forwarding between a delegate and its
wrapper, where the wrapper class is a subtype of the delegate. This ignores methods that are
inherited from Object.
@param delegateClass the class for the delegate.
@param wrapperFactory factory that produces a wrapper from a delegate.
@param <D> type of the delegate
@param <W> type of the wrapper
|
testMethodForwarding
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/MethodForwardingTestUtil.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/MethodForwardingTestUtil.java
|
Apache-2.0
|
public static <D, W, I extends D> void testMethodForwarding(
Class<D> delegateClass,
Function<I, W> wrapperFactory,
Supplier<I> delegateObjectSupplier)
throws NoSuchMethodException, IllegalAccessException, InvocationTargetException {
testMethodForwarding(
delegateClass, wrapperFactory, delegateObjectSupplier, Collections.emptySet());
}
|
This is a best effort automatic test for method forwarding between a delegate and its
wrapper, where the wrapper class is a subtype of the delegate. This ignores methods that are
inherited from Object.
@param delegateClass the class for the delegate.
@param wrapperFactory factory that produces a wrapper from a delegate.
@param delegateObjectSupplier supplier for the delegate object passed to the wrapper factory.
@param <D> type of the delegate
@param <W> type of the wrapper
@param <I> type of the object created as delegate, is a subtype of D.
|
testMethodForwarding
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/MethodForwardingTestUtil.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/MethodForwardingTestUtil.java
|
Apache-2.0
|
public static <D, W, I extends D> void testMethodForwarding(
Class<D> delegateClass,
Function<I, W> wrapperFactory,
Supplier<I> delegateObjectSupplier,
Set<Method> skipMethodSet)
throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
Preconditions.checkNotNull(delegateClass);
Preconditions.checkNotNull(wrapperFactory);
Preconditions.checkNotNull(skipMethodSet);
I delegate = delegateObjectSupplier.get();
// wrap the delegate object as a spy, unless it is already testable with Mockito
// (i.e. already a spy or a mock)
if (!(MockUtil.isSpy(delegate) || MockUtil.isMock(delegate))) {
    delegate = spy(delegate);
}
W wrapper = wrapperFactory.apply(delegate);
// ensure that wrapper is a subtype of delegate
Preconditions.checkArgument(delegateClass.isAssignableFrom(wrapper.getClass()));
for (Method delegateMethod : delegateClass.getMethods()) {
if (checkSkipMethodForwardCheck(delegateMethod, skipMethodSet)) {
continue;
}
// find the correct method to substitute the bridge for erased generic types.
// if this doesn't work, the user need to exclude the method and write an additional
// test.
Method wrapperMethod =
wrapper.getClass()
.getMethod(
delegateMethod.getName(), delegateMethod.getParameterTypes());
// things get a bit fuzzy here, best effort to find a match but this might end up with a
// wrong method.
if (wrapperMethod.isBridge()) {
for (Method method : wrapper.getClass().getMethods()) {
if (!method.isBridge()
&& method.getName().equals(wrapperMethod.getName())
&& method.getParameterCount() == wrapperMethod.getParameterCount()) {
wrapperMethod = method;
break;
}
}
}
Class<?>[] parameterTypes = wrapperMethod.getParameterTypes();
Object[] arguments = new Object[parameterTypes.length];
for (int j = 0; j < arguments.length; j++) {
Class<?> parameterType = parameterTypes[j];
if (parameterType.isArray()) {
arguments[j] = Array.newInstance(parameterType.getComponentType(), 0);
} else if (parameterType.isPrimitive()) {
if (boolean.class.equals(parameterType)) {
arguments[j] = false;
} else if (char.class.equals(parameterType)) {
arguments[j] = 'a';
} else {
arguments[j] = (byte) 0;
}
} else {
arguments[j] = Mockito.mock(parameterType);
}
}
wrapperMethod.invoke(wrapper, arguments);
delegateMethod.invoke(Mockito.verify(delegate, Mockito.times(1)), arguments);
reset(delegate);
}
}
|
This is a best effort automatic test for method forwarding between a delegate and its
wrapper, where the wrapper class is a subtype of the delegate. Methods contained in {@code
skipMethodSet} are excluded from the check, e.g. when the implementation intentionally does
not call the original method. This ignores methods that are inherited from Object.
@param delegateClass the class for the delegate.
@param wrapperFactory factory that produces a wrapper from a delegate.
@param delegateObjectSupplier supplier for the delegate object passed to the wrapper factory.
@param skipMethodSet set of methods to ignore.
@param <D> type of the delegate
@param <W> type of the wrapper
@param <I> type of the object created as delegate, is a subtype of D.
|
testMethodForwarding
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/MethodForwardingTestUtil.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/MethodForwardingTestUtil.java
|
Apache-2.0
|
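A minimal sketch of how this utility can be used, with an illustrative no-op Closeable as the delegate; the delegate class and wrapper lambda here are examples, not fixtures from the Flink tests.

import java.io.Closeable;

static class NoOpCloseable implements Closeable {
    @Override
    public void close() {}
}

@Test
void testWrapperForwardsClose() throws Exception {
    // the wrapper is a Closeable lambda that simply forwards close() to the delegate
    MethodForwardingTestUtil.testMethodForwarding(
            Closeable.class,
            delegate -> (Closeable) delegate::close,
            NoOpCloseable::new);
}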
private static boolean checkSkipMethodForwardCheck(
Method delegateMethod, Set<Method> skipMethods) {
if (delegateMethod.isBridge()
|| delegateMethod.isDefault()
|| skipMethods.contains(delegateMethod)) {
return true;
}
// skip methods declared in Object (Mockito doesn't like them)
try {
Object.class.getMethod(delegateMethod.getName(), delegateMethod.getParameterTypes());
return true;
} catch (Exception ignore) {
}
return false;
}
|
Test if this method should be skipped in our check for proper forwarding, e.g. because it is
just a bridge.
|
checkSkipMethodForwardCheck
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/MethodForwardingTestUtil.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/MethodForwardingTestUtil.java
|
Apache-2.0
|
@Test
void testConcurrentExecutionConfigSerialization()
throws ExecutionException, InterruptedException {
final int numInputs = 10;
Collection<String> input = new ArrayList<>(numInputs);
for (int i = 0; i < numInputs; i++) {
input.add("--" + UUID.randomUUID());
input.add(UUID.randomUUID().toString());
}
final String[] args = input.toArray(new String[0]);
final ParameterTool parameterTool = (ParameterTool) createParameterToolFromArgs(args);
final int numThreads = 5;
final int numSerializations = 100;
final Collection<CompletableFuture<Void>> futures = new ArrayList<>(numSerializations);
final ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
try {
for (int i = 0; i < numSerializations; i++) {
futures.add(
CompletableFuture.runAsync(
() -> {
try {
serializeDeserialize(parameterTool);
} catch (Exception e) {
throw new CompletionException(e);
}
},
executorService));
}
for (CompletableFuture<Void> future : futures) {
future.get();
}
} finally {
executorService.shutdownNow();
executorService.awaitTermination(1000L, TimeUnit.MILLISECONDS);
}
}
|
Tests that we can concurrently serialize and access the ParameterTool. See FLINK-7943.
|
testConcurrentExecutionConfigSerialization
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/ParameterToolTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/ParameterToolTest.java
|
Apache-2.0
|
private void serializeDeserialize(ParameterTool parameterTool)
        throws IOException, ClassNotFoundException {
    // weirdly enough, this call has side effects making the ParameterTool serialization
    // fail if not using a concurrent data structure
    parameterTool.get(UUID.randomUUID().toString());
    final byte[] serialized;
    try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
            ObjectOutputStream oos = new ObjectOutputStream(baos)) {
        oos.writeObject(parameterTool);
        oos.flush();
        serialized = baos.toByteArray();
    }
    try (ByteArrayInputStream bais = new ByteArrayInputStream(serialized);
            ObjectInputStream ois = new ObjectInputStream(bais)) {
        // this should work :-)
        ParameterTool deserializedParameterTool = (ParameterTool) ois.readObject();
    }
}
|
Accesses parameter tool parameters and then serializes the given parameter tool and
deserializes again.
@param parameterTool to serialize/deserialize
|
serializeDeserialize
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/ParameterToolTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/ParameterToolTest.java
|
Apache-2.0
|
public static File createJarFile(File tmpDir, String jarName, String className, String javaCode)
throws IOException {
return createJarFile(tmpDir, jarName, Collections.singletonMap(className, javaCode));
}
|
Pack the generated class into a JAR and return the path of the JAR.
|
createJarFile
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/UserClassLoaderJarTestUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/UserClassLoaderJarTestUtils.java
|
Apache-2.0
|
public static File createJarFile(
File tmpDir, String jarName, Map<String, String> classNameCodes) throws IOException {
List<File> javaFiles = new ArrayList<>();
for (Map.Entry<String, String> entry : classNameCodes.entrySet()) {
// write class source code to file
File javaFile = Paths.get(tmpDir.toString(), entry.getKey() + ".java").toFile();
//noinspection ResultOfMethodCallIgnored
javaFile.createNewFile();
FileUtils.writeFileUtf8(javaFile, entry.getValue());
javaFiles.add(javaFile);
}
// compile class source code
DiagnosticCollector<JavaFileObject> diagnostics = new DiagnosticCollector<>();
JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
StandardJavaFileManager fileManager =
compiler.getStandardFileManager(diagnostics, null, null);
Iterable<? extends JavaFileObject> compilationUnit =
fileManager.getJavaFileObjectsFromFiles(javaFiles);
JavaCompiler.CompilationTask task =
compiler.getTask(
null,
fileManager,
diagnostics,
Collections.emptyList(),
null,
compilationUnit);
if (!task.call()) {
    throw new IOException(
            "Could not compile the test classes: " + diagnostics.getDiagnostics());
}
// pack class files into the jar
File jarFile = Paths.get(tmpDir.toString(), jarName).toFile();
try (JarOutputStream jos = new JarOutputStream(new FileOutputStream(jarFile))) {
    for (String className : classNameCodes.keySet()) {
        File classFile = Paths.get(tmpDir.toString(), className + ".class").toFile();
        JarEntry jarEntry = new JarEntry(className + ".class");
        jos.putNextEntry(jarEntry);
        byte[] classBytes = FileUtils.readAllBytes(classFile.toPath());
        jos.write(classBytes);
        jos.closeEntry();
    }
}
return jarFile;
}
|
Pack the generated classes into a JAR and return the path of the JAR.
|
createJarFile
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/UserClassLoaderJarTestUtils.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/UserClassLoaderJarTestUtils.java
|
Apache-2.0
|
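A usage sketch: compile a trivial class, pack it into a JAR, and load it from an isolated URLClassLoader. The class name and source code are illustrative, and the temp directory is assumed to be provided by JUnit's @TempDir.

import static org.assertj.core.api.Assertions.assertThat;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import java.io.File;
import java.net.URL;
import java.net.URLClassLoader;

class UserJarExample {
    @Test
    void loadGeneratedClassFromJar(@TempDir File tmpDir) throws Exception {
        File jar =
                UserClassLoaderJarTestUtils.createJarFile(
                        tmpDir,
                        "test-classes.jar",
                        "HelloUdf",
                        "public class HelloUdf { public String toString() { return \"hello\"; } }");
        // a null parent isolates the loader from the test class path
        try (URLClassLoader cl = new URLClassLoader(new URL[] {jar.toURI().toURL()}, null)) {
            Object instance = cl.loadClass("HelloUdf").getDeclaredConstructor().newInstance();
            assertThat(instance.toString()).isEqualTo("hello");
        }
    }
}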
@Override
public Wrapped getWrappedDelegate() {
if (levels-- == 0) {
return new Wrapped();
} else {
return this;
}
}
|
Wraps around {@link Wrapped} a specified number of times.
|
getWrappedDelegate
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/WrappingProxyUtilTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/WrappingProxyUtilTest.java
|
Apache-2.0
|
@Test
void testRetrySuccess() {
final int retries = 10;
final AtomicInteger atomicInteger = new AtomicInteger(0);
CompletableFuture<Boolean> retryFuture =
FutureUtils.retry(
() ->
CompletableFuture.supplyAsync(
() -> {
if (atomicInteger.incrementAndGet() == retries) {
return true;
} else {
throw new CompletionException(
new FlinkException("Test exception"));
}
},
EXECUTOR_RESOURCE.getExecutor()),
retries,
EXECUTOR_RESOURCE.getExecutor());
assertThatFuture(retryFuture).eventuallySucceeds().isEqualTo(true);
assertThat(atomicInteger).hasValue(retries);
}
|
Tests that we can retry an operation.
|
testRetrySuccess
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/concurrent/FutureUtilsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/concurrent/FutureUtilsTest.java
|
Apache-2.0
|
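For intuition, the combinator exercised here can be approximated by a short recursive helper on CompletableFuture. This is an illustrative sketch only, not Flink's actual FutureUtils implementation.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.function.Supplier;

static <T> CompletableFuture<T> retrySketch(
        Supplier<CompletableFuture<T>> operation, int retries, Executor executor) {
    CompletableFuture<T> result = new CompletableFuture<>();
    operation.get()
            .whenCompleteAsync(
                    (value, failure) -> {
                        if (failure == null) {
                            result.complete(value);
                        } else if (retries > 0) {
                            // re-run the operation with one attempt fewer and bridge
                            // its outcome into the outer result future
                            retrySketch(operation, retries - 1, executor)
                                    .whenComplete(
                                            (v, f) -> {
                                                if (f == null) {
                                                    result.complete(v);
                                                } else {
                                                    result.completeExceptionally(f);
                                                }
                                            });
                        } else {
                            result.completeExceptionally(failure);
                        }
                    },
                    executor);
    return result;
}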
@Test
void testRetryFailureFixedRetries() {
final int retries = 3;
CompletableFuture<?> retryFuture =
FutureUtils.retry(
() ->
FutureUtils.completedExceptionally(
new FlinkException("Test exception")),
retries,
EXECUTOR_RESOURCE.getExecutor());
assertThatFuture(retryFuture)
.eventuallyFailsWith(ExecutionException.class)
.withCauseInstanceOf(FutureUtils.RetryException.class);
}
|
Tests that a retry future is failed after all retries have been consumed.
|
testRetryFailureFixedRetries
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/concurrent/FutureUtilsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/concurrent/FutureUtilsTest.java
|
Apache-2.0
|
@Test
void testRetryCancellation() throws InterruptedException {
final int retries = 10;
final AtomicInteger atomicInteger = new AtomicInteger(0);
final OneShotLatch notificationLatch = new OneShotLatch();
final OneShotLatch waitLatch = new OneShotLatch();
final AtomicReference<Throwable> atomicThrowable = new AtomicReference<>(null);
CompletableFuture<?> retryFuture =
FutureUtils.retry(
() ->
CompletableFuture.supplyAsync(
() -> {
if (atomicInteger.incrementAndGet() == 2) {
notificationLatch.trigger();
try {
waitLatch.await();
} catch (InterruptedException e) {
atomicThrowable.compareAndSet(null, e);
}
}
throw new CompletionException(
new FlinkException("Test exception"));
},
EXECUTOR_RESOURCE.getExecutor()),
retries,
EXECUTOR_RESOURCE.getExecutor());
// await that we have failed once
notificationLatch.await();
assertThat(retryFuture).isNotDone();
// cancel the retry future
retryFuture.cancel(false);
// let the retry operation continue
waitLatch.trigger();
assertThat(retryFuture).isCancelled();
assertThat(atomicInteger).hasValue(2);
assertThat(atomicThrowable.get()).isNull();
}
|
Tests that we can cancel a retry future.
|
testRetryCancellation
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/concurrent/FutureUtilsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/concurrent/FutureUtilsTest.java
|
Apache-2.0
|
@Test
void testStopAtNonRetryableException() {
final int retries = 10;
final int notRetry = 3;
final AtomicInteger atomicInteger = new AtomicInteger(0);
final FlinkRuntimeException nonRetryableException =
new FlinkRuntimeException("Non-retryable exception");
CompletableFuture<Boolean> retryFuture =
FutureUtils.retry(
() ->
CompletableFuture.supplyAsync(
() -> {
if (atomicInteger.incrementAndGet() == notRetry) {
// throw non-retryable exception
throw new CompletionException(
nonRetryableException);
} else {
throw new CompletionException(
new FlinkException("Test exception"));
}
},
EXECUTOR_RESOURCE.getExecutor()),
retries,
throwable ->
ExceptionUtils.findThrowable(throwable, FlinkException.class)
.isPresent(),
EXECUTOR_RESOURCE.getExecutor());
assertThatFuture(retryFuture)
.eventuallyFailsWith(ExecutionException.class)
.extracting(FlinkAssertions::chainOfCauses, FlinkAssertions.STREAM_THROWABLE)
.last()
.isEqualTo(nonRetryableException);
assertThat(atomicInteger).hasValue(notRetry);
}
|
Tests that {@link FutureUtils#retry} stops at a non-retryable exception.
|
testStopAtNonRetryableException
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/concurrent/FutureUtilsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/concurrent/FutureUtilsTest.java
|
Apache-2.0
|
@Test
void testRetryWithDelayRetryStrategyFailure() {
CompletableFuture<?> retryFuture =
FutureUtils.retryWithDelay(
() ->
FutureUtils.completedExceptionally(
new FlinkException("Test exception")),
new FixedRetryStrategy(3, Duration.ofMillis(1L)),
new ScheduledExecutorServiceAdapter(EXECUTOR_RESOURCE.getExecutor()));
assertThatFuture(retryFuture)
.eventuallyFailsWith(ExecutionException.class)
.withCauseInstanceOf(FutureUtils.RetryException.class);
}
|
Tests that retry with delay fails after having exceeded all retries.
|
testRetryWithDelayRetryStrategyFailure
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/concurrent/FutureUtilsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/concurrent/FutureUtilsTest.java
|
Apache-2.0
|
@Test
void testRetryWithDelayRetryStrategy() {
final int retries = 4;
final AtomicInteger countDown = new AtomicInteger(retries);
long start = System.currentTimeMillis();
CompletableFuture<Boolean> retryFuture =
FutureUtils.retryWithDelay(
() -> {
if (countDown.getAndDecrement() == 0) {
return CompletableFuture.completedFuture(true);
} else {
return FutureUtils.completedExceptionally(
new FlinkException("Test exception."));
}
},
new ExponentialBackoffRetryStrategy(
retries, Duration.ofMillis(2L), Duration.ofMillis(5L)),
new ScheduledExecutorServiceAdapter(EXECUTOR_RESOURCE.getExecutor()));
assertThatFuture(retryFuture).eventuallySucceeds().isEqualTo(true);
long completionTime = System.currentTimeMillis() - start;
assertThat(completionTime)
        .as("The completion time should be at least the sum of the delays between retries.")
        .isGreaterThanOrEqualTo(2 + 4 + 5 + 5);
}
|
Tests that the delay is respected between subsequent retries of a retry future with retry
delay.
|
testRetryWithDelayRetryStrategy
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/concurrent/FutureUtilsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/concurrent/FutureUtilsTest.java
|
Apache-2.0
|
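The lower bound asserted above follows from capped doubling: starting at 2 ms and capping at 5 ms, the four waits are 2, 4, 5 and 5 ms, i.e. 16 ms in total. A tiny sketch of that schedule, assuming the strategy doubles the current delay and clamps it at the maximum.

import java.time.Duration;

class BackoffScheduleExample {
    public static void main(String[] args) {
        Duration delay = Duration.ofMillis(2);
        final Duration max = Duration.ofMillis(5);
        long totalMillis = 0;
        for (int attempt = 0; attempt < 4; attempt++) {
            totalMillis += delay.toMillis();
            // double the delay, but never exceed the configured maximum
            Duration doubled = delay.multipliedBy(2);
            delay = doubled.compareTo(max) > 0 ? max : doubled;
        }
        System.out.println(totalMillis); // prints 16, matching 2 + 4 + 5 + 5
    }
}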
@Test
void testRetryWithDelayRetryStrategyCancellation() {
final ManuallyTriggeredScheduledExecutor scheduledExecutor =
new ManuallyTriggeredScheduledExecutor();
CompletableFuture<?> retryFuture =
FutureUtils.retryWithDelay(
() ->
FutureUtils.completedExceptionally(
new FlinkException("Test exception")),
new FixedRetryStrategy(1, TestingUtils.infiniteDuration()),
scheduledExecutor);
assertThat(retryFuture).isNotDone();
final Collection<ScheduledFuture<?>> scheduledTasks =
scheduledExecutor.getActiveScheduledTasks();
assertThat(scheduledTasks).isNotEmpty();
final ScheduledFuture<?> scheduledFuture = scheduledTasks.iterator().next();
assertThat(scheduledFuture.isDone()).isFalse();
retryFuture.cancel(false);
assertThat(retryFuture).isCancelled();
assertThat(scheduledFuture.isCancelled()).isTrue();
}
|
Tests that all scheduled tasks are cancelled if the retry future is cancelled.
|
testRetryWithDelayRetryStrategyCancellation
|
java
|
apache/flink
|
flink-core/src/test/java/org/apache/flink/util/concurrent/FutureUtilsTest.java
|
https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/concurrent/FutureUtilsTest.java
|
Apache-2.0
|
public static <IN, OUT, ACC>
AggregatingStateDeclarationBuilder<IN, OUT, ACC> aggregatingStateBuilder(
String name,
TypeDescriptor<ACC> aggTypeDescriptor,
AggregateFunction<IN, ACC, OUT> aggregateFunction) {
return new AggregatingStateDeclarationBuilder<>(name, aggTypeDescriptor, aggregateFunction);
}
|
Get the builder of {@link AggregatingStateDeclaration}.
|
aggregatingStateBuilder
|
java
|
apache/flink
|
flink-core-api/src/main/java/org/apache/flink/api/common/state/StateDeclarations.java
|
https://github.com/apache/flink/blob/master/flink-core-api/src/main/java/org/apache/flink/api/common/state/StateDeclarations.java
|
Apache-2.0
|
public static <T> ReducingStateDeclarationBuilder<T> reducingStateBuilder(
String name, TypeDescriptor<T> typeInformation, ReduceFunction<T> reduceFunction) {
return new ReducingStateDeclarationBuilder<>(name, typeInformation, reduceFunction);
}
|
Get the builder of {@link ReducingStateDeclaration}.
|
reducingStateBuilder
|
java
|
apache/flink
|
flink-core-api/src/main/java/org/apache/flink/api/common/state/StateDeclarations.java
|
https://github.com/apache/flink/blob/master/flink-core-api/src/main/java/org/apache/flink/api/common/state/StateDeclarations.java
|
Apache-2.0
|
public static <K, V> MapStateDeclarationBuilder<K, V> mapStateBuilder(
String name,
TypeDescriptor<K> keyTypeInformation,
TypeDescriptor<V> valueTypeInformation) {
return new MapStateDeclarationBuilder<>(name, keyTypeInformation, valueTypeInformation);
}
|
Get the builder of {@link MapStateDeclaration}.
|
mapStateBuilder
|
java
|
apache/flink
|
flink-core-api/src/main/java/org/apache/flink/api/common/state/StateDeclarations.java
|
https://github.com/apache/flink/blob/master/flink-core-api/src/main/java/org/apache/flink/api/common/state/StateDeclarations.java
|
Apache-2.0
|
public static <T> ListStateDeclarationBuilder<T> listStateBuilder(
String name, TypeDescriptor<T> elementTypeInformation) {
return new ListStateDeclarationBuilder<>(name, elementTypeInformation);
}
|
Get the builder of {@link ListStateDeclaration}.
|
listStateBuilder
|
java
|
apache/flink
|
flink-core-api/src/main/java/org/apache/flink/api/common/state/StateDeclarations.java
|
https://github.com/apache/flink/blob/master/flink-core-api/src/main/java/org/apache/flink/api/common/state/StateDeclarations.java
|
Apache-2.0
|
public static <T> ValueStateDeclarationBuilder<T> valueStateBuilder(
String name, TypeDescriptor<T> valueType) {
return new ValueStateDeclarationBuilder<>(name, valueType);
}
|
Get the builder of {@link ValueStateDeclaration}.
|
valueStateBuilder
|
java
|
apache/flink
|
flink-core-api/src/main/java/org/apache/flink/api/common/state/StateDeclarations.java
|
https://github.com/apache/flink/blob/master/flink-core-api/src/main/java/org/apache/flink/api/common/state/StateDeclarations.java
|
Apache-2.0
|
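A usage sketch of these builders; the TypeDescriptors constants and the final build() call are assumptions about the surrounding flink-core-api surface, shown here only to illustrate the pattern.

import org.apache.flink.api.common.state.MapStateDeclaration;
import org.apache.flink.api.common.state.StateDeclarations;
import org.apache.flink.api.common.typeinfo.TypeDescriptors;

class StateDeclarationExample {
    // hypothetical declaration of a per-key map state holding word counts
    static final MapStateDeclaration<String, Long> WORD_COUNTS =
            StateDeclarations.mapStateBuilder(
                            "word-counts", TypeDescriptors.STRING, TypeDescriptors.LONG)
                    .build();
}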