code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
@Test void testScheduledExecutorServiceWithFixedDelaySchedule() throws Exception { ScheduledExecutor scheduledExecutor = pekkoRpcService.getScheduledExecutor(); final int tries = 4; final long delay = 10L; final CountDownLatch countDownLatch = new CountDownLatch(tries); long currentTime = System.nanoTime(); ScheduledFuture<?> future = scheduledExecutor.scheduleWithFixedDelay( countDownLatch::countDown, delay, delay, TimeUnit.MILLISECONDS); assertThat((Future) future).isNotDone(); countDownLatch.await(); // the future should not complete since we have a periodic task assertThat((Future) future).isNotDone(); long finalTime = System.nanoTime() - currentTime; // the processing should have taken at least delay times the number of count downs. assertThat(finalTime).isGreaterThanOrEqualTo(tries * delay); future.cancel(true); }
Tests that the RPC service's scheduled executor service can execute runnable with a fixed delay.
testScheduledExecutorServiceWithFixedDelaySchedule
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcServiceTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcServiceTest.java
Apache-2.0
/**
 * Tests that canceling the returned future will stop the execution of the scheduled runnable:
 * after {@code future.cancel(false)} the periodic task must never fire a second time.
 */
@Test
void testScheduledExecutorServiceCancelWithFixedDelay() throws InterruptedException {
    ScheduledExecutor scheduledExecutor = pekkoRpcService.getScheduledExecutor();

    long delay = 10L;

    // futureTask: signals the task has started; latch: holds the first run open while we cancel;
    // shouldNotBeTriggeredLatch: fires only if the task runs again after cancellation.
    final OneShotLatch futureTask = new OneShotLatch();
    final OneShotLatch latch = new OneShotLatch();
    final OneShotLatch shouldNotBeTriggeredLatch = new OneShotLatch();

    ScheduledFuture<?> future =
            scheduledExecutor.scheduleWithFixedDelay(
                    () -> {
                        try {
                            if (futureTask.isTriggered()) {
                                // a second invocation means the cancellation did not take effect
                                shouldNotBeTriggeredLatch.trigger();
                            } else {
                                // first run
                                futureTask.trigger();
                                latch.await();
                            }
                        } catch (InterruptedException ignored) {
                            // ignore
                        }
                    },
                    delay,
                    delay,
                    TimeUnit.MILLISECONDS);

    // wait until we're in the runnable
    futureTask.await();

    // cancel the scheduled future
    future.cancel(false);

    latch.trigger();

    // the "should not be triggered" latch must time out, proving no further execution happened
    assertThatThrownBy(() -> shouldNotBeTriggeredLatch.await(5 * delay, TimeUnit.MILLISECONDS))
            .isInstanceOf(TimeoutException.class);
}
Tests that canceling the returned future will stop the execution of the scheduled runnable.
testScheduledExecutorServiceCancelWithFixedDelay
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcServiceTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcServiceTest.java
Apache-2.0
/**
 * Tests that the {@link PekkoRpcService} terminates all its RpcEndpoints when shutting down.
 */
@Test
void testRpcServiceShutDownWithRpcEndpoints() throws Exception {
    final PekkoRpcService pekkoRpcService = startRpcService();

    try {
        final int numberActors = 5;

        final RpcServiceShutdownTestHelper rpcServiceShutdownTestHelper =
                startStopNCountingAsynchronousOnStopEndpoints(pekkoRpcService, numberActors);

        // completing every endpoint's onStop future allows each endpoint — and therefore the
        // service itself — to finish its termination
        for (CompletableFuture<Void> onStopFuture :
                rpcServiceShutdownTestHelper.getStopFutures()) {
            onStopFuture.complete(null);
        }

        rpcServiceShutdownTestHelper.waitForRpcServiceTermination();

        assertThat(pekkoRpcService.getActorSystem().whenTerminated().isCompleted()).isTrue();
    } finally {
        RpcUtils.terminateRpcService(pekkoRpcService);
    }
}
Tests that the {@link PekkoRpcService} terminates all its RpcEndpoints when shutting down.
testRpcServiceShutDownWithRpcEndpoints
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcServiceTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcServiceTest.java
Apache-2.0
/**
 * Wraps the given runnable in a {@link TemporaryClassLoaderContext} to prevent the plugin class
 * loader from leaking into Flink.
 *
 * @param runnable runnable to wrap
 * @param contextClassLoader class loader that should be set as the context class loader
 * @return wrapped runnable
 */
public static Runnable withContextClassLoader(
        Runnable runnable, ClassLoader contextClassLoader) {
    // runnable::run adapts the Runnable to a ThrowingRunnable without a checked exception type
    return () -> runWithContextClassLoader(runnable::run, contextClassLoader);
}
Wraps the given runnable in a {@link TemporaryClassLoaderContext} to prevent the plugin class loader from leaking into Flink. @param runnable runnable to wrap @param contextClassLoader class loader that should be set as the context class loader @return wrapped runnable
withContextClassLoader
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/concurrent/ClassLoadingUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/concurrent/ClassLoadingUtils.java
Apache-2.0
/**
 * Runs the given runnable in a {@link TemporaryClassLoaderContext} to prevent the plugin class
 * loader from leaking into Flink.
 *
 * @param runnable runnable to run
 * @param contextClassLoader class loader that should be set as the context class loader
 * @param <T> exception type that the runnable may throw
 * @throws T if the runnable throws
 */
public static <T extends Throwable> void runWithContextClassLoader(
        ThrowingRunnable<T> runnable, ClassLoader contextClassLoader) throws T {
    // try-with-resources guarantees the previous context class loader is restored on exit
    try (TemporaryClassLoaderContext ignored =
            TemporaryClassLoaderContext.of(contextClassLoader)) {
        runnable.run();
    }
}
Runs the given runnable in a {@link TemporaryClassLoaderContext} to prevent the plugin class loader from leaking into Flink. @param runnable runnable to run @param contextClassLoader class loader that should be set as the context class loader
runWithContextClassLoader
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/concurrent/ClassLoadingUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/concurrent/ClassLoadingUtils.java
Apache-2.0
/**
 * Executes the given command on the backing executor, with the configured class loader
 * installed as the thread's context class loader for the duration of the run.
 *
 * @param command runnable to execute
 */
@Override
public void execute(Runnable command) {
    final Runnable commandWithClassLoader =
            ClassLoadingUtils.withContextClassLoader(command, contextClassLoader);
    backingExecutor.execute(commandWithClassLoader);
}
An {@link Executor} wrapper that temporarily resets the ContextClassLoader to the given ClassLoader.
execute
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/concurrent/ClassLoadingUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/concurrent/ClassLoadingUtils.java
Apache-2.0
/**
 * Returns this endpoint's current fencing token; rpc messages are only executed when their
 * attached token equals this one.
 *
 * @return the endpoint's fencing token
 */
public F getFencingToken() {
    return fencingToken;
}
Base class for fenced {@link RpcEndpoint}. A fenced rpc endpoint expects all rpc messages being enriched with fencing tokens. Furthermore, the rpc endpoint has its own fencing token assigned. The rpc is then only executed if the attached fencing token equals the endpoint's own token. @param <F> type of the fencing token
getFencingToken
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/FencedRpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/FencedRpcEndpoint.java
Apache-2.0
/**
 * Returns true iff the current thread equals the provided expected thread; violations are
 * logged (with a stack trace) before returning false.
 *
 * @param expected the expected main thread.
 * @return true iff the current thread is equal to the provided expected thread.
 */
public static boolean isRunningInExpectedThread(@Nullable Thread expected) {
    final Thread currentThread = Thread.currentThread();

    if (expected == currentThread) {
        return true;
    }

    // log the violation together with an exception so the offending call site is visible
    String violationMsg =
            "Violation of main thread constraint detected: expected <"
                    + expected
                    + "> but running in <"
                    + currentThread
                    + ">.";
    LOG.warn(violationMsg, new Exception(violationMsg));
    return false;
}
Returns true iff the current thread is equals to the provided expected thread and logs violations. @param expected the expected main thread. @return true iff the current thread is equals to the provided expected thread.
isRunningInExpectedThread
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/MainThreadValidatorUtil.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/MainThreadValidatorUtil.java
Apache-2.0
/**
 * Returns the rpc endpoint's identifier.
 *
 * @return Rpc endpoint's identifier.
 */
public String getEndpointId() {
    return endpointId;
}
Returns the rpc endpoint's identifier. @return Rpc endpoint's identifier.
getEndpointId
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Returns whether the RPC endpoint is started and not stopped or being stopped.
 *
 * <p>May only be called from the endpoint's main thread (enforced by the validator).
 *
 * @return whether the RPC endpoint is started and not stopped or being stopped.
 */
protected boolean isRunning() {
    validateRunsInMainThread();
    return isRunning;
}
Returns whether the RPC endpoint is started and not stopped or being stopped. @return whether the RPC endpoint is started and not stopped or being stopped.
isRunning
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Internal method which is called by the RpcService implementation to start the RpcEndpoint.
 *
 * @throws Exception indicating that the rpc endpoint could not be started. If an exception
 *     occurs, then the rpc endpoint will automatically terminate.
 */
public final void internalCallOnStart() throws Exception {
    validateRunsInMainThread();
    // set the running flag before onStart so callbacks invoked from onStart observe isRunning()
    isRunning = true;
    onStart();
}
Internal method which is called by the RpcService implementation to start the RpcEndpoint. @throws Exception indicating that the rpc endpoint could not be started. If an exception occurs, then the rpc endpoint will automatically terminate.
internalCallOnStart
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Internal method which is called by the RpcService implementation to stop the RpcEndpoint.
 *
 * @return Future which is completed once all post stop actions are completed. If an error
 *     occurs this future is completed exceptionally
 */
public final CompletableFuture<Void> internalCallOnStop() {
    validateRunsInMainThread();
    CompletableFuture<Void> stopFuture = new CompletableFuture<>();
    try {
        // close all registered resources before running the user-defined onStop callback
        resourceRegistry.close();
        stopFuture.complete(null);
    } catch (IOException e) {
        stopFuture.completeExceptionally(
                new RuntimeException("Close resource registry fail", e));
    }
    // combine the resource-closing outcome with the user's asynchronous onStop future
    stopFuture = CompletableFuture.allOf(stopFuture, onStop());
    isRunning = false;
    return stopFuture;
}
Internal method which is called by the RpcService implementation to stop the RpcEndpoint. @return Future which is completed once all post stop actions are completed. If an error occurs this future is completed exceptionally
internalCallOnStop
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Register the given closeable resource to {@link CloseableRegistry} so it is closed when the
 * endpoint stops.
 *
 * @param closeableResource the given closeable resource
 * @throws RuntimeException if the resource could not be registered
 */
protected void registerResource(Closeable closeableResource) {
    try {
        resourceRegistry.registerCloseable(closeableResource);
    } catch (IOException e) {
        throw new RuntimeException(
                "Registry closeable resource " + closeableResource + " fail", e);
    }
}
Register the given closeable resource to {@link CloseableRegistry}. @param closeableResource the given closeable resource
registerResource
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Unregister the given closeable resource from {@link CloseableRegistry}.
 *
 * @param closeableResource the given closeable resource
 * @return true if the given resource unregister successful, otherwise false
 */
protected boolean unregisterResource(Closeable closeableResource) {
    return resourceRegistry.unregisterCloseable(closeableResource);
}
Unregister the given closeable resource from {@link CloseableRegistry}. @param closeableResource the given closeable resource @return true if the given resource unregister successful, otherwise false
unregisterResource
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * User overridable callback which is called from {@link #internalCallOnStop()}.
 *
 * <p>This method is called when the RpcEndpoint is being shut down. The method is guaranteed
 * to be executed in the main thread context and can be used to clean up internal state.
 *
 * <p>IMPORTANT: This method should never be called directly by the user.
 *
 * @return Future which is completed once all post stop actions are completed. If an error
 *     occurs this future is completed exceptionally. The default is an already-completed future.
 */
protected CompletableFuture<Void> onStop() {
    return CompletableFuture.completedFuture(null);
}
User overridable callback which is called from {@link #internalCallOnStop()}. <p>This method is called when the RpcEndpoint is being shut down. The method is guaranteed to be executed in the main thread context and can be used to clean up internal state. <p>IMPORTANT: This method should never be called directly by the user. @return Future which is completed once all post stop actions are completed. If an error occurs this future is completed exceptionally
onStop
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Triggers the shut down of the rpc endpoint. The shut down is executed asynchronously.
 *
 * <p>In order to wait on the completion of the shut down, obtain the termination future via
 * {@link #getTerminationFuture()} and wait on its completion.
 */
@Override
public final CompletableFuture<Void> closeAsync() {
    rpcService.stopServer(rpcServer);
    return getTerminationFuture();
}
Triggers the shut down of the rpc endpoint. The shut down is executed asynchronously. <p>In order to wait on the completion of the shut down, obtain the termination future via {@link #getTerminationFuture()}} and wait on its completion.
closeAsync
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Returns a self gateway of the specified type which can be used to issue asynchronous calls
 * against the RpcEndpoint.
 *
 * <p>IMPORTANT: The self gateway type must be implemented by the RpcEndpoint. Otherwise the
 * method will fail.
 *
 * @param selfGatewayType class of the self gateway type
 * @param <C> type of the self gateway to create
 * @return Self gateway of the specified type which can be used to issue asynchronous rpcs
 */
public <C extends RpcGateway> C getSelfGateway(Class<C> selfGatewayType) {
    return rpcService.getSelfGateway(selfGatewayType, rpcServer);
}
Returns a self gateway of the specified type which can be used to issue asynchronous calls against the RpcEndpoint. <p>IMPORTANT: The self gateway type must be implemented by the RpcEndpoint. Otherwise the method will fail. @param selfGatewayType class of the self gateway type @param <C> type of the self gateway to create @return Self gateway of the specified type which can be used to issue asynchronous rpcs
getSelfGateway
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Gets the address of the underlying RPC endpoint. The address should be fully qualified so
 * that a remote system can connect to this RPC endpoint via this address.
 *
 * @return Fully qualified address of the underlying RPC endpoint
 */
@Override
public String getAddress() {
    return rpcServer.getAddress();
}
Gets the address of the underlying RPC endpoint. The address should be fully qualified so that a remote system can connect to this RPC endpoint via this address. @return Fully qualified address of the underlying RPC endpoint
getAddress
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Gets the hostname of the underlying RPC endpoint.
 *
 * @return Hostname on which the RPC endpoint is running
 */
@Override
public String getHostname() {
    return rpcServer.getHostname();
}
Gets the hostname of the underlying RPC endpoint. @return Hostname on which the RPC endpoint is running
getHostname
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Gets the main thread execution context. The main thread execution context can be used to
 * execute tasks in the main thread of the underlying RPC endpoint.
 *
 * @return Main thread execution context
 */
protected MainThreadExecutor getMainThreadExecutor() {
    return mainThreadExecutor;
}
Gets the main thread execution context. The main thread execution context can be used to execute tasks in the main thread of the underlying RPC endpoint. @return Main thread execution context
getMainThreadExecutor
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Gets the main thread execution context scoped to the given job: MDC job information is
 * added/removed around every invocation issued through the returned executor.
 *
 * @param jobID the {@link JobID} to scope the returned executor to
 * @return Main thread execution context scoped to the given job
 */
protected Executor getMainThreadExecutor(JobID jobID) {
    // todo: consider caching
    return MdcUtils.scopeToJob(jobID, getMainThreadExecutor());
}
Gets the main thread execution context. The main thread execution context can be used to execute tasks in the main thread of the underlying RPC endpoint. @param jobID the {@link JobID} to scope the returned {@link ComponentMainThreadExecutor} to, i.e. add/remove before/after the invocations using the returned executor @return Main thread execution context
getMainThreadExecutor
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Gets the endpoint's RPC service.
 *
 * @return The endpoint's RPC service
 */
public RpcService getRpcService() {
    return rpcService;
}
Gets the endpoint's RPC service. @return The endpoint's RPC service
getRpcService
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Return a future which is completed when the rpc endpoint has been terminated. In case of a
 * failure, this future is completed with the occurring exception.
 *
 * @return Future which is completed when the rpc endpoint has been terminated.
 */
public CompletableFuture<Void> getTerminationFuture() {
    return rpcServer.getTerminationFuture();
}
Return a future which is completed with true when the rpc endpoint has been terminated. In case of a failure, this future is completed with the occurring exception. @return Future which is completed when the rpc endpoint has been terminated.
getTerminationFuture
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Execute the callable in the main thread of the underlying RPC service, returning a future
 * for the result of the callable. If the callable is not completed within the given timeout,
 * then the future will be failed with a {@link TimeoutException}.
 *
 * @param callable Callable to be executed in the main thread of the underlying rpc server
 * @param timeout Timeout for the callable to be completed
 * @param <V> Return type of the callable
 * @return Future for the result of the callable.
 */
protected <V> CompletableFuture<V> callAsync(Callable<V> callable, Duration timeout) {
    return rpcServer.callAsync(callable, timeout);
}
Execute the callable in the main thread of the underlying RPC service, returning a future for the result of the callable. If the callable is not completed within the given timeout, then the future will be failed with a {@link TimeoutException}. @param callable Callable to be executed in the main thread of the underlying rpc server @param timeout Timeout for the callable to be completed @param <V> Return type of the callable @return Future for the result of the callable.
callAsync
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Validates that the method call happens in the RPC endpoint's main thread.
 *
 * <p><b>IMPORTANT:</b> This check only happens when assertions are enabled (e.g. {@code -ea},
 * as when running tests); otherwise the call is a no-op.
 */
public void validateRunsInMainThread() {
    // the validator also logs violations, so failures are visible even in log-only setups
    assert MainThreadValidatorUtil.isRunningInExpectedThread(currentMainThread.get());
}
Validates that the method call happens in the RPC endpoint's main thread. <p><b>IMPORTANT:</b> This check only happens when assertions are enabled, such as when running tests. <p>This can be used for additional checks, like <pre>{@code protected void concurrencyCriticalMethod() { validateRunsInMainThread(); // some critical stuff } }</pre>
validateRunsInMainThread
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Validate whether all the resources are closed: both the scheduled executor and the closeable
 * resource registry must be shut down.
 *
 * @return true if all the resources are closed, otherwise false
 */
boolean validateResourceClosed() {
    return mainThreadExecutor.validateScheduledExecutorClosed() && resourceRegistry.isClosed();
}
Validate whether all the resources are closed. @return true if all the resources are closed, otherwise false
validateResourceClosed
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Executes the given command by handing it to the gateway, which runs it asynchronously in the
 * endpoint's main thread.
 *
 * @param command the runnable to execute
 */
@Override
public void execute(@Nonnull Runnable command) {
    gateway.runAsync(command);
}
The main scheduled executor manages the scheduled tasks and send them to gateway when they should be executed.
execute
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * The mainScheduledExecutor manages the task and sends it to the gateway after the given
 * delay, so the task ultimately runs in the endpoint's main thread.
 *
 * @param command the task to execute in the future
 * @param delay the time from now to delay the execution
 * @param unit the time unit of the delay parameter
 * @return a ScheduledFuture representing the completion of the scheduled task
 */
@Override
public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
    final long delayMillis = TimeUnit.MILLISECONDS.convert(delay, unit);
    FutureTask<Void> ft = new FutureTask<>(command, null);
    if (mainScheduledExecutor.isShutdown()) {
        // NOTE(review): when shut down, the command is dropped but the returned future wraps a
        // FutureTask that will never run, so it never completes — confirm callers expect this
        log.warn(
                "The scheduled executor service is shutdown and ignores the command {}",
                command);
    } else {
        mainScheduledExecutor.schedule(
                () -> gateway.runAsync(ft), delayMillis, TimeUnit.MILLISECONDS);
    }
    return new ScheduledFutureAdapter<>(ft, delayMillis, TimeUnit.MILLISECONDS);
}
The mainScheduledExecutor manages the task and sends it to the gateway after the given delay. @param command the task to execute in the future @param delay the time from now to delay the execution @param unit the time unit of the delay parameter @return a ScheduledFuture representing the completion of the scheduled task
schedule
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Shuts down the {@link ScheduledThreadPoolExecutor} and removes all pending tasks; a no-op if
 * it is already shut down.
 */
@Override
public void close() {
    if (mainScheduledExecutor.isShutdown()) {
        // already shut down; nothing to do
        return;
    }
    mainScheduledExecutor.shutdownNow();
}
Shutdown the {@link ScheduledThreadPoolExecutor} and remove all the pending tasks.
close
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Validate whether the scheduled executor is closed.
 *
 * @return true if the scheduled executor is shutdown, otherwise false
 */
final boolean validateScheduledExecutorClosed() {
    return mainScheduledExecutor.isShutdown();
}
Validate whether the scheduled executor is closed. @return true if the scheduled executor is shutdown, otherwise false
validateScheduledExecutorClosed
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
Apache-2.0
/**
 * Extracts the {@link RpcTimeout} annotated rpc timeout value from the list of given method
 * arguments. If no {@link RpcTimeout} annotated parameter could be found, then the default
 * timeout is returned.
 *
 * @param parameterAnnotations Parameter annotations
 * @param args Array of arguments
 * @param defaultTimeout Default timeout to return if no {@link RpcTimeout} annotated parameter
 *     has been found
 * @return Timeout extracted from the array of arguments or the default timeout
 * @throws RuntimeException if the annotated argument is not a {@link Duration}
 */
public static Duration extractRpcTimeout(
        Annotation[][] parameterAnnotations, Object[] args, Duration defaultTimeout) {
    if (args != null) {
        Preconditions.checkArgument(parameterAnnotations.length == args.length);

        for (int i = 0; i < parameterAnnotations.length; i++) {
            if (isRpcTimeout(parameterAnnotations[i])) {
                if (args[i] instanceof Duration) {
                    return (Duration) args[i];
                } else {
                    // Robustness fix: a null timeout argument previously triggered an
                    // uninformative NullPointerException on args[i].getClass()
                    final String actualType =
                            args[i] == null ? "null" : args[i].getClass().getName();
                    throw new RuntimeException(
                            "The rpc timeout parameter must be of type "
                                    + Duration.class.getName()
                                    + ". The type "
                                    + actualType
                                    + " is not supported.");
                }
            }
        }
    }

    return defaultTimeout;
}
Extracts the {@link RpcTimeout} annotated rpc timeout value from the list of given method arguments. If no {@link RpcTimeout} annotated parameter could be found, then the default timeout is returned. @param parameterAnnotations Parameter annotations @param args Array of arguments @param defaultTimeout Default timeout to return if no {@link RpcTimeout} annotated parameter has been found @return Timeout extracted from the array of arguments or the default timeout
extractRpcTimeout
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcGatewayUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcGatewayUtils.java
Apache-2.0
/**
 * Checks whether any of the annotations is of type {@link RpcTimeout}.
 *
 * @param annotations Array of annotations
 * @return True if {@link RpcTimeout} was found; otherwise false
 */
private static boolean isRpcTimeout(Annotation[] annotations) {
    for (int i = 0; i < annotations.length; i++) {
        if (RpcTimeout.class.equals(annotations[i].annotationType())) {
            return true;
        }
    }

    return false;
}
Checks whether any of the annotations is of type {@link RpcTimeout}. @param annotations Array of annotations @return True if {@link RpcTimeout} was found; otherwise false
isRpcTimeout
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcGatewayUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcGatewayUtils.java
Apache-2.0
public static String createRandomName(String prefix) { Preconditions.checkNotNull(prefix, "Prefix must not be null."); long nameOffset; // obtain the next name offset by incrementing it atomically do { nameOffset = nextNameOffset.get(); } while (!nextNameOffset.compareAndSet(nameOffset, nameOffset + 1L)); return prefix + '_' + nameOffset; }
Creates a random name of the form prefix_X, where X is an increasing number. @param prefix Prefix string to prepend to the monotonically increasing name offset number @return A random name of the form prefix_X where X is an increasing number
createRandomName
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcServiceUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcServiceUtils.java
Apache-2.0
/**
 * Creates a wildcard name symmetric to {@link #createRandomName(String)}.
 *
 * @param prefix prefix of the wildcard name
 * @return wildcard name starting with the prefix
 */
public static String createWildcardName(String prefix) {
    final String wildcardSuffix = "_*";
    return prefix + wildcardSuffix;
}
Creates a wildcard name symmetric to {@link #createRandomName(String)}. @param prefix prefix of the wildcard name @return wildcard name starting with the prefix
createWildcardName
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcServiceUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcServiceUtils.java
Apache-2.0
/**
 * Hook to cleanup resources, like common thread pools or classloaders. The default
 * implementation does nothing.
 */
@Override
default void close() {}
Hook to cleanup resources, like common thread pools or classloaders.
close
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcSystem.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcSystem.java
Apache-2.0
/**
 * Loads the RpcSystem using a default (empty) {@link Configuration}.
 *
 * @return loaded RpcSystem
 */
static RpcSystem load() {
    return load(new Configuration());
}
Loads the RpcSystem. @return loaded RpcSystem
load
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcSystem.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcSystem.java
Apache-2.0
/**
 * Loads the RpcSystem, trying each discovered {@link RpcSystemLoader} in ascending load
 * priority until one succeeds.
 *
 * @param config Flink configuration
 * @return loaded RpcSystem
 * @throws RpcLoaderException if no loader could load an RpcSystem; earlier failures are
 *     attached as (suppressed) causes
 */
static RpcSystem load(Configuration config) {
    final PriorityQueue<RpcSystemLoader> rpcSystemLoaders =
            new PriorityQueue<>(Comparator.comparingInt(RpcSystemLoader::getLoadPriority));
    ServiceLoader.load(RpcSystemLoader.class).forEach(rpcSystemLoaders::add);

    Exception loadError = null;
    // Bug fix: PriorityQueue's iterator does NOT traverse in priority order — only poll()
    // honors the comparator — so the previous iterator-based loop ignored load priority.
    while (!rpcSystemLoaders.isEmpty()) {
        final RpcSystemLoader next = rpcSystemLoaders.poll();
        try {
            return next.loadRpcSystem(config);
        } catch (Exception e) {
            loadError = ExceptionUtils.firstOrSuppressed(e, loadError);
        }
    }
    throw new RpcLoaderException("Could not load RpcSystem.", loadError);
}
Loads the RpcSystem. @param config Flink configuration @return loaded RpcSystem
load
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcSystem.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcSystem.java
Apache-2.0
/**
 * Extracts all {@link RpcGateway} interfaces implemented by the given clazz, walking up the
 * class hierarchy.
 *
 * @param clazz from which to extract the implemented RpcGateway interfaces
 * @return A set of all implemented RpcGateway interfaces
 */
public static Set<Class<? extends RpcGateway>> extractImplementedRpcGateways(Class<?> clazz) {
    HashSet<Class<? extends RpcGateway>> interfaces = new HashSet<>();

    while (clazz != null) {
        for (Class<?> interfaze : clazz.getInterfaces()) {
            if (RpcGateway.class.isAssignableFrom(interfaze)) {
                // Idiom fix: asSubclass performs the same narrowing as the previous unchecked
                // cast, but with a runtime check and no @SuppressWarnings needed
                interfaces.add(interfaze.asSubclass(RpcGateway.class));
            }
        }

        clazz = clazz.getSuperclass();
    }

    return interfaces;
}
Extracts all {@link RpcGateway} interfaces implemented by the given clazz. @param clazz from which to extract the implemented RpcGateway interfaces @return A set of all implemented RpcGateway interfaces
extractImplementedRpcGateways
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcUtils.java
Apache-2.0
/**
 * Shuts the given {@link RpcEndpoint}s down and awaits their termination.
 *
 * @param rpcEndpoints to terminate
 * @throws ExecutionException if a problem occurred
 * @throws InterruptedException if the operation has been interrupted
 */
@VisibleForTesting
public static void terminateRpcEndpoint(RpcEndpoint... rpcEndpoints)
        throws ExecutionException, InterruptedException {
    terminateAsyncCloseables(Arrays.asList(rpcEndpoints));
}
Shuts the given {@link RpcEndpoint}s down and awaits their termination. @param rpcEndpoints to terminate @throws ExecutionException if a problem occurred @throws InterruptedException if the operation has been interrupted
terminateRpcEndpoint
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcUtils.java
Apache-2.0
/**
 * Convenient shortcut for constructing a remote RPC Service that takes care of checking for
 * null and empty optionals.
 *
 * @param rpcSystem rpc system used to build the service
 * @param configuration Flink configuration
 * @param externalAddress optional address under which the service is reachable externally
 * @param externalPortRange port range for the external port
 * @param bindAddress optional local address to bind to; skipped when null
 * @param bindPort optional local port to bind to; skipped when empty
 * @return the started rpc service
 * @throws Exception if the service could not be created or started
 * @see RpcSystem#remoteServiceBuilder(Configuration, String, String)
 */
public static RpcService createRemoteRpcService(
        RpcSystem rpcSystem,
        Configuration configuration,
        @Nullable String externalAddress,
        String externalPortRange,
        @Nullable String bindAddress,
        @SuppressWarnings("OptionalUsedAsFieldOrParameterType") Optional<Integer> bindPort)
        throws Exception {
    RpcSystem.RpcServiceBuilder rpcServiceBuilder =
            rpcSystem.remoteServiceBuilder(configuration, externalAddress, externalPortRange);
    if (bindAddress != null) {
        rpcServiceBuilder = rpcServiceBuilder.withBindAddress(bindAddress);
    }
    if (bindPort.isPresent()) {
        rpcServiceBuilder = rpcServiceBuilder.withBindPort(bindPort.get());
    }
    return rpcServiceBuilder.createAndStart();
}
Convenient shortcut for constructing a remote RPC Service that takes care of checking for null and empty optionals. @see RpcSystem#remoteServiceBuilder(Configuration, String, String)
createRemoteRpcService
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcUtils.java
Apache-2.0
/**
 * Converts a rpc call into its string representation of the form
 * {@code DeclaringClass.method(Type1, Type2)}.
 *
 * @param declaringClassName declaringClassName declares the specified rpc
 * @param methodName methodName of the rpc
 * @param parameterTypes parameterTypes of the rpc
 * @return string representation of the rpc
 */
static String convertRpcToString(
        String declaringClassName, String methodName, Class<?>[] parameterTypes) {
    final StringBuilder rpcStringBuilder =
            new StringBuilder(declaringClassName).append('.').append(methodName).append('(');

    for (int i = 0; i < parameterTypes.length; i++) {
        if (i > 0) {
            rpcStringBuilder.append(", ");
        }
        rpcStringBuilder.append(parameterTypes[i].getSimpleName());
    }

    return rpcStringBuilder.append(')').toString();
}
Converts a rpc call into its string representation. @param declaringClassName declaringClassName declares the specified rpc @param methodName methodName of the rpc @param parameterTypes parameterTypes of the rpc @return string representation of the rpc
convertRpcToString
java
apache/flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/messages/RpcInvocation.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/messages/RpcInvocation.java
Apache-2.0
/**
 * Creates a snapshot of this accumulator registry.
 *
 * <p>Best-effort: any failure is logged and {@code null} is returned instead of propagating
 * the error.
 *
 * @return a serialized accumulator map, or null if the snapshot could not be created
 */
public AccumulatorSnapshot getSnapshot() {
    try {
        return new AccumulatorSnapshot(jobID, taskID, userAccumulators);
    } catch (Throwable e) {
        LOG.warn("Failed to serialize accumulators for task.", e);
        return null;
    }
}
Creates a snapshot of this accumulator registry. @return a serialized accumulator map
getSnapshot
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/accumulators/AccumulatorRegistry.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/accumulators/AccumulatorRegistry.java
Apache-2.0
/**
 * Gets the map for user-defined accumulators.
 *
 * <p>NOTE(review): this returns the live internal map, not a copy — mutations by the caller
 * affect the registry; confirm callers rely on this.
 */
public Map<String, Accumulator<?, ?>> getUserMap() {
    return userAccumulators;
}
Gets the map for user-defined accumulators.
getUserMap
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/accumulators/AccumulatorRegistry.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/accumulators/AccumulatorRegistry.java
Apache-2.0
/**
 * Deserializes the user-defined accumulator values with the given class loader.
 *
 * @param classLoader class loader used to resolve the accumulator classes
 * @return the deserialized accumulator map
 * @throws IOException if deserialization fails
 * @throws ClassNotFoundException if an accumulator class cannot be resolved
 */
public Map<String, Accumulator<?, ?>> deserializeUserAccumulators(ClassLoader classLoader)
        throws IOException, ClassNotFoundException {
    return userAccumulators.deserializeValue(classLoader);
}
Gets the user-defined accumulators values. @return the serialized map
deserializeUserAccumulators
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/accumulators/AccumulatorSnapshot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/accumulators/AccumulatorSnapshot.java
Apache-2.0
/**
 * Build a new context based on record and key, without inheriting epoch and variables from the
 * current context. Also wired with internal {@link KeyAccountingUnit}.
 *
 * @param record the given record.
 * @param key the given key.
 * @return the built record context.
 */
public RecordContext<K> buildContext(Object record, K key) {
    return buildContext(record, key, false);
}
Build a new context based on record and key. Also wired with internal {@link KeyAccountingUnit}. @param record the given record. @param key the given key. @return the built record context.
buildContext
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java
Apache-2.0
public RecordContext<K> buildContext(Object record, K key, boolean inherit) { if (inherit && currentContext != null) { return new RecordContext<>( record == null ? RecordContext.EMPTY_RECORD : record, key, this::disposeContext, KeyGroupRangeAssignment.assignToKeyGroup(key, maxParallelism), epochManager.onEpoch(currentContext.getEpoch()), currentContext.getVariablesReference(), // When inheriting, we increase the priority by 1 to ensure that the record is // processed right after the current record if possible. currentContext.getPriority() + 1); } else { return new RecordContext<>( record == null ? RecordContext.EMPTY_RECORD : record, key, this::disposeContext, KeyGroupRangeAssignment.assignToKeyGroup(key, maxParallelism), epochManager.onRecord(), declarationManager.variableCount()); } }
Build a new context based on record and key. Also wired with internal {@link KeyAccountingUnit}. @param record the given record. @param key the given key. @param inherit whether to inherit epoch and variables from the current context. Or otherwise create new ones. @return the built record context.
buildContext
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java
Apache-2.0
void disposeContext(RecordContext<K> toDispose) { epochManager.completeOneRecord(toDispose.getEpoch()); keyAccountingUnit.release(toDispose.getRecord(), toDispose.getKey()); inFlightRecordNum.decrementAndGet(); StateRequest<K, ?, ?, ?> nextRequest = stateRequestsBuffer.unblockOneByKey(toDispose.getKey()); if (nextRequest != null) { Preconditions.checkState(tryOccupyKey(nextRequest.getRecordContext())); insertActiveBuffer(nextRequest); } }
Dispose a context. @param toDispose the context to dispose.
disposeContext
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java
Apache-2.0
boolean tryOccupyKey(RecordContext<K> recordContext) { boolean occupied = recordContext.isKeyOccupied(); if (!occupied && keyAccountingUnit.occupy(recordContext.getRecord(), recordContext.getKey())) { recordContext.setKeyOccupied(); occupied = true; } return occupied; }
Try to occupy a key by a given context. @param recordContext the given context. @return true if occupy succeed or the key has already occupied by this context.
tryOccupyKey
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java
Apache-2.0
public boolean triggerIfNeeded(boolean force) { if (!force && stateRequestsBuffer.activeQueueSize() < batchSize) { return false; } Optional<StateRequestContainer> toRun = stateRequestsBuffer.popActive( batchSize, () -> stateExecutor.createStateRequestContainer()); if (!toRun.isPresent() || toRun.get().isEmpty()) { return false; } stateExecutor.executeBatchRequests(toRun.get()); stateRequestsBuffer.advanceSeq(); return true; }
Trigger a batch of requests. @param force whether to trigger requests in force.
triggerIfNeeded
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java
Apache-2.0
public StateFuture<Void> syncPointRequestWithCallback( ThrowingRunnable<Exception> callback, boolean allowOverdraft) { return handleRequest(null, StateRequestType.SYNC_POINT, true, null, allowOverdraft) .thenAccept(v -> callback.run()); }
A helper to request a {@link StateRequestType#SYNC_POINT} and run a callback if it finishes (once the record is not blocked). @param callback the callback to run if it finishes (once the record is not blocked). @param allowOverdraft whether to overdraft the in-flight buffer.
syncPointRequestWithCallback
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java
Apache-2.0
private void waitForNewMails() throws InterruptedException { if (!callbackRunner.isHasMail()) { synchronized (notifyLock) { if (!callbackRunner.isHasMail()) { waitingMail = true; notifyLock.wait(1); waitingMail = false; } } } }
Wait for new mails if there is no more mail.
waitForNewMails
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java
Apache-2.0
public Epoch onRecord() { if (finishingEpoch != null) { finishingEpoch.ongoingRecordCount++; return finishingEpoch; } else { activeEpoch.ongoingRecordCount++; return activeEpoch; } }
Add a record to the current epoch and return the current open epoch, the epoch will be associated with the {@link RecordContext} of this record. Must be invoked within task thread. @return the current open epoch.
onRecord
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/EpochManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/EpochManager.java
Apache-2.0
public Epoch onEpoch(Epoch epoch) { epoch.ongoingRecordCount++; return epoch; }
Add a record to a specified epoch. @param epoch the specified epoch. @return the specified epoch itself.
onEpoch
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/EpochManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/EpochManager.java
Apache-2.0
public void onNonRecord( @Nullable Runnable triggerAction, @Nullable Runnable finalAction, ParallelMode parallelMode) { LOG.trace( "on NonRecord, old epoch: {}, outputQueue size: {}", activeEpoch, outputQueue.size()); switchActiveEpoch(triggerAction, finalAction); if (parallelMode == ParallelMode.SERIAL_BETWEEN_EPOCH) { asyncExecutionController.drainInflightRecords(0); } }
Add a non-record to the current epoch, close current epoch and open a new epoch. Must be invoked within task thread. @param triggerAction the action associated with this non-record. @param parallelMode the parallel mode for this epoch.
onNonRecord
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/EpochManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/EpochManager.java
Apache-2.0
boolean isKeyOccupied() { return keyOccupied; }
Check if this context has occupied the key.
isKeyOccupied
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/RecordContext.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/RecordContext.java
Apache-2.0
public int retain() { return unsafe.getAndAddInt(this, referenceOffset, 1) + 1; }
The "unsafe", which can be used to perform native memory accesses.
retain
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/ReferenceCounted.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/ReferenceCounted.java
Apache-2.0
public <OUT> InternalStateFuture<OUT> create(RecordContext<K> context) { return new ContextStateFutureImpl<>( (runnable) -> callbackRunner.submit( () -> { asyncExecutionController.setCurrentContext(context); runnable.run(); }), exceptionHandler, context); }
An internal factory for {@link InternalStateFuture} that build future with necessary context switch and wired with mailbox executor.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/StateFutureFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/StateFutureFactory.java
Apache-2.0
@Nullable StateRequest<K, ?, ?, ?> unblockOneByKey(K key) { if (!blockingQueue.containsKey(key)) { return null; } StateRequest<K, ?, ?, ?> stateRequest = blockingQueue.get(key).removeFirst(); if (blockingQueue.get(key).isEmpty()) { blockingQueue.remove(key); } blockingQueueSize--; return stateRequest; }
Try to pull one state request with specific key from blocking queue to active queue. @param key The key to release, the other records with this key is no longer blocking. @return The first record context with the same key in blocking queue, null if no such record.
unblockOneByKey
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/StateRequestBuffer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/StateRequestBuffer.java
Apache-2.0
int blockingQueueSize() { return blockingQueueSize; }
Get the number of state requests of blocking queue in constant-time. @return the number of state requests of blocking queue.
blockingQueueSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/StateRequestBuffer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/StateRequestBuffer.java
Apache-2.0
int blockingKeyNum() { return blockingQueue.size(); }
Get the number of different keys in blocking queue. @return the number of different keys in blocking queue.
blockingKeyNum
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/StateRequestBuffer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/StateRequestBuffer.java
Apache-2.0
int activeQueueSize() { return activeQueue.size(); }
Get the number of state requests of active queue in constant-time. @return the number of state requests of active queue.
activeQueueSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/StateRequestBuffer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/StateRequestBuffer.java
Apache-2.0
public <T> DeclaredVariable<T> declareVariable( TypeSerializer<T> serializer, String name, @Nullable Supplier<T> initialValue) throws DeclarationException { return manager.registerVariable(serializer, name, initialValue); }
Declare a variable that will keep value across callback with same context. @param serializer the serializer of variable. @param name the name. @param initialValue the initializer of variable. Can be null if no need to initialize. @param <T> The type of value.
declareVariable
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/declare/DeclarationContext.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/declare/DeclarationContext.java
Apache-2.0
public <T> ContextVariable<T> declareVariable(@Nullable Supplier<T> initializer) throws DeclarationException { return manager.registerVariable(initializer); }
Declare a variable that will keep value across callback with same context. This value cannot be serialized into checkpoint. @param initializer the initializer of variable. Can be null if no need to initialize. @param <T> The type of value.
declareVariable
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/declare/DeclarationContext.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/declare/DeclarationContext.java
Apache-2.0
public <IN> DeclarationChain<IN>.DeclarationStage<IN> declareChain() throws DeclarationException { return new DeclarationChain<IN>(this).firstStage(); }
Declaring a processing in chain-style. This method start a chain with an input type. @return the chain itself. @param <IN> the in type of the first block.
declareChain
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/declare/DeclarationContext.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/declare/DeclarationContext.java
Apache-2.0
@Override public void setCurrentNamespace(N namespace) { declaredNamespace.set(namespace); state.setCurrentNamespace(namespace); }
A partitioned state that wraps a declared namespace and hide the namespace switching from user. User will only use the state just like the public state APIs without any consideration on namespace. <p>This wrap class is useful in DataStream window operation, where namespace is managed by the operator and user function is free from namespace manipulation.
setCurrentNamespace
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/declare/state/StateWithDeclaredNamespace.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/declare/state/StateWithDeclaredNamespace.java
Apache-2.0
@SuppressWarnings("unchecked") public static <N, S extends State> S create(S state, DeclaredVariable<N> declaredNamespace) { if (state instanceof InternalReducingState) { return (S) new ReducingStateWithDeclaredNamespace<>( (InternalReducingState<?, N, ?>) state, declaredNamespace); } else if (state instanceof InternalAggregatingState) { return (S) new AggregatingStateWithDeclaredNamespace<>( (InternalAggregatingState<?, N, ?, ?, ?>) state, declaredNamespace); } else if (state instanceof InternalValueState) { return (S) new ValueStateWithDeclaredNamespace<>( (InternalValueState<?, N, ?>) state, declaredNamespace); } else if (state instanceof InternalMapState) { return (S) new MapStateWithDeclaredNamespace<>( (InternalMapState<?, N, ?, ?>) state, declaredNamespace); } else if (state instanceof InternalListState) { return (S) new ListStateWithDeclaredNamespace<>( (InternalListState<?, N, ?>) state, declaredNamespace); } else { throw new IllegalArgumentException( "Unsupported state type: " + state.getClass().getName()); } }
Automatically called before any async state access.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/declare/state/StateWithDeclaredNamespace.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/declare/state/StateWithDeclaredNamespace.java
Apache-2.0
@Override public final void processElement1(IN1 value, Context ctx, Collector<OUT> out) throws Exception { throw new IllegalAccessException("This method is replaced by declareProcess1."); }
Override and finalize this method. Please use {@link #declareProcess1} instead.
processElement1
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/functions/DeclaringAsyncKeyedCoProcessFunction.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/functions/DeclaringAsyncKeyedCoProcessFunction.java
Apache-2.0
@Override public final void processElement2(IN2 value, Context ctx, Collector<OUT> out) throws Exception { throw new IllegalAccessException("This method is replaced by declareProcess2."); }
Override and finalize this method. Please use {@link #declareProcess2} instead.
processElement2
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/functions/DeclaringAsyncKeyedCoProcessFunction.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/functions/DeclaringAsyncKeyedCoProcessFunction.java
Apache-2.0
@Override public final void processElement(I value, Context ctx, Collector<O> out) throws Exception { throw new IllegalAccessException("This method is replaced by declareProcess."); }
Override and finalize this method. Please use {@link #declareProcess} instead.
processElement
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/functions/DeclaringAsyncKeyedProcessFunction.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/functions/DeclaringAsyncKeyedProcessFunction.java
Apache-2.0
private StateFuture<Void> clearAllState(W window, AppendingState<IN, ACC, ACC> windowState) { return windowState .asyncClear() .thenCompose(ignore -> triggerContext.clear()) .thenCompose( ignore -> { windowDeclaredVariable.set(window); return processContext.clear(); }); }
Drops all state for the given window and calls {@link AsyncTrigger#clear(Window, AsyncTrigger.TriggerContext)}. <p>The caller must ensure that the correct key is set in the state backend and the triggerContext object.
clearAllState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/operators/windowing/AsyncWindowOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/operators/windowing/AsyncWindowOperator.java
Apache-2.0
public boolean isEndOfStreamTrigger() { return false; }
Indicate whether the trigger only trigger at the end of stream.
isEndOfStreamTrigger
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/operators/windowing/triggers/AsyncTrigger.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/operators/windowing/triggers/AsyncTrigger.java
Apache-2.0
protected File getFileInternal(@Nullable JobID jobId, BlobKey blobKey) throws IOException { checkArgument(blobKey != null, "BLOB key cannot be null."); final File localFile = BlobUtils.getStorageLocation(storageDir.deref(), jobId, blobKey); readWriteLock.readLock().lock(); try { if (localFile.exists()) { return localFile; } } finally { readWriteLock.readLock().unlock(); } // first try the distributed blob store (if available) // use a temporary file (thread-safe without locking) File incomingFile = createTemporaryFilename(); try { try { if (blobView.get(jobId, blobKey, incomingFile)) { // now move the temp file to our local cache atomically readWriteLock.writeLock().lock(); try { BlobUtils.moveTempFileToStore( incomingFile, jobId, blobKey, localFile, log, null); } finally { readWriteLock.writeLock().unlock(); } return localFile; } } catch (Exception e) { log.info( "Failed to copy from blob store. Downloading from BLOB server instead.", e); } final InetSocketAddress currentServerAddress = serverAddress; if (currentServerAddress != null) { // fallback: download from the BlobServer BlobClient.downloadFromBlobServer( jobId, blobKey, incomingFile, currentServerAddress, blobClientConfig, numFetchRetries); readWriteLock.writeLock().lock(); try { BlobUtils.moveTempFileToStore( incomingFile, jobId, blobKey, localFile, log, null); } finally { readWriteLock.writeLock().unlock(); } } else { throw new IOException( "Cannot download from BlobServer, because the server address is unknown."); } return localFile; } finally { // delete incomingFile from a failed download if (!incomingFile.delete() && incomingFile.exists()) { log.warn( "Could not delete the staging file {} for blob key {} and job {}.", incomingFile, blobKey, jobId); } } }
Returns local copy of the file for the BLOB with the given key. <p>The method will first attempt to serve the BLOB from its local cache. If the BLOB is not in the cache, the method will try to download it from this cache's BLOB server via a distributed BLOB store (if available) or direct end-to-end download. @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated) @param blobKey The key of the desired BLOB. @return file referring to the local storage location of the BLOB. @throws IOException Thrown if an I/O error occurs while downloading the BLOBs from the BLOB server.
getFileInternal
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/AbstractBlobCache.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/AbstractBlobCache.java
Apache-2.0
public int getPort() { final InetSocketAddress currentServerAddress = serverAddress; if (currentServerAddress != null) { return currentServerAddress.getPort(); } else { return -1; } }
Returns the port the BLOB server is listening on. @return BLOB server port or {@code -1} if no server address
getPort
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/AbstractBlobCache.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/AbstractBlobCache.java
Apache-2.0
public void track(JobID jobId, BlobKey blobKey, long size) { checkNotNull(jobId); checkNotNull(blobKey); checkArgument(size >= 0); synchronized (lock) { if (caches.putIfAbsent(Tuple2.of(jobId, blobKey), size) == null) { blobKeyByJob.computeIfAbsent(jobId, ignore -> new HashSet<>()).add(blobKey); total += size; if (total > sizeLimit) { LOG.warn( "The overall size of BLOBs in the cache exceeds " + "the limit. Limit = [{}], Current: [{}], " + "The size of next BLOB: [{}].", sizeLimit, total, size); } } else { LOG.warn( "Attempt to track a duplicated BLOB. This may indicate a duplicate upload " + "or a hash collision. Ignoring newest upload. " + "JobID = [{}], BlobKey = [{}]", jobId, blobKey); } } }
Register the BLOB to the tracker.
track
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobCacheSizeTracker.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobCacheSizeTracker.java
Apache-2.0
public void update(JobID jobId, BlobKey blobKey) { checkNotNull(jobId); checkNotNull(blobKey); synchronized (lock) { caches.get(Tuple2.of(jobId, blobKey)); } }
Update the least used index for the BLOBs so that the tracker can easily find out the least recently used BLOBs.
update
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobCacheSizeTracker.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobCacheSizeTracker.java
Apache-2.0
InputStream getInternal(@Nullable JobID jobId, BlobKey blobKey) throws IOException { if (this.socket.isClosed()) { throw new IllegalStateException( "BLOB Client is not connected. " + "Client has been shut down or encountered an error before."); } if (LOG.isDebugEnabled()) { LOG.debug("GET BLOB {}/{} from {}.", jobId, blobKey, socket.getLocalSocketAddress()); } try { OutputStream os = this.socket.getOutputStream(); InputStream is = this.socket.getInputStream(); // Send GET header sendGetHeader(os, jobId, blobKey); receiveAndCheckGetResponse(is); return new BlobInputStream(is, blobKey, os); } catch (Throwable t) { BlobUtils.closeSilently(socket, LOG); throw new IOException("GET operation failed: " + t.getMessage(), t); } }
Downloads the BLOB identified by the given BLOB key from the BLOB server. @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated) @param blobKey blob key associated with the requested file @return an input stream to read the retrieved data from @throws FileNotFoundException if there is no such file; @throws IOException if an I/O error occurs during the download
getInternal
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
Apache-2.0
private static void sendGetHeader( OutputStream outputStream, @Nullable JobID jobId, BlobKey blobKey) throws IOException { checkNotNull(blobKey); checkArgument( jobId != null || blobKey instanceof TransientBlobKey, "permanent BLOBs must be job-related"); // Signal type of operation outputStream.write(GET_OPERATION); // Send job ID and key if (jobId == null) { outputStream.write(JOB_UNRELATED_CONTENT); } else { outputStream.write(JOB_RELATED_CONTENT); outputStream.write(jobId.getBytes()); } blobKey.writeToOutputStream(outputStream); }
Constructs and writes the header data for a GET operation to the given output stream. @param outputStream the output stream to write the header data to @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated) @param blobKey blob key associated with the requested file @throws IOException thrown if an I/O error occurs while writing the header data to the output stream
sendGetHeader
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
Apache-2.0
private static void receiveAndCheckGetResponse(InputStream is) throws IOException { int response = is.read(); if (response < 0) { throw new EOFException("Premature end of response"); } if (response == RETURN_ERROR) { Throwable cause = readExceptionFromStream(is); throw new IOException("Server side error: " + cause.getMessage(), cause); } else if (response != RETURN_OKAY) { throw new IOException("Unrecognized response"); } }
Reads the response from the input stream and throws in case of errors. @param is stream to read from @throws IOException if the response is an error or reading the response failed
receiveAndCheckGetResponse
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
Apache-2.0
BlobKey putBuffer( @Nullable JobID jobId, byte[] value, int offset, int len, BlobKey.BlobType blobType) throws IOException { if (this.socket.isClosed()) { throw new IllegalStateException( "BLOB Client is not connected. " + "Client has been shut down or encountered an error before."); } checkNotNull(value); if (LOG.isDebugEnabled()) { LOG.debug( "PUT BLOB buffer (" + len + " bytes) to " + socket.getLocalSocketAddress() + "."); } try (BlobOutputStream os = new BlobOutputStream(jobId, blobType, socket)) { os.write(value, offset, len); // Receive blob key and compare return os.finish(); } catch (Throwable t) { BlobUtils.closeSilently(socket, LOG); throw new IOException("PUT operation failed: " + t.getMessage(), t); } }
Uploads data from the given byte buffer to the BLOB server. @param jobId the ID of the job the BLOB belongs to (or <tt>null</tt> if job-unrelated) @param value the buffer to read the data from @param offset the read offset within the buffer @param len the number of bytes to read from the buffer @param blobType whether the BLOB should become permanent or transient @return the computed BLOB key of the uploaded BLOB @throws IOException thrown if an I/O error occurs while uploading the data to the BLOB server
putBuffer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
Apache-2.0
BlobKey putInputStream( @Nullable JobID jobId, InputStream inputStream, BlobKey.BlobType blobType) throws IOException { if (this.socket.isClosed()) { throw new IllegalStateException( "BLOB Client is not connected. " + "Client has been shut down or encountered an error before."); } checkNotNull(inputStream); if (LOG.isDebugEnabled()) { LOG.debug("PUT BLOB stream to {}.", socket.getLocalSocketAddress()); } try (BlobOutputStream os = new BlobOutputStream(jobId, blobType, socket)) { IOUtils.copyBytes(inputStream, os, BUFFER_SIZE, false); return os.finish(); } catch (Throwable t) { BlobUtils.closeSilently(socket, LOG); throw new IOException("PUT operation failed: " + t.getMessage(), t); } }
Uploads data from the given input stream to the BLOB server. @param jobId the ID of the job the BLOB belongs to (or <tt>null</tt> if job-unrelated) @param inputStream the input stream to read the data from @param blobType whether the BLOB should become permanent or transient @return the computed BLOB key of the uploaded BLOB @throws IOException thrown if an I/O error occurs while uploading the data to the BLOB server
putInputStream
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
Apache-2.0
public static List<PermanentBlobKey> uploadFiles( InetSocketAddress serverAddress, Configuration clientConfig, JobID jobId, List<Path> files) throws IOException { checkNotNull(jobId); if (files.isEmpty()) { return Collections.emptyList(); } else { List<PermanentBlobKey> blobKeys = new ArrayList<>(); try (BlobClient blobClient = new BlobClient(serverAddress, clientConfig)) { for (final Path file : files) { final PermanentBlobKey key = blobClient.uploadFile(jobId, file); blobKeys.add(key); } } return blobKeys; } }
Uploads the JAR files to the {@link PermanentBlobService} of the {@link BlobServer} at the given address with HA as configured. @param serverAddress Server address of the {@link BlobServer} @param clientConfig Any additional configuration for the blob client @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated) @param files List of files to upload @throws IOException if the upload fails
uploadFiles
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
Apache-2.0
public PermanentBlobKey uploadFile(JobID jobId, Path file) throws IOException { final FileSystem fs = file.getFileSystem(); try (InputStream is = fs.open(file)) { return (PermanentBlobKey) putInputStream(jobId, is, PERMANENT_BLOB); } }
Uploads a single file to the {@link PermanentBlobService} of the given {@link BlobServer}. @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated) @param file file to upload @throws IOException if the upload fails
uploadFile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
Apache-2.0
@VisibleForTesting static BlobKey createKey(BlobType type) { if (type == PERMANENT_BLOB) { return new PermanentBlobKey(); } else { return new TransientBlobKey(); } }
Returns the right {@link BlobKey} subclass for the given parameters. @param type whether the referenced BLOB is permanent or transient @return BlobKey subclass
createKey
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobKey.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobKey.java
Apache-2.0
static BlobKey createKey(BlobType type, byte[] key) { if (type == PERMANENT_BLOB) { return new PermanentBlobKey(key); } else { return new TransientBlobKey(key); } }
Returns the right {@link BlobKey} subclass for the given parameters. @param type whether the referenced BLOB is permanent or transient @param key the actual key data @return BlobKey subclass
createKey
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobKey.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobKey.java
Apache-2.0
/**
 * Creates a {@link BlobKey} of the given BLOB type from the given key data and random
 * component.
 *
 * @param type whether the referenced BLOB is permanent or transient
 * @param key the actual key data
 * @param random the random component of the key
 * @return a new {@link PermanentBlobKey} or {@link TransientBlobKey}, matching {@code type}
 */
static BlobKey createKey(BlobType type, byte[] key, byte[] random) {
    return type == PERMANENT_BLOB
            ? new PermanentBlobKey(key, random)
            : new TransientBlobKey(key, random);
}
Returns the right {@link BlobKey} subclass for the given parameters. @param type whether the referenced BLOB is permanent or transient @param key the actual key data @param random the random component of the key @return BlobKey subclass
createKey
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobKey.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobKey.java
Apache-2.0
/**
 * Returns the content-hash component of this key.
 *
 * <p>NOTE(review): this exposes the internal {@code key} array without a defensive copy —
 * callers must not mutate the returned array.
 *
 * @return the raw hash bytes this key refers to
 */
@VisibleForTesting public byte[] getHash() { return key; }
Returns the hash component of this key. @return a 20 byte hash of the contents the key refers to
getHash
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobKey.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobKey.java
Apache-2.0
/**
 * Returns the (internal) BLOB type which is reflected by the inheriting sub-class.
 *
 * @return BLOB type, i.e. permanent or transient
 */
BlobType getType() { return type; }
Returns the (internal) BLOB type which is reflected by the inheriting sub-class. @return BLOB type, i.e. permanent or transient
getType
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobKey.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobKey.java
Apache-2.0
static BlobKey readFromInputStream(InputStream inputStream) throws IOException { final byte[] key = new byte[BlobKey.SIZE]; final byte[] random = new byte[AbstractID.SIZE]; int bytesRead = 0; // read key while (bytesRead < key.length) { final int read = inputStream.read(key, bytesRead, key.length - bytesRead); if (read < 0) { throw new EOFException("Read an incomplete BLOB key"); } bytesRead += read; } // read BLOB type final BlobType blobType; { final int read = inputStream.read(); if (read < 0) { throw new EOFException("Read an incomplete BLOB type"); } else if (read == TRANSIENT_BLOB.ordinal()) { blobType = TRANSIENT_BLOB; } else if (read == PERMANENT_BLOB.ordinal()) { blobType = PERMANENT_BLOB; } else { throw new IOException("Invalid data received for the BLOB type: " + read); } } // read random component bytesRead = 0; while (bytesRead < AbstractID.SIZE) { final int read = inputStream.read(random, bytesRead, AbstractID.SIZE - bytesRead); if (read < 0) { throw new EOFException("Read an incomplete BLOB key"); } bytesRead += read; } return createKey(blobType, key, random); }
Auxiliary method to read a BLOB key from an input stream. @param inputStream the input stream to read the BLOB key from @return the read BLOB key @throws IOException thrown if an I/O error occurs while reading from the input stream
readFromInputStream
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobKey.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobKey.java
Apache-2.0
private static void sendPutHeader( OutputStream outputStream, @Nullable JobID jobId, BlobKey.BlobType blobType) throws IOException { // Signal type of operation outputStream.write(PUT_OPERATION); if (jobId == null) { outputStream.write(JOB_UNRELATED_CONTENT); } else { outputStream.write(JOB_RELATED_CONTENT); outputStream.write(jobId.getBytes()); } outputStream.write(blobType.ordinal()); }
Constructs and writes the header data for a PUT request to the given output stream. @param outputStream the output stream to write the PUT header data to @param jobId the ID of job the BLOB belongs to (or <tt>null</tt> if job-unrelated) @param blobType whether the BLOB should become permanent or transient @throws IOException thrown if an I/O error occurs while writing the header data to the output stream
sendPutHeader
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobOutputStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobOutputStream.java
Apache-2.0
/**
 * Reads the PUT response from the input stream and throws in case of errors.
 *
 * @param is stream to read from
 * @param md message digest to check the response against
 * @param blobType whether the BLOB should be permanent or transient
 * @return the BLOB key reported by the server
 * @throws IOException if the response is an error, the message digest does not match or
 *     reading the response failed
 */
private static BlobKey receiveAndCheckPutResponse(
        InputStream is, MessageDigest md, BlobKey.BlobType blobType) throws IOException {
    final int response = is.read();

    // guard clauses for the failure responses
    if (response < 0) {
        throw new EOFException("Premature end of response");
    }
    if (response == RETURN_ERROR) {
        Throwable cause = BlobUtils.readExceptionFromStream(is);
        throw new IOException("Server side error: " + cause.getMessage(), cause);
    }
    if (response != RETURN_OKAY) {
        throw new IOException("Unrecognized response: " + response + '.');
    }

    final BlobKey remoteKey = BlobKey.readFromInputStream(is);
    // digest() is computed unconditionally (it also resets the digest state)
    final byte[] localHash = md.digest();

    // a type mismatch or a hash mismatch both indicate a corrupted transfer
    if (blobType != remoteKey.getType()
            || !Arrays.equals(localHash, remoteKey.getHash())) {
        throw new IOException("Detected data corruption during transfer");
    }
    return remoteKey;
}
Reads the response from the input stream and throws in case of errors. @param is stream to read from @param md message digest to check the response against @param blobType whether the BLOB should be permanent or transient @throws IOException if the response is an error, the message digest does not match or reading the response failed
receiveAndCheckPutResponse
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobOutputStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobOutputStream.java
Apache-2.0
/**
 * Returns a file handle to the file associated with the given blob key on the blob server.
 *
 * <p><strong>This is only called from {@link BlobServerConnection} or unit tests.</strong>
 *
 * @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
 * @param key identifying the file
 * @return file handle to the file
 * @throws IOException if creating the directory fails
 */
@VisibleForTesting public File getStorageLocation(@Nullable JobID jobId, BlobKey key) throws IOException { return BlobUtils.getStorageLocation(storageDir.deref(), jobId, key); }
Returns a file handle to the file associated with the given blob key on the blob server. <p><strong>This is only called from {@link BlobServerConnection} or unit tests.</strong> @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated) @param key identifying the file @return file handle to the file @throws IOException if creating the directory fails
getStorageLocation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
Apache-2.0
/**
 * Retrieves the local path of a (job-unrelated) file associated with the given transient
 * blob key.
 *
 * <p>Delegates to the job-agnostic internal lookup with a <tt>null</tt> job ID.
 *
 * @param key blob key associated with the requested file
 * @return file referring to the local storage location of the BLOB
 * @throws IOException Thrown if the file retrieval failed.
 */
@Override public File getFile(TransientBlobKey key) throws IOException { return getFileInternalWithReadLock(null, key); }
Retrieves the local path of a (job-unrelated) file associated with a job and a blob key. <p>The blob server looks the blob key up in its local storage. If the file exists, it is returned. If the file does not exist, it is retrieved from the HA blob store (if available) or a {@link FileNotFoundException} is thrown. @param key blob key associated with the requested file @return file referring to the local storage location of the BLOB @throws IOException Thrown if the file retrieval failed.
getFile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
Apache-2.0
/**
 * Retrieves the local path of a file associated with a job and a transient blob key.
 *
 * @param jobId ID of the job this blob belongs to (must not be null)
 * @param key blob key associated with the requested file
 * @return file referring to the local storage location of the BLOB
 * @throws IOException Thrown if the file retrieval failed.
 */
@Override
public File getFile(JobID jobId, TransientBlobKey key) throws IOException {
    // checkNotNull returns its argument, so the null check and the call can be fused
    return getFileInternalWithReadLock(checkNotNull(jobId), key);
}
Retrieves the local path of a file associated with a job and a blob key. <p>The blob server looks the blob key up in its local storage. If the file exists, it is returned. If the file does not exist, it is retrieved from the HA blob store (if available) or a {@link FileNotFoundException} is thrown. @param jobId ID of the job this blob belongs to @param key blob key associated with the requested file @return file referring to the local storage location of the BLOB @throws IOException Thrown if the file retrieval failed.
getFile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
Apache-2.0
/**
 * Returns the path to a local copy of the file associated with the provided job ID and
 * permanent blob key.
 *
 * @param jobId ID of the job this blob belongs to (must not be null)
 * @param key blob key associated with the requested file
 * @return The path to the file.
 * @throws java.io.FileNotFoundException if the BLOB does not exist;
 * @throws IOException if any other error occurs when retrieving the file
 */
@Override
public File getFile(JobID jobId, PermanentBlobKey key) throws IOException {
    // checkNotNull returns its argument, so the null check and the call can be fused
    return getFileInternalWithReadLock(checkNotNull(jobId), key);
}
Returns the path to a local copy of the file associated with the provided job ID and blob key. <p>We will first attempt to serve the BLOB from the local storage. If the BLOB is not in there, we will try to download it from the HA store. @param jobId ID of the job this blob belongs to @param key blob key associated with the requested file @return The path to the file. @throws java.io.FileNotFoundException if the BLOB does not exist; @throws IOException if any other error occurs when retrieving the file
getFile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
Apache-2.0
/**
 * Retrieves the local path of a file associated with a job and a blob key, while holding
 * the shared read lock.
 *
 * <p>The lock only guards the lookup; the actual resolution (local storage first, then —
 * presumably — the HA store; TODO confirm against {@code getFileInternal}) happens in the
 * delegate.
 *
 * @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
 * @param blobKey blob key associated with the requested file
 * @return file referring to the local storage location of the BLOB
 * @throws IOException Thrown if the file retrieval failed.
 */
private File getFileInternalWithReadLock(@Nullable JobID jobId, BlobKey blobKey) throws IOException {
    checkArgument(blobKey != null, "BLOB key cannot be null.");

    // read lock: concurrent lookups are fine, but must not race with writers moving files
    readWriteLock.readLock().lock();

    try {
        return getFileInternal(jobId, blobKey);
    } finally {
        readWriteLock.readLock().unlock();
    }
}
Retrieves the local path of a file associated with a job and a blob key. <p>The blob server looks the blob key up in its local storage. If the file exists, it is returned. If the file does not exist, it is retrieved from the HA blob store (if available) or a {@link FileNotFoundException} is thrown. @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated) @param blobKey blob key associated with the requested file @return file referring to the local storage location of the BLOB @throws IOException Thrown if the file retrieval failed.
getFileInternalWithReadLock
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
Apache-2.0
/**
 * Uploads the data of the given byte array for the given job to the BLOB server.
 *
 * <p>The data is first written to a temporary staging file while its digest is computed;
 * the staged file is then moved into the blob store under a key derived from the digest.
 * The staging file is deleted on every exit path.
 *
 * @param jobId the ID of the job the BLOB belongs to (or <tt>null</tt> if job-unrelated)
 * @param value the buffer to upload
 * @param blobType whether to make the data permanent or transient
 * @return the computed BLOB key identifying the BLOB on the server
 * @throws IOException thrown if an I/O error occurs while writing it to a local file, or
 *     uploading it to the HA store
 */
private BlobKey putBuffer(@Nullable JobID jobId, byte[] value, BlobKey.BlobType blobType)
        throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Received PUT call for BLOB of job {}.", jobId);
    }

    File incomingFile = createTemporaryFilename();
    MessageDigest md = BlobUtils.createMessageDigest();
    BlobKey blobKey = null;

    // stage the bytes and compute the content digest in one pass
    try (FileOutputStream fos = new FileOutputStream(incomingFile)) {
        md.update(value);
        fos.write(value);
    } catch (IOException ioe) {
        // delete incomingFile from a failed download
        if (!incomingFile.delete() && incomingFile.exists()) {
            LOG.warn("Could not delete the staging file {} for job {}.", incomingFile, jobId);
        }
        throw ioe;
    }

    try {
        // persist file
        blobKey = moveTempFileToStore(incomingFile, jobId, md.digest(), blobType);
        return blobKey;
    } finally {
        // delete incomingFile from a failed download
        // (on success the file has been moved away, so delete() failing is expected then)
        if (!incomingFile.delete() && incomingFile.exists()) {
            LOG.warn(
                    "Could not delete the staging file {} for blob key {} and job {}.",
                    incomingFile,
                    blobKey,
                    jobId);
        }
    }
}
Uploads the data of the given byte array for the given job to the BLOB server. @param jobId the ID of the job the BLOB belongs to @param value the buffer to upload @param blobType whether to make the data permanent or transient @return the computed BLOB key identifying the BLOB on the server @throws IOException thrown if an I/O error occurs while writing it to a local file, or uploading it to the HA store
putBuffer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
Apache-2.0
/**
 * Moves the temporary <tt>incomingFile</tt> to its permanent location where it is available
 * for use.
 *
 * <p>Because a {@link BlobKey} carries a random component, key creation is retried (up to
 * 10 times) until a key is found whose storage file does not yet exist. The existence check
 * and the move are done under the write lock to avoid racing with concurrent puts.
 *
 * @param incomingFile temporary file created during transfer
 * @param jobId ID of the job this blob belongs to or <tt>null</tt> if job-unrelated
 * @param digest BLOB content digest, i.e. hash
 * @param blobType whether this file is a permanent or transient BLOB
 * @return unique BLOB key that identifies the BLOB on the server
 * @throws IOException thrown if an I/O error occurs while moving the file or uploading it
 *     to the HA store, or if no unique key could be found within the retry budget
 */
BlobKey moveTempFileToStore(
        File incomingFile, @Nullable JobID jobId, byte[] digest, BlobKey.BlobType blobType)
        throws IOException {
    int retries = 10;
    int attempt = 0;

    while (true) {
        // add unique component independent of the BLOB content
        BlobKey blobKey = BlobKey.createKey(blobType, digest);
        File storageFile = BlobUtils.getStorageLocation(storageDir.deref(), jobId, blobKey);

        // try again until the key is unique (put the existence check into the lock!)
        readWriteLock.writeLock().lock();
        try {
            if (!storageFile.exists()) {
                // only permanent BLOBs are mirrored to the HA blob store
                BlobUtils.moveTempFileToStore(
                        incomingFile,
                        jobId,
                        blobKey,
                        storageFile,
                        LOG,
                        blobKey instanceof PermanentBlobKey ? blobStore : null);
                // add TTL for transient BLOBs:
                if (blobKey instanceof TransientBlobKey) {
                    // must be inside read or write lock to add a TTL
                    blobExpiryTimes.put(
                            Tuple2.of(jobId, (TransientBlobKey) blobKey),
                            System.currentTimeMillis() + cleanupInterval);
                }
                return blobKey;
            }
        } finally {
            readWriteLock.writeLock().unlock();
        }

        ++attempt;
        if (attempt >= retries) {
            String message =
                    "Failed to find a unique key for BLOB of job "
                            + jobId
                            + " (last tried "
                            + storageFile.getAbsolutePath()
                            + ".";
            LOG.error(message + " No retries left.");
            throw new IOException(message);
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                        "Trying to find a unique key for BLOB of job {} (retry {}, last tried {})",
                        jobId,
                        attempt,
                        storageFile.getAbsolutePath());
            }
        }
    }
}
Moves the temporary <tt>incomingFile</tt> to its permanent location where it is available for use. @param incomingFile temporary file created during transfer @param jobId ID of the job this blob belongs to or <tt>null</tt> if job-unrelated @param digest BLOB content digest, i.e. hash @param blobType whether this file is a permanent or transient BLOB @return unique BLOB key that identifies the BLOB on the server @throws IOException thrown if an I/O error occurs while moving the file or uploading it to the HA store
moveTempFileToStore
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
Apache-2.0
/**
 * Deletes the (job-unrelated) file associated with the blob key in the local storage of the
 * blob server.
 *
 * @param key blob key associated with the file to be deleted
 * @return <tt>true</tt> if the given blob is successfully deleted or non-existing;
 *     <tt>false</tt> otherwise
 */
@Override public boolean deleteFromCache(TransientBlobKey key) { return deleteInternal(null, key); }
Deletes the (job-unrelated) file associated with the blob key in the local storage of the blob server. @param key blob key associated with the file to be deleted @return <tt>true</tt> if the given blob is successfully deleted or non-existing; <tt>false</tt> otherwise
deleteFromCache
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
Apache-2.0
/**
 * Deletes the file associated with the blob key in the local storage of the blob server.
 *
 * @param jobId ID of the job this blob belongs to (must not be null)
 * @param key blob key associated with the file to be deleted
 * @return <tt>true</tt> if the given blob is successfully deleted or non-existing;
 *     <tt>false</tt> otherwise
 */
@Override
public boolean deleteFromCache(JobID jobId, TransientBlobKey key) {
    // checkNotNull returns its argument, so the null check and the call can be fused
    return deleteInternal(checkNotNull(jobId), key);
}
Deletes the file associated with the blob key in the local storage of the blob server. @param jobId ID of the job this blob belongs to @param key blob key associated with the file to be deleted @return <tt>true</tt> if the given blob is successfully deleted or non-existing; <tt>false</tt> otherwise
deleteFromCache
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
Apache-2.0
/**
 * Delete the uploaded data with the given {@link JobID} and {@link PermanentBlobKey}.
 *
 * @param jobId ID of the job this blob belongs to (must not be null)
 * @param key the key of this blob
 * @return <tt>true</tt> if the given blob is successfully deleted or non-existing;
 *     <tt>false</tt> otherwise
 */
@Override
public boolean deletePermanent(JobID jobId, PermanentBlobKey key) {
    // reject null early, consistent with the other job-scoped methods
    // (deleteFromCache(JobID, ...), getFile(JobID, ...)); permanent BLOBs are always
    // job-related, so a null jobId would silently address the wrong storage location
    checkNotNull(jobId);
    return deleteInternal(jobId, key);
}
Delete the uploaded data with the given {@link JobID} and {@link PermanentBlobKey}. @param jobId ID of the job this blob belongs to @param key the key of this blob @return <tt>true</tt> if the given blob is successfully deleted or non-existing; <tt>false</tt> otherwise
deletePermanent
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
Apache-2.0