code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/**
 * Creates a {@link DefaultLeaderRetrievalService} backed by a
 * {@link ZooKeeperLeaderRetrievalDriver}, using an empty path and a default configuration.
 *
 * @param client the {@link CuratorFramework} ZooKeeper client to use
 * @return a new {@link DefaultLeaderRetrievalService} instance
 */
public static DefaultLeaderRetrievalService createLeaderRetrievalService(
        final CuratorFramework client) {
    // Delegate to the fully-parameterized overload with neutral defaults.
    return createLeaderRetrievalService(client, "", new Configuration());
}
Creates a {@link DefaultLeaderRetrievalService} instance with {@link ZooKeeperLeaderRetrievalDriver}. @param client The {@link CuratorFramework} ZooKeeper client to use @return {@link DefaultLeaderRetrievalService} instance.
createLeaderRetrievalService
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0
/**
 * Creates a {@link DefaultLeaderRetrievalService} backed by a
 * {@link ZooKeeperLeaderRetrievalDriver}.
 *
 * @param client the {@link CuratorFramework} ZooKeeper client to use
 * @param path the path for the leader retrieval
 * @param configuration configuration for further config options
 * @return a new {@link DefaultLeaderRetrievalService} instance
 */
public static DefaultLeaderRetrievalService createLeaderRetrievalService(
        final CuratorFramework client, final String path, final Configuration configuration) {
    final ZooKeeperLeaderRetrievalDriverFactory driverFactory =
            createLeaderRetrievalDriverFactory(client, path, configuration);
    return new DefaultLeaderRetrievalService(driverFactory);
}
Creates a {@link DefaultLeaderRetrievalService} instance with {@link ZooKeeperLeaderRetrievalDriver}. @param client The {@link CuratorFramework} ZooKeeper client to use @param path The path for the leader retrieval @param configuration configuration for further config options @return {@link DefaultLeaderRetrievalService} instance.
createLeaderRetrievalService
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0
/**
 * Creates a {@link LeaderRetrievalDriverFactory} implemented by ZooKeeper, rooted at an empty
 * path.
 *
 * @param client the {@link CuratorFramework} ZooKeeper client to use
 * @return a {@link ZooKeeperLeaderRetrievalDriverFactory} instance
 */
public static ZooKeeperLeaderRetrievalDriverFactory createLeaderRetrievalDriverFactory(
        final CuratorFramework client) {
    // Delegate to the path-aware overload with an empty path.
    return createLeaderRetrievalDriverFactory(client, "");
}
Creates a {@link LeaderRetrievalDriverFactory} implemented by ZooKeeper. @param client The {@link CuratorFramework} ZooKeeper client to use @return {@link LeaderRetrievalDriverFactory} instance.
createLeaderRetrievalDriverFactory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0
/**
 * Creates a {@link LeaderRetrievalDriverFactory} implemented by ZooKeeper with a default
 * configuration.
 *
 * @param client the {@link CuratorFramework} ZooKeeper client to use
 * @param path the parent path that shall be used by the client
 * @return a {@link ZooKeeperLeaderRetrievalDriverFactory} instance
 */
public static ZooKeeperLeaderRetrievalDriverFactory createLeaderRetrievalDriverFactory(
        final CuratorFramework client, String path) {
    return createLeaderRetrievalDriverFactory(client, path, new Configuration());
}
Creates a {@link LeaderRetrievalDriverFactory} implemented by ZooKeeper. @param client The {@link CuratorFramework} ZooKeeper client to use @param path The parent path that shall be used by the client. @return {@link LeaderRetrievalDriverFactory} instance.
createLeaderRetrievalDriverFactory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0
/**
 * Creates a {@link DefaultCompletedCheckpointStore} instance backed by a
 * {@link ZooKeeperStateHandleStore}: checkpoint metadata is persisted to the file system and a
 * retrievable handle to it is stored in ZooKeeper.
 *
 * @param client the {@link CuratorFramework} ZooKeeper client to use
 * @param configuration {@link Configuration} object
 * @param maxNumberOfCheckpointsToRetain the maximum number of checkpoints to retain
 * @param sharedStateRegistryFactory factory for the shared-state registry
 * @param ioExecutor executor used by the shared-state registry
 * @param executor executor to run ZooKeeper callbacks
 * @param recoveryClaimMode the mode in which the job is being restored
 * @return a {@link CompletedCheckpointStore} instance
 * @throws Exception if the completed checkpoint store cannot be created
 */
public static CompletedCheckpointStore createCompletedCheckpoints(
        CuratorFramework client,
        Configuration configuration,
        int maxNumberOfCheckpointsToRetain,
        SharedStateRegistryFactory sharedStateRegistryFactory,
        Executor ioExecutor,
        Executor executor,
        RecoveryClaimMode recoveryClaimMode)
        throws Exception {

    checkNotNull(configuration, "Configuration");

    // The actual checkpoint payload lives in the HA storage directory on the file system;
    // ZooKeeper only keeps the (small) retrievable handles.
    RetrievableStateStorageHelper<CompletedCheckpoint> stateStorage =
            createFileSystemStateStorage(configuration, HA_STORAGE_COMPLETED_CHECKPOINT);

    final ZooKeeperStateHandleStore<CompletedCheckpoint> stateHandleStore =
            createZooKeeperStateHandleStore(client, getCheckpointsPath(), stateStorage);

    // Recover whatever completed checkpoints are already present in ZooKeeper.
    Collection<CompletedCheckpoint> recoveredCheckpoints =
            DefaultCompletedCheckpointStoreUtils.retrieveCompletedCheckpoints(
                    stateHandleStore, ZooKeeperCheckpointStoreUtil.INSTANCE);

    final CompletedCheckpointStore checkpointStore =
            new DefaultCompletedCheckpointStore<>(
                    maxNumberOfCheckpointsToRetain,
                    stateHandleStore,
                    ZooKeeperCheckpointStoreUtil.INSTANCE,
                    recoveredCheckpoints,
                    sharedStateRegistryFactory.create(
                            ioExecutor, recoveredCheckpoints, recoveryClaimMode),
                    executor);

    LOG.info(
            "Initialized {} in '{}' with {}.",
            DefaultCompletedCheckpointStore.class.getSimpleName(),
            stateHandleStore,
            getCheckpointsPath());
    return checkpointStore;
}
Creates a {@link DefaultCompletedCheckpointStore} instance with {@link ZooKeeperStateHandleStore}. @param client The {@link CuratorFramework} ZooKeeper client to use @param configuration {@link Configuration} object @param maxNumberOfCheckpointsToRetain The maximum number of checkpoints to retain @param sharedStateRegistryFactory factory for creating the shared state registry @param ioExecutor executor used by the shared state registry @param executor to run ZooKeeper callbacks @param recoveryClaimMode the mode in which the job is being restored @return {@link DefaultCompletedCheckpointStore} instance @throws Exception if the completed checkpoint store cannot be created
createCompletedCheckpoints
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0
/**
 * Returns the ZooKeeper path for the given job: its {@link JobID} rendered as a string with a
 * leading slash.
 *
 * @param jobId the job identifier, must not be {@code null}
 * @return the path of the form {@code /<jobId>}
 */
public static String getPathForJob(JobID jobId) {
    checkNotNull(jobId, "Job ID");
    return String.format("/%s", jobId);
}
Returns the JobID as a String (with leading slash).
getPathForJob
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0
/**
 * Creates an instance of {@link ZooKeeperStateHandleStore} whose client is namespaced to the
 * given path (the path is created if necessary).
 *
 * @param client ZK client
 * @param path path to use for the client namespace
 * @param stateStorage helper that persists the actual state; its returned state handle is then
 *     written to ZooKeeper
 * @param <T> type of state
 * @return a {@link ZooKeeperStateHandleStore} instance
 * @throws Exception on ZK errors
 */
public static <T extends Serializable>
        ZooKeeperStateHandleStore<T> createZooKeeperStateHandleStore(
                final CuratorFramework client,
                final String path,
                final RetrievableStateStorageHelper<T> stateStorage)
                throws Exception {
    final CuratorFramework namespacedClient = useNamespaceAndEnsurePath(client, path);
    return new ZooKeeperStateHandleStore<>(namespacedClient, stateStorage);
}
Creates an instance of {@link ZooKeeperStateHandleStore}. @param client ZK client @param path Path to use for the client namespace @param stateStorage RetrievableStateStorageHelper that persist the actual state and whose returned state handle is then written to ZooKeeper @param <T> Type of state @return {@link ZooKeeperStateHandleStore} instance @throws Exception ZK errors
createZooKeeperStateHandleStore
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0
/**
 * Creates a {@link ZooKeeperCheckpointIDCounter} instance with a default connection-state
 * listener.
 *
 * @param client the {@link CuratorFramework} ZooKeeper client to use
 * @return a {@link ZooKeeperCheckpointIDCounter} instance
 */
public static ZooKeeperCheckpointIDCounter createCheckpointIDCounter(CuratorFramework client) {
    return new ZooKeeperCheckpointIDCounter(
            client, new DefaultLastStateConnectionStateListener());
}
Creates a {@link ZooKeeperCheckpointIDCounter} instance. @param client The {@link CuratorFramework} ZooKeeper client to use @return {@link ZooKeeperCheckpointIDCounter} instance
createCheckpointIDCounter
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0
/**
 * Creates a {@link FileSystemStateStorageHelper} rooted at the cluster's high-availability
 * storage path.
 *
 * @param configuration {@link Configuration} object
 * @param prefix prefix for the created files
 * @param <T> type of the state objects
 * @return a {@link FileSystemStateStorageHelper} instance
 * @throws IOException if the file system state storage cannot be created
 */
public static <T extends Serializable>
        FileSystemStateStorageHelper<T> createFileSystemStateStorage(
                Configuration configuration, String prefix) throws IOException {
    return new FileSystemStateStorageHelper<>(
            HighAvailabilityServicesUtils.getClusterHighAvailableStoragePath(configuration),
            prefix);
}
Creates a {@link FileSystemStateStorageHelper} instance. @param configuration {@link Configuration} object @param prefix Prefix for the created files @param <T> Type of the state objects @return {@link FileSystemStateStorageHelper} instance @throws IOException if file system state storage cannot be created
createFileSystemStateStorage
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0
/**
 * Creates a ZooKeeper path of the form {@code "/a/b/.../z"} from the given segments.
 *
 * <p>Leading/trailing slashes on each segment are trimmed and empty segments are dropped, so
 * e.g. {@code ("/a/", "", "b")} yields {@code "/a/b"}.
 *
 * @param paths the path segments to join
 * @return the normalized absolute path
 */
public static String generateZookeeperPath(String... paths) {
    return Arrays.stream(paths)
            .map(ZooKeeperUtils::trimSlashes)
            .filter(segment -> !segment.isEmpty())
            .collect(Collectors.joining("/", "/", ""));
}
Creates a ZooKeeper path of the form "/a/b/.../z".
generateZookeeperPath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0
public static CuratorFramework useNamespaceAndEnsurePath( final CuratorFramework client, final String path) throws Exception { checkNotNull(client, "client must not be null"); checkNotNull(path, "path must not be null"); // Ensure that the checkpoints path exists client.newNamespaceAwareEnsurePath(path).ensure(client.getZookeeperClient()); // All operations will have the path as root final String newNamespace = generateZookeeperPath(client.getNamespace(), path); return client.usingNamespace( // Curator prepends a '/' manually and throws an Exception if the // namespace starts with a '/'. trimStartingSlash(newNamespace)); }
Returns a facade of the client that uses the specified namespace, and ensures that all nodes in the path exist. @param client ZK client @param path the new namespace @return ZK Client that uses the new namespace @throws Exception ZK errors
useNamespaceAndEnsurePath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0
/**
 * Generates and logs the JVM parameters and dynamic configs of task executor resources.
 *
 * <p>The returned list contains exactly two entries: the JVM parameter string followed by the
 * dynamic config string.
 *
 * @param configuration configuration to derive the task executor process spec from
 * @return JVM parameters and dynamic configs, in that order
 */
private static List<String> getTmResourceParams(Configuration configuration) {
    TaskExecutorProcessSpec processSpec =
            TaskExecutorProcessUtils.processSpecFromConfig(configuration);
    logTaskExecutorConfiguration(processSpec);
    final String jvmParams = ProcessMemoryUtils.generateJvmParametersStr(processSpec);
    final String dynamicConfigs = TaskExecutorProcessUtils.generateDynamicConfigsStr(processSpec);
    return Arrays.asList(jvmParams, dynamicConfigs);
}
Generate and print JVM parameters and dynamic configs of task executor resources. The last two lines of the output should be JVM parameters and dynamic configs respectively.
getTmResourceParams
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/bash/BashJavaUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/bash/BashJavaUtils.java
Apache-2.0
/**
 * Checks whether the profiling request has finished.
 *
 * <p>A request with no underlying future is considered done.
 *
 * @return {@code true} if the request has finished
 */
public boolean isDone() {
    if (this.future == null) {
        return true;
    }
    return this.future.isDone();
}
Verify the profiling request was done or not. @return true represents the request was finished.
isDone
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/profiler/ProfilingService.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/profiler/ProfilingService.java
Apache-2.0
public boolean cancel() { if (isDone()) { return true; } if (!future.cancel(true)) { return false; } // If cancelled, we have to trigger handler immediately for stopping profiler. handler.run(); return true; }
Try to cancel the Profiling Request. Note that we'll trigger the handler once cancelled successfully. @return true if cancelled, false for failed.
cancel
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/profiler/ProfilingService.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/profiler/ProfilingService.java
Apache-2.0
/**
 * Requests the {@link ArchivedExecutionGraph} for the given job. If there is no such graph, the
 * future is completed with a {@link FlinkJobNotFoundException}.
 *
 * @param jobId identifying the job whose {@link ArchivedExecutionGraph} is requested
 * @param timeout for the asynchronous operation
 * @return future containing the {@link ArchivedExecutionGraph} for the given jobId, otherwise
 *     {@link FlinkJobNotFoundException}
 */
default CompletableFuture<ArchivedExecutionGraph> requestJob(
        JobID jobId, @RpcTimeout Duration timeout) {
    return requestExecutionGraphInfo(jobId, timeout)
            .thenApply(info -> info.getArchivedExecutionGraph());
}
Requests the {@link ArchivedExecutionGraph} for the given jobId. If there is no such graph, then the future is completed with a {@link FlinkJobNotFoundException}. @param jobId identifying the job whose {@link ArchivedExecutionGraph} is requested @param timeout for the asynchronous operation @return Future containing the {@link ArchivedExecutionGraph} for the given jobId, otherwise {@link FlinkJobNotFoundException}
requestJob
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
Apache-2.0
/**
 * Triggers a checkpoint of the given type. Optional operation; the default implementation is
 * unsupported.
 *
 * @param operationKey the key of the operation, for deduplication purposes
 * @param checkpointType checkpoint backup type (configured / full / incremental)
 * @param timeout timeout for the asynchronous operation
 * @return future acknowledging that the checkpoint was triggered
 * @throws UnsupportedOperationException always, unless overridden
 */
default CompletableFuture<Acknowledge> triggerCheckpoint(
        AsynchronousJobOperationKey operationKey,
        CheckpointType checkpointType,
        @RpcTimeout Duration timeout) {
    throw new UnsupportedOperationException();
}
Triggers a checkpoint with the given savepoint directory as a target. @param operationKey the key of the operation, for deduplication purposes @param checkpointType checkpoint backup type (configured / full / incremental) @param timeout Timeout for the asynchronous operation @return A future to the {@link CompletedCheckpoint#getExternalPointer() external pointer} of the savepoint.
triggerCheckpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
Apache-2.0
/**
 * Gets the status of a checkpoint triggered under the specified operation key. Optional
 * operation; the default implementation is unsupported.
 *
 * @param operationKey key of the operation
 * @return future which completes immediately with the status, or fails if no operation is
 *     registered for the key
 * @throws UnsupportedOperationException always, unless overridden
 */
default CompletableFuture<OperationResult<Long>> getTriggeredCheckpointStatus(
        AsynchronousJobOperationKey operationKey) {
    throw new UnsupportedOperationException();
}
Get the status of a checkpoint triggered under the specified operation key. @param operationKey key of the operation @return Future which completes immediately with the status, or fails if no operation is registered for the key
getTriggeredCheckpointStatus
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
Apache-2.0
/**
 * Triggers a savepoint with the given target directory, returning a future that completes once
 * the operation is started. Optional operation; the default implementation is unsupported.
 *
 * @param operationKey the key of the operation, for deduplication purposes
 * @param targetDirectory target directory for the savepoint
 * @param formatType binary format of the savepoint
 * @param savepointMode context of the savepoint operation
 * @param timeout timeout for the asynchronous operation
 * @return future which is completed once the operation is triggered successfully
 * @throws UnsupportedOperationException always, unless overridden
 */
default CompletableFuture<Acknowledge> triggerSavepoint(
        AsynchronousJobOperationKey operationKey,
        String targetDirectory,
        SavepointFormatType formatType,
        TriggerSavepointMode savepointMode,
        @RpcTimeout Duration timeout) {
    throw new UnsupportedOperationException();
}
Triggers a savepoint with the given savepoint directory as a target, returning a future that completes when the operation is started. @param operationKey the key of the operation, for deduplication purposes @param targetDirectory Target directory for the savepoint. @param formatType Binary format of the savepoint. @param savepointMode context of the savepoint operation @param timeout Timeout for the asynchronous operation @return Future which is completed once the operation is triggered successfully
triggerSavepoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
Apache-2.0
/**
 * Stops the job with a savepoint, returning a future that completes when the operation is
 * started. Optional operation; the default implementation is unsupported.
 *
 * @param operationKey key of the operation, for deduplication
 * @param targetDirectory target directory for the savepoint
 * @param formatType binary format of the savepoint
 * @param savepointMode context of the savepoint operation
 * @param timeout for the rpc call
 * @return future which is completed once the operation is triggered successfully
 * @throws UnsupportedOperationException always, unless overridden
 */
default CompletableFuture<Acknowledge> stopWithSavepoint(
        final AsynchronousJobOperationKey operationKey,
        final String targetDirectory,
        SavepointFormatType formatType,
        final TriggerSavepointMode savepointMode,
        @RpcTimeout final Duration timeout) {
    throw new UnsupportedOperationException();
}
Stops the job with a savepoint, returning a future that completes when the operation is started. @param operationKey key of the operation, for deduplication @param targetDirectory Target directory for the savepoint. @param formatType Binary format of the savepoint. @param savepointMode context of the savepoint operation @param timeout for the rpc call @return Future which is completed once the operation is triggered successfully
stopWithSavepoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
Apache-2.0
/**
 * Gets the status of a savepoint triggered under the specified operation key. Optional
 * operation; the default implementation is unsupported.
 *
 * @param operationKey key of the operation
 * @return future which completes immediately with the status, or fails if no operation is
 *     registered for the key
 * @throws UnsupportedOperationException always, unless overridden
 */
default CompletableFuture<OperationResult<String>> getTriggeredSavepointStatus(
        AsynchronousJobOperationKey operationKey) {
    throw new UnsupportedOperationException();
}
Get the status of a savepoint triggered under the specified operation key. @param operationKey key of the operation @return Future which completes immediately with the status, or fails if no operation is registered for the key
getTriggeredSavepointStatus
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
Apache-2.0
/**
 * Disposes the given savepoint. Optional operation; the default implementation is unsupported.
 *
 * @param savepointPath identifying the savepoint to dispose
 * @param timeout RPC timeout
 * @return future acknowledging the disposal succeeded
 * @throws UnsupportedOperationException always, unless overridden
 */
default CompletableFuture<Acknowledge> disposeSavepoint(
        final String savepointPath, @RpcTimeout final Duration timeout) {
    throw new UnsupportedOperationException();
}
Dispose the given savepoint. @param savepointPath identifying the savepoint to dispose @param timeout RPC timeout @return A future acknowledge if the disposal succeeded
disposeSavepoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
Apache-2.0
/**
 * Reports a client heartbeat to the dispatcher for aliveness tracking. The default
 * implementation is a no-op that completes immediately.
 *
 * @param jobId the job the heartbeat belongs to
 * @param expiredTimestamp when the heartbeat expires
 * @param timeout for the asynchronous operation
 * @return future completed once the heartbeat is reported
 */
default CompletableFuture<Void> reportJobClientHeartbeat(
        JobID jobId, long expiredTimestamp, Duration timeout) {
    return FutureUtils.completedVoidFuture();
}
The client reports the heartbeat to the dispatcher for aliveness.
reportJobClientHeartbeat
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
Apache-2.0
/**
 * Reads the current {@link JobResourceRequirements job resource requirements} for a given job.
 * Optional operation; the default implementation is unsupported.
 *
 * @param jobId job to read the resource requirements for
 * @return future which contains the current resource requirements
 * @throws UnsupportedOperationException always, unless overridden
 */
default CompletableFuture<JobResourceRequirements> requestJobResourceRequirements(JobID jobId) {
    throw new UnsupportedOperationException("Operation is not yet implemented.");
}
Read current {@link JobResourceRequirements job resource requirements} for a given job. @param jobId job to read the resource requirements for @return Future which that contains current resource requirements.
requestJobResourceRequirements
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
Apache-2.0
/**
 * Updates the {@link JobResourceRequirements job resource requirements} for a given job. When
 * the returned future completes, the requirements have been updated and persisted in HA, but
 * the job may not have been rescaled yet. Optional operation; the default implementation is
 * unsupported.
 *
 * @param jobId job the given requirements belong to
 * @param jobResourceRequirements new resource requirements for the job
 * @return future which is completed successfully when requirements are updated
 * @throws UnsupportedOperationException always, unless overridden
 */
default CompletableFuture<Acknowledge> updateJobResourceRequirements(
        JobID jobId, JobResourceRequirements jobResourceRequirements) {
    throw new UnsupportedOperationException("Operation is not yet implemented.");
}
Update {@link JobResourceRequirements job resource requirements} for a given job. When the returned future is complete the requirements have been updated and were persisted in HA, but the job may not have been rescaled (yet). @param jobId job the given requirements belong to @param jobResourceRequirements new resource requirements for the job @return Future which is completed successfully when requirements are updated
updateJobResourceRequirements
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/RestfulGateway.java
Apache-2.0
/**
 * Returns the shared no-op {@link WebMonitorExtension} that registers no handlers.
 *
 * @return the empty extension singleton
 */
static WebMonitorExtension empty() {
    return EmptyWebMonitorExtension.INSTANCE;
}
Gets the collection of extension handlers to register at the {@link WebMonitorEndpoint}. @return Collection of handlers to register at the {@link WebMonitorEndpoint}.
empty
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorExtension.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorExtension.java
Apache-2.0
public static LogFileLocation find(Configuration config) { final String logEnv = "log.file"; String logFilePath = System.getProperty(logEnv); if (logFilePath == null) { LOG.warn("Log file environment variable '{}' is not set.", logEnv); logFilePath = config.get(WebOptions.LOG_PATH); } // not configured, cannot serve log files if (logFilePath == null || logFilePath.length() < 4) { LOG.warn( "JobManager log files are unavailable in the web dashboard. " + "Log file location not found in environment variable '{}' or configuration key '{}'.", logEnv, WebOptions.LOG_PATH.key()); return new LogFileLocation(null, null, null); } String outFilePath = logFilePath.substring(0, logFilePath.length() - 3).concat("out"); File logFile = resolveFileLocation(logFilePath); File logDir = null; if (logFile != null) { logDir = resolveFileLocation(logFile.getParent()); } LOG.info("Determined location of main cluster component log file: {}", logFilePath); LOG.info("Determined location of main cluster component stdout file: {}", outFilePath); return new LogFileLocation(logFile, resolveFileLocation(outFilePath), logDir); }
Finds the Flink log directory using log.file Java property that is set during startup.
find
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorUtils.java
Apache-2.0
/**
 * Checks whether the flink-runtime-web dependency is available and, if so, returns a
 * {@link StaticFileServerHandler} which can serve the static file contents.
 *
 * @param leaderRetriever to be used by the StaticFileServerHandler
 * @param timeout for lookup requests
 * @param tmpDir to be used by the StaticFileServerHandler to store temporary files
 * @param <T> type of the gateway to retrieve
 * @return StaticFileServerHandler if flink-runtime-web is in the classpath; otherwise
 *     {@code Optional.empty()}
 * @throws IOException if the StaticFileServerHandler cannot be created
 */
public static <T extends RestfulGateway> Optional<StaticFileServerHandler<T>> tryLoadWebContent(
        GatewayRetriever<? extends T> leaderRetriever, Duration timeout, File tmpDir)
        throws IOException {
    return isFlinkRuntimeWebInClassPath()
            ? Optional.of(new StaticFileServerHandler<>(leaderRetriever, timeout, tmpDir))
            : Optional.empty();
}
Checks whether the flink-runtime-web dependency is available and if so returns a StaticFileServerHandler which can serve the static file contents. @param leaderRetriever to be used by the StaticFileServerHandler @param timeout for lookup requests @param tmpDir to be used by the StaticFileServerHandler to store temporary files @param <T> type of the gateway to retrieve @return StaticFileServerHandler if flink-runtime-web is in the classpath; Otherwise Optional.empty @throws IOException if we cannot create the StaticFileServerHandler
tryLoadWebContent
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorUtils.java
Apache-2.0
/**
 * Loads the {@link WebMonitorExtension} which enables web submission. The extension lives in
 * the optional flink-runtime-web module and is therefore instantiated reflectively.
 *
 * @param leaderRetriever to retrieve the leader
 * @param timeout for asynchronous requests
 * @param responseHeaders for the web submission handlers
 * @param localAddressFuture of the underlying REST server endpoint
 * @param uploadDir where the web submission handlers store uploaded jars
 * @param executor to run asynchronous operations
 * @param configuration used to instantiate the web submission extension
 * @return web submission extension
 * @throws FlinkException if flink-runtime-web is missing from the classpath or the extension
 *     could not be instantiated
 */
public static WebMonitorExtension loadWebSubmissionExtension(
        GatewayRetriever<? extends DispatcherGateway> leaderRetriever,
        Duration timeout,
        Map<String, String> responseHeaders,
        CompletableFuture<String> localAddressFuture,
        java.nio.file.Path uploadDir,
        Executor executor,
        Configuration configuration)
        throws FlinkException {

    if (!isFlinkRuntimeWebInClassPath()) {
        throw new FlinkException(
                "The module flink-runtime-web could not be found in the class path. Please add "
                        + "this jar in order to enable web based job submission.");
    }

    try {
        // The extension class is only present when flink-runtime-web is on the classpath,
        // so look it up and construct it reflectively.
        final Constructor<?> extensionConstructor =
                Class.forName("org.apache.flink.runtime.webmonitor.WebSubmissionExtension")
                        .getConstructor(
                                Configuration.class,
                                GatewayRetriever.class,
                                Map.class,
                                CompletableFuture.class,
                                java.nio.file.Path.class,
                                Executor.class,
                                Duration.class);

        return (WebMonitorExtension)
                extensionConstructor.newInstance(
                        configuration,
                        leaderRetriever,
                        responseHeaders,
                        localAddressFuture,
                        uploadDir,
                        executor,
                        timeout);
    } catch (ClassNotFoundException
            | NoSuchMethodException
            | InstantiationException
            | InvocationTargetException
            | IllegalAccessException e) {
        throw new FlinkException("Could not load web submission extension.", e);
    }
}
Loads the {@link WebMonitorExtension} which enables web submission. @param leaderRetriever to retrieve the leader @param timeout for asynchronous requests @param responseHeaders for the web submission handlers @param localAddressFuture of the underlying REST server endpoint @param uploadDir where the web submission handler store uploaded jars @param executor to run asynchronous operations @param configuration used to instantiate the web submission extension @return Web submission extension @throws FlinkException if the web submission extension could not be loaded
loadWebSubmissionExtension
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorUtils.java
Apache-2.0
private static boolean isFlinkRuntimeWebInClassPath() { try { Class.forName(WEB_FRONTEND_BOOTSTRAP_CLASS_FQN); return true; } catch (ClassNotFoundException e) { // class not found means that there is no flink-runtime-web in the classpath return false; } }
Returns {@code true} if the optional dependency {@code flink-runtime-web} is in the classpath.
isFlinkRuntimeWebInClassPath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorUtils.java
Apache-2.0
/**
 * Returns a {@link Collection} of {@link ArchivedJson}s containing JSON responses and their
 * respective REST URLs for a given job, delegating to the
 * {@code AccessExecutionGraph}-based overload.
 *
 * @param executionGraphInfo execution graph info whose archived graph is used to generate the
 *     responses
 * @return collection containing an ArchivedJson for every response that could be generated for
 *     the given job
 * @throws IOException if the JSON generation fails
 */
@Override
default Collection<ArchivedJson> archiveJsonWithPath(ExecutionGraphInfo executionGraphInfo)
        throws IOException {
    return archiveJsonWithPath(executionGraphInfo.getArchivedExecutionGraph());
}
Returns a {@link Collection} of {@link ArchivedJson}s containing JSON responses and their respective REST URL for a given job. <p>The collection should contain one entry for every response that could be generated for the given job, for example one entry for each task. The REST URLs should be unique and must not contain placeholders. @param executionGraphInfo ExecutionGraphInfo whose archived execution graph is used to generate the responses @return Collection containing an ArchivedJson for every response that could be generated for the given job @throws IOException thrown if the JSON generation fails
archiveJsonWithPath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/history/OnlyExecutionGraphJsonArchivist.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/history/OnlyExecutionGraphJsonArchivist.java
Apache-2.0
default Optional<T> getNow() { CompletableFuture<T> leaderFuture = getFuture(); if (leaderFuture != null) { if (leaderFuture.isCompletedExceptionally() || leaderFuture.isCancelled()) { return Optional.empty(); } else if (leaderFuture.isDone()) { try { return Optional.of(leaderFuture.get()); } catch (Exception e) { // this should never happen throw new FlinkRuntimeException( "Unexpected error while accessing the retrieved gateway.", e); } } else { return Optional.empty(); } } else { return Optional.empty(); } }
Returns the currently retrieved gateway if there is such an object. Otherwise it returns an empty optional. @return Optional object to retrieve
getNow
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/retriever/GatewayRetriever.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/retriever/GatewayRetriever.java
Apache-2.0
/**
 * Returns the current leader information if available; otherwise an empty optional.
 *
 * @return the current leader (address, session id) if available, otherwise empty
 * @throws Exception if the leader future has been completed with an exception
 */
public Optional<Tuple2<String, UUID>> getLeaderNow() throws Exception {
    CompletableFuture<Tuple2<String, UUID>> leaderFuture = this.atomicLeaderFuture.get();
    if (leaderFuture == null || !leaderFuture.isDone()) {
        return Optional.empty();
    }
    return Optional.of(leaderFuture.get());
}
Returns the current leader information if available. Otherwise it returns an empty optional. @return The current leader information if available. Otherwise it returns an empty optional. @throws Exception if the leader future has been completed with an exception
getLeaderNow
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/retriever/LeaderRetriever.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/retriever/LeaderRetriever.java
Apache-2.0
/**
 * Creates the gateway future by connecting to the leader's RPC endpoint, retrying with delay on
 * failure.
 *
 * @param leaderFuture future with the leader's (address, session id) pair
 * @return future containing the connected gateway
 */
@Override
protected CompletableFuture<T> createGateway(
        CompletableFuture<Tuple2<String, UUID>> leaderFuture) {
    return FutureUtils.retryWithDelay(
            () ->
                    leaderFuture.thenCompose(
                            (Tuple2<String, UUID> addressLeaderTuple) -> {
                                // Map the leader session id to the fencing token expected
                                // by the gateway before connecting.
                                final F fencingToken =
                                        fencingTokenMapper.apply(addressLeaderTuple.f1);
                                return rpcService.connect(
                                        addressLeaderTuple.f0, fencingToken, gatewayType);
                            }),
            retryStrategy,
            rpcService.getScheduledExecutor());
}
{@link LeaderGatewayRetriever} implementation using the {@link RpcService}. @param <F> type of the fencing token @param <T> type of the fenced gateway to retrieve
createGateway
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/retriever/impl/RpcGatewayRetriever.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/retriever/impl/RpcGatewayRetriever.java
Apache-2.0
/**
 * Retrieves the {@link MetricQueryServiceGateway} at the given RPC address.
 *
 * @param rpcServiceAddress address of the metric query service to connect to
 * @return future containing the connected gateway
 */
@Override
public CompletableFuture<MetricQueryServiceGateway> retrieveService(String rpcServiceAddress) {
    return rpcService.connect(rpcServiceAddress, MetricQueryServiceGateway.class);
}
{@link MetricQueryServiceRetriever} implementation for rpc based {@link MetricQueryService}.
retrieveService
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/retriever/impl/RpcMetricQueryServiceRetriever.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/retriever/impl/RpcMetricQueryServiceRetriever.java
Apache-2.0
/**
 * Handles a failed stats response by cancelling the corresponding unfinished pending request.
 *
 * <p>No-op if the coordinator is shut down or no pending request exists for the id.
 *
 * @param requestId ID of the request to cancel
 * @param cause cause of the cancellation (may be {@code null})
 */
public void handleFailedResponse(int requestId, @Nullable Throwable cause) {
    synchronized (lock) {
        if (isShutDown) {
            return;
        }
        PendingStatsRequest<T, V> failed = pendingRequests.remove(requestId);
        if (failed == null) {
            // Unknown or already-completed request; nothing to cancel.
            return;
        }
        log.info("Cancelling request {}", requestId, cause);
        failed.discard(cause);
        // Remember the id so late responses for it can be recognized and ignored.
        rememberRecentRequestId(requestId);
    }
}
Handles the failed stats response by canceling the corresponding unfinished pending request. @param requestId ID of the request to cancel. @param cause Cause of the cancelling (can be <code>null</code>).
handleFailedResponse
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/stats/TaskStatsRequestCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/stats/TaskStatsRequestCoordinator.java
Apache-2.0
/**
 * Shuts down the coordinator, discarding all pending requests.
 *
 * <p>After shut down, no further operations are executed. Idempotent.
 */
public void shutDown() {
    synchronized (lock) {
        if (isShutDown) {
            return;
        }
        log.info("Shutting down task stats request coordinator.");

        // Fail every outstanding request so callers are not left waiting forever.
        for (PendingStatsRequest<T, V> pending : pendingRequests.values()) {
            pending.discard(new RuntimeException("Shut down"));
        }
        pendingRequests.clear();
        recentPendingRequestIds.clear();

        isShutDown = true;
    }
}
Shuts down the coordinator. <p>After shut down, no further operations are executed.
shutDown
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/stats/TaskStatsRequestCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/stats/TaskStatsRequestCoordinator.java
Apache-2.0
/**
 * Returns a future that completes successfully once all samples from individual tasks are
 * collected, or exceptionally if at least one task response fails.
 *
 * @return future with the result of collecting task statistics
 */
public CompletableFuture<V> getStatsFuture() {
    return resultFuture;
}
A Future, which will either complete successfully if all of the samples from individual tasks are collected or exceptionally, if at least one of the task responses fails. @return A future with the result of collecting tasks statistics.
getStatsFuture
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/stats/TaskStatsRequestCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/stats/TaskStatsRequestCoordinator.java
Apache-2.0
/**
 * Triggers collection of thread info stats by combining thread info responses from the given
 * subtasks. Each subtask response consists of {@code numSamples} samples, collected with
 * {@code delayBetweenSamples} delay between them.
 *
 * @param executionsWithGateways execution attempts together with the TaskExecutors running them
 * @param numSamples number of thread info samples to collect from each subtask (must be >= 1)
 * @param delayBetweenSamples delay between consecutive samples
 * @param maxStackTraceDepth maximum depth of the collected stack traces (must be >= 0)
 * @return a future of the completed thread info stats; completed exceptionally if the
 *     coordinator is shut down
 */
public CompletableFuture<VertexThreadInfoStats> triggerThreadInfoRequest(
        Map<ImmutableSet<ExecutionAttemptID>, CompletableFuture<TaskExecutorThreadInfoGateway>>
                executionsWithGateways,
        int numSamples,
        Duration delayBetweenSamples,
        int maxStackTraceDepth) {
    checkNotNull(executionsWithGateways, "Tasks to sample");
    checkArgument(executionsWithGateways.size() > 0, "No tasks to sample");
    checkArgument(numSamples >= 1, "No number of samples");
    checkArgument(maxStackTraceDepth >= 0, "Negative maximum stack trace depth");

    // Execution IDs of running tasks grouped by the task manager
    Collection<ImmutableSet<ExecutionAttemptID>> runningSubtasksIds =
            executionsWithGateways.keySet();

    synchronized (lock) {
        if (isShutDown) {
            return FutureUtils.completedExceptionally(new IllegalStateException("Shut down"));
        }

        final int requestId = requestIdCounter++;
        log.debug("Triggering thread info request {}", requestId);

        final PendingThreadInfoRequest pending =
                new PendingThreadInfoRequest(requestId, runningSubtasksIds);

        // requestTimeout is treated as the time on top of the expected sampling duration.
        // Discard the request if it takes too long. We don't send cancel
        // messages to the task managers, but only wait for the responses
        // and then ignore them.
        long expectedDuration = numSamples * delayBetweenSamples.toMillis();
        Duration timeout = requestTimeout.plusMillis(expectedDuration);

        // Add the pending request before scheduling the discard task to
        // prevent races with removing it again.
        pendingRequests.put(requestId, pending);

        ThreadInfoSamplesRequest requestParams =
                new ThreadInfoSamplesRequest(
                        requestId, numSamples, delayBetweenSamples, maxStackTraceDepth);

        // Fan the request out to the TaskExecutors; responses complete the pending future.
        requestThreadInfo(executionsWithGateways, requestParams, timeout);

        return pending.getStatsFuture();
    }
}
Triggers collection of thread info stats of a job vertex by combining thread info responses from given subtasks. A thread info response of a subtask in turn consists of {@code numSamples}, collected with {@code delayBetweenSamples} milliseconds delay between them. @param executionsWithGateways Execution attempts together with TaskExecutors running them. @param numSamples Number of thread info samples to collect from each subtask. @param delayBetweenSamples Delay between consecutive samples (ms). @param maxStackTraceDepth Maximum depth of the stack traces collected within thread info samples. @return A future of the completed thread info stats.
triggerThreadInfoRequest
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/ThreadInfoRequestCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/ThreadInfoRequestCoordinator.java
Apache-2.0
/**
 * Returns the ID of this sampling request.
 *
 * @return ID of the request
 */
public int getRequestId() {
    return requestId;
}
Returns the ID of the sampling request. @return ID of the request.
getRequestId
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/ThreadInfoSamplesRequest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/ThreadInfoSamplesRequest.java
Apache-2.0
/**
 * Returns the number of thread info samples requested per subtask.
 *
 * @return the number of requested samples
 */
public int getNumSamples() {
    return numSubSamples;
}
Returns the number of samples that are requested to be collected. @return the number of requested samples.
getNumSamples
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/ThreadInfoSamplesRequest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/ThreadInfoSamplesRequest.java
Apache-2.0
/**
 * Returns the configured delay between consecutive samples.
 *
 * @return the delay between the individual samples
 */
public Duration getDelayBetweenSamples() {
    return delayBetweenSamples;
}
Returns the configured delay between the individual samples. @return the delay between the individual samples.
getDelayBetweenSamples
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/ThreadInfoSamplesRequest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/ThreadInfoSamplesRequest.java
Apache-2.0
/**
 * Returns the configured maximum depth of the collected stack traces.
 *
 * @return the maximum stack trace depth
 */
public int getMaxStackTraceDepth() {
    return maxStackTraceDepth;
}
Returns the configured maximum depth of the collected stack traces. @return the maximum depth of the collected stack traces.
getMaxStackTraceDepth
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/ThreadInfoSamplesRequest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/ThreadInfoSamplesRequest.java
Apache-2.0
/**
 * Converts a thread info sample into a FlameGraph covering all thread states.
 *
 * @param sample thread details sample containing stack traces
 * @return FlameGraph data structure
 */
public static VertexFlameGraph createFullFlameGraphFrom(VertexThreadInfoStats sample) {
    // Include every possible thread state, i.e. build the graph over all collected samples.
    return createFlameGraphFromSample(sample, EnumSet.allOf(Thread.State.class));
}
Converts {@link VertexThreadInfoStats} into a FlameGraph. @param sample Thread details sample containing stack traces. @return FlameGraph data structure
createFullFlameGraphFrom
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexFlameGraphFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexFlameGraphFactory.java
Apache-2.0
/**
 * Converts a thread info sample into a FlameGraph of blocked (Off-CPU) threads.
 *
 * <p>Only threads in the states {@code TIMED_WAITING}, {@code BLOCKED} and {@code WAITING}
 * are included.
 *
 * @param sample thread details sample containing stack traces
 * @return FlameGraph data structure
 */
public static VertexFlameGraph createOffCpuFlameGraph(VertexThreadInfoStats sample) {
    final EnumSet<Thread.State> offCpuStates =
            EnumSet.of(Thread.State.TIMED_WAITING, Thread.State.BLOCKED, Thread.State.WAITING);
    return createFlameGraphFromSample(sample, offCpuStates);
}
Converts {@link VertexThreadInfoStats} into a FlameGraph representing blocked (Off-CPU) threads. <p>Includes threads in states Thread.State.[TIMED_WAITING, BLOCKED, WAITING]. @param sample Thread details sample containing stack traces. @return FlameGraph data structure.
createOffCpuFlameGraph
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexFlameGraphFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexFlameGraphFactory.java
Apache-2.0
/**
 * Returns the timestamp when the sample was triggered.
 *
 * @return timestamp when the sample was triggered
 */
public long getStartTime() {
    return startTime;
}
Returns the timestamp, when the sample was triggered. @return Timestamp, when the sample was triggered
getStartTime
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoStats.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoStats.java
Apache-2.0
/**
 * Returns the timestamp when all samples were collected.
 *
 * @return timestamp when all samples were collected
 */
@Override
public long getEndTime() {
    return endTime;
}
Returns the timestamp, when all samples were collected. @return Timestamp, when all samples were collected
getEndTime
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoStats.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoStats.java
Apache-2.0
/**
 * Triggers a request for a job vertex to gather thread info statistics. If a sample for the
 * vertex is already in progress, or the tracker is shut down, the call is ignored.
 *
 * <p>Must be called while holding {@code lock}.
 *
 * @param jobVertexKey cache key
 * @param vertex vertex to get the stats for
 */
private void triggerThreadInfoSampleInternal(
        final JobVertexKey jobVertexKey, final AccessExecutionJobVertex vertex) {
    assert (Thread.holdsLock(lock));

    // Ignore the request while shutting down or while a sample for this vertex is in flight.
    if (shutDown || pendingJobVertexStats.contains(jobVertexKey)) {
        return;
    }

    pendingJobVertexStats.add(jobVertexKey);
    triggerThreadInfoRequestForVertices(
            new JobVertexThreadInfoSampleCompletionCallback(jobVertexKey, vertex.getName()),
            vertex.getTaskVertices());
}
Triggers a request for a job vertex to gather the thread info statistics. If there is a sample in progress for the vertex, the call is ignored. @param jobVertexKey cache key @param vertex Vertex to get the stats for.
triggerThreadInfoSampleInternal
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTracker.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTracker.java
Apache-2.0
/**
 * Sets {@code coordinator}.
 *
 * @param coordinator Coordinator for thread info stats requests.
 * @return Builder.
 */
public VertexThreadInfoTrackerBuilder setCoordinator(ThreadInfoRequestCoordinator coordinator) {
    this.coordinator = coordinator;
    return this;
}
Sets {@code coordinator}. @param coordinator Coordinator for thread info stats request. @return Builder.
setCoordinator
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
Apache-2.0
/**
 * Sets {@code cleanUpInterval}.
 *
 * @param cleanUpInterval Clean up interval for completed stats.
 * @return Builder.
 */
public VertexThreadInfoTrackerBuilder setCleanUpInterval(Duration cleanUpInterval) {
    this.cleanUpInterval = cleanUpInterval;
    return this;
}
Sets {@code cleanUpInterval}. @param cleanUpInterval Clean up interval for completed stats. @return Builder.
setCleanUpInterval
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
Apache-2.0
/**
 * Sets {@code numSamples}.
 *
 * @param numSamples Number of thread info samples to collect for each subtask.
 * @return Builder.
 */
public VertexThreadInfoTrackerBuilder setNumSamples(int numSamples) {
    this.numSamples = numSamples;
    return this;
}
Sets {@code numSamples}. @param numSamples Number of thread info samples to collect for each subtask. @return Builder.
setNumSamples
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
Apache-2.0
/**
 * Sets {@code statsRefreshInterval}.
 *
 * @param statsRefreshInterval Time interval after which the available thread info stats are
 *     deprecated and need to be refreshed.
 * @return Builder.
 */
public VertexThreadInfoTrackerBuilder setStatsRefreshInterval(Duration statsRefreshInterval) {
    this.statsRefreshInterval = statsRefreshInterval;
    return this;
}
Sets {@code statsRefreshInterval}. @param statsRefreshInterval Time interval after which the available thread info stats are deprecated and need to be refreshed. @return Builder.
setStatsRefreshInterval
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
Apache-2.0
/**
 * Sets {@code delayBetweenSamples}.
 *
 * @param delayBetweenSamples Delay between individual samples per task.
 * @return Builder.
 */
public VertexThreadInfoTrackerBuilder setDelayBetweenSamples(Duration delayBetweenSamples) {
    this.delayBetweenSamples = delayBetweenSamples;
    return this;
}
Sets {@code delayBetweenSamples}. @param delayBetweenSamples Delay between individual samples per task. @return Builder.
setDelayBetweenSamples
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
Apache-2.0
/**
 * Sets {@code jobVertexStatsCache}. Currently only used for testing; a default is used when
 * not set.
 *
 * @param jobVertexStatsCache The Cache instance to use for caching per-job-vertex statistics.
 * @return Builder.
 */
@VisibleForTesting
VertexThreadInfoTrackerBuilder setJobVertexStatsCache(
        Cache<VertexThreadInfoTracker.JobVertexKey, VertexThreadInfoStats> jobVertexStatsCache) {
    this.jobVertexStatsCache = jobVertexStatsCache;
    return this;
}
Sets {@code jobVertexStatsCache}. This is currently only used for testing. @param jobVertexStatsCache The Cache instance to use for caching statistics. Will use the default defined in {@link VertexThreadInfoTrackerBuilder#defaultCache()} if not set. @return Builder.
setJobVertexStatsCache
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
Apache-2.0
/**
 * Sets {@code executionVertexStatsCache}. Currently only used for testing; a default is used
 * when not set.
 *
 * @param executionVertexStatsCache The Cache instance to use for caching per-execution-vertex
 *     statistics.
 * @return Builder.
 */
@VisibleForTesting
VertexThreadInfoTrackerBuilder setExecutionVertexStatsCache(
        Cache<VertexThreadInfoTracker.ExecutionVertexKey, VertexThreadInfoStats>
                executionVertexStatsCache) {
    this.executionVertexStatsCache = executionVertexStatsCache;
    return this;
}
Sets {@code executionVertexStatsCache}. This is currently only used for testing. @param executionVertexStatsCache The Cache instance to use for caching statistics. Will use the default defined in {@link VertexThreadInfoTrackerBuilder#defaultCache()} if not set. @return Builder.
setExecutionVertexStatsCache
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
Apache-2.0
/**
 * Creates a new {@link VertexThreadInfoTrackerBuilder}.
 *
 * @param resourceManagerGatewayRetriever retriever for the resource manager gateway
 * @param executor executor used for scheduled tasks
 * @param restTimeout timeout for REST requests
 * @return Builder.
 */
public static VertexThreadInfoTrackerBuilder newBuilder(
        GatewayRetriever<ResourceManagerGateway> resourceManagerGatewayRetriever,
        ScheduledExecutorService executor,
        Duration restTimeout) {
    return new VertexThreadInfoTrackerBuilder(
            resourceManagerGatewayRetriever, executor, restTimeout);
}
Create a new {@link VertexThreadInfoTrackerBuilder}. @return Builder.
newBuilder
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerBuilder.java
Apache-2.0
public static void runFlinkZkQuorumPeer(String zkConfigFile, int peerId) throws Exception { Properties zkProps = new Properties(); try (InputStream inStream = new FileInputStream(new File(zkConfigFile))) { zkProps.load(inStream); } LOG.info("Configuration: " + zkProps); // Set defaults for required properties setRequiredProperties(zkProps); // Write peer id to myid file writeMyIdToDataDir(zkProps, peerId); // The myid file needs to be written before creating the instance. Otherwise, this // will fail. QuorumPeerConfig conf = new QuorumPeerConfig(); conf.parseProperties(zkProps); if (conf.isDistributed()) { // Run quorum peer LOG.info( "Running distributed ZooKeeper quorum peer (total peers: {}).", conf.getServers().size()); QuorumPeerMain qp = new QuorumPeerMain(); qp.runFromConfig(conf); } else { // Run standalone LOG.info("Running standalone ZooKeeper quorum peer."); ZooKeeperServerMain zk = new ZooKeeperServerMain(); ServerConfig sc = new ServerConfig(); sc.readFrom(conf); zk.runFromConfig(sc); } }
Runs a ZooKeeper {@link QuorumPeer} if further peers are configured or a single {@link ZooKeeperServer} if no further peers are configured. @param zkConfigFile ZooKeeper config file 'zoo.cfg' @param peerId ID for the 'myid' file
runFlinkZkQuorumPeer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/FlinkZooKeeperQuorumPeer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/FlinkZooKeeperQuorumPeer.java
Apache-2.0
private static void setRequiredProperties(Properties zkProps) { // Set default client port if (zkProps.getProperty("clientPort") == null) { zkProps.setProperty("clientPort", String.valueOf(DEFAULT_ZOOKEEPER_CLIENT_PORT)); LOG.warn("No 'clientPort' configured. Set to '{}'.", DEFAULT_ZOOKEEPER_CLIENT_PORT); } // Set default init limit if (zkProps.getProperty("initLimit") == null) { zkProps.setProperty("initLimit", String.valueOf(DEFAULT_ZOOKEEPER_INIT_LIMIT)); LOG.warn("No 'initLimit' configured. Set to '{}'.", DEFAULT_ZOOKEEPER_INIT_LIMIT); } // Set default sync limit if (zkProps.getProperty("syncLimit") == null) { zkProps.setProperty("syncLimit", String.valueOf(DEFAULT_ZOOKEEPER_SYNC_LIMIT)); LOG.warn("No 'syncLimit' configured. Set to '{}'.", DEFAULT_ZOOKEEPER_SYNC_LIMIT); } // Set default data dir if (zkProps.getProperty("dataDir") == null) { String dataDir = String.format( "%s/%s/zookeeper", System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString()); zkProps.setProperty("dataDir", dataDir); LOG.warn("No 'dataDir' configured. Set to '{}'.", dataDir); } int peerPort = DEFAULT_ZOOKEEPER_PEER_PORT; int leaderPort = DEFAULT_ZOOKEEPER_LEADER_PORT; // Set peer and leader ports if none given, because ZooKeeper complains if multiple // servers are configured, but no ports are given. for (Map.Entry<Object, Object> entry : zkProps.entrySet()) { String key = (String) entry.getKey(); if (entry.getKey().toString().startsWith("server.")) { String value = (String) entry.getValue(); String[] parts = value.split(":"); if (parts.length == 1) { String address = String.format("%s:%d:%d", parts[0], peerPort, leaderPort); zkProps.setProperty(key, address); LOG.info( "Set peer and leader port of '{}': '{}' => '{}'.", key, value, address); } else if (parts.length == 2) { String address = String.format( "%s:%d:%d", parts[0], Integer.valueOf(parts[1]), leaderPort); zkProps.setProperty(key, address); LOG.info("Set peer port of '{}': '{}' => '{}'.", key, value, address); } } } }
Sets required properties to reasonable defaults and logs it.
setRequiredProperties
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/FlinkZooKeeperQuorumPeer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/FlinkZooKeeperQuorumPeer.java
Apache-2.0
/**
 * Returns the current value of the wrapped shared value.
 *
 * @return the raw bytes held by the underlying {@code SharedValue}
 */
public byte[] getValue() {
    return sharedValue.getValue();
}
Wrapper class for a {@link SharedValue} so that we don't expose a curator dependency in our internal APIs. Such an exposure is problematic due to the relocation of curator.
getValue
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperSharedValue.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperSharedValue.java
Apache-2.0
/**
 * Releases the lock for the given state node and tries to remove the state node if it is no
 * longer locked.
 *
 * @param pathInZooKeeper path of the state handle to remove
 * @return {@code true} if the state handle could be deleted; {@code false} if it is still
 *     locked by another connection
 * @throws Exception if the ZooKeeper operation or discarding the state handle fails
 */
@Override
public boolean releaseAndTryRemove(String pathInZooKeeper) throws Exception {
    checkNotNull(pathInZooKeeper, "Path in ZooKeeper");

    final String path = normalizePath(pathInZooKeeper);

    RetrievableStateHandle<T> stateHandle = null;

    // Best-effort retrieval of the handle so its state can be discarded after deletion;
    // failure to read it does not abort the removal.
    try {
        stateHandle = get(path, false);
    } catch (Exception e) {
        LOG.warn("Could not retrieve the state handle from node {}.", path, e);
    }

    // Drop this instance's own lock first.
    release(pathInZooKeeper);

    // Deleting the locks sub-node only succeeds once no other instance holds a lock;
    // NotEmptyException means someone else still has it locked.
    try {
        deleteIfExists(getRootLockPath(path));
    } catch (KeeperException.NotEmptyException ignored) {
        LOG.debug(
                "Could not delete znode {} because it is still locked.", getRootLockPath(path));
        return false;
    }

    if (stateHandle != null) {
        stateHandle.discardState();
    }

    // we can now commit the deletion by removing the parent node
    deleteIfExists(path);

    return true;
}
Releases the lock for the given state node and tries to remove the state node if it is no longer locked. @param pathInZooKeeper Path of state handle to remove @return {@code true} if the state handle could be deleted; {@code false}, if the handle is locked by another connection. @throws Exception If the ZooKeeper operation or discarding the state handle fails
releaseAndTryRemove
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
Apache-2.0
/**
 * Releases all lock nodes of this ZooKeeperStateHandleStore. Failures for individual nodes are
 * collected and rethrown after all nodes have been attempted.
 *
 * @throws Exception if releasing at least one lock node fails
 */
@Override
public void releaseAll() throws Exception {
    Exception suppressedFailure = null;

    for (String handlePath : getAllHandles()) {
        try {
            release(handlePath);
        } catch (Exception e) {
            suppressedFailure = ExceptionUtils.firstOrSuppressed(e, suppressedFailure);
        }
    }

    if (suppressedFailure != null) {
        throw new Exception("Could not properly release all state nodes.", suppressedFailure);
    }
}
Releases all lock nodes of this ZooKeeperStateHandleStore. @throws Exception if the delete operation of a lock file fails
releaseAll
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
Apache-2.0
private boolean hasLock(String rootPath) throws Exception { final String normalizedRootPath = normalizePath(rootPath); try { return client.checkExists().forPath(getInstanceLockPath(normalizedRootPath)) != null; } catch (KeeperException.NoNodeException e) { // this is the case if the node is marked for deletion or already deleted return false; } }
Checks whether a lock is created for this instance on the passed ZooKeeper node. @param rootPath The node that shall be checked. @return {@code true} if the lock exists; {@code false} otherwise.
hasLock
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
Apache-2.0
/**
 * Returns the path of this instance's lock node under the given root path.
 *
 * @param rootPath root path under which the lock node shall be created
 * @return path for the lock node
 */
@VisibleForTesting
String getInstanceLockPath(String rootPath) {
    return getRootLockPath(rootPath) + '/' + lockNode;
}
Returns the path for the lock node relative to the given path. @param rootPath Root path under which the lock node shall be created @return Path for the lock node
getInstanceLockPath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
Apache-2.0
/**
 * Returns the sub-path collecting the lock nodes for the node referred to by {@code rootPath}.
 * The {@code rootPath} node is marked for deletion once this sub-path is deleted.
 *
 * @param rootPath node whose lock sub-path shall be returned
 * @return lock sub-path of the given node
 */
@VisibleForTesting
static String getRootLockPath(String rootPath) {
    return rootPath + "/locks";
}
Returns the sub-path for lock nodes of the corresponding node (referred to through the passed {@code rootPath}). The returned sub-path collects the lock nodes for the {@code rootPath}'s node. The {@code rootPath} is marked for deletion if the sub-path for lock nodes is deleted.
getRootLockPath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
Apache-2.0
/**
 * Gets a state handle from ZooKeeper and optionally locks it.
 *
 * @param pathInZooKeeper path in ZooKeeper to get the state handle from
 * @param lock true if the node should be locked; false otherwise
 * @return the state handle
 * @throws IOException if the stored state handle cannot be deserialized
 * @throws Exception if a ZooKeeper operation fails
 */
private RetrievableStateHandle<T> get(String pathInZooKeeper, boolean lock) throws Exception {
    checkNotNull(pathInZooKeeper, "Path in ZooKeeper");

    final String path = normalizePath(pathInZooKeeper);

    if (lock) {
        // try to lock the node by creating an ephemeral lock node for this instance
        try {
            client.create().withMode(CreateMode.EPHEMERAL).forPath(getInstanceLockPath(path));
        } catch (KeeperException.NodeExistsException ignored) {
            // we have already created the lock
        } catch (KeeperException.NoNodeException ex) {
            // We could run into this exception because the parent node does not exist when we
            // are trying to lock.
            // We wrap the exception here so that it could be caught in
            // DefaultExecutionPlanStore
            throw new NotExistException("ZooKeeper node " + path + " does not exist.", ex);
        }
    }

    boolean success = false;

    try {
        byte[] data = client.getData().forPath(path);

        RetrievableStateHandle<T> retrievableStateHandle = deserialize(data);

        success = true;

        return retrievableStateHandle;
    } catch (KeeperException.NoNodeException ex) {
        // We wrap the exception here so that it could be caught in DefaultExecutionPlanStore
        throw new NotExistException("ZooKeeper node " + path + " does not exist.", ex);
    } catch (IOException | ClassNotFoundException e) {
        throw new IOException(
                "Failed to deserialize state handle from ZooKeeper data from " + path + '.', e);
    } finally {
        // If reading/deserializing failed, release the lock we just took so the node is not
        // left locked by this instance.
        if (!success && lock) {
            // release the lock
            release(path);
        }
    }
}
Gets a state handle from ZooKeeper and optionally locks it. @param pathInZooKeeper Path in ZooKeeper to get the state handle from @param lock True if we should lock the node; otherwise false @return The state handle @throws IOException Thrown if the method failed to deserialize the stored state handle @throws Exception Thrown if a ZooKeeper operation failed
get
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
Apache-2.0
/**
 * Makes sure that every path starts with a "/".
 *
 * @param path path to normalize
 * @return normalized path starting with a "/"
 */
private static String normalizePath(String path) {
    return path.startsWith("/") ? path : '/' + path;
}
Makes sure that every path starts with a "/". @param path Path to normalize @return Normalized path such that it starts with a "/"
normalizePath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStore.java
Apache-2.0
/**
 * Applies the given window function to each window. The output of the window function is
 * interpreted as a regular non-windowed stream. Arriving data is incrementally aggregated
 * using the given reducer.
 *
 * @param reduceFunction the reduce function used for incremental aggregation
 * @param function the process window function
 * @return the data stream resulting from applying the window function to the window
 */
@PublicEvolving
public <R> SingleOutputStreamOperator<R> reduce(
        ReduceFunction<T> reduceFunction, ProcessAllWindowFunction<T, R, W> function) {
    // Derive the result type from the process window function, then delegate to the
    // fully-specified overload.
    return reduce(
            reduceFunction,
            function,
            getProcessAllWindowFunctionReturnType(function, input.getType()));
}
Applies the given window function to each window. The window function is called for each evaluation of the window for each key individually. The output of the window function is interpreted as a regular non-windowed stream. <p>Arriving data is incrementally aggregated using the given reducer. @param reduceFunction The reduce function that is used for incremental aggregation. @param function The process window function. @return The data stream that is the result of applying the window function to the window.
reduce
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
Apache-2.0
/**
 * Applies the given window function to each window. The output of the window function is
 * interpreted as a regular non-windowed stream. Arriving data is incrementally aggregated
 * using the given reducer.
 *
 * @param reduceFunction the reduce function used for incremental aggregation; must not be a
 *     {@code RichFunction}
 * @param function the process window function
 * @param resultType type information for the result type of the window function
 * @return the data stream resulting from applying the window function to the window
 */
@PublicEvolving
public <R> SingleOutputStreamOperator<R> reduce(
        ReduceFunction<T> reduceFunction,
        ProcessAllWindowFunction<T, R, W> function,
        TypeInformation<R> resultType) {
    if (reduceFunction instanceof RichFunction) {
        throw new UnsupportedOperationException(
                "ReduceFunction of reduce can not be a RichFunction.");
    }

    // clean the closures
    function = input.getExecutionEnvironment().clean(function);
    reduceFunction = input.getExecutionEnvironment().clean(reduceFunction);

    String callLocation = Utils.getCallLocationName();
    String udfName = "AllWindowedStream." + callLocation;

    String opName;
    KeySelector<T, Byte> keySel = input.getKeySelector();

    OneInputStreamOperatorFactory<T, R> operator;

    if (evictor != null) {
        // With an evictor, full records must be kept in list state so elements can be
        // evicted before the window function fires; the reduce is applied on firing.
        @SuppressWarnings({"unchecked", "rawtypes"})
        TypeSerializer<StreamRecord<T>> streamRecordSerializer =
                (TypeSerializer<StreamRecord<T>>)
                        new StreamElementSerializer(
                                input.getType()
                                        .createSerializer(
                                                getExecutionEnvironment()
                                                        .getConfig()
                                                        .getSerializerConfig()));

        ListStateDescriptor<StreamRecord<T>> stateDesc =
                new ListStateDescriptor<>("window-contents", streamRecordSerializer);

        opName =
                "TriggerWindow("
                        + windowAssigner
                        + ", "
                        + stateDesc
                        + ", "
                        + trigger
                        + ", "
                        + evictor
                        + ", "
                        + udfName
                        + ")";

        operator =
                new EvictingWindowOperatorFactory<>(
                        windowAssigner,
                        windowAssigner.getWindowSerializer(
                                getExecutionEnvironment().getConfig()),
                        keySel,
                        input.getKeyType()
                                .createSerializer(
                                        getExecutionEnvironment()
                                                .getConfig()
                                                .getSerializerConfig()),
                        stateDesc,
                        new InternalIterableProcessAllWindowFunction<>(
                                new ReduceApplyProcessAllWindowFunction<>(
                                        reduceFunction, function)),
                        trigger,
                        evictor,
                        allowedLateness,
                        lateDataOutputTag);
    } else {
        // Without an evictor, elements are eagerly pre-aggregated in reducing state, so the
        // window function only sees the single reduced value on firing.
        ReducingStateDescriptor<T> stateDesc =
                new ReducingStateDescriptor<>(
                        "window-contents",
                        reduceFunction,
                        input.getType()
                                .createSerializer(
                                        getExecutionEnvironment()
                                                .getConfig()
                                                .getSerializerConfig()));

        opName =
                "TriggerWindow("
                        + windowAssigner
                        + ", "
                        + stateDesc
                        + ", "
                        + trigger
                        + ", "
                        + udfName
                        + ")";

        operator =
                new WindowOperatorFactory<>(
                        windowAssigner,
                        windowAssigner.getWindowSerializer(
                                getExecutionEnvironment().getConfig()),
                        keySel,
                        input.getKeyType()
                                .createSerializer(
                                        getExecutionEnvironment()
                                                .getConfig()
                                                .getSerializerConfig()),
                        stateDesc,
                        new InternalSingleValueProcessAllWindowFunction<>(function),
                        trigger,
                        allowedLateness,
                        lateDataOutputTag);
    }

    // Non-keyed (all-window) streams run the window operator with parallelism 1.
    return input.transform(opName, resultType, operator).forceNonParallel();
}
Applies the given window function to each window. The window function is called for each evaluation of the window for each key individually. The output of the window function is interpreted as a regular non-windowed stream. <p>Arriving data is incrementally aggregated using the given reducer. @param reduceFunction The reduce function that is used for incremental aggregation. @param function The process window function. @param resultType Type information for the result type of the window function @return The data stream that is the result of applying the window function to the window.
reduce
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
Apache-2.0
/**
 * Applies the given {@code AggregateFunction} to each window, aggregating all elements of a
 * window into a single result element. The stream of result elements (one per window) is
 * interpreted as a regular non-windowed stream.
 *
 * @param function the aggregation function; must not be a {@code RichFunction}
 * @param <ACC> the type of the AggregateFunction's accumulator
 * @param <R> the type of the elements in the resulting stream, equal to the
 *     AggregateFunction's result type
 * @return the data stream resulting from applying the aggregate function to the window
 */
@PublicEvolving
public <ACC, R> SingleOutputStreamOperator<R> aggregate(AggregateFunction<T, ACC, R> function) {
    checkNotNull(function, "function");

    if (function instanceof RichFunction) {
        throw new UnsupportedOperationException(
                "This aggregation function cannot be a RichFunction.");
    }

    // Extract accumulator and result types from the aggregate function signature.
    final TypeInformation<ACC> accumulatorType =
            TypeExtractor.getAggregateFunctionAccumulatorType(
                    function, input.getType(), null, false);

    final TypeInformation<R> resultType =
            TypeExtractor.getAggregateFunctionReturnType(function, input.getType(), null, false);

    return aggregate(function, accumulatorType, resultType);
}
Applies the given {@code AggregateFunction} to each window. The AggregateFunction aggregates all elements of a window into a single result element. The stream of these result elements (one per window) is interpreted as a regular non-windowed stream. @param function The aggregation function. @return The data stream that is the result of applying the fold function to the window. @param <ACC> The type of the AggregateFunction's accumulator @param <R> The type of the elements in the resulting stream, equal to the AggregateFunction's result type
aggregate
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
Apache-2.0
/**
 * Applies the given {@code AggregateFunction} to each window, aggregating all elements of a
 * window into a single result element. The stream of result elements (one per window) is
 * interpreted as a regular non-windowed stream.
 *
 * @param function the aggregation function; must not be a {@code RichFunction}
 * @param accumulatorType type information for the accumulator
 * @param resultType type information for the result
 * @param <ACC> the type of the AggregateFunction's accumulator
 * @param <R> the type of the elements in the resulting stream, equal to the
 *     AggregateFunction's result type
 * @return the data stream resulting from applying the aggregate function to the window
 */
@PublicEvolving
public <ACC, R> SingleOutputStreamOperator<R> aggregate(
        AggregateFunction<T, ACC, R> function,
        TypeInformation<ACC> accumulatorType,
        TypeInformation<R> resultType) {
    checkNotNull(function, "function");
    checkNotNull(accumulatorType, "accumulatorType");
    checkNotNull(resultType, "resultType");

    if (function instanceof RichFunction) {
        throw new UnsupportedOperationException(
                "This aggregation function cannot be a RichFunction.");
    }

    // Delegate to the process-window overload with a pass-through window function.
    return aggregate(function, new PassThroughAllWindowFunction<>(), accumulatorType, resultType);
}
Applies the given {@code AggregateFunction} to each window. The AggregateFunction aggregates all elements of a window into a single result element. The stream of these result elements (one per window) is interpreted as a regular non-windowed stream. @param function The aggregation function. @return The data stream that is the result of applying the aggregation function to the window. @param <ACC> The type of the AggregateFunction's accumulator @param <R> The type of the elements in the resulting stream, equal to the AggregateFunction's result type
aggregate
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
Apache-2.0
@PublicEvolving public <ACC, V, R> SingleOutputStreamOperator<R> aggregate( AggregateFunction<T, ACC, V> aggFunction, ProcessAllWindowFunction<V, R, W> windowFunction) { checkNotNull(aggFunction, "aggFunction"); checkNotNull(windowFunction, "windowFunction"); TypeInformation<ACC> accumulatorType = TypeExtractor.getAggregateFunctionAccumulatorType( aggFunction, input.getType(), null, false); TypeInformation<V> aggResultType = TypeExtractor.getAggregateFunctionReturnType( aggFunction, input.getType(), null, false); TypeInformation<R> resultType = getProcessAllWindowFunctionReturnType(windowFunction, aggResultType); return aggregate(aggFunction, windowFunction, accumulatorType, aggResultType, resultType); }
Applies the given window function to each window. The window function is called for each evaluation of the window. The output of the window function is interpreted as a regular non-windowed stream. <p>Arriving data is incrementally aggregated using the given aggregate function. This means that the window function typically has only a single value to process when called. @param aggFunction The aggregate function that is used for incremental aggregation. @param windowFunction The process window function. @return The data stream that is the result of applying the window function to the window. @param <ACC> The type of the AggregateFunction's accumulator @param <V> The type of AggregateFunction's result, and the WindowFunction's input @param <R> The type of the elements in the resulting stream, equal to the WindowFunction's result type
aggregate
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
Apache-2.0
@PublicEvolving public <ACC, V, R> SingleOutputStreamOperator<R> aggregate( AggregateFunction<T, ACC, V> aggregateFunction, ProcessAllWindowFunction<V, R, W> windowFunction, TypeInformation<ACC> accumulatorType, TypeInformation<V> aggregateResultType, TypeInformation<R> resultType) { checkNotNull(aggregateFunction, "aggregateFunction"); checkNotNull(windowFunction, "windowFunction"); checkNotNull(accumulatorType, "accumulatorType"); checkNotNull(aggregateResultType, "aggregateResultType"); checkNotNull(resultType, "resultType"); if (aggregateFunction instanceof RichFunction) { throw new UnsupportedOperationException( "This aggregate function cannot be a RichFunction."); } // clean the closures windowFunction = input.getExecutionEnvironment().clean(windowFunction); aggregateFunction = input.getExecutionEnvironment().clean(aggregateFunction); final String callLocation = Utils.getCallLocationName(); final String udfName = "AllWindowedStream." + callLocation; final String opName = windowAssigner.getClass().getSimpleName(); final String opDescription; final KeySelector<T, Byte> keySel = input.getKeySelector(); OneInputStreamOperatorFactory<T, R> operator; if (evictor != null) { @SuppressWarnings({"unchecked", "rawtypes"}) TypeSerializer<StreamRecord<T>> streamRecordSerializer = (TypeSerializer<StreamRecord<T>>) new StreamElementSerializer( input.getType() .createSerializer( getExecutionEnvironment() .getConfig() .getSerializerConfig())); ListStateDescriptor<StreamRecord<T>> stateDesc = new ListStateDescriptor<>("window-contents", streamRecordSerializer); opDescription = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + evictor + ", " + udfName + ")"; operator = new EvictingWindowOperatorFactory<>( windowAssigner, windowAssigner.getWindowSerializer( getExecutionEnvironment().getConfig()), keySel, input.getKeyType() .createSerializer( getExecutionEnvironment() .getConfig() .getSerializerConfig()), stateDesc, new 
InternalAggregateProcessAllWindowFunction<>( aggregateFunction, windowFunction), trigger, evictor, allowedLateness, lateDataOutputTag); } else { AggregatingStateDescriptor<T, ACC, V> stateDesc = new AggregatingStateDescriptor<>( "window-contents", aggregateFunction, accumulatorType.createSerializer( getExecutionEnvironment().getConfig().getSerializerConfig())); opDescription = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + udfName + ")"; operator = new WindowOperatorFactory<>( windowAssigner, windowAssigner.getWindowSerializer( getExecutionEnvironment().getConfig()), keySel, input.getKeyType() .createSerializer( getExecutionEnvironment() .getConfig() .getSerializerConfig()), stateDesc, new InternalSingleValueProcessAllWindowFunction<>(windowFunction), trigger, allowedLateness, lateDataOutputTag); } return input.transform(opName, resultType, operator) .setDescription(opDescription) .forceNonParallel(); }
Applies the given window function to each window. The window function is called for each evaluation of the window. The output of the window function is interpreted as a regular non-windowed stream. <p>Arriving data is incrementally aggregated using the given aggregate function. This means that the window function typically has only a single value to process when called. @param aggregateFunction The aggregation function that is used for incremental aggregation. @param windowFunction The process window function. @param accumulatorType Type information for the internal accumulator type of the aggregation function @param aggregateResultType Type information for the result type of the aggregation function, which is the input type of the window function @param resultType Type information for the result type of the window function @return The data stream that is the result of applying the window function to the window. @param <ACC> The type of the AggregateFunction's accumulator @param <V> The type of AggregateFunction's result, and the WindowFunction's input @param <R> The type of the elements in the resulting stream, equal to the WindowFunction's result type
aggregate
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
Apache-2.0
public SingleOutputStreamOperator<T> min(String field) { return aggregate( new ComparableAggregator<>( field, input.getType(), AggregationFunction.AggregationType.MIN, false, input.getExecutionConfig())); }
Applies an aggregation that gives the minimum value of the pojo data stream at the given field expression for every window. <p>A field expression is either the name of a public field or a getter method with parentheses of the {@link DataStream}'s underlying type. A dot can be used to drill down into objects, as in {@code "field1.getInnerField2()" }. @param field The field expression based on which the aggregation will be applied. @return The transformed DataStream.
min
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
Apache-2.0
public DataStream<IN1> getFirstInput() { return nonBroadcastStream; }
Returns the non-broadcast {@link DataStream}. @return The stream which, by convention, is not broadcasted.
getFirstInput
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/BroadcastConnectedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/BroadcastConnectedStream.java
Apache-2.0
public BroadcastStream<IN2> getSecondInput() { return broadcastStream; }
Returns the {@link BroadcastStream}. @return The stream which, by convention, is the broadcast one.
getSecondInput
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/BroadcastConnectedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/BroadcastConnectedStream.java
Apache-2.0
@PublicEvolving public <KEY, OUT> SingleOutputStreamOperator<OUT> process( final KeyedBroadcastProcessFunction<KEY, IN1, IN2, OUT> function) { TypeInformation<OUT> outTypeInfo = TypeExtractor.getBinaryOperatorReturnType( function, KeyedBroadcastProcessFunction.class, 1, 2, 3, TypeExtractor.NO_INDEX, getType1(), getType2(), Utils.getCallLocationName(), true); return process(function, outTypeInfo); }
Assumes as inputs a {@link BroadcastStream} and a {@link KeyedStream} and applies the given {@link KeyedBroadcastProcessFunction} on them, thereby creating a transformed output stream. @param function The {@link KeyedBroadcastProcessFunction} that is called for each element in the stream. @param <KEY> The type of the keys in the keyed stream. @param <OUT> The type of the output elements. @return The transformed {@link DataStream}.
process
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/BroadcastConnectedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/BroadcastConnectedStream.java
Apache-2.0
@PublicEvolving public <KEY, OUT> SingleOutputStreamOperator<OUT> process( final KeyedBroadcastProcessFunction<KEY, IN1, IN2, OUT> function, final TypeInformation<OUT> outTypeInfo) { Preconditions.checkNotNull(function); Preconditions.checkArgument( nonBroadcastStream instanceof KeyedStream, "A KeyedBroadcastProcessFunction can only be used on a keyed stream."); return transform(function, outTypeInfo); }
Assumes as inputs a {@link BroadcastStream} and a {@link KeyedStream} and applies the given {@link KeyedBroadcastProcessFunction} on them, thereby creating a transformed output stream. @param function The {@link KeyedBroadcastProcessFunction} that is called for each element in the stream. @param outTypeInfo The type of the output elements. @param <KEY> The type of the keys in the keyed stream. @param <OUT> The type of the output elements. @return The transformed {@link DataStream}.
process
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/BroadcastConnectedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/BroadcastConnectedStream.java
Apache-2.0
@PublicEvolving public <OUT> SingleOutputStreamOperator<OUT> process( final BroadcastProcessFunction<IN1, IN2, OUT> function) { TypeInformation<OUT> outTypeInfo = TypeExtractor.getBinaryOperatorReturnType( function, BroadcastProcessFunction.class, 0, 1, 2, TypeExtractor.NO_INDEX, getType1(), getType2(), Utils.getCallLocationName(), true); return process(function, outTypeInfo); }
Assumes as inputs a {@link BroadcastStream} and a non-keyed {@link DataStream} and applies the given {@link BroadcastProcessFunction} on them, thereby creating a transformed output stream. @param function The {@link BroadcastProcessFunction} that is called for each element in the stream. @param <OUT> The type of the output elements. @return The transformed {@link DataStream}.
process
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/BroadcastConnectedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/BroadcastConnectedStream.java
Apache-2.0
public void invalidate() throws Exception { final CacheTransformation<T> t = (CacheTransformation<T>) this.getTransformation(); environment.invalidateClusterDataset(t.getDatasetId()); }
Invalidates the cached intermediate result of this DataStream to release the physical resources. Users are not required to invoke this method to release physical resources unless they want to. The cache will be recreated if it is used after being invalidated.
invalidate
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CachedDataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CachedDataStream.java
Apache-2.0
public <KEY> Where<KEY> where(KeySelector<T1, KEY> keySelector) { Preconditions.checkNotNull(keySelector); final TypeInformation<KEY> keyType = TypeExtractor.getKeySelectorTypes(keySelector, input1.getType()); return where(keySelector, keyType); }
Specifies a {@link KeySelector} for elements from the first input. @param keySelector The KeySelector to be used for extracting the first input's key for partitioning.
where
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
Apache-2.0
public <KEY> Where<KEY> where(KeySelector<T1, KEY> keySelector, TypeInformation<KEY> keyType) { Preconditions.checkNotNull(keySelector); Preconditions.checkNotNull(keyType); return new Where<>(input1.clean(keySelector), keyType); }
Specifies a {@link KeySelector} for elements from the first input with explicit type information. @param keySelector The KeySelector to be used for extracting the first input's key for partitioning. @param keyType The type information describing the key type.
where
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
Apache-2.0
public EqualTo equalTo(KeySelector<T2, KEY> keySelector, TypeInformation<KEY> keyType) { Preconditions.checkNotNull(keySelector); Preconditions.checkNotNull(keyType); if (!keyType.equals(this.keyType)) { throw new IllegalArgumentException( "The keys for the two inputs are not equal: " + "first key = " + this.keyType + " , second key = " + keyType); } return new EqualTo(input2.clean(keySelector)); }
Specifies a {@link KeySelector} for elements from the second input with explicit type information for the key type. @param keySelector The KeySelector to be used for extracting the key for partitioning. @param keyType The type information describing the key type.
equalTo
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
Apache-2.0
@PublicEvolving public <W extends Window> WithWindow<T1, T2, KEY, W> window( WindowAssigner<? super TaggedUnion<T1, T2>, W> assigner) { return new WithWindow<>( input1, input2, keySelector1, keySelector2, keyType, assigner, null, null, null); }
Specifies the window on which the co-group operation works.
window
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
Apache-2.0
public StreamExecutionEnvironment getExecutionEnvironment() { return environment; }
ConnectedStreams represent two connected streams of (possibly) different data types. Connected streams are useful for cases where operations on one stream directly affect the operations on the other stream, usually via shared state between the streams. <p>An example for the use of connected streams would be to apply rules that change over time onto another stream. One of the connected streams has the rules, the other stream the elements to apply the rules to. The operation on the connected stream maintains the current set of rules in the state. It may receive either a rule update and update the state or a data element and apply the rules in the state to the element. <p>The connected stream can be conceptually viewed as a union stream of an Either type, that holds either the first stream's type or the second stream's type. @param <IN1> Type of the first input data stream. @param <IN2> Type of the second input data stream.
getExecutionEnvironment
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
Apache-2.0
public DataStream<IN1> getFirstInput() { return inputStream1; }
Returns the first {@link DataStream}. @return The first DataStream.
getFirstInput
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
Apache-2.0
public DataStream<IN2> getSecondInput() { return inputStream2; }
Returns the second {@link DataStream}. @return The second DataStream.
getSecondInput
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
Apache-2.0
@Deprecated public ConnectedStreams<IN1, IN2> keyBy(int keyPosition1, int keyPosition2) { return new ConnectedStreams<>( this.environment, keyBy(inputStream1, keyPosition1), keyBy(inputStream2, keyPosition2)); }
KeyBy operation for connected data stream. Assigns keys to the elements of input1 and input2 according to keyPosition1 and keyPosition2. @deprecated use {@link #keyBy(KeySelector, KeySelector)} @param keyPosition1 The field used to compute the hashcode of the elements in the first input stream. @param keyPosition2 The field used to compute the hashcode of the elements in the second input stream. @return The grouped {@link ConnectedStreams}
keyBy
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
Apache-2.0
@Deprecated public ConnectedStreams<IN1, IN2> keyBy(int[] keyPositions1, int[] keyPositions2) { return new ConnectedStreams<>( environment, keyBy(inputStream1, keyPositions1), keyBy(inputStream2, keyPositions2)); }
KeyBy operation for connected data stream. Assigns keys to the elements of input1 and input2 according to keyPositions1 and keyPositions2. @deprecated use {@link #keyBy(KeySelector, KeySelector)} @param keyPositions1 The fields used to group the first input stream. @param keyPositions2 The fields used to group the second input stream. @return The grouped {@link ConnectedStreams}
keyBy
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
Apache-2.0
@Deprecated public ConnectedStreams<IN1, IN2> keyBy(String field1, String field2) { return new ConnectedStreams<>( environment, keyBy(inputStream1, field1), keyBy(inputStream2, field2)); }
KeyBy operation for connected data stream using key expressions. Assigns keys to the elements of input1 and input2 according to field1 and field2. A field expression is either the name of a public field or a getter method with parentheses of the {@link DataStream}'s underlying type. A dot can be used to drill down into objects, as in {@code "field1.getInnerField2()" }. @deprecated use {@link #keyBy(KeySelector, KeySelector)} @param field1 The grouping expression for the first input @param field2 The grouping expression for the second input @return The grouped {@link ConnectedStreams}
keyBy
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
Apache-2.0
public <KEY> ConnectedStreams<IN1, IN2> keyBy( KeySelector<IN1, KEY> keySelector1, KeySelector<IN2, KEY> keySelector2) { return new ConnectedStreams<>( environment, inputStream1.keyBy(keySelector1), inputStream2.keyBy(keySelector2)); }
KeyBy operation for connected data stream. Assigns keys to the elements of input1 and input2 using keySelector1 and keySelector2. @param keySelector1 The {@link KeySelector} used for grouping the first input @param keySelector2 The {@link KeySelector} used for grouping the second input @return The partitioned {@link ConnectedStreams}
keyBy
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
Apache-2.0
public <KEY> ConnectedStreams<IN1, IN2> keyBy( KeySelector<IN1, KEY> keySelector1, KeySelector<IN2, KEY> keySelector2, TypeInformation<KEY> keyType) { return new ConnectedStreams<>( environment, inputStream1.keyBy(keySelector1, keyType), inputStream2.keyBy(keySelector2, keyType)); }
KeyBy operation for connected data stream. Assigns keys to the elements of input1 and input2 using keySelector1 and keySelector2 with explicit type information for the common key type. @param keySelector1 The {@link KeySelector} used for grouping the first input @param keySelector2 The {@link KeySelector} used for grouping the second input @param keyType The type information of the common key type. @return The partitioned {@link ConnectedStreams}
keyBy
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
Apache-2.0
public <R> SingleOutputStreamOperator<R> map(CoMapFunction<IN1, IN2, R> coMapper) { TypeInformation<R> outTypeInfo = TypeExtractor.getBinaryOperatorReturnType( coMapper, CoMapFunction.class, 0, 1, 2, TypeExtractor.NO_INDEX, getType1(), getType2(), Utils.getCallLocationName(), true); return map(coMapper, outTypeInfo); }
Applies a CoMap transformation on a {@link ConnectedStreams} and maps the output to a common type. The transformation calls a {@link CoMapFunction#map1} for each element of the first input and {@link CoMapFunction#map2} for each element of the second input. Each CoMapFunction call returns exactly one element. @param coMapper The CoMapFunction used to jointly transform the two input DataStreams @return The transformed {@link DataStream}
map
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
Apache-2.0
public <R> SingleOutputStreamOperator<R> map( CoMapFunction<IN1, IN2, R> coMapper, TypeInformation<R> outputType) { return transform("Co-Map", outputType, new CoStreamMap<>(inputStream1.clean(coMapper))); }
Applies a CoMap transformation on a {@link ConnectedStreams} and maps the output to a common type. The transformation calls a {@link CoMapFunction#map1} for each element of the first input and {@link CoMapFunction#map2} for each element of the second input. Each CoMapFunction call returns exactly one element. @param coMapper The CoMapFunction used to jointly transform the two input DataStreams @param outputType {@link TypeInformation} for the result type of the function. @return The transformed {@link DataStream}
map
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
Apache-2.0
public <R> SingleOutputStreamOperator<R> flatMap(CoFlatMapFunction<IN1, IN2, R> coFlatMapper) { TypeInformation<R> outTypeInfo = TypeExtractor.getBinaryOperatorReturnType( coFlatMapper, CoFlatMapFunction.class, 0, 1, 2, TypeExtractor.NO_INDEX, getType1(), getType2(), Utils.getCallLocationName(), true); return flatMap(coFlatMapper, outTypeInfo); }
Applies a CoFlatMap transformation on a {@link ConnectedStreams} and maps the output to a common type. The transformation calls a {@link CoFlatMapFunction#flatMap1} for each element of the first input and {@link CoFlatMapFunction#flatMap2} for each element of the second input. Each CoFlatMapFunction call returns any number of elements including none. @param coFlatMapper The CoFlatMapFunction used to jointly transform the two input DataStreams @return The transformed {@link DataStream}
flatMap
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
Apache-2.0
public <R> SingleOutputStreamOperator<R> flatMap( CoFlatMapFunction<IN1, IN2, R> coFlatMapper, TypeInformation<R> outputType) { return transform( "Co-Flat Map", outputType, new CoStreamFlatMap<>(inputStream1.clean(coFlatMapper))); }
Applies a CoFlatMap transformation on a {@link ConnectedStreams} and maps the output to a common type. The transformation calls a {@link CoFlatMapFunction#flatMap1} for each element of the first input and {@link CoFlatMapFunction#flatMap2} for each element of the second input. Each CoFlatMapFunction call returns any number of elements including none. @param coFlatMapper The CoFlatMapFunction used to jointly transform the two input DataStreams @param outputType {@link TypeInformation} for the result type of the function. @return The transformed {@link DataStream}
flatMap
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
Apache-2.0
@Experimental public ConnectedStreams<IN1, IN2> enableAsyncState() { if ((inputStream1 instanceof KeyedStream) && (inputStream2 instanceof KeyedStream)) { ((KeyedStream<?, ?>) inputStream1).enableAsyncState(); ((KeyedStream<?, ?>) inputStream2).enableAsyncState(); this.isEnableAsyncState = true; } else { throw new UnsupportedOperationException( "The connected streams do not support async state, " + "please ensure that two input streams of your connected streams are " + "keyed stream(not behind a keyBy())."); } return this; }
Enable the async state processing for following keyed processing function on connected streams. This also requires only State V2 APIs are used in the function. @return the configured ConnectedStreams itself.
enableAsyncState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
Apache-2.0
public SinkOperatorUidHashesBuilder setWriterUidHash(String writerUidHash) { this.writerUidHash = writerUidHash; return this; }
Sets the uid hash of the writer operator used to recover state. @param writerUidHash uid hash denoting writer operator @return {@link SinkOperatorUidHashesBuilder}
setWriterUidHash
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CustomSinkOperatorUidHashes.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CustomSinkOperatorUidHashes.java
Apache-2.0
public CustomSinkOperatorUidHashes build() { return new CustomSinkOperatorUidHashes( writerUidHash, committerUidHash, globalCommitterUidHash); }
Constructs the {@link CustomSinkOperatorUidHashes} with the given uid hashes. @return {@link CustomSinkOperatorUidHashes}
build
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CustomSinkOperatorUidHashes.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CustomSinkOperatorUidHashes.java
Apache-2.0
@Internal public int getId() { return transformation.getId(); }
Returns the ID of the {@link DataStream} in the current {@link StreamExecutionEnvironment}. @return ID of the DataStream
getId
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
public int getParallelism() { return transformation.getParallelism(); }
Gets the parallelism for this operator. @return The parallelism set for this operator.
getParallelism
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@PublicEvolving public ResourceSpec getMinResources() { return transformation.getMinResources(); }
Gets the minimum resources for this operator. @return The minimum resources set for this operator.
getMinResources
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0