code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
public Collection<TaskManagerLocation> getPreferredLocations() { return preferredLocations; }
Returns the preferred locations for the slot.
getPreferredLocations
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/SlotProfile.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/SlotProfile.java
Apache-2.0
public Collection<AllocationID> getPreferredAllocations() { return preferredAllocations; }
Returns the desired allocation ids for the slot.
getPreferredAllocations
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/SlotProfile.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/SlotProfile.java
Apache-2.0
public Set<AllocationID> getReservedAllocations() { return reservedAllocations; }
Returns a set of all reserved allocation ids from the execution graph. It will used by {@link PreviousAllocationSlotSelectionStrategy} to support local recovery. In this case, a vertex cannot take an reserved allocation unless it exactly prefers that allocation. <p>This is optional and can be empty if unused.
getReservedAllocations
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/SlotProfile.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/SlotProfile.java
Apache-2.0
@Override public void execute(Runnable command) { throw new UnsupportedOperationException("This executor should never been used."); }
{@link Executor} implementation which fails when {@link #execute(Runnable)} is called. This can be helpful if one wants to make sure that an executor is never been used.
execute
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/concurrent/UnsupportedOperationExecutor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/concurrent/UnsupportedOperationExecutor.java
Apache-2.0
public static ConsumedSubpartitionContext buildConsumedSubpartitionContext( Map<IndexRange, IndexRange> consumedSubpartitionGroups, ConsumedPartitionGroup consumedPartitionGroup, Function<Integer, IntermediateResultPartitionID> partitionIdRetriever) { Map<IntermediateResultPartitionID, Integer> resultPartitionsInOrder = consumedPartitionGroup.getResultPartitionsInOrder(); // If only one range is included and the index range size is the same as the number of // shuffle descriptors, it means that the task will subscribe to all partitions, i.e., the // partition range is one-to-one corresponding to the shuffle descriptors. Therefore, we can // directly construct the ConsumedSubpartitionContext using the subpartition range. if (consumedSubpartitionGroups.size() == 1 && consumedSubpartitionGroups.keySet().iterator().next().size() == resultPartitionsInOrder.size()) { return buildConsumedSubpartitionContext( resultPartitionsInOrder.size(), consumedSubpartitionGroups.values().iterator().next()); } Map<IndexRange, IndexRange> consumedShuffleDescriptorToSubpartitionRangeMap = new LinkedHashMap<>(); for (Map.Entry<IndexRange, IndexRange> entry : consumedSubpartitionGroups.entrySet()) { IndexRange partitionRange = entry.getKey(); IndexRange subpartitionRange = entry.getValue(); // The shuffle descriptor index is consistent with the index in resultPartitionsInOrder. 
IndexRange shuffleDescriptorRange = new IndexRange( resultPartitionsInOrder.get( partitionIdRetriever.apply(partitionRange.getStartIndex())), resultPartitionsInOrder.get( partitionIdRetriever.apply(partitionRange.getEndIndex()))); checkState( partitionRange.size() == shuffleDescriptorRange.size() && !consumedShuffleDescriptorToSubpartitionRangeMap.containsKey( shuffleDescriptorRange)); consumedShuffleDescriptorToSubpartitionRangeMap.put( shuffleDescriptorRange, subpartitionRange); } // For ALL_TO_ALL, there might be overlaps in shuffle descriptor to subpartition range map: // [0,10] -> [2,2], [0,5] -> [3,3], so we need to count consumed shuffle descriptors after // merging. int numConsumedShuffleDescriptors = 0; List<IndexRange> mergedConsumedShuffleDescriptor = mergeIndexRanges(consumedShuffleDescriptorToSubpartitionRangeMap.keySet()); for (IndexRange range : mergedConsumedShuffleDescriptor) { numConsumedShuffleDescriptors += range.size(); } return new ConsumedSubpartitionContext( numConsumedShuffleDescriptors, consumedShuffleDescriptorToSubpartitionRangeMap); }
Builds a {@link ConsumedSubpartitionContext} based on the provided inputs. <p>Note: The construction is based on subscribing to consecutive subpartitions of the same partition. If this assumption is violated, an exception will be thrown. @param consumedSubpartitionGroups a mapping of consumed partition index ranges to subpartition ranges. @param consumedPartitionGroup partition group consumed by the task. @param partitionIdRetriever a function that retrieves the {@link IntermediateResultPartitionID} for a given index. @return a {@link ConsumedSubpartitionContext} instance constructed from the input parameters.
buildConsumedSubpartitionContext
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/deployment/ConsumedSubpartitionContext.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/deployment/ConsumedSubpartitionContext.java
Apache-2.0
public JobInformation getJobInformation() throws IOException, ClassNotFoundException { if (jobInformation != null) { return jobInformation; } if (serializedJobInformation instanceof NonOffloaded) { NonOffloaded<JobInformation> jobInformation = (NonOffloaded<JobInformation>) serializedJobInformation; return jobInformation.serializedValue.deserializeValue(getClass().getClassLoader()); } throw new IllegalStateException( "Trying to work with offloaded serialized job information."); }
Return the sub task's job information. @return job information (may throw {@link IllegalStateException} if {@link #loadBigData} is not called beforehand). @throws IllegalStateException If job information is offloaded to BLOB store.
getJobInformation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptor.java
Apache-2.0
public TaskInformation getTaskInformation() throws IOException, ClassNotFoundException { if (taskInformation != null) { return taskInformation; } if (serializedTaskInformation instanceof NonOffloaded) { NonOffloaded<TaskInformation> taskInformation = (NonOffloaded<TaskInformation>) serializedTaskInformation; return taskInformation.serializedValue.deserializeValue(getClass().getClassLoader()); } throw new IllegalStateException( "Trying to work with offloaded serialized task information."); }
Return the sub task's task information. @return task information (may throw {@link IllegalStateException} if {@link #loadBigData} is not called beforehand)). @throws IllegalStateException If job information is offloaded to BLOB store.
getTaskInformation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptor.java
Apache-2.0
public int getSubtaskIndex() { return executionId.getSubtaskIndex(); }
Returns the task's index in the subtask group. @return the task's index in the subtask group
getSubtaskIndex
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptor.java
Apache-2.0
public static PartitionLocationConstraint fromJobType(JobType jobType) { switch (jobType) { case BATCH: return CAN_BE_UNKNOWN; case STREAMING: return MUST_BE_KNOWN; default: throw new IllegalArgumentException( String.format( "Unknown JobType %s. Cannot derive partition location constraint for it.", jobType)); } }
Defines whether the partition's location must be known at deployment time or can be unknown and, therefore, updated later.
fromJobType
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptorFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptorFactory.java
Apache-2.0
public ShuffleDescriptorAndIndex[] getShuffleDescriptors() { return shuffleDescriptors; }
A set of shuffle descriptors that will be serialized together.
getShuffleDescriptors
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptorFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptorFactory.java
Apache-2.0
@Override public MaybeOffloaded<ShuffleDescriptorGroup> serializeAndTryOffloadShuffleDescriptor( ShuffleDescriptorGroup shuffleDescriptorGroup, int numConsumer) throws IOException { final CompressedSerializedValue<ShuffleDescriptorGroup> compressedSerializedValue = CompressedSerializedValue.fromObject(shuffleDescriptorGroup); final Either<SerializedValue<ShuffleDescriptorGroup>, PermanentBlobKey> serializedValueOrBlobKey = shouldOffload( shuffleDescriptorGroup.getShuffleDescriptors(), numConsumer) ? BlobWriter.offloadWithException( compressedSerializedValue, jobID, blobWriter) : Either.Left(compressedSerializedValue); if (serializedValueOrBlobKey.isLeft()) { return new TaskDeploymentDescriptor.NonOffloaded<>(serializedValueOrBlobKey.left()); } else { return new TaskDeploymentDescriptor.Offloaded<>(serializedValueOrBlobKey.right()); } }
Serialize and try offload shuffle descriptors. @param shuffleDescriptorGroup to serialize @param numConsumer consumers number of these shuffle descriptors, it means how many times serialized shuffle descriptor should be sent @return offloaded or non-offloaded serialized shuffle descriptors
serializeAndTryOffloadShuffleDescriptor
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptorFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptorFactory.java
Apache-2.0
private boolean shouldOffload( ShuffleDescriptorAndIndex[] shuffleDescriptorsToSerialize, int numConsumers) { return shuffleDescriptorsToSerialize.length * numConsumers >= offloadShuffleDescriptorsThreshold; }
Determine whether shuffle descriptors should be offloaded to blob server. @param shuffleDescriptorsToSerialize shuffle descriptors to serialize @param numConsumers how many consumers this serialized shuffle descriptor should be sent @return whether shuffle descriptors should be offloaded to blob server
shouldOffload
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptorFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptorFactory.java
Apache-2.0
private CompletableFuture<Void> createDirtyJobResultEntryIfMissingAsync( AccessExecutionGraph executionGraph, boolean hasCleanJobResultEntry) { final JobID jobId = executionGraph.getJobID(); if (hasCleanJobResultEntry) { log.warn("Job {} is already marked as clean but clean up was triggered again.", jobId); return FutureUtils.completedVoidFuture(); } else { return jobResultStore .hasDirtyJobResultEntryAsync(jobId) .thenCompose( hasDirtyJobResultEntry -> createDirtyJobResultEntryAsync( executionGraph, hasDirtyJobResultEntry)); } }
Creates a dirty entry in the {@link #jobResultStore} if there's no entry at all for the given {@code executionGraph} in the {@code JobResultStore}. @param executionGraph The {@link AccessExecutionGraph} for which the {@link JobResult} shall be persisted. @param hasCleanJobResultEntry The decision the dirty entry check is based on. @return {@code CompletableFuture} that completes as soon as the entry exists.
createDirtyJobResultEntryIfMissingAsync
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/Dispatcher.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/Dispatcher.java
Apache-2.0
default CompletableFuture<String> triggerSavepointAndGetLocation( JobID jobId, String targetDirectory, SavepointFormatType formatType, TriggerSavepointMode savepointMode, @RpcTimeout Duration timeout) { throw new UnsupportedOperationException(); }
Triggers a savepoint with the given savepoint directory as a target, returning a future that completes with the savepoint location when it is complete. @param jobId the job id @param targetDirectory Target directory for the savepoint. @param formatType Binary format of the savepoint. @param savepointMode context of the savepoint operation @param timeout Timeout for the asynchronous operation @return Future which is completed once the operation is triggered successfully
triggerSavepointAndGetLocation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/DispatcherGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/DispatcherGateway.java
Apache-2.0
default CompletableFuture<String> stopWithSavepointAndGetLocation( JobID jobId, String targetDirectory, SavepointFormatType formatType, TriggerSavepointMode savepointMode, @RpcTimeout final Duration timeout) { throw new UnsupportedOperationException(); }
Stops the job with a savepoint, returning a future that completes with the savepoint location when the savepoint is completed. @param jobId the job id @param targetDirectory Target directory for the savepoint. @param savepointMode context of the savepoint operation @param timeout for the rpc call @return Future which is completed with the savepoint location once it is completed
stopWithSavepointAndGetLocation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/DispatcherGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/DispatcherGateway.java
Apache-2.0
default CompletableFuture<Long> triggerCheckpointAndGetCheckpointID( final JobID jobId, final CheckpointType checkpointType, final Duration timeout) { throw new UnsupportedOperationException(); }
Triggers a checkpoint, returning a future that completes with the checkpoint id when it is complete. @param jobId the job id @param checkpointType checkpoint type of this checkpoint (configured / full / incremental) @param timeout Timeout for the asynchronous operation @return Future which is completed once the operation is triggered successfully
triggerCheckpointAndGetCheckpointID
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/DispatcherGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/DispatcherGateway.java
Apache-2.0
public UUID toUUID() { return new UUID(getUpperPart(), getLowerPart()); }
Fencing token of the {@link Dispatcher}.
toUUID
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/DispatcherId.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/DispatcherId.java
Apache-2.0
public static DispatcherId fromUuid(UUID uuid) { return new DispatcherId(uuid); }
Creates a new DispatcherId that corresponds to the UUID.
fromUuid
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/DispatcherId.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/DispatcherId.java
Apache-2.0
static HistoryServerArchivist createHistoryServerArchivist( Configuration configuration, JsonArchivist jsonArchivist, Executor ioExecutor) { final String configuredArchivePath = configuration.get(JobManagerOptions.ARCHIVE_DIR); if (configuredArchivePath != null) { final Path archivePath = new Path(configuredArchivePath); return new JsonResponseHistoryServerArchivist(jsonArchivist, archivePath, ioExecutor); } else { return VoidHistoryServerArchivist.INSTANCE; } }
Archives the given {@link ExecutionGraphInfo} on the history server. @param executionGraphInfo to store on the history server @return Future which is completed once the archiving has been completed.
createHistoryServerArchivist
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/HistoryServerArchivist.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/HistoryServerArchivist.java
Apache-2.0
@Override public CompletableFuture<Acknowledge> archiveExecutionGraph( ExecutionGraphInfo executionGraphInfo) { return CompletableFuture.runAsync( ThrowingRunnable.unchecked( () -> FsJobArchivist.archiveJob( archivePath, executionGraphInfo.getJobId(), jsonArchivist.archiveJsonWithPath( executionGraphInfo))), ioExecutor) .thenApply(ignored -> Acknowledge.get()); }
Implementation which archives an {@link AccessExecutionGraph} such that it stores the JSON requests for all possible history server requests.
archiveExecutionGraph
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/JsonResponseHistoryServerArchivist.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/JsonResponseHistoryServerArchivist.java
Apache-2.0
@Override public JobManagerRunnerRegistry getWrappedDelegate() { return this.delegate; }
Returns the delegated {@link JobManagerRunnerRegistry}. This method can be used to workaround the main thread safeguard.
getWrappedDelegate
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/OnMainThreadJobManagerRunnerRegistry.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/OnMainThreadJobManagerRunnerRegistry.java
Apache-2.0
@Override public StandaloneDispatcher createDispatcher( RpcService rpcService, DispatcherId fencingToken, Collection<ExecutionPlan> recoveredJobs, Collection<JobResult> recoveredDirtyJobResults, DispatcherBootstrapFactory dispatcherBootstrapFactory, PartialDispatcherServicesWithJobPersistenceComponents partialDispatcherServicesWithJobPersistenceComponents) throws Exception { // create the default dispatcher return new StandaloneDispatcher( rpcService, fencingToken, recoveredJobs, recoveredDirtyJobResults, dispatcherBootstrapFactory, DispatcherServices.from( partialDispatcherServicesWithJobPersistenceComponents, JobMasterServiceLeadershipRunnerFactory.INSTANCE, CheckpointResourcesCleanupRunnerFactory.INSTANCE)); }
{@link DispatcherFactory} which creates a {@link StandaloneDispatcher}.
createDispatcher
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/SessionDispatcherFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/SessionDispatcherFactory.java
Apache-2.0
public boolean isTerminalMode() { return this == CANCEL_WITH_SAVEPOINT || this == TERMINATE_WITH_SAVEPOINT; }
Whether the operation will result in a globally terminal job status.
isTerminalMode
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/TriggerSavepointMode.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/TriggerSavepointMode.java
Apache-2.0
@Override public CompletableFuture<Acknowledge> archiveExecutionGraph(ExecutionGraphInfo executionGraph) { return CompletableFuture.completedFuture(Acknowledge.get()); }
No-op implementation of the {@link HistoryServerArchivist}.
archiveExecutionGraph
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/VoidHistoryServerArchivist.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/VoidHistoryServerArchivist.java
Apache-2.0
@Override public CheckpointResourcesCleanupRunner create( JobResult jobResult, CheckpointRecoveryFactory checkpointRecoveryFactory, Configuration configuration, Executor cleanupExecutor) { return new CheckpointResourcesCleanupRunner( jobResult, checkpointRecoveryFactory, SharedStateRegistry.DEFAULT_FACTORY, configuration, cleanupExecutor, System.currentTimeMillis()); }
{@code CheckpointResourcesCleanupRunnerFactory} implements {@link CleanupRunnerFactory} providing a factory method for creating {@link CheckpointResourcesCleanupRunner} instances.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/cleanup/CheckpointResourcesCleanupRunnerFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/cleanup/CheckpointResourcesCleanupRunnerFactory.java
Apache-2.0
public Builder<T> withPrioritizedCleanup(String label, T prioritizedCleanup) { this.prioritizedCleanup.add(new CleanupWithLabel<>(prioritizedCleanup, label)); return this; }
Prioritized cleanups run before their regular counterparts. This method enables the caller to model dependencies between cleanup tasks. The order in which cleanable resources are added matters, i.e. if two cleanable resources are added as prioritized cleanup tasks, the resource being added first will block the cleanup of the second resource. All prioritized cleanup resources will run and finish before any resource that is added using {@link #withRegularCleanup(String, Object)} is started. @param label The label being used when logging errors in the given cleanup. @param prioritizedCleanup The cleanup callback that is going to be prioritized.
withPrioritizedCleanup
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/cleanup/DefaultResourceCleaner.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/cleanup/DefaultResourceCleaner.java
Apache-2.0
private CompletableFuture<Void> closeClusterComponent( ApplicationStatus applicationStatus, ShutdownBehaviour shutdownBehaviour, @Nullable String diagnostics) { synchronized (lock) { if (clusterComponent != null) { switch (shutdownBehaviour) { case GRACEFUL_SHUTDOWN: return clusterComponent.stopApplication(applicationStatus, diagnostics); case PROCESS_FAILURE: default: return clusterComponent.stopProcess(); } } else { return CompletableFuture.completedFuture(null); } } }
Close cluster components and deregister the Flink application from the resource management system by signalling the {@link ResourceManager}. @param applicationStatus to terminate the application with @param shutdownBehaviour shutdown behaviour @param diagnostics additional information about the shut down, can be {@code null} @return Future which is completed once the shut down
closeClusterComponent
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypoint.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypoint.java
Apache-2.0
public static <T> T parseParametersOrExit( String[] args, ParserResultFactory<T> parserResultFactory, Class<?> mainClass) { final CommandLineParser<T> commandLineParser = new CommandLineParser<>(parserResultFactory); try { return commandLineParser.parse(args); } catch (Exception e) { LOG.error("Could not parse command line arguments {}.", args, e); commandLineParser.printHelp(mainClass.getSimpleName()); System.exit(ClusterEntrypoint.STARTUP_FAILURE_RETURN_CODE); } return null; }
Parses passed String array using the parameter definitions of the passed {@code ParserResultFactory}. The method will call {@code System.exit} and print the usage information to stdout in case of a parsing error. @param args The String array that shall be parsed. @param parserResultFactory The {@code ParserResultFactory} that collects the parameter parsing instructions. @param mainClass The main class initiating the parameter parsing. @param <T> The parsing result type. @return The parsing result.
parseParametersOrExit
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
Apache-2.0
public static int getPoolSize(Configuration config) { final int poolSize = config.get( ClusterOptions.CLUSTER_IO_EXECUTOR_POOL_SIZE, 4 * Hardware.getNumberCPUCores()); Preconditions.checkArgument( poolSize > 0, "Illegal pool size (%s) of io-executor, please re-configure '%s'.", poolSize, ClusterOptions.CLUSTER_IO_EXECUTOR_POOL_SIZE.key()); return poolSize; }
Gets and verify the io-executor pool size based on configuration. @param config The configuration to read. @return The legal io-executor pool size.
getPoolSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
Apache-2.0
public static void configureUncaughtExceptionHandler(Configuration config) { Thread.setDefaultUncaughtExceptionHandler( new ClusterUncaughtExceptionHandler( config.get(ClusterOptions.UNCAUGHT_EXCEPTION_HANDLING))); }
Sets the uncaught exception handler for current thread based on configuration. @param config the configuration to read.
configureUncaughtExceptionHandler
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
Apache-2.0
public static DeterminismEnvelope<WorkingDirectory> createTaskManagerWorkingDirectory( Configuration configuration, DeterminismEnvelope<ResourceID> envelopedResourceId) throws IOException { return envelopedResourceId.map( resourceId -> WorkingDirectory.create( generateTaskManagerWorkingDirectoryFile( configuration, resourceId))); }
Creates the working directory for the TaskManager process. This method ensures that the working directory exists. @param configuration to extract the required settings from @param envelopedResourceId identifying the TaskManager process @return working directory @throws IOException if the working directory could not be created
createTaskManagerWorkingDirectory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
Apache-2.0
@VisibleForTesting public static File generateTaskManagerWorkingDirectoryFile( Configuration configuration, ResourceID resourceId) { return generateWorkingDirectoryFile( configuration, Optional.of(ClusterOptions.TASK_MANAGER_PROCESS_WORKING_DIR_BASE), "tm_" + resourceId); }
Generates the working directory {@link File} for the TaskManager process. This method does not ensure that the working directory exists. @param configuration to extract the required settings from @param resourceId identifying the TaskManager process @return working directory file
generateTaskManagerWorkingDirectoryFile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
Apache-2.0
@VisibleForTesting public static File generateJobManagerWorkingDirectoryFile( Configuration configuration, ResourceID resourceId) { return generateWorkingDirectoryFile( configuration, Optional.of(ClusterOptions.JOB_MANAGER_PROCESS_WORKING_DIR_BASE), "jm_" + resourceId); }
Generates the working directory {@link File} for the JobManager process. This method does not ensure that the working directory exists. @param configuration to extract the required settings from @param resourceId identifying the JobManager process @return working directory file
generateJobManagerWorkingDirectoryFile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
Apache-2.0
public static File generateWorkingDirectoryFile( Configuration configuration, Optional<ConfigOption<String>> workingDirOption, String workingDirectoryName) { final Optional<String> optionalWorkingDirectory = workingDirOption.flatMap(configuration::getOptional); final File workingDirectoryBase = optionalWorkingDirectory .map(File::new) .orElseGet( () -> { final File tempDirectory = ConfigurationUtils.getRandomTempDirectory( configuration); LOG.debug( "Picked {} randomly from the configured temporary directories to be used as working directory base.", tempDirectory); return tempDirectory; }); return new File(workingDirectoryBase, workingDirectoryName); }
Generate the working directory from the given configuration. If a working dir option is specified, then this config option will be read first. At last, {@link CoreOptions#TMP_DIRS} will be used to extract the working directory base from. @param configuration to extract the working directory from @param workingDirOption optional working dir option @param workingDirectoryName name of the working directory to create @return working directory
generateWorkingDirectoryFile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
Apache-2.0
public static DeterminismEnvelope<WorkingDirectory> createJobManagerWorkingDirectory( Configuration configuration, DeterminismEnvelope<ResourceID> envelopedResourceId) throws IOException { return envelopedResourceId.map( resourceId -> WorkingDirectory.create( generateJobManagerWorkingDirectoryFile(configuration, resourceId))); }
Creates the working directory for the JobManager process. This method ensures that the working diretory exists. @param configuration to extract the required settings from @param envelopedResourceId identifying the TaskManager process @return working directory @throws IOException if the working directory could not be created
createJobManagerWorkingDirectory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/ClusterEntrypointUtils.java
Apache-2.0
/**
 * Creates the store for finished jobs' execution graph info, selected via
 * {@code JobManagerOptions.JOB_STORE_TYPE}: a file-backed store (first configured temp
 * directory, size-bounded cache) or a purely in-memory store. Both share expiration time
 * and maximum capacity settings.
 */
@Override protected ExecutionGraphInfoStore createSerializableExecutionGraphStore( Configuration configuration, ScheduledExecutor scheduledExecutor) throws IOException { final JobManagerOptions.JobStoreType jobStoreType = configuration.get(JobManagerOptions.JOB_STORE_TYPE); final Duration expirationTime = Duration.ofSeconds(configuration.get(JobManagerOptions.JOB_STORE_EXPIRATION_TIME)); final int maximumCapacity = configuration.get(JobManagerOptions.JOB_STORE_MAX_CAPACITY); switch (jobStoreType) { case File: { final File tmpDir = new File(ConfigurationUtils.parseTempDirectories(configuration)[0]); final long maximumCacheSizeBytes = configuration.get(JobManagerOptions.JOB_STORE_CACHE_SIZE); return new FileExecutionGraphInfoStore( tmpDir, expirationTime, maximumCapacity, maximumCacheSizeBytes, scheduledExecutor, Ticker.systemTicker()); } case Memory: { return new MemoryExecutionGraphInfoStore( expirationTime, maximumCapacity, scheduledExecutor, Ticker.systemTicker()); } default: { throw new IllegalArgumentException( "Unsupported job store type " + jobStoreType); } } }
Base class for session cluster entry points.
createSerializableExecutionGraphStore
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/SessionClusterEntrypoint.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/SessionClusterEntrypoint.java
Apache-2.0
/** Returns the configured user classpath URLs. */
protected Collection<URL> getUserClassPaths() { return userClassPaths; }
User classpaths in relative form to the working directory.
getUserClassPaths
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/component/AbstractUserClassPathJobGraphRetriever.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/component/AbstractUserClassPathJobGraphRetriever.java
Apache-2.0
/**
 * Deregisters the Flink application from the resource management system with the given
 * status and diagnostics, as part of the internal shutdown sequence.
 *
 * @param applicationStatus status to terminate the application with
 * @param diagnostics additional shut-down information, may be {@code null}
 * @return future completed once the shutdown has finished
 */
public CompletableFuture<Void> stopApplication( final ApplicationStatus applicationStatus, final @Nullable String diagnostics) { return internalShutdown( () -> resourceManagerService.deregisterApplication(applicationStatus, diagnostics)); }
Deregister the Flink application from the resource management system by signalling the {@link ResourceManager} and also stop the process. @param applicationStatus to terminate the application with @param diagnostics additional information about the shut down, can be {@code null} @return Future which is completed once the shut down has been completed
stopApplication
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/component/DispatcherResourceManagerComponent.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/component/DispatcherResourceManagerComponent.java
Apache-2.0
/** Returns whether this execution state is terminal, i.e. FINISHED, CANCELED or FAILED. */
public boolean isTerminal() {
    switch (this) {
        case FINISHED:
        case CANCELED:
        case FAILED:
            return true;
        default:
            return false;
    }
}
Restoring last possible valid state of the task if it has it.
isTerminal
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/execution/ExecutionState.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/execution/ExecutionState.java
Apache-2.0
/**
 * Registers a new lease on the class loader for the given job, creating the cache entry on
 * first use. Entry creation and lease acquisition happen under the shared lock so the
 * reference count stays consistent with the cache contents.
 */
@Override public ClassLoaderLease registerClassLoaderLease(JobID jobId) { synchronized (lockObject) { return cacheEntries .computeIfAbsent(jobId, jobID -> new LibraryCacheEntry(jobId)) .obtainLease(); } }
If true, it will use system class loader when the jars and classpaths of job are empty.
registerClassLoaderLease
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/execution/librarycache/BlobLibraryCacheManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/execution/librarycache/BlobLibraryCacheManager.java
Apache-2.0
/**
 * Gets the number of tasks currently holding {@link ClassLoader} references for the given
 * job; 0 if the job has no cache entry.
 *
 * @param jobId ID of the job
 * @return number of reference holders
 */
int getNumberOfReferenceHolders(JobID jobId) { synchronized (lockObject) { LibraryCacheEntry entry = cacheEntries.get(jobId); return entry == null ? 0 : entry.getReferenceCount(); } }
Gets the number of tasks holding {@link ClassLoader} references for the given job. @param jobId ID of a job @return number of reference holders
getNumberOfReferenceHolders
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/execution/librarycache/BlobLibraryCacheManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/execution/librarycache/BlobLibraryCacheManager.java
Apache-2.0
/**
 * Creates an {@link ArchivedExecutionGraph} from the given {@link ExecutionGraph} without
 * overriding its job status.
 *
 * @param executionGraph graph to archive
 * @return the archived execution graph
 */
public static ArchivedExecutionGraph createFrom(ExecutionGraph executionGraph) { return createFrom(executionGraph, null); }
Create a {@link ArchivedExecutionGraph} from the given {@link ExecutionGraph}. @param executionGraph to create the ArchivedExecutionGraph from @return ArchivedExecutionGraph created from the given ExecutionGraph
createFrom
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ArchivedExecutionGraph.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ArchivedExecutionGraph.java
Apache-2.0
/**
 * Creates an {@link ArchivedExecutionGraph} from the given {@link ExecutionGraph}, archiving
 * each job vertex in topological order. If {@code statusOverride} is given it must be a
 * non-globally-terminal status; in that case the timestamps of globally-terminal states are
 * cleared so the archived graph stays internally consistent.
 *
 * @param executionGraph graph to archive
 * @param statusOverride optional non-globally-terminal status replacing the graph's state
 * @return the archived execution graph
 */
public static ArchivedExecutionGraph createFrom( ExecutionGraph executionGraph, @Nullable JobStatus statusOverride) { Preconditions.checkArgument( statusOverride == null || !statusOverride.isGloballyTerminalState(), "Status override is only allowed for non-globally-terminal states."); Map<JobVertexID, ArchivedExecutionJobVertex> archivedTasks = new HashMap<>(); List<ArchivedExecutionJobVertex> archivedVerticesInCreationOrder = new ArrayList<>(); for (ExecutionJobVertex task : executionGraph.getVerticesTopologically()) { ArchivedExecutionJobVertex archivedTask = task.archive(); archivedVerticesInCreationOrder.add(archivedTask); archivedTasks.put(task.getJobVertexId(), archivedTask); } final Map<String, SerializedValue<OptionalFailure<Object>>> serializedUserAccumulators = executionGraph.getAccumulatorsSerialized(); final long[] timestamps = new long[JobStatus.values().length]; // if the state is overridden with a non-globally-terminal state then we need to erase // traces of globally-terminal states for consistency final boolean clearGloballyTerminalStateTimestamps = statusOverride != null; for (JobStatus jobStatus : JobStatus.values()) { final int ordinal = jobStatus.ordinal(); if (!(clearGloballyTerminalStateTimestamps && jobStatus.isGloballyTerminalState())) { timestamps[ordinal] = executionGraph.getStatusTimestamp(jobStatus); } } return new ArchivedExecutionGraph( executionGraph.getJobID(), executionGraph.getJobName(), archivedTasks, archivedVerticesInCreationOrder, timestamps, statusOverride == null ? 
executionGraph.getState() : statusOverride, executionGraph.getJobType(), executionGraph.getFailureInfo(), executionGraph.getPlan(), executionGraph.getAccumulatorResultsStringified(), serializedUserAccumulators, executionGraph.getArchivedExecutionConfig(), executionGraph.isStoppable(), executionGraph.getCheckpointCoordinatorConfiguration(), executionGraph.getCheckpointStatsSnapshot(), executionGraph.getStateBackendName().orElse(null), executionGraph.getCheckpointStorageName().orElse(null), executionGraph.isChangelogStateBackendEnabled(), executionGraph.getChangelogStorageName().orElse(null), executionGraph.getStreamGraphJson(), executionGraph.getPendingOperatorCount()); }
Create a {@link ArchivedExecutionGraph} from the given {@link ExecutionGraph}. @param executionGraph to create the ArchivedExecutionGraph from @param statusOverride optionally overrides the JobStatus of the ExecutionGraph with a non-globally-terminal state and clears timestamps of globally-terminal states @return ArchivedExecutionGraph created from the given ExecutionGraph
createFrom
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ArchivedExecutionGraph.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ArchivedExecutionGraph.java
Apache-2.0
/**
 * Returns the attempt count recorded for the given subtask, or 0 for indices beyond the
 * stored list (i.e. subtasks with no recorded attempts). Negative indices are rejected.
 */
@Override public int getAttemptCount(int subtaskIndex) { Preconditions.checkArgument(subtaskIndex >= 0); if (subtaskIndex >= attemptCounts.size()) { return 0; } return attemptCounts.get(subtaskIndex); }
Simple container for subtask attempt counts backed by a list.
getAttemptCount
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/DefaultSubtaskAttemptNumberStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/DefaultSubtaskAttemptNumberStore.java
Apache-2.0
/**
 * Connects the consumer {@link ExecutionJobVertex} to the consumed {@link IntermediateResult}
 * according to the result's {@link DistributionPattern} (POINTWISE or ALL_TO_ALL), using the
 * precomputed input info from the graph.
 *
 * @param vertex downstream consumer vertex
 * @param intermediateResult upstream consumed result
 */
static void connectVertexToResult( ExecutionJobVertex vertex, IntermediateResult intermediateResult) { final DistributionPattern distributionPattern = intermediateResult.getConsumingDistributionPattern(); final JobVertexInputInfo jobVertexInputInfo = vertex.getGraph() .getJobVertexInputInfo(vertex.getJobVertexId(), intermediateResult.getId()); switch (distributionPattern) { case POINTWISE: connectPointwise(vertex, intermediateResult, jobVertexInputInfo); break; case ALL_TO_ALL: connectAllToAll(vertex, intermediateResult); break; default: throw new IllegalArgumentException("Unrecognized distribution pattern."); } }
Calculate the connections between {@link ExecutionJobVertex} and {@link IntermediateResult} * based on the {@link DistributionPattern}. @param vertex the downstream consumer {@link ExecutionJobVertex} @param intermediateResult the upstream consumed {@link IntermediateResult}
connectVertexToResult
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/EdgeManagerBuildUtil.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/EdgeManagerBuildUtil.java
Apache-2.0
/**
 * Instantiates an {@code ErrorInfo} tolerating a {@code null} cause (see FLINK-21376):
 * a missing cause is replaced by a generic exception before construction.
 *
 * @param exception the error cause, possibly {@code null}
 * @param timestamp the time the error was noticed
 * @return an {@code ErrorInfo} with a non-null cause
 */
public static ErrorInfo createErrorInfoWithNullableCause(
        @Nullable Throwable exception, long timestamp) {
    final Throwable nonNullCause = handleMissingThrowable(exception);
    return new ErrorInfo(nonNullCause, timestamp);
}
Instantiates an {@code ErrorInfo} to cover inconsistent behavior due to FLINK-21376. @param exception The error cause that might be {@code null}. @param timestamp The timestamp the error was noticed. @return a {@code ErrorInfo} containing a generic {@link FlinkException} in case of a missing error cause.
createErrorInfoWithNullableCause
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ErrorInfo.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ErrorInfo.java
Apache-2.0
/**
 * Utility covering FLINK-21376: passes a non-null throwable through unchanged and
 * substitutes a generic {@link FlinkException} when no cause was provided.
 *
 * @param throwable the actual exception, possibly {@code null}
 * @return the given throwable, or a substitute {@link FlinkException} if it was {@code null}
 */
public static Throwable handleMissingThrowable(@Nullable Throwable throwable) {
    if (throwable != null) {
        return throwable;
    }
    return new FlinkException(
            "Unknown cause for Execution failure (this might be caused by FLINK-21376).");
}
Utility method to cover FLINK-21376. @param throwable The actual exception. @return a {@link FlinkException} if no exception was passed.
handleMissingThrowable
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ErrorInfo.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ErrorInfo.java
Apache-2.0
/** Returns the serialized form of the original exception. */
public SerializedThrowable getException() { return exception; }
Returns the serialized form of the original exception.
getException
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ErrorInfo.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ErrorInfo.java
Apache-2.0
/** Returns the contained exception's full stringified stack trace. */
public String getExceptionAsString() { return exception.getFullStringifiedStackTrace(); }
Returns the contained exception as a string. @return failure causing exception as a string, or {@code "(null)"}
getExceptionAsString
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ErrorInfo.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ErrorInfo.java
Apache-2.0
/**
 * Sets the state to restore for this execution; it is later shipped to the TaskManager via
 * the task deployment descriptor.
 *
 * @param taskRestore information to restore the state from
 */
public void setInitialState(JobManagerTaskRestore taskRestore) { this.taskRestore = taskRestore; }
Sets the initial state for the execution. The serialized state is then shipped via the {@link TaskDeploymentDescriptor} to the TaskManagers. @param taskRestore information to restore the state
setInitialState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
Apache-2.0
/**
 * Returns the future that completes once the execution reaches INITIALIZING or RUNNING;
 * it never completes if those states are never reached (e.g. early cancellation).
 */
public CompletableFuture<?> getInitializingOrRunningFuture() { return initializingOrRunningFuture; }
Gets a future that completes once the task execution reaches one of the states {@link ExecutionState#INITIALIZING} or {@link ExecutionState#RUNNING}. If this task never reaches these states (for example because the task is cancelled before it was properly deployed and restored), then this future will never complete. <p>The future is completed already in the {@link ExecutionState#INITIALIZING} state, because various running actions are already possible in that state (the task already accepts and sends events and network data for task recovery). (Note that in earlier versions, the INITIALIZING state was not separate but part of the RUNNING state). <p>This future is always completed from the job master's main thread.
getInitializingOrRunningFuture
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
Apache-2.0
/**
 * Returns the future completed (with the reached state) once this execution enters a
 * terminal state.
 */
@Override public CompletableFuture<ExecutionState> getTerminalStateFuture() { return terminalStateFuture; }
Gets a future that completes once the task execution reaches a terminal state. The future will be completed with specific state that the execution reached. This future is always completed from the job master's main thread. @return A future which is completed once the execution reaches a terminal state
getTerminalStateFuture
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
Apache-2.0
/**
 * Fails this execution due to an external condition, moving it towards state FAILED.
 *
 * @param t the exception that caused the failure
 */
@Override public void fail(Throwable t) { processFail(t, true); }
This method fails the vertex due to an external condition. The task will move to state FAILED. If the task was in state RUNNING or DEPLOYING before, it will send a cancel call to the TaskManager. @param t The exception that caused the task to fail.
fail
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
Apache-2.0
/**
 * Notifies the task of this execution about an aborted checkpoint. A no-op (besides debug
 * logging) when no slot is assigned, which indicates the execution is no longer running.
 *
 * @param abortCheckpointId id of the aborted checkpoint
 * @param latestCompletedCheckpointId id of the latest completed checkpoint
 * @param timestamp timestamp of the aborted checkpoint
 */
public void notifyCheckpointAborted( long abortCheckpointId, long latestCompletedCheckpointId, long timestamp) { final LogicalSlot slot = assignedResource; if (slot != null) { final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway(); taskManagerGateway.notifyCheckpointAborted( attemptId, getVertex().getJobId(), abortCheckpointId, latestCompletedCheckpointId, timestamp); } else { LOG.debug( "The execution has no slot assigned. This indicates that the execution is " + "no longer running."); } }
Notify the task of this execution about an aborted checkpoint. @param abortCheckpointId of the aborted checkpoint @param latestCompletedCheckpointId of the latest completed checkpoint @param timestamp of the aborted checkpoint
notifyCheckpointAborted
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
Apache-2.0
/**
 * Sends the operator event to the task on the TaskExecutor. Must be called from the job
 * master's main thread. Fails the returned future with a {@link TaskNotRunningException}
 * when no slot is assigned or the execution is neither RUNNING nor INITIALIZING.
 *
 * @return future acknowledging delivery, or completed exceptionally if the task is not running
 */
public CompletableFuture<Acknowledge> sendOperatorEvent( OperatorID operatorId, SerializedValue<OperatorEvent> event) { assertRunningInJobMasterMainThread(); final LogicalSlot slot = assignedResource; if (slot != null && (getState() == RUNNING || getState() == INITIALIZING)) { final TaskExecutorOperatorEventGateway eventGateway = slot.getTaskManagerGateway(); return eventGateway.sendOperatorEventToTask(getAttemptId(), operatorId, event); } else { return FutureUtils.completedExceptionally( new TaskNotRunningException( '"' + vertex.getTaskNameWithSubtaskIndex() + "\" is not running, but in state " + getState())); } }
Sends the operator event to the Task on the Task Executor. @return True, if the message was sent, false if the task is currently not running.
sendOperatorEvent
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
Apache-2.0
/**
 * Sends an RPC to the assigned TaskManager updating the given partition infos. The result
 * is handled asynchronously on the job master main thread; an RPC failure fails this
 * execution. A no-op when no slot is assigned.
 *
 * @param partitionInfos partition infos for the remote task
 */
private void sendUpdatePartitionInfoRpcCall(final Iterable<PartitionInfo> partitionInfos) { final LogicalSlot slot = assignedResource; if (slot != null) { final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway(); final TaskManagerLocation taskManagerLocation = slot.getTaskManagerLocation(); CompletableFuture<Acknowledge> updatePartitionsResultFuture = taskManagerGateway.updatePartitions(attemptId, partitionInfos, rpcTimeout); updatePartitionsResultFuture.whenCompleteAsync( (ack, failure) -> { // fail if there was a failure if (failure != null) { fail( new IllegalStateException( "Update to task [" + getVertexWithAttempt() + "] on TaskManager " + taskManagerLocation + " failed", failure)); } }, getVertex().getExecutionGraphAccessor().getJobMasterMainThreadExecutor()); } }
Update the partition infos on the assigned resource. @param partitionInfos for the remote task
sendUpdatePartitionInfoRpcCall
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
Apache-2.0
/**
 * Initializes the given job vertex, computing its vertex input infos from all intermediate
 * results of the graph.
 *
 * @param ejv the job vertex to initialize
 * @param createTimestamp creation timestamp to record
 * @throws JobException if the initialization fails
 */
default void initializeJobVertex(ExecutionJobVertex ejv, long createTimestamp) throws JobException { initializeJobVertex( ejv, createTimestamp, VertexInputInfoComputationUtils.computeVertexInputInfos( ejv, getAllIntermediateResults()::get)); }
Updates the state of one of the ExecutionVertex's Execution attempts. If the new status is "FINISHED", this also updates the accumulators. @param state The state update. @return True, if the task update was properly applied, false, if the execution attempt was not found.
initializeJobVertex
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java
Apache-2.0
/**
 * Returns whether the given result partition is available, i.e. whether the mapped partition
 * id is currently tracked by the partition tracker.
 */
@Override public boolean isAvailable(final IntermediateResultPartitionID resultPartitionID) { return partitionTracker.isPartitionTracked(partitionIDMapper.apply(resultPartitionID)); }
The tracker that tracks all available result partitions.
isAvailable
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraphResultPartitionAvailabilityChecker.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraphResultPartitionAvailabilityChecker.java
Apache-2.0
/**
 * Checks whether any registered execution's job vertex has a blocking output edge.
 *
 * @param graph the execution graph to inspect
 * @return true if at least one output edge in the graph is blocking
 */
public static boolean isAnyOutputBlocking(ExecutionGraph graph) {
    return graph.getRegisteredExecutions().values().stream()
            .map(execution -> execution.getVertex().getJobVertex().getJobVertex())
            .anyMatch(jobVertex -> jobVertex.isAnyOutputBlocking());
}
@return Whether there is any blocking output edge in the execution graph.
isAnyOutputBlocking
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraphUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraphUtils.java
Apache-2.0
/** Cancels all currently running vertex executions of this job vertex. */
public void cancel() { for (ExecutionVertex ev : getTaskVertices()) { ev.cancel(); } }
Cancels all currently running vertex executions.
cancel
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionJobVertex.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionJobVertex.java
Apache-2.0
/**
 * Computes an "aggregated" display state for a job vertex from per-state subtask counts
 * (indexed by {@code ExecutionState.ordinal()}). Priority order: FAILED, then CANCELING,
 * CANCELED, INITIALIZING, RUNNING; FINISHED only when all {@code parallelism} subtasks are
 * finished (otherwise RUNNING); anything else collapses to CREATED. Not used for
 * coordination, only for summaries (e.g. dashboards).
 *
 * @param verticesPerState number of subtasks per state, one slot per ExecutionState value
 * @param parallelism the parallelism of the job vertex
 * @return the aggregate state
 */
public static ExecutionState getAggregateJobVertexState( int[] verticesPerState, int parallelism) { if (verticesPerState == null || verticesPerState.length != ExecutionState.values().length) { throw new IllegalArgumentException( "Must provide an array as large as there are execution states."); } if (verticesPerState[ExecutionState.FAILED.ordinal()] > 0) { return ExecutionState.FAILED; } if (verticesPerState[ExecutionState.CANCELING.ordinal()] > 0) { return ExecutionState.CANCELING; } else if (verticesPerState[ExecutionState.CANCELED.ordinal()] > 0) { return ExecutionState.CANCELED; } else if (verticesPerState[ExecutionState.INITIALIZING.ordinal()] > 0) { return ExecutionState.INITIALIZING; } else if (verticesPerState[ExecutionState.RUNNING.ordinal()] > 0) { return ExecutionState.RUNNING; } else if (verticesPerState[ExecutionState.FINISHED.ordinal()] > 0) { return verticesPerState[ExecutionState.FINISHED.ordinal()] == parallelism ? ExecutionState.FINISHED : ExecutionState.RUNNING; } else { // all else collapses under created return ExecutionState.CREATED; } }
A utility function that computes an "aggregated" state for the vertex. <p>This state is not used anywhere in the coordination, but can be used for display in dashboards as a summary for how the particular parallel operation represented by this ExecutionJobVertex is currently behaving. <p>For example, if at least one parallel task is failed, the aggregate state is failed. If not, and at least one parallel task is cancelling (or cancelled), the aggregate state is cancelling (or cancelled). If all tasks are finished, the aggregate state is finished, and so on. @param verticesPerState The number of vertices in each state (indexed by the ordinal of the ExecutionState values). @param parallelism The parallelism of the ExecutionJobVertex @return The aggregate state of this ExecutionJobVertex.
getAggregateJobVertexState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionJobVertex.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionJobVertex.java
Apache-2.0
/**
 * Returns the preferred location for the current execution attempt based on restored state:
 * the last known location, but only when the attempt actually has state to restore;
 * otherwise empty.
 */
public Optional<TaskManagerLocation> getPreferredLocationBasedOnState() { // only restore to same execution if it has state if (currentExecution.getTaskRestore() != null && currentExecution.getTaskRestore().getTaskStateSnapshot().hasState()) { return findLastLocation(); } return Optional.empty(); }
Gets the preferred location to execute the current task execution attempt, based on the state that the execution attempt will resume.
getPreferredLocationBasedOnState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java
Apache-2.0
/** Returns the subpartition groups (partition range to subpartition range) this subtask consumes. */
public Map<IndexRange, IndexRange> getConsumedSubpartitionGroups() { return consumedSubpartitionGroups; }
Get the subpartition groups this subtask should consume.
getConsumedSubpartitionGroups
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertexInputInfo.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertexInputInfo.java
Apache-2.0
/**
 * Merges overlapping or directly adjacent {@link IndexRange}s from the given collection.
 * The ranges are ordered by start index, then consecutive ranges whose bounds overlap or
 * touch are coalesced into one.
 *
 * @param ranges ranges to merge; may be null or empty
 * @return non-overlapping, consolidated ranges; empty list for null/empty input
 */
public static List<IndexRange> mergeIndexRanges(Collection<IndexRange> ranges) {
    final List<IndexRange> merged = new ArrayList<>();
    if (ranges == null || ranges.isEmpty()) {
        return merged;
    }
    final List<IndexRange> ordered = new ArrayList<>(ranges);
    ordered.sort(Comparator.comparingInt(IndexRange::getStartIndex));
    IndexRange pending = ordered.get(0);
    for (int idx = 1; idx < ordered.size(); idx++) {
        final IndexRange candidate = ordered.get(idx);
        if (candidate.getStartIndex() <= pending.getEndIndex() + 1) {
            // overlapping or adjacent: extend the pending range
            pending =
                    new IndexRange(
                            pending.getStartIndex(),
                            Math.max(pending.getEndIndex(), candidate.getEndIndex()));
        } else {
            merged.add(pending);
            pending = candidate;
        }
    }
    merged.add(pending);
    return merged;
}
Merges overlapping or consecutive {@link IndexRange} instances from the given collection. <p>The method sorts the provided ranges by their start index, then iteratively merges ranges that either overlap or are directly adjacent. The result is a list of non-overlapping and consolidated {@link IndexRange} instances. @param ranges the collection of {@link IndexRange} instances to merge. @return a list of merged {@link IndexRange} instances. If the input is null or empty, an empty list is returned.
mergeIndexRanges
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/IndexRangeUtil.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/IndexRangeUtil.java
Apache-2.0
/** Returns the per-subpartition byte counts of this result partition snapshot. */
public long[] getSubpartitionBytes() { return subpartitionBytes; }
This class represents a snapshot of the result partition bytes metrics.
getSubpartitionBytes
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ResultPartitionBytes.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ResultPartitionBytes.java
Apache-2.0
/**
 * Merges all given {@link ResultPartitionBytes} by summing them per subpartition.
 * All inputs must have the same number of subpartitions; otherwise an
 * {@link IllegalArgumentException} is thrown.
 *
 * @param partitions non-empty list of partition byte snapshots
 * @return a new snapshot holding the per-subpartition sums
 */
public static ResultPartitionBytes mergeAll(List<ResultPartitionBytes> partitions) {
    checkArgument(!partitions.isEmpty());
    final int numSubpartitions = partitions.get(0).getSubpartitionBytes().length;
    final long[] summed = new long[numSubpartitions];
    for (ResultPartitionBytes partition : partitions) {
        final long[] bytes = partition.getSubpartitionBytes();
        if (bytes.length != numSubpartitions) {
            throw new IllegalArgumentException(
                    "only all ResultPartitionBytes with the same length can be merged");
        }
        for (int i = 0; i < numSubpartitions; i++) {
            summed[i] += bytes[i];
        }
    }
    return new ResultPartitionBytes(summed);
}
Merge all {@link ResultPartitionBytes} by sum up them per-subpartition.
mergeAll
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ResultPartitionBytes.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ResultPartitionBytes.java
Apache-2.0
/**
 * Computes the {@link JobVertexInputInfo} for a POINTWISE edge, distributing subpartitions
 * evenly over downstream subtasks. When sources outnumber targets, each target consumes a
 * contiguous range of source partitions; otherwise each source partition is split across a
 * contiguous range of consumers.
 *
 * @param sourceCount upstream parallelism
 * @param targetCount downstream parallelism
 * @param numOfSubpartitionsRetriever retriever for the number of subpartitions per partition
 * @param isDynamicGraph whether the graph is dynamic
 * @return the computed {@link JobVertexInputInfo}
 */
public static JobVertexInputInfo computeVertexInputInfoForPointwise( int sourceCount, int targetCount, Function<Integer, Integer> numOfSubpartitionsRetriever, boolean isDynamicGraph) { final List<ExecutionVertexInputInfo> executionVertexInputInfos = new ArrayList<>(); if (sourceCount >= targetCount) { for (int index = 0; index < targetCount; index++) { int start = index * sourceCount / targetCount; int end = (index + 1) * sourceCount / targetCount; IndexRange partitionRange = new IndexRange(start, end - 1); IndexRange subpartitionRange = computeConsumedSubpartitionRange( index, 1, () -> numOfSubpartitionsRetriever.apply(start), isDynamicGraph, false, false); executionVertexInputInfos.add( new ExecutionVertexInputInfo(index, partitionRange, subpartitionRange)); } } else { for (int partitionNum = 0; partitionNum < sourceCount; partitionNum++) { int start = (partitionNum * targetCount + sourceCount - 1) / sourceCount; int end = ((partitionNum + 1) * targetCount + sourceCount - 1) / sourceCount; int numConsumers = end - start; IndexRange partitionRange = new IndexRange(partitionNum, partitionNum); // Variable used in lambda expression should be final or effectively final final int finalPartitionNum = partitionNum; for (int i = start; i < end; i++) { IndexRange subpartitionRange = computeConsumedSubpartitionRange( i, numConsumers, () -> numOfSubpartitionsRetriever.apply(finalPartitionNum), isDynamicGraph, false, false); executionVertexInputInfos.add( new ExecutionVertexInputInfo(i, partitionRange, subpartitionRange)); } } } return new JobVertexInputInfo(executionVertexInputInfos); }
Compute the {@link JobVertexInputInfo} for a {@link DistributionPattern#POINTWISE} edge. This computation algorithm will evenly distribute subpartitions to downstream subtasks according to the number of subpartitions. Different downstream subtasks consume roughly the same number of subpartitions. @param sourceCount the parallelism of upstream @param targetCount the parallelism of downstream @param numOfSubpartitionsRetriever a retriever to get the number of subpartitions @param isDynamicGraph whether is dynamic graph @return the computed {@link JobVertexInputInfo}
computeVertexInputInfoForPointwise
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/VertexInputInfoComputationUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/VertexInputInfoComputationUtils.java
Apache-2.0
/**
 * Computes the {@link JobVertexInputInfo} for an ALL_TO_ALL edge: every consumer reads all
 * source partitions, and the subpartition range per consumer is derived from its index.
 *
 * @param sourceCount upstream parallelism
 * @param targetCount downstream parallelism
 * @param numOfSubpartitionsRetriever retriever for the number of subpartitions per partition
 * @param isDynamicGraph whether the graph is dynamic
 * @param isBroadcast whether the edge is broadcast
 * @param isSingleSubpartitionContainsAllData whether a single subpartition contains all data
 * @return the computed {@link JobVertexInputInfo}
 */
public static JobVertexInputInfo computeVertexInputInfoForAllToAll( int sourceCount, int targetCount, Function<Integer, Integer> numOfSubpartitionsRetriever, boolean isDynamicGraph, boolean isBroadcast, boolean isSingleSubpartitionContainsAllData) { final List<ExecutionVertexInputInfo> executionVertexInputInfos = new ArrayList<>(); IndexRange partitionRange = new IndexRange(0, sourceCount - 1); for (int i = 0; i < targetCount; ++i) { IndexRange subpartitionRange = computeConsumedSubpartitionRange( i, targetCount, () -> numOfSubpartitionsRetriever.apply(0), isDynamicGraph, isBroadcast, isSingleSubpartitionContainsAllData); executionVertexInputInfos.add( new ExecutionVertexInputInfo(i, partitionRange, subpartitionRange)); } return new JobVertexInputInfo(executionVertexInputInfos); }
Compute the {@link JobVertexInputInfo} for a {@link DistributionPattern#ALL_TO_ALL} edge. This computation algorithm will evenly distribute subpartitions to downstream subtasks according to the number of subpartitions. Different downstream subtasks consume roughly the same number of subpartitions. @param sourceCount the parallelism of upstream @param targetCount the parallelism of downstream @param numOfSubpartitionsRetriever a retriever to get the number of subpartitions @param isDynamicGraph whether is dynamic graph @param isBroadcast whether the edge is broadcast @param isSingleSubpartitionContainsAllData whether single subpartition contains all data @return the computed {@link JobVertexInputInfo}
computeVertexInputInfoForAllToAll
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/VertexInputInfoComputationUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/VertexInputInfoComputationUtils.java
Apache-2.0
/**
 * Stores a {@link JobVertexInputInfo} under (job vertex id, result id). An existing entry
 * for the same pair is kept (putIfAbsent semantics).
 *
 * @param jobVertexId the job vertex id
 * @param resultId the intermediate result id
 * @param info the input info to store
 */
public void put( JobVertexID jobVertexId, IntermediateDataSetID resultId, JobVertexInputInfo info) { checkNotNull(jobVertexId); checkNotNull(resultId); checkNotNull(info); jobVertexInputInfos.compute( jobVertexId, (ignored, inputInfos) -> { if (inputInfos == null) { inputInfos = new HashMap<>(); } inputInfos.putIfAbsent(resultId, info); return inputInfos; }); }
Put a {@link JobVertexInputInfo}. @param jobVertexId the job vertex id @param resultId the intermediate result id @param info the {@link JobVertexInputInfo} to put
put
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/VertexInputInfoStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/VertexInputInfoStore.java
Apache-2.0
/**
 * Looks up the {@link JobVertexInputInfo} registered for the given vertex/result pair.
 *
 * @param jobVertexId the job vertex id
 * @param resultId the intermediate result id
 * @return the stored {@link JobVertexInputInfo}; never {@code null}
 */
public JobVertexInputInfo get(JobVertexID jobVertexId, IntermediateDataSetID resultId) {
    checkNotNull(jobVertexId);
    checkNotNull(resultId);
    // Both lookups are expected to succeed; a missing entry indicates a programming error.
    final Map<IntermediateDataSetID, JobVertexInputInfo> infosByResult =
            jobVertexInputInfos.get(jobVertexId);
    return checkNotNull(infosByResult.get(resultId));
}
Get a {@link JobVertexInputInfo}. @param jobVertexId the job vertex id @param resultId the intermediate result id @return the {@link JobVertexInputInfo} identified by the job vertex id and intermediate result id
get
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/VertexInputInfoStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/VertexInputInfoStore.java
Apache-2.0
/**
 * Computes the failure handling result for a failed execution: either a set of vertices to
 * restart plus a restart delay, or the reason why the failure is unrecoverable.
 *
 * @param failedExecution the failed execution
 * @param cause cause of the task failure
 * @param timestamp time of the task failure
 * @return the result of handling this failure
 */
public FailureHandlingResult getFailureHandlingResult(
        Execution failedExecution, Throwable cause, long timestamp) {
    // Ask the failover strategy which tasks are affected by this individual task failure.
    final Set<ExecutionVertexID> tasksToRestart =
            failoverStrategy.getTasksNeedingRestart(
                    failedExecution.getVertex().getID(), cause);
    return handleFailureAndReport(failedExecution, cause, timestamp, tasksToRestart, false);
}
Return result of failure handling. Can be a set of task vertices to restart and a delay of the restarting. Or that the failure is not recoverable and the reason for it. @param failedExecution is the failed execution @param cause of the task failure @param timestamp of the task failure @return result of the failure handling
getFailureHandlingResult
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/ExecutionFailureHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/ExecutionFailureHandler.java
Apache-2.0
/**
 * Creates a new {@link ExponentialDelayRestartBackoffTimeStrategy} configured with this
 * factory's clock, backoff bounds, multiplier, reset threshold, jitter and attempt limit.
 */
@Override
public RestartBackoffTimeStrategy create() {
    return new ExponentialDelayRestartBackoffTimeStrategy(
            clock,
            initialBackoffMS,
            maxBackoffMS,
            backoffMultiplier,
            resetBackoffThresholdMS,
            jitterFactor,
            attemptsBeforeResetBackoff);
}
The factory for creating {@link ExponentialDelayRestartBackoffTimeStrategy}.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/ExponentialDelayRestartBackoffTimeStrategy.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/ExponentialDelayRestartBackoffTimeStrategy.java
Apache-2.0
/**
 * Loads a {@link FailoverStrategy.Factory} according to the configured failover strategy name.
 *
 * @param config configuration specifying which failover strategy factory to load
 * @return the loaded failover strategy factory
 * @throws IllegalConfigurationException if the configured strategy name is unknown
 */
public static FailoverStrategy.Factory loadFailoverStrategyFactory(final Configuration config) {
    checkNotNull(config);

    final String strategyParam = config.get(JobManagerOptions.EXECUTION_FAILOVER_STRATEGY);
    final String normalized = strategyParam.toLowerCase();
    if (FULL_RESTART_STRATEGY_NAME.equals(normalized)) {
        return new RestartAllFailoverStrategy.Factory();
    }
    if (PIPELINED_REGION_RESTART_STRATEGY_NAME.equals(normalized)) {
        return new RestartPipelinedRegionFailoverStrategy.Factory();
    }
    // Report the value exactly as configured (not lower-cased) for easier debugging.
    throw new IllegalConfigurationException("Unknown failover strategy: " + strategyParam);
}
Loads a {@link FailoverStrategy.Factory} from the given configuration. @param config which specifies the failover strategy factory to load @return failover strategy factory loaded
loadFailoverStrategyFactory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailoverStrategyFactoryLoader.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailoverStrategyFactoryLoader.java
Apache-2.0
/**
 * Returns the tasks to restart.
 *
 * @return the tasks to restart
 * @throws IllegalStateException if restarting is suppressed for this result
 */
public Set<ExecutionVertexID> getVerticesToRestart() {
    // Guard clause: a suppressed restart has no meaningful vertex set.
    if (!canRestart()) {
        throw new IllegalStateException(
                "Cannot get vertices to restart when the restarting is suppressed.");
    }
    return verticesToRestart;
}
Returns the tasks to restart. @return the tasks to restart
getVerticesToRestart
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
Apache-2.0
/**
 * Returns the delay (in milliseconds) before the restart is conducted.
 *
 * @return the restart delay in milliseconds
 * @throws IllegalStateException if restarting is suppressed for this result
 */
public long getRestartDelayMS() {
    // Guard clause: a suppressed restart has no meaningful delay.
    if (!canRestart()) {
        throw new IllegalStateException(
                "Cannot get restart delay when the restarting is suppressed.");
    }
    return restartDelayMS;
}
Returns the delay before the restarting. @return the delay before the restarting
getRestartDelayMS
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
Apache-2.0
/**
 * Returns the {@link Execution} that caused this failure, or an empty {@code Optional} if
 * this was a global failure not attributable to a single execution.
 *
 * @return the failed {@code Execution}, or empty for a global failure
 */
public Optional<Execution> getFailedExecution() {
    return failedExecution == null ? Optional.empty() : Optional.of(failedExecution);
}
Returns an {@code Optional} with the {@link Execution} causing this failure or an empty {@code Optional} if it's a global failure. @return The {@code Optional} with the failed {@code Execution} or an empty {@code Optional} if it's a global failure.
getFailedExecution
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
Apache-2.0
/**
 * Returns the reason why restarting cannot be conducted.
 *
 * @return the error preventing a restart, or {@code null} if none was recorded
 */
@Nullable
public Throwable getError() {
    return error;
}
Returns reason why the restarting cannot be conducted. @return reason why the restarting cannot be conducted
getError
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
Apache-2.0
/**
 * Returns the future of labels characterizing this failure (as produced by the configured
 * failure enrichers).
 *
 * @return a {@code CompletableFuture} completing with the label map
 */
public CompletableFuture<Map<String, String>> getFailureLabels() {
    return failureLabels;
}
Returns the labels future associated with the failure. @return the CompletableFuture Map of String labels
getFailureLabels
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
Apache-2.0
/**
 * Returns whether a restart can be conducted. A {@code null} vertex set marks this result
 * as unrecoverable.
 *
 * @return {@code true} if the restart can be conducted
 */
public boolean canRestart() {
    return verticesToRestart != null;
}
Returns whether the restarting can be conducted. @return whether the restarting can be conducted
canRestart
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
Apache-2.0
/**
 * Checks whether this was a global failure, i.e. a "safety net" failover involving all tasks
 * that should also reset components like the coordinators.
 *
 * @return {@code true} if this result stems from a global failure
 */
public boolean isGlobalFailure() {
    return globalFailure;
}
Checks if this failure was a global failure, i.e., coming from a "safety net" failover that involved all tasks and should reset also components like the coordinators.
isGlobalFailure
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
Apache-2.0
/**
 * Creates a result describing a set of tasks to restart to recover from the failure.
 *
 * @param failedExecution the {@link Execution} the failure originates from; {@code null}
 *     indicates the failure was issued by Flink itself
 * @param cause the reason of the failure
 * @param timestamp the time of the failure
 * @param failureLabels labels characterizing the failure, produced by the FailureEnrichers
 * @param verticesToRestart task vertices to restart; {@code null} marks the failure as
 *     non-restartable
 * @param restartDelayMS delay before conducting the restart
 * @param globalFailure whether this is a global failure triggered by the scheduler rather
 *     than an individual task failure
 * @param isRootCause whether this failure is the root cause
 * @return a restartable {@link FailureHandlingResult}
 */
public static FailureHandlingResult restartable(
        @Nullable Execution failedExecution,
        @Nullable Throwable cause,
        long timestamp,
        CompletableFuture<Map<String, String>> failureLabels,
        Set<ExecutionVertexID> verticesToRestart,
        long restartDelayMS,
        boolean globalFailure,
        boolean isRootCause) {
    return new FailureHandlingResult(
            failedExecution,
            cause,
            timestamp,
            failureLabels,
            verticesToRestart,
            restartDelayMS,
            globalFailure,
            isRootCause);
}
Creates a result of a set of tasks to restart to recover from the failure. <p>The result can be flagged to be from a global failure triggered by the scheduler, rather than from the failure of an individual task. @param failedExecution the {@link Execution} that the failure is originating from. Passing {@code null} as a value indicates that the failure was issued by Flink itself. @param cause The reason of the failure. @param timestamp The time of the failure. @param failureLabels Map of labels characterizing the failure produced by the FailureEnrichers. @param verticesToRestart containing task vertices to restart to recover from the failure. {@code null} indicates that the failure is not restartable. @param restartDelayMS indicate a delay before conducting the restart @return result of a set of tasks to restart to recover from the failure
restartable
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
Apache-2.0
/**
 * Creates a result indicating the failure is not recoverable and no restart should be
 * conducted.
 *
 * @param failedExecution the {@link Execution} the failure originates from; {@code null}
 *     indicates the failure was issued by Flink itself
 * @param error the reason why the failure is not recoverable
 * @param timestamp the time of the failure
 * @param failureLabels labels characterizing the failure, produced by the FailureEnrichers
 * @param globalFailure whether this is a global failure triggered by the scheduler rather
 *     than an individual task failure
 * @param isRootCause whether this failure is the root cause
 * @return an unrecoverable {@link FailureHandlingResult}
 */
public static FailureHandlingResult unrecoverable(
        @Nullable Execution failedExecution,
        @Nonnull Throwable error,
        long timestamp,
        CompletableFuture<Map<String, String>> failureLabels,
        boolean globalFailure,
        boolean isRootCause) {
    return new FailureHandlingResult(
            failedExecution, error, timestamp, failureLabels, globalFailure, isRootCause);
}
Creates a result that the failure is not recoverable and no restarting should be conducted. <p>The result can be flagged to be from a global failure triggered by the scheduler, rather than from the failure of an individual task. @param failedExecution the {@link Execution} that the failure is originating from. Passing {@code null} as a value indicates that the failure was issued by Flink itself. @param error reason why the failure is not recoverable @param timestamp The time of the failure. @param failureLabels Map of labels characterizing the failure produced by the FailureEnrichers. @return result indicating the failure is not recoverable
unrecoverable
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureHandlingResult.java
Apache-2.0
/**
 * Creates a new {@link FailureRateRestartBackoffTimeStrategy} using the system clock and
 * this factory's failure-rate settings.
 */
@Override
public RestartBackoffTimeStrategy create() {
    return new FailureRateRestartBackoffTimeStrategy(
            SystemClock.getInstance(),
            maxFailuresPerInterval,
            failuresIntervalMS,
            backoffTimeMS);
}
The factory for creating {@link FailureRateRestartBackoffTimeStrategy}.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureRateRestartBackoffTimeStrategy.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailureRateRestartBackoffTimeStrategy.java
Apache-2.0
/**
 * Creates a new {@link FixedDelayRestartBackoffTimeStrategy} with this factory's attempt
 * limit and backoff time.
 */
@Override
public RestartBackoffTimeStrategy create() {
    return new FixedDelayRestartBackoffTimeStrategy(maxNumberRestartAttempts, backoffTimeMS);
}
The factory for creating {@link FixedDelayRestartBackoffTimeStrategy}.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FixedDelayRestartBackoffTimeStrategy.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FixedDelayRestartBackoffTimeStrategy.java
Apache-2.0
/**
 * Builds the raw pipelined regions by merging, for each vertex, its region with the regions
 * of the producers of all its must-be-pipelined consumed results.
 *
 * <p>Relies on the vertices being topologically sorted so that every producer's region
 * exists before its consumers are processed.
 *
 * @param topologicallySortedVertices vertices in topological order
 * @param getMustBePipelinedConsumedResults function returning, for a vertex, the consumed
 *     results whose consumption must be pipelined
 * @return a map from each vertex to the (shared) set of vertices forming its region
 */
static <V extends Vertex<?, ?, V, R>, R extends Result<?, ?, V, R>>
        Map<V, Set<V>> buildRawRegions(
                final Iterable<? extends V> topologicallySortedVertices,
                final Function<V, Iterable<R>> getMustBePipelinedConsumedResults) {

    final Map<V, Set<V>> vertexToRegion = new IdentityHashMap<>();

    // iterate all the vertices which are topologically sorted
    for (V vertex : topologicallySortedVertices) {
        // Start each vertex in its own singleton region; edges that are not
        // must-be-pipelined never cause a merge, so such vertices stay separate.
        Set<V> currentRegion = new HashSet<>();
        currentRegion.add(vertex);
        vertexToRegion.put(vertex, currentRegion);

        // Each vertex connected through not mustBePipelined consumingConstraint is considered
        // as a
        // single region.
        for (R consumedResult : getMustBePipelinedConsumedResults.apply(vertex)) {
            final V producerVertex = consumedResult.getProducer();
            final Set<V> producerRegion = vertexToRegion.get(producerVertex);

            // Topological order guarantees the producer was already assigned a region;
            // a missing region indicates a bug in the region building.
            if (producerRegion == null) {
                throw new IllegalStateException(
                        "Producer task "
                                + producerVertex.getId()
                                + " failover region is null"
                                + " while calculating failover region for the consumer task "
                                + vertex.getId()
                                + ". This should be a failover region building bug.");
            }

            // check if it is the same as the producer region, if so skip the merge
            // this check can significantly reduce compute complexity in All-to-All
            // PIPELINED edge case
            if (currentRegion != producerRegion) {
                currentRegion =
                        VertexGroupComputeUtil.mergeVertexGroups(
                                currentRegion, producerRegion, vertexToRegion);
            }
        }
    }
    return vertexToRegion;
}
Common utils for computing pipelined regions.
buildRawRegions
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/PipelinedRegionComputeUtil.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/PipelinedRegionComputeUtil.java
Apache-2.0
/**
 * Returns all vertices of the topology on any task failure (restart-all strategy).
 *
 * @param executionVertexId ID of the failed task (unused — every task is restarted)
 * @param cause cause of the failure (unused)
 * @return the IDs of all vertices in the topology
 */
@Override
public Set<ExecutionVertexID> getTasksNeedingRestart(
        ExecutionVertexID executionVertexId, Throwable cause) {
    final Set<ExecutionVertexID> allVertices = new HashSet<>();
    for (SchedulingExecutionVertex vertex : topology.getVertices()) {
        allVertices.add(vertex.getId());
    }
    return allVertices;
}
Returns all vertices on any task failure. @param executionVertexId ID of the failed task @param cause cause of the failure @return set of IDs of vertices to restart
getTasksNeedingRestart
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/RestartAllFailoverStrategy.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/RestartAllFailoverStrategy.java
Apache-2.0
/**
 * Creates a {@link RestartAllFailoverStrategy} for the given topology. The partition
 * availability checker is not needed by this strategy and is ignored.
 */
@Override
public FailoverStrategy create(
        final SchedulingTopology topology,
        final ResultPartitionAvailabilityChecker resultPartitionAvailabilityChecker) {
    return new RestartAllFailoverStrategy(topology);
}
The factory to instantiate {@link RestartAllFailoverStrategy}.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/RestartAllFailoverStrategy.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/RestartAllFailoverStrategy.java
Apache-2.0
/**
 * Returns the IDs of the vertices to restart for this failure. All tasks in 'involved'
 * regions are proposed: the failed task's region is always involved; producers of
 * unavailable input partitions of involved regions are involved; and consumers of involved
 * regions are involved.
 *
 * @param executionVertexId ID of the failed task
 * @param cause cause of the failure
 * @return set of IDs of vertices to restart
 */
@Override
public Set<ExecutionVertexID> getTasksNeedingRestart(
        ExecutionVertexID executionVertexId, Throwable cause) {
    final SchedulingPipelinedRegion failedRegion =
            topology.getPipelinedRegionOfVertex(executionVertexId);
    if (failedRegion == null) {
        // TODO: show the task name in the log
        throw new IllegalStateException(
                "Can not find the failover region for task " + executionVertexId, cause);
    }

    // if the failure cause is data consumption error, mark the corresponding data partition to
    // be failed,
    // so that the failover process will try to recover it
    Optional<PartitionException> dataConsumptionException =
            ExceptionUtils.findThrowable(cause, PartitionException.class);
    if (dataConsumptionException.isPresent()) {
        resultPartitionAvailabilityChecker.markResultPartitionFailed(
                dataConsumptionException.get().getPartitionId().getPartitionId());
    }

    // calculate the tasks to restart based on the result of regions to restart
    Set<ExecutionVertexID> tasksToRestart = new HashSet<>();
    for (SchedulingPipelinedRegion region : getRegionsToRestart(failedRegion)) {
        for (SchedulingExecutionVertex vertex : region.getVertices()) {
            // we do not need to restart tasks which are already in the initial state
            if (vertex.getState() != ExecutionState.CREATED) {
                tasksToRestart.add(vertex.getId());
            }
        }
    }

    // the previous failed partition will be recovered. remove its failed state from the checker
    if (dataConsumptionException.isPresent()) {
        resultPartitionAvailabilityChecker.removeResultPartitionFromFailedState(
                dataConsumptionException.get().getPartitionId().getPartitionId());
    }

    return tasksToRestart;
}
Returns a set of IDs corresponding to the set of vertices that should be restarted. In this strategy, all task vertices in 'involved' regions are proposed to be restarted. The 'involved' regions are calculated with rules below: 1. The region containing the failed task is always involved 2. If an input result partition of an involved region is not available, i.e. Missing or Corrupted, the region containing the partition producer task is involved 3. If a region is involved, all of its consumer regions are involved @param executionVertexId ID of the failed task @param cause cause of the failure @return set of IDs of vertices to restart
getTasksNeedingRestart
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategy.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategy.java
Apache-2.0
/**
 * Returns the failover (pipelined) region that contains the given execution vertex.
 *
 * @param vertexID ID of the execution vertex to look up
 * @return the failover region containing the given execution vertex
 */
@VisibleForTesting
public SchedulingPipelinedRegion getFailoverRegion(ExecutionVertexID vertexID) {
    return topology.getPipelinedRegionOfVertex(vertexID);
}
Returns the failover region that contains the given execution vertex. @return the failover region that contains the given execution vertex
getFailoverRegion
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategy.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategy.java
Apache-2.0
/**
 * Creates a {@link RestartPipelinedRegionFailoverStrategy} for the given topology and
 * partition availability checker.
 */
@Override
public FailoverStrategy create(
        final SchedulingTopology topology,
        final ResultPartitionAvailabilityChecker resultPartitionAvailabilityChecker) {
    return new RestartPipelinedRegionFailoverStrategy(
            topology, resultPartitionAvailabilityChecker);
}
The factory to instantiate {@link RestartPipelinedRegionFailoverStrategy}.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategy.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategy.java
Apache-2.0
@Override public Long getValue() { final JobStatus status = jobStatusProvider.getState(); // not running any more -> finished or not on leader if (status.isTerminalState()) { return NO_LONGER_RUNNING; } final long runningTimestamp = jobStatusProvider.getStatusTimestamp(JobStatus.RUNNING); final long failingTimestamp = jobStatusProvider.getStatusTimestamp(JobStatus.FAILING); if (failingTimestamp <= runningTimestamp) { return NOT_FAILING; } else { // we use 'Math.max' here to avoid negative timestamps when clocks change return Math.max(System.currentTimeMillis() - failingTimestamp, 0); } }
A gauge that returns (in milliseconds) how long a job has not been not running any more, in case it is in a failing/recovering situation. Running jobs return naturally a value of zero. <p>For jobs that have never run (new not yet scheduled jobs) or jobs that have run again after failing, this gauge returns {@value NOT_FAILING}, and for jobs that are not running any more, it returns {@value NO_LONGER_RUNNING}.
getValue
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/metrics/DownTimeGauge.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/metrics/DownTimeGauge.java
Apache-2.0
@Override public Long getValue() { final JobStatus status = jobStatusProvider.getState(); if (status == JobStatus.RUNNING) { // running right now - report the uptime final long runningTimestamp = jobStatusProvider.getStatusTimestamp(JobStatus.RUNNING); // we use 'Math.max' here to avoid negative timestamps when clocks change return Math.max(System.currentTimeMillis() - runningTimestamp, 0); } else if (status.isTerminalState()) { // not running any more -> finished or not on leader return NO_LONGER_RUNNING; } else { // not yet running or not up at the moment return 0L; } }
A gauge that returns (in milliseconds) how long a job has been running. <p>For jobs that are not running any more, it returns {@value NO_LONGER_RUNNING}.
getValue
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/metrics/UpTimeGauge.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/metrics/UpTimeGauge.java
Apache-2.0
/**
 * Returns the set of enabled external resource names from the configuration. The sentinel
 * value {@link ExternalResourceOptions#NONE} (the default) yields an empty set.
 */
private static Set<String> getExternalResourceSet(Configuration config) {
    final String rawValue = config.getValue(ExternalResourceOptions.EXTERNAL_RESOURCE_LIST);
    if (rawValue.equals(ExternalResourceOptions.NONE)) {
        return Collections.emptySet();
    }
    return new HashSet<>(config.get(ExternalResourceOptions.EXTERNAL_RESOURCE_LIST));
}
Get the enabled external resource list from configuration.
getExternalResourceSet
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/externalresource/ExternalResourceUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/externalresource/ExternalResourceUtils.java
Apache-2.0
/**
 * Gets the deployment-specific configuration keys of all enabled external resources, indexed
 * by resource name. Resources without a valid key are ignored; if two resources map to the
 * same configuration key, the later one wins and the earlier one is dropped.
 *
 * @param config configuration
 * @param suffix suffix of the config option for the deployment-specific configuration key
 * @return map from resource name to the deployment-specific configuration key
 */
public static Map<String, String> getExternalResourceConfigurationKeys(
        Configuration config, String suffix) {
    final Set<String> resourceSet = getExternalResourceSet(config);
    // Reverse index (config key -> resource name) used to detect duplicate keys.
    final Map<String, String> configKeysToResourceNameMap = new HashMap<>();
    LOG.info("Enabled external resources: {}", resourceSet);

    if (resourceSet.isEmpty()) {
        return Collections.emptyMap();
    }

    final Map<String, String> externalResourceConfigs = new HashMap<>();
    for (String resourceName : resourceSet) {
        final ConfigOption<String> configKeyOption =
                key(ExternalResourceOptions.getSystemConfigKeyConfigOptionForResource(
                                resourceName, suffix))
                        .stringType()
                        .noDefaultValue();
        final String configKey = config.get(configKeyOption);

        if (StringUtils.isNullOrWhitespaceOnly(configKey)) {
            LOG.warn(
                    "Could not find valid {} for {}. Will ignore that resource.",
                    configKeyOption.key(),
                    resourceName);
        } else {
            // If another resource already claimed this key, drop its entry so only the
            // current resource keeps the key.
            configKeysToResourceNameMap.compute(
                    configKey,
                    (ignored, previousResource) -> {
                        if (previousResource != null) {
                            LOG.warn(
                                    "Duplicate config key {} occurred for external resources, the one named {} will overwrite the value.",
                                    configKey,
                                    resourceName);
                            externalResourceConfigs.remove(previousResource);
                        }
                        return resourceName;
                    });
            externalResourceConfigs.put(resourceName, configKey);
        }
    }

    return externalResourceConfigs;
}
Get the external resource configuration keys map, indexed by the resource name. The configuration key should be used for deployment specific container request. @param config Configurations @param suffix suffix of config option for deployment specific configuration key @return external resource configuration keys map, map the resource name to the configuration key for deployment * specific container request
getExternalResourceConfigurationKeys
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/externalresource/ExternalResourceUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/externalresource/ExternalResourceUtils.java
Apache-2.0
/**
 * Gets the configured amount of each enabled external resource, indexed by resource name.
 * Resources with a missing or non-positive amount are logged and ignored.
 *
 * @param config configuration
 * @return map from resource name to its configured (positive) amount
 */
@VisibleForTesting
static Map<String, Long> getExternalResourceAmountMap(Configuration config) {
    final Set<String> resourceSet = getExternalResourceSet(config);

    if (resourceSet.isEmpty()) {
        return Collections.emptyMap();
    }

    final Map<String, Long> externalResourceAmountMap = new HashMap<>();
    for (String resourceName : resourceSet) {
        final ConfigOption<Long> amountOption =
                key(ExternalResourceOptions.getAmountConfigOptionForResource(resourceName))
                        .longType()
                        .noDefaultValue();
        final Optional<Long> amountOpt = config.getOptional(amountOption);
        if (!amountOpt.isPresent()) {
            LOG.warn(
                    "The amount of the {} should be configured. Will ignore that resource.",
                    resourceName);
        } else if (amountOpt.get() <= 0) {
            // Bug fix: the original passed (amount, resourceName) into a message whose
            // placeholders expected (resourceName, amount), producing e.g.
            // "The amount of the 0 should be positive while finding gpu."
            LOG.warn(
                    "The amount of the {} should be positive, but is {}. Will ignore that resource.",
                    resourceName,
                    amountOpt.get());
        } else {
            externalResourceAmountMap.put(resourceName, amountOpt.get());
        }
    }

    return externalResourceAmountMap;
}
Get the map of resource name and amount of all of enabled external resources.
getExternalResourceAmountMap
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/externalresource/ExternalResourceUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/externalresource/ExternalResourceUtils.java
Apache-2.0
/**
 * Gets the collection of all enabled external resources, each carrying its configured amount.
 *
 * @param config configuration
 * @return the enabled external resources
 */
public static Collection<ExternalResource> getExternalResourcesCollection(
        Configuration config) {
    final Map<String, Long> amountsByName = getExternalResourceAmountMap(config);
    final List<ExternalResource> resources = new ArrayList<>(amountsByName.size());
    for (Map.Entry<String, Long> entry : amountsByName.entrySet()) {
        resources.add(new ExternalResource(entry.getKey(), entry.getValue()));
    }
    return resources;
}
Get the collection of all enabled external resources.
getExternalResourcesCollection
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/externalresource/ExternalResourceUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/externalresource/ExternalResourceUtils.java
Apache-2.0
/**
 * Renders the given external resources as a comma-separated {@code name=value} string.
 *
 * @param extendedResources the resources to render
 * @return e.g. {@code "gpu=2, fpga=1"}; empty string for an empty collection
 */
public static String generateExternalResourcesString(
        Collection<ExternalResource> extendedResources) {
    final StringBuilder builder = new StringBuilder();
    for (ExternalResource resource : extendedResources) {
        if (builder.length() > 0) {
            builder.append(", ");
        }
        builder.append(resource.getName()).append('=').append(resource.getValue());
    }
    return builder.toString();
}
Generate the string expression of the given external resources.
generateExternalResourcesString
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/externalresource/ExternalResourceUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/externalresource/ExternalResourceUtils.java
Apache-2.0
/**
 * Instantiates the {@link ExternalResourceDriver ExternalResourceDrivers} for all enabled
 * external resources via the plugin manager, indexed by resource name. Resources whose
 * driver factory is unconfigured, not found, or fails to instantiate are logged and skipped.
 *
 * @param config configuration
 * @param pluginManager plugin manager used to load {@link ExternalResourceDriverFactory}
 *     implementations
 * @return map from resource name to its instantiated driver
 */
@VisibleForTesting
static Map<String, ExternalResourceDriver> externalResourceDriversFromConfig(
        Configuration config, PluginManager pluginManager) {
    final Set<String> resourceSet = getExternalResourceSet(config);
    if (resourceSet.isEmpty()) {
        return Collections.emptyMap();
    }

    // Index all available driver factories by their fully-qualified class name.
    final Iterator<ExternalResourceDriverFactory> factoryIterator =
            pluginManager.load(ExternalResourceDriverFactory.class);
    final Map<String, ExternalResourceDriverFactory> externalResourceFactories =
            new HashMap<>();
    factoryIterator.forEachRemaining(
            externalResourceDriverFactory ->
                    externalResourceFactories.put(
                            externalResourceDriverFactory.getClass().getName(),
                            externalResourceDriverFactory));

    final Map<String, ExternalResourceDriver> externalResourceDrivers = new HashMap<>();
    for (String resourceName : resourceSet) {
        final ConfigOption<String> driverClassOption =
                key(ExternalResourceOptions
                                .getExternalResourceDriverFactoryConfigOptionForResource(
                                        resourceName))
                        .stringType()
                        .noDefaultValue();
        final String driverFactoryClassName = config.get(driverClassOption);
        if (StringUtils.isNullOrWhitespaceOnly(driverFactoryClassName)) {
            LOG.warn(
                    "Could not find driver class name for {}. Please make sure {} is configured.",
                    resourceName,
                    driverClassOption.key());
            continue;
        }
        ExternalResourceDriverFactory externalResourceDriverFactory =
                externalResourceFactories.get(driverFactoryClassName);
        if (externalResourceDriverFactory != null) {
            // Scope the factory's configuration to this resource's parameter prefix.
            DelegatingConfiguration delegatingConfiguration =
                    new DelegatingConfiguration(
                            config,
                            ExternalResourceOptions
                                    .getExternalResourceParamConfigPrefixForResource(
                                            resourceName));
            try {
                externalResourceDrivers.put(
                        resourceName,
                        externalResourceDriverFactory.createExternalResourceDriver(
                                delegatingConfiguration));
                LOG.info("Add external resources driver for {}.", resourceName);
            } catch (Exception e) {
                // NOTE(review): this log message appears mangled in the source (the format
                // string spans a literal line break); preserved byte-for-byte here.
                LOG.warn(
                        "Could not instantiate driver with factory {} for {}. 
{}",
                        driverFactoryClassName,
                        resourceName,
                        e);
            }
        } else {
            LOG.warn(
                    "Could not find factory class {} for {}.",
                    driverFactoryClassName,
                    resourceName);
        }
    }
    return externalResourceDrivers;
}
Instantiate the {@link ExternalResourceDriver ExternalResourceDrivers} for all of enabled external resources. {@link ExternalResourceDriver ExternalResourceDrivers} are mapped to its resource name.
externalResourceDriversFromConfig
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/externalresource/ExternalResourceUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/externalresource/ExternalResourceUtils.java
Apache-2.0
/**
 * Factory method returning a failure-enricher {@link Context} of type
 * {@code FailureType.TASK} for the given parameters.
 *
 * @param jobInfo info of the job the failure belongs to
 * @param metricGroup metric group for the enrichers
 * @param ioExecutor executor for asynchronous enrichment work
 * @param classLoader class loader available to the enrichers
 * @return a task-failure {@link Context}
 */
public static Context forTaskFailure(
        JobInfo jobInfo, MetricGroup metricGroup, Executor ioExecutor, ClassLoader classLoader) {
    return new DefaultFailureEnricherContext(
            jobInfo, metricGroup, FailureType.TASK, ioExecutor, classLoader);
}
Factory method returning a Task failure Context for the given params.
forTaskFailure
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/failure/DefaultFailureEnricherContext.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/failure/DefaultFailureEnricherContext.java
Apache-2.0
/**
 * Factory method returning a failure-enricher {@link Context} of type
 * {@code FailureType.GLOBAL} for the given parameters.
 *
 * @param jobInfo info of the job the failure belongs to
 * @param metricGroup metric group for the enrichers
 * @param ioExecutor executor for asynchronous enrichment work
 * @param classLoader class loader available to the enrichers
 * @return a global-failure {@link Context}
 */
public static Context forGlobalFailure(
        JobInfo jobInfo, MetricGroup metricGroup, Executor ioExecutor, ClassLoader classLoader) {
    return new DefaultFailureEnricherContext(
            jobInfo, metricGroup, FailureType.GLOBAL, ioExecutor, classLoader);
}
Factory method returning a Global failure Context for the given params.
forGlobalFailure
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/failure/DefaultFailureEnricherContext.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/failure/DefaultFailureEnricherContext.java
Apache-2.0