code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/**
 * Builds a {@link RuntimeException} signalling that a state handle of an unexpected type was
 * encountered. This typically indicates that a different StateBackend was used for restoring
 * than the one that took the checkpoint/savepoint.
 *
 * @param expectedStateHandleClasses the handle types that would have been acceptable
 * @param actualStateHandleClass the handle type that was actually found
 * @return an {@link IllegalStateException} describing the mismatch
 */
public static RuntimeException unexpectedStateHandleException(
        Class<? extends StateObject>[] expectedStateHandleClasses,
        Class<? extends StateObject> actualStateHandleClass) {

    final String expected = Joiner.on(", ").join(expectedStateHandleClasses);
    final String message =
            "Unexpected state handle type, expected one of: "
                    + expected
                    + ", but found: "
                    + actualStateHandleClass
                    + ". "
                    + "This can mostly happen when a different StateBackend from the one "
                    + "that was used for taking a checkpoint/savepoint is used when restoring.";
    return new IllegalStateException(message);
}
Creates a {@link RuntimeException} that signals that an operation did not get the type of {@link StateObject} that was expected. This can mostly happen when a different {@link StateBackend} from the one that was used for taking a checkpoint/savepoint is used when restoring.
unexpectedStateHandleException
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/StateUtil.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/StateUtil.java
Apache-2.0
/**
 * Decorates the given stream with compression.
 *
 * <p>IMPORTANT: {@link OutputStream#close()} on the returned stream is NOT propagated to the
 * inner stream; the caller must close {@code stream} separately.
 *
 * @param stream the raw output stream to decorate
 * @return an output stream decorated by the compression scheme
 * @throws IOException if the compressing stream cannot be created
 */
public final OutputStream decorateWithCompression(OutputStream stream) throws IOException {
    final OutputStream nonClosing = new NonClosingOutputStreamDecorator(stream);
    return decorateWithCompression(nonClosing);
}
Decorates the stream by wrapping it into a stream that applies a compression. <p>IMPORTANT: For streams returned by this method, {@link OutputStream#close()} is not propagated to the inner stream. The inner stream must be closed separately. @param stream the stream to decorate. @return an output stream that is decorated by the compression scheme.
decorateWithCompression
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/StreamCompressionDecorator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/StreamCompressionDecorator.java
Apache-2.0
/**
 * Decorates the given stream with decompression.
 *
 * <p>IMPORTANT: {@link InputStream#close()} on the returned stream is NOT propagated to the
 * inner stream; the caller must close {@code stream} separately.
 *
 * @param stream the raw input stream to decorate
 * @return an input stream decorated by the compression scheme
 * @throws IOException if the decompressing stream cannot be created
 */
public final InputStream decorateWithCompression(InputStream stream) throws IOException {
    final InputStream nonClosing = new NonClosingInputStreamDecorator(stream);
    return decorateWithCompression(nonClosing);
}
IMPORTANT: For streams returned by this method, {@link InputStream#close()} is not propagated to the inner stream. The inner stream must be closed separately. @param stream the stream to decorate. @return an input stream that is decorated by the compression scheme.
decorateWithCompression
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/StreamCompressionDecorator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/StreamCompressionDecorator.java
Apache-2.0
/**
 * Returns the path to a file underlying this handle, if any. The default implementation has
 * no backing file and therefore always yields {@link Optional#empty()}.
 *
 * @return path to the underlying file, or empty if this handle is not file-based
 */
default Optional<org.apache.flink.core.fs.Path> maybeGetPath() {
    // No backing file by default; file-based implementations override this.
    return Optional.empty();
}
@return Path to an underlying file represented by this {@link StreamStateHandle} or {@link Optional#empty()} if there is no such file.
maybeGetPath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/StreamStateHandle.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/StreamStateHandle.java
Apache-2.0
private void cleanupAllocationBaseDirs(AllocationID allocationID) { // clear the base dirs for this allocation id. File[] allocationDirectories = allocationBaseDirectories(allocationID); for (File directory : allocationDirectories) { try { FileUtils.deleteFileOrDirectory(directory); } catch (IOException e) { LOG.warn( "Exception while deleting local state directory for allocation id {}.", allocationID, e); } } }
Deletes the base dirs for this allocation id (recursively).
cleanupAllocationBaseDirs
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/TaskExecutorLocalStateStoresManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/TaskExecutorLocalStateStoresManager.java
Apache-2.0
/**
 * Stores a local task state snapshot for the given checkpoint id. A {@code null} snapshot is
 * recorded as a dummy marker. If the store is already disposed, or a snapshot for the same
 * checkpoint id already exists, the superseded snapshot is discarded asynchronously outside
 * the lock.
 *
 * @param checkpointId id of the checkpoint the snapshot belongs to
 * @param localState the snapshot to store, or null
 */
@Override
public void storeLocalState(
        @Nonnegative long checkpointId, @Nullable TaskStateSnapshot localState) {
    // Normalize null to a dummy marker so the map never stores null values.
    if (localState == null) {
        localState = NULL_DUMMY;
    }

    if (LOG.isTraceEnabled()) {
        LOG.trace(
                "Stored local state for checkpoint {} in subtask ({} - {} - {}) : {}.",
                checkpointId,
                jobID,
                jobVertexID,
                subtaskIndex,
                localState);
    } else if (LOG.isDebugEnabled()) {
        LOG.debug(
                "Stored local state for checkpoint {} in subtask ({} - {} - {})",
                checkpointId,
                jobID,
                jobVertexID,
                subtaskIndex);
    }

    // Snapshot to discard after leaving the critical section (late store or replaced entry).
    Tuple2<Long, TaskStateSnapshot> toDiscard = null;

    synchronized (lock) {
        if (disposed) {
            // we ignore late stores and simply discard the state.
            toDiscard = Tuple2.of(checkpointId, localState);
        } else {
            TaskStateSnapshot previous =
                    storedTaskStateByCheckpointID.put(checkpointId, localState);
            persistLocalStateMetadata(checkpointId, localState);

            if (previous != null) {
                // An older snapshot for this checkpoint id was replaced; discard it.
                toDiscard = Tuple2.of(checkpointId, previous);
            }
        }
    }

    if (toDiscard != null) {
        // Discard asynchronously and outside the lock to avoid blocking state updates.
        asyncDiscardLocalStateForCollection(Collections.singletonList(toDiscard));
    }
}
Stores the given local {@link TaskStateSnapshot} under the given checkpoint id, discarding any previously stored snapshot for that id (or discarding immediately if the store is already disposed).
storeLocalState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/TaskLocalStateStoreImpl.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/TaskLocalStateStoreImpl.java
Apache-2.0
/**
 * Discards the given local task state snapshot (if present) and deletes the local state
 * directory of the checkpoint. All failures are logged as warnings; this method never
 * propagates exceptions.
 *
 * @param checkpointID id of the checkpoint whose local state is discarded
 * @param o the snapshot to discard, or empty if none was stored
 */
private void discardLocalStateForCheckpoint(long checkpointID, Optional<TaskStateSnapshot> o) {
    if (LOG.isTraceEnabled()) {
        LOG.trace(
                "Discarding local task state snapshot of checkpoint {} for subtask ({} - {} - {}).",
                checkpointID,
                jobID,
                jobVertexID,
                subtaskIndex);
    } else {
        LOG.debug(
                "Discarding local task state snapshot {} of checkpoint {} for subtask ({} - {} - {}).",
                o,
                checkpointID,
                jobID,
                jobVertexID,
                subtaskIndex);
    }

    // Discard the snapshot's state objects; failures are logged, not rethrown.
    o.ifPresent(
            taskStateSnapshot -> {
                try {
                    taskStateSnapshot.discardState();
                } catch (Exception discardEx) {
                    LOG.warn(
                            "Exception while discarding local task state snapshot of checkpoint {} in subtask ({} - {} - {}).",
                            checkpointID,
                            jobID,
                            jobVertexID,
                            subtaskIndex,
                            discardEx);
                }
            });

    // Independently of the snapshot discard, remove the checkpoint's local directory.
    File checkpointDir = getCheckpointDirectory(checkpointID);
    LOG.debug(
            "Deleting local state directory {} of checkpoint {} for subtask ({} - {} - {}).",
            checkpointDir,
            checkpointID,
            jobID,
            jobVertexID,
            subtaskIndex);
    try {
        deleteDirectory(checkpointDir);
    } catch (IOException ex) {
        LOG.warn(
                "Exception while deleting local state directory of checkpoint {} in subtask ({} - {} - {}).",
                checkpointID,
                jobID,
                jobVertexID,
                subtaskIndex,
                ex);
    }
}
Helper method that discards state objects with an executor and reports exceptions to the log.
discardLocalStateForCheckpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/TaskLocalStateStoreImpl.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/TaskLocalStateStoreImpl.java
Apache-2.0
/**
 * Forwards the checkpoint-complete notification to the local state store so it can confirm
 * (and later prune) local state for the given checkpoint.
 *
 * @param checkpointId id of the completed checkpoint
 * @throws Exception if confirming the checkpoint fails
 */
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
    localStateStore.confirmCheckpoint(checkpointId);
}
Tracking when local state can be confirmed and disposed.
notifyCheckpointComplete
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/TaskStateManagerImpl.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/TaskStateManagerImpl.java
Apache-2.0
/**
 * Returns the backend that actually implements the keyed state operations. The default
 * implementation does not delegate and returns this backend itself.
 *
 * @param recursive true if the call should be resolved recursively through all delegates
 * @return the delegated {@link KeyedStateBackend}, or this backend if nothing is delegated
 */
default KeyedStateBackend<K> getDelegatedKeyedStateBackend(boolean recursive) {
    // No delegation by default.
    return this;
}
@return delegated {@link KeyedStateBackend} if this backend delegates its responsibilities. @param recursive true if the call should be recursive
getDelegatedKeyedStateBackend
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/TestableKeyedStateBackend.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/TestableKeyedStateBackend.java
Apache-2.0
/**
 * Returns an {@link AvailabilityProvider} for this storage. The default implementation
 * reports the storage as always available.
 *
 * @return provider that is permanently available
 */
default AvailabilityProvider getAvailabilityProvider() {
    // Default storages have no backpressure signal; always report available.
    return () -> AvailabilityProvider.AVAILABLE;
}
A storage for changelog. Could produce {@link StateChangelogHandleReader} and {@link StateChangelogWriter} for read and write. Please use {@link StateChangelogStorageLoader} to obtain an instance.
getAvailabilityProvider
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/changelog/StateChangelogStorage.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/changelog/StateChangelogStorage.java
Apache-2.0
/**
 * Returns the name of this backend, which is the name of the backend it delegates to.
 *
 * @return the delegated backend's name
 */
@Override
default String getName() {
    return getDelegatedStateBackend().getName();
}
An interface to delegate state backend. <p>As its name, it should include a state backend to delegate.
getName
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/delegate/DelegatingStateBackend.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/delegate/DelegatingStateBackend.java
Apache-2.0
/**
 * Creates an {@link EmptyFileMergingOperatorStreamStateHandle} that carries no operator state
 * metadata but still references the directories of a file-merging checkpoint.
 *
 * @param taskownedDirHandle handle of the directory where task-owned state is stored
 * @param sharedDirHandle handle of the directory where shared state is stored
 * @return an empty file-merging operator stream state handle
 */
public static EmptyFileMergingOperatorStreamStateHandle create(
        DirectoryStreamStateHandle taskownedDirHandle,
        DirectoryStreamStateHandle sharedDirHandle) {
    return new EmptyFileMergingOperatorStreamStateHandle(
            taskownedDirHandle,
            sharedDirHandle,
            Collections.emptyMap(),
            EmptySegmentFileStateHandle.INSTANCE);
}
Create an empty {@link EmptyFileMergingOperatorStreamStateHandle}. @param taskownedDirHandle the directory where operator state is stored. @param sharedDirHandle the directory where shared state is stored.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filemerging/EmptyFileMergingOperatorStreamStateHandle.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filemerging/EmptyFileMergingOperatorStreamStateHandle.java
Apache-2.0
/**
 * Always fails: this handle is an empty placeholder without an underlying segment file.
 *
 * @return never returns normally
 * @throws IOException always, since no input stream can be opened for an empty handle
 */
@Override
public FSDataInputStream openInputStream() throws IOException {
    // Fix: the original threw UnsupportedEncodingException, a charset-related IOException
    // subclass that is misleading here. A plain IOException states the actual problem and is
    // still caught by callers handling IOException.
    throw new IOException("Cannot open input stream from an EmptySegmentFileStateHandle.");
}
An empty {@link SegmentFileStateHandle} that is only used as a placeholder.
openInputStream
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filemerging/EmptySegmentFileStateHandle.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filemerging/EmptySegmentFileStateHandle.java
Apache-2.0
/**
 * Intentionally a no-op: in a file-merging checkpoint the lifecycle of the underlying
 * physical files is managed elsewhere (see the class documentation), so nothing may be
 * deleted here.
 */
@Override public void discardState() {}
This method should be empty, so that JM is not in charge of the lifecycle of files in a file-merging checkpoint.
discardState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filemerging/SegmentFileStateHandle.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filemerging/SegmentFileStateHandle.java
Apache-2.0
/**
 * Returns the checkpoint base directory, under which jobs create their job-specific
 * subdirectories. May be {@code null} if no directory was configured.
 *
 * @return the checkpoint base directory, or null if not configured
 */
@Nullable
public Path getCheckpointPath() {
    return baseCheckpointPath;
}
Gets the checkpoint base directory. Jobs will create job-specific subdirectories for checkpoints within this directory. May be null, if not configured. @return The checkpoint base directory
getCheckpointPath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFileStateBackend.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFileStateBackend.java
Apache-2.0
/**
 * Creates a file-system based storage location for a savepoint.
 *
 * <p>The base directory is chosen from the explicit pointer if given, otherwise from the
 * configured default savepoint directory; if neither is available the call fails. A randomly
 * named subdirectory is then created, retrying up to 10 times to dodge name collisions or
 * transient file-system errors.
 *
 * @param checkpointId the checkpoint id of the savepoint (unused here)
 * @param externalLocationPointer explicit target location pointer, or null to use the default
 * @return the checkpoint storage location for the savepoint
 * @throws IOException if no savepoint directory could be created after all attempts
 */
@Override
public CheckpointStorageLocation initializeLocationForSavepoint(
        @SuppressWarnings("unused") long checkpointId, @Nullable String externalLocationPointer)
        throws IOException {

    // determine where to write the savepoint to
    final Path savepointBasePath;
    if (externalLocationPointer != null) {
        savepointBasePath = new Path(externalLocationPointer);
    } else if (defaultSavepointDirectory != null) {
        savepointBasePath = defaultSavepointDirectory;
    } else {
        throw new IllegalArgumentException(
                "No savepoint location given and no default location configured.");
    }

    // generate the savepoint directory
    final FileSystem fs = savepointBasePath.getFileSystem();
    // Directory name prefix includes a short job-id fragment for readability.
    final String prefix = "savepoint-" + jobId.toString().substring(0, 6) + '-';

    Exception latestException = null;

    // Retry a bounded number of times; remember the last failure for the final error report.
    for (int attempt = 0; attempt < 10; attempt++) {
        final Path path = new Path(savepointBasePath, FileUtils.getRandomFilename(prefix));

        try {
            if (fs.mkdirs(path)) {
                // we make the path qualified, to make it independent of default schemes and
                // authorities
                final Path qp = path.makeQualified(fs);

                return createSavepointLocation(fs, qp);
            }
        } catch (Exception e) {
            latestException = e;
        }
    }

    throw new IOException(
            "Failed to create savepoint directory at " + savepointBasePath, latestException);
}
Creates a file system based storage location for a savepoint. <p>This methods implements the logic that decides which location to use (given optional parameters for a configured location and a location passed for this specific savepoint) and how to name and initialize the savepoint directory. @param externalLocationPointer The target location pointer for the savepoint. Must be a valid URI. Null, if not supplied. @param checkpointId The checkpoint ID of the savepoint. @return The checkpoint storage location for the savepoint. @throws IOException Thrown if the target directory could not be created.
initializeLocationForSavepoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFsCheckpointStorageAccess.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFsCheckpointStorageAccess.java
Apache-2.0
/**
 * Builds the directory into which a specific job writes its checkpoints, i.e. the parent of
 * the per-checkpoint subdirectories.
 *
 * @param baseCheckpointPath the configured base checkpoint directory
 * @param jobId the id of the job
 * @return the job-specific checkpoint directory
 */
protected static Path getCheckpointDirectoryForJob(Path baseCheckpointPath, JobID jobId) {
    final String jobDirectoryName = jobId.toString();
    return new Path(baseCheckpointPath, jobDirectoryName);
}
Builds the directory into which a specific job checkpoints, meaning the directory inside which it creates the checkpoint-specific subdirectories. <p>This method only succeeds if a base checkpoint directory has been set; otherwise the method fails with an exception. @param jobId The ID of the job @return The job's checkpoint directory @throws UnsupportedOperationException Thrown, if no base checkpoint directory has been set.
getCheckpointDirectoryForJob
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFsCheckpointStorageAccess.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFsCheckpointStorageAccess.java
Apache-2.0
/**
 * Builds the path of the directory holding the data exclusive to one checkpoint.
 *
 * @param baseDirectory the directory into which the job checkpoints
 * @param checkpointId the id (logical timestamp) of the checkpoint
 * @return the checkpoint-specific directory path
 */
protected static Path createCheckpointDirectory(Path baseDirectory, long checkpointId) {
    final String directoryName = CHECKPOINT_DIR_PREFIX + checkpointId;
    return new Path(baseDirectory, directoryName);
}
Creates the directory path for the data exclusive to a specific checkpoint. @param baseDirectory The base directory into which the job checkpoints. @param checkpointId The ID (logical timestamp) of the checkpoint.
createCheckpointDirectory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFsCheckpointStorageAccess.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFsCheckpointStorageAccess.java
Apache-2.0
@Internal public static FsCompletedCheckpointStorageLocation resolveCheckpointPointer( String checkpointPointer) throws IOException { checkNotNull(checkpointPointer, "checkpointPointer"); checkArgument(!checkpointPointer.isEmpty(), "empty checkpoint pointer"); // check if the pointer is in fact a valid file path final Path path; try { path = new Path(checkpointPointer); } catch (Exception e) { throw new IOException( "Checkpoint/savepoint path '" + checkpointPointer + "' is not a valid file URI. " + "Either the pointer path is invalid, or the checkpoint was created by a different state backend."); } // check if the file system can be accessed final FileSystem fs; try { fs = path.getFileSystem(); } catch (IOException e) { throw new IOException( "Cannot access file system for checkpoint/savepoint path '" + checkpointPointer + "'.", e); } final FileStatus status; try { status = fs.getFileStatus(path); } catch (FileNotFoundException e) { throw new FileNotFoundException( "Cannot find checkpoint or savepoint " + "file/directory '" + checkpointPointer + "' on file system '" + fs.getUri().getScheme() + "'."); } // if we are here, the file / directory exists final Path checkpointDir; final FileStatus metadataFileStatus; // If this is a directory, we need to find the meta data file if (status.isDir()) { checkpointDir = status.getPath(); final Path metadataFilePath = new Path(path, METADATA_FILE_NAME); try { metadataFileStatus = fs.getFileStatus(metadataFilePath); } catch (FileNotFoundException e) { throw new FileNotFoundException( "Cannot find meta data file '" + METADATA_FILE_NAME + "' in directory '" + path + "'. 
Please try to load the checkpoint/savepoint " + "directly from the metadata file instead of the directory."); } } else { // this points to a file and we either do no name validation, or // the name is actually correct, so we can return the path metadataFileStatus = status; checkpointDir = status.getPath().getParent(); } final FileStateHandle metaDataFileHandle = new FileStateHandle(metadataFileStatus.getPath(), metadataFileStatus.getLen()); final String pointer = checkpointDir.makeQualified(fs).toString(); return new FsCompletedCheckpointStorageLocation( fs, checkpointDir, metaDataFileHandle, pointer); }
Takes the given string (representing a pointer to a checkpoint) and resolves it to a file status for the checkpoint's metadata file. @param checkpointPointer The pointer to resolve. @return A state handle to checkpoint/savepoint's metadata. @throws IOException Thrown, if the pointer cannot be resolved, the file system not accessed, or the pointer points to a location that does not seem to be a checkpoint/savepoint.
resolveCheckpointPointer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFsCheckpointStorageAccess.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFsCheckpointStorageAccess.java
Apache-2.0
/**
 * Encodes the given path as a location reference: the magic number header followed by the
 * UTF-8 bytes of the path string.
 *
 * @param path the path to encode
 * @return the location reference wrapping the encoded bytes
 */
public static CheckpointStorageLocationReference encodePathAsReference(Path path) {
    final byte[] pathBytes = path.toString().getBytes(StandardCharsets.UTF_8);
    final int headerLen = REFERENCE_MAGIC_NUMBER.length;

    final byte[] encoded = new byte[headerLen + pathBytes.length];
    System.arraycopy(REFERENCE_MAGIC_NUMBER, 0, encoded, 0, headerLen);
    System.arraycopy(pathBytes, 0, encoded, headerLen, pathBytes.length);

    return new CheckpointStorageLocationReference(encoded);
}
Encodes the given path as a reference in bytes. The path is encoded as a UTF-8 string and prepended with a magic number. @param path The path to encode. @return The location reference.
encodePathAsReference
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFsCheckpointStorageAccess.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFsCheckpointStorageAccess.java
Apache-2.0
public static Path decodePathFromReference(CheckpointStorageLocationReference reference) { if (reference.isDefaultReference()) { throw new IllegalArgumentException("Cannot decode default reference"); } final byte[] bytes = reference.getReferenceBytes(); final int headerLen = REFERENCE_MAGIC_NUMBER.length; if (bytes.length > headerLen) { // compare magic number for (int i = 0; i < headerLen; i++) { if (bytes[i] != REFERENCE_MAGIC_NUMBER[i]) { throw new IllegalArgumentException( "Reference starts with the wrong magic number"); } } // covert to string and path try { return new Path( new String( bytes, headerLen, bytes.length - headerLen, StandardCharsets.UTF_8)); } catch (Exception e) { throw new IllegalArgumentException("Reference cannot be decoded to a path", e); } } else { throw new IllegalArgumentException("Reference too short."); } }
Decodes the given reference into a path. This method validates that the reference bytes start with the correct magic number (as written by {@link #encodePathAsReference(Path)}) and converts the remaining bytes back to a proper path. @param reference The bytes representing the reference. @return The path decoded from the reference. @throws IllegalArgumentException Thrown, if the bytes do not represent a proper reference.
decodePathFromReference
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFsCheckpointStorageAccess.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFsCheckpointStorageAccess.java
Apache-2.0
/**
 * Returns the path of the file backing this state handle; always present for file state.
 *
 * @return the file path wrapped in an {@link Optional}
 */
@Override
public Optional<Path> maybeGetPath() {
    return Optional.of(getFilePath());
}
Creates a new file state for the given file path. @param filePath The path to the file that stores the state.
maybeGetPath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FileStateHandle.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FileStateHandle.java
Apache-2.0
/**
 * Discards the state by deleting the file that stores it. The discard is treated as
 * successful if the file is gone afterwards, even when the delete call itself reported
 * failure; otherwise the original failure (or a generic one) is thrown.
 *
 * @throws Exception if the file still exists after a failed deletion attempt
 */
@Override
public void discardState() throws Exception {
    final FileSystem fs = getFileSystem();

    IOException deleteFailure = null;
    boolean deleted = false;
    try {
        deleted = fs.delete(filePath, false);
    } catch (IOException e) {
        deleteFailure = e;
    }

    if (deleted && deleteFailure == null) {
        // Clean deletion, nothing more to do.
        return;
    }

    // Deletion reported failure; only raise if the file actually still exists.
    if (fs.exists(filePath)) {
        if (deleteFailure != null) {
            throw deleteFailure;
        }
        throw new IOException(
                "Unknown error caused the file '" + filePath + "' to not be deleted.");
    }
}
Discard the state by deleting the file that stores the state. @throws Exception Thrown, if the file deletion fails and the file still exists.
discardState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FileStateHandle.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FileStateHandle.java
Apache-2.0
/**
 * Tells whether this stream has been closed.
 *
 * @return true if the stream was closed, false if it is still open
 */
public boolean isClosed() {
    return closed;
}
Checks whether the stream is closed. @return True if the stream was closed, false if it is still open.
isClosed
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FsCheckpointStreamFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FsCheckpointStreamFactory.java
Apache-2.0
/**
 * Unregisters this subtask from the file-merging snapshot manager. Registered with the
 * resource closer of the owning {@code StreamTask}.
 */
@Override
public void close() {
    fileMergingSnapshotManager.unregisterSubtask(subtaskKey);
}
This will be registered to resource closer of {@code StreamTask}.
close
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FsMergingCheckpointStorageAccess.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FsMergingCheckpointStorageAccess.java
Apache-2.0
/**
 * Closes the stream exactly once; repeated calls are no-ops. The closed flag is only set
 * after {@code closeAction()} succeeds, so a failed close is not marked as closed.
 *
 * @throws IOException if the underlying close action fails
 */
final void close() throws IOException {
    if (!closed) {
        closeAction();
        closed = true;
    }
}
Checks whether the output stream is still open. If it has already been closed, this does nothing; otherwise it invokes {@code closeAction()} and marks the stream as closed.
close
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/MetadataOutputStreamWrapper.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/MetadataOutputStreamWrapper.java
Apache-2.0
/**
 * Creates a {@link HashMapStateBackend} and applies the given configuration to it.
 *
 * @param config the configuration to apply
 * @param classLoader class loader used to resolve configured classes
 * @return the configured backend
 * @throws IllegalConfigurationException if the configuration is invalid
 */
@Override
public HashMapStateBackend createFromConfig(ReadableConfig config, ClassLoader classLoader)
        throws IllegalConfigurationException {
    final HashMapStateBackend backend = new HashMapStateBackend();
    return backend.configure(config, classLoader);
}
A factory that creates an {@link HashMapStateBackend} from a configuration.
createFromConfig
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/hashmap/HashMapStateBackendFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/hashmap/HashMapStateBackendFactory.java
Apache-2.0
/**
 * Returns an iterator over all elements of this queue, in no particular order.
 *
 * @return iterator over this queue's elements
 */
@Nonnull
@Override
public CloseableIterator<T> iterator() {
    return new HeapIterator();
}
Returns an iterator over the elements in this queue. The iterator does not return the elements in any particular order. @return an iterator over the elements in this queue.
iterator
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/AbstractHeapPriorityQueue.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/AbstractHeapPriorityQueue.java
Apache-2.0
/**
 * Checks whether the given value is a usable internal array size, i.e. non-negative and not
 * larger than {@code MAX_ARRAY_SIZE}.
 *
 * @param size the candidate array size
 * @return true if the size is valid
 */
private static boolean isValidArraySize(int size) {
    return 0 <= size && size <= MAX_ARRAY_SIZE;
}
Checks whether the given value is a valid internal array size, i.e. non-negative and not larger than the maximum supported array size.
isValidArraySize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/AbstractHeapPriorityQueue.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/AbstractHeapPriorityQueue.java
Apache-2.0
/**
 * Writes the state of one key-group from this snapshot into the given output view and then
 * releases the per-key-group snapshot.
 *
 * @param dov target output view
 * @param keyGroupId the key-group whose state is serialized
 * @throws IOException if writing the state fails
 */
@Override
public void writeStateInKeyGroup(@Nonnull DataOutputView dov, int keyGroupId)
        throws IOException {
    StateMapSnapshot<K, N, S, ? extends StateMap<K, N, S>> stateMapSnapshot =
            getStateMapSnapshotForKeyGroup(keyGroupId);
    try {
        stateMapSnapshot.writeState(
                localKeySerializer,
                localNamespaceSerializer,
                localStateSerializer,
                dov,
                stateSnapshotTransformer);
    } finally {
        // Fix: release in a finally block so the snapshot is not leaked when writeState
        // throws (the original skipped release() on exception).
        stateMapSnapshot.release();
    }
}
{@link CopyOnWriteStateTable} could naturally support a kind of prefix-compressed format (grouping by namespace, writing the namespace only once per group instead for each mapping). We might implement support for different formats later (tailored towards different state table implementations).
writeStateInKeyGroup
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/AbstractStateTableSnapshot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/AbstractStateTableSnapshot.java
Apache-2.0
/**
 * Looks up or creates the entry for the given key/namespace so that a mapping can be added or
 * updated. Entries still referenced by an ongoing snapshot are copied before being handed out
 * for mutation (copy-on-write).
 */
private StateMapEntry<K, N, S> putEntry(K key, N namespace) {
    final int hash = computeHashForOperationAndDoIncrementalRehash(key, namespace);
    final StateMapEntry<K, N, S>[] tab = selectActiveTable(hash);
    int index = hash & (tab.length - 1);

    for (StateMapEntry<K, N, S> e = tab[index]; e != null; e = e.next) {
        if (e.hash == hash && key.equals(e.key) && namespace.equals(e.namespace)) {
            // copy-on-write check for entry: an entry older than the highest snapshot
            // version still in use must be duplicated before it may be mutated.
            if (e.entryVersion < highestRequiredSnapshotVersion) {
                e = handleChainedEntryCopyOnWrite(tab, index, e);
            }
            return e;
        }
    }

    // No existing mapping: this is a structural modification.
    ++modCount;
    if (size() > threshold) {
        doubleCapacity();
    }

    return addNewStateMapEntry(tab, key, namespace, hash);
}
Helper method that is the basis for operations that add mappings.
putEntry
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
Apache-2.0
/**
 * Allocates a table of the given capacity and sets the resize threshold to 3/4 of the
 * capacity. Once the maximum capacity is reached, the threshold is pinned so the map stops
 * growing; if the map is also overfull, an exception aborts the job.
 *
 * @param newCapacity the new table capacity; must be a power of two
 * @return the newly allocated entry table
 */
private StateMapEntry<K, N, S>[] makeTable(int newCapacity) {
    if (newCapacity < MAXIMUM_CAPACITY) {
        threshold = (newCapacity >> 1) + (newCapacity >> 2); // 3/4 capacity
    } else {
        if (size() > MAX_ARRAY_SIZE) {
            throw new IllegalStateException(
                    "Maximum capacity of CopyOnWriteStateMap is reached and the job "
                            + "cannot continue. Please consider scaling-out your job or using a different keyed state backend "
                            + "implementation!");
        } else {
            LOG.warn(
                    "Maximum capacity of 2^30 in StateMap reached. Cannot increase hash map size. This can "
                            + "lead to more collisions and lower performance. Please consider scaling-out your job or using a "
                            + "different keyed state backend implementation!");
            threshold = MAX_ARRAY_SIZE;
        }
    }

    @SuppressWarnings("unchecked")
    StateMapEntry<K, N, S>[] newMap = (StateMapEntry<K, N, S>[]) new StateMapEntry[newCapacity];
    return newMap;
}
Allocate a table of the given capacity and set the threshold accordingly. @param newCapacity must be a power of two
makeTable
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
Apache-2.0
/**
 * Creates a new {@link StateMapEntry} for the given key/namespace and inserts it at the head
 * of its bucket chain in the given table, updating the size counter of whichever table
 * (primary or rehash) received the entry.
 */
private StateMapEntry<K, N, S> addNewStateMapEntry(
        StateMapEntry<K, N, S>[] table, K key, N namespace, int hash) {

    // small optimization that aims to avoid holding references on duplicate namespace objects
    if (namespace.equals(lastNamespace)) {
        namespace = lastNamespace;
    } else {
        lastNamespace = namespace;
    }

    int index = hash & (table.length - 1);
    StateMapEntry<K, N, S> newEntry =
            new StateMapEntry<>(
                    key, namespace, null, hash, table[index], stateMapVersion, stateMapVersion);
    table[index] = newEntry;

    // book-keep the size of the table the entry went into
    if (table == primaryTable) {
        ++primaryTableSize;
    } else {
        ++incrementalRehashTableSize;
    }
    return newEntry;
}
Creates and inserts a new {@link StateMapEntry}.
addNewStateMapEntry
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
Apache-2.0
/**
 * Selects the sub-table responsible for entries with the given hash code. Buckets below the
 * rehash index have already been migrated to the incremental rehash table.
 *
 * @param hashCode the hash code deciding which table is responsible
 * @return the sub-table responsible for entries with that hash code
 */
private StateMapEntry<K, N, S>[] selectActiveTable(int hashCode) {
    final int bucket = hashCode & (primaryTable.length - 1);
    if (bucket >= rehashIndex) {
        return primaryTable;
    }
    return incrementalRehashTable;
}
Select the sub-table which is responsible for entries with the given hash code. @param hashCode the hash code which we use to decide about the table that is responsible. @return the index of the sub-table that is responsible for the entry with the given hash code.
selectActiveTable
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
Apache-2.0
/**
 * Starts doubling the capacity of the hash table by allocating the incremental rehash table;
 * entries are migrated lazily by the incremental rehash steps. No-op when the capacity is
 * already at {@code MAXIMUM_CAPACITY}.
 */
private void doubleCapacity() {
    // There can only be one rehash in flight. From the amount of incremental rehash steps we
    // take, this should always hold.
    Preconditions.checkState(!isRehashing(), "There is already a rehash in progress.");

    StateMapEntry<K, N, S>[] oldMap = primaryTable;
    int oldCapacity = oldMap.length;

    if (oldCapacity == MAXIMUM_CAPACITY) {
        // cannot grow any further
        return;
    }

    incrementalRehashTable = makeTable(oldCapacity * 2);
}
Doubles the capacity of the hash table by allocating the incremental rehash table; existing entries are migrated to their correct buckets incrementally. If the current capacity is MAXIMUM_CAPACITY, this method is a no-op.
doubleCapacity
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
Apache-2.0
@VisibleForTesting boolean isRehashing() { // if we rehash, the secondary table is not empty return EMPTY_TABLE != incrementalRehashTable; }
Returns true, if an incremental rehash is in progress.
isRehashing
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
Apache-2.0
/**
 * Computes the composite hash for the key/namespace pair, first advancing the incremental
 * rehash by a few migration steps if one is in flight.
 */
private int computeHashForOperationAndDoIncrementalRehash(K key, N namespace) {
    if (isRehashing()) {
        // Piggy-back some migration work onto this operation.
        incrementalRehash();
    }
    return compositeHash(key, namespace);
}
Computes the hash for the composite of key and namespace and performs some steps of incremental rehash if incremental rehashing is in progress.
computeHashForOperationAndDoIncrementalRehash
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
Apache-2.0
@SuppressWarnings("unchecked") private void incrementalRehash() { StateMapEntry<K, N, S>[] oldMap = primaryTable; StateMapEntry<K, N, S>[] newMap = incrementalRehashTable; int oldCapacity = oldMap.length; int newMask = newMap.length - 1; int requiredVersion = highestRequiredSnapshotVersion; int rhIdx = rehashIndex; int transferred = 0; // we migrate a certain minimum amount of entries from the old to the new table while (transferred < MIN_TRANSFERRED_PER_INCREMENTAL_REHASH) { StateMapEntry<K, N, S> e = oldMap[rhIdx]; while (e != null) { // copy-on-write check for entry if (e.entryVersion < requiredVersion) { e = new StateMapEntry<>(e, stateMapVersion); } StateMapEntry<K, N, S> n = e.next; int pos = e.hash & newMask; e.next = newMap[pos]; newMap[pos] = e; e = n; ++transferred; } oldMap[rhIdx] = null; if (++rhIdx == oldCapacity) { // here, the rehash is complete and we release resources and reset fields primaryTable = newMap; incrementalRehashTable = (StateMapEntry<K, N, S>[]) EMPTY_TABLE; primaryTableSize += incrementalRehashTableSize; incrementalRehashTableSize = 0; rehashIndex = 0; return; } } // sync our local bookkeeping the with official bookkeeping fields primaryTableSize -= transferred; incrementalRehashTableSize += transferred; rehashIndex = rhIdx; }
Runs a number of steps for incremental rehashing.
incrementalRehash
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
Apache-2.0
private static int compositeHash(Object key, Object namespace) { // create composite key through XOR, then apply some bit-mixing for better distribution of // skewed keys. return MathUtils.bitMix(key.hashCode() ^ namespace.hashCode()); }
Helper function that creates and scrambles a composite hash for key and namespace.
compositeHash
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
Apache-2.0
@Nonnull @Override public CopyOnWriteStateMapSnapshot<K, N, S> stateSnapshot() { return new CopyOnWriteStateMapSnapshot<>(this); }
Creates a snapshot of this {@link CopyOnWriteStateMap}, to be written in checkpointing. The snapshot integrity is protected through copy-on-write from the {@link CopyOnWriteStateMap}. Users should call {@link #releaseSnapshot(StateMapSnapshot)} after using the returned object. @return a snapshot from this {@link CopyOnWriteStateMap}, for checkpointing.
stateSnapshot
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMap.java
Apache-2.0
@VisibleForTesting @Override public int numKeyValueStateEntries() { int sum = 0; for (StateSnapshotRestore state : registeredKVStates.values()) { sum += ((StateTable<?, ?, ?>) state).size(); } return sum; }
Returns the total number of state entries across all keys/namespaces.
numKeyValueStateEntries
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapKeyedStateBackend.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapKeyedStateBackend.java
Apache-2.0
@Override @Nullable public T poll() { final T toRemove = super.poll(); return toRemove != null ? getDedupMapForElement(toRemove).remove(toRemove) : null; }
Creates an empty {@link HeapPriorityQueueSet} with the requested initial capacity. @param elementPriorityComparator comparator for the priority of contained elements. @param keyExtractor function to extract a key from the contained elements. @param minimumCapacity the minimum and initial capacity of this priority queue. @param keyGroupRange the key-group range of the elements in this set. @param totalNumberOfKeyGroups the total number of key-groups of the job.
poll
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSet.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSet.java
Apache-2.0
@Override public boolean add(@Nonnull T element) { return getDedupMapForElement(element).putIfAbsent(element, element) == null && super.add(element); }
Adds the element to the queue. In contrast to the superclass and to maintain set semantics, this happens only if no such element is already contained (determined by {@link #equals(Object)}). @return <code>true</code> if the operation changed the head element or if is it unclear if the head element changed. Only returns <code>false</code> iff the head element was not changed by this operation.
add
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSet.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSet.java
Apache-2.0
public HeapPriorityQueueSnapshotRestoreWrapper<T> forUpdatedSerializer( @Nonnull TypeSerializer<T> updatedSerializer) { return forUpdatedSerializer(updatedSerializer, false); }
Returns a deep copy of the snapshot, where the serializer is changed to the given serializer.
forUpdatedSerializer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSnapshotRestoreWrapper.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSnapshotRestoreWrapper.java
Apache-2.0
public HeapPriorityQueueSnapshotRestoreWrapper<T> forUpdatedSerializer( @Nonnull TypeSerializer<T> updatedSerializer, boolean allowFutureMetadataUpdates) { RegisteredPriorityQueueStateBackendMetaInfo<T> updatedMetaInfo = new RegisteredPriorityQueueStateBackendMetaInfo<>( metaInfo.getName(), updatedSerializer); updatedMetaInfo = allowFutureMetadataUpdates ? updatedMetaInfo.withSerializerUpgradesAllowed() : updatedMetaInfo; return new HeapPriorityQueueSnapshotRestoreWrapper<>( priorityQueue, updatedMetaInfo, keyExtractorFunction, localKeyGroupRange, totalKeyGroups); }
Returns a deep copy of the snapshot, where the serializer is re-registered by the serializer snapshot or changed to the given serializer. @param updatedSerializer updated serializer. @param allowFutureMetadataUpdates whether allow metadata to update in the future or not. @return the queue with the specified unique name.
forUpdatedSerializer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSnapshotRestoreWrapper.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSnapshotRestoreWrapper.java
Apache-2.0
@Override public int comparePriority(Q o1, Q o2) { final T left = o1.peek(); final T right = o2.peek(); if (left == null) { return (right == null ? 0 : 1); } else { return (right == null ? -1 : elementPriorityComparator.comparePriority(left, right)); } }
Comparator for the queue elements, so we can compare their heads.
comparePriority
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/KeyGroupPartitionedPriorityQueue.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/KeyGroupPartitionedPriorityQueue.java
Apache-2.0
public boolean isEmpty() { return size() == 0; }
Returns whether this {@link StateMap} is empty. @return {@code true} if this {@link StateMap} has no elements, {@code false} otherwise. @see #size()
isEmpty
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateMap.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateMap.java
Apache-2.0
public void releaseSnapshot( StateMapSnapshot<K, N, S, ? extends StateMap<K, N, S>> snapshotToRelease) {}
Releases a snapshot for this {@link StateMap}. This method should be called once a snapshot is no more needed. @param snapshotToRelease the snapshot to release, which was previously created by this state map.
releaseSnapshot
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateMap.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateMap.java
Apache-2.0
public boolean isOwner(T stateMap) { return owningStateMap == stateMap; }
Returns true iff the given state map is the owner of this snapshot object.
isOwner
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateMapSnapshot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateMapSnapshot.java
Apache-2.0
public int size() { int count = 0; for (StateMap<K, N, S> stateMap : keyGroupedStateMaps) { count += stateMap.size(); } return count; }
Returns the total number of entries in this {@link StateTable}. This is the sum of both sub-tables. @return the number of entries in this {@link StateTable}.
size
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateTable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateTable.java
Apache-2.0
public S get(N namespace) { return get(keyContext.getCurrentKey(), keyContext.getCurrentKeyGroupIndex(), namespace); }
Returns the state of the mapping for the composite of active key and given namespace. @param namespace the namespace. Not null. @return the states of the mapping with the specified key/namespace composite key, or {@code null} if no mapping for the specified key is found.
get
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateTable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateTable.java
Apache-2.0
public boolean containsKey(N namespace) { return containsKey( keyContext.getCurrentKey(), keyContext.getCurrentKeyGroupIndex(), namespace); }
Returns whether this table contains a mapping for the composite of active key and given namespace. @param namespace the namespace in the composite key to search for. Not null. @return {@code true} if this map contains the specified key/namespace composite key, {@code false} otherwise.
containsKey
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateTable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateTable.java
Apache-2.0
public void put(N namespace, S state) { put(keyContext.getCurrentKey(), keyContext.getCurrentKeyGroupIndex(), namespace, state); }
Maps the composite of active key and given namespace to the specified state. @param namespace the namespace. Not null. @param state the state. Can be null.
put
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateTable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateTable.java
Apache-2.0
public void remove(N namespace) { remove(keyContext.getCurrentKey(), keyContext.getCurrentKeyGroupIndex(), namespace); }
Removes the mapping for the composite of active key and given namespace. This method should be preferred over {@link #removeAndGetOld(N)} when the caller is not interested in the old state. @param namespace the namespace of the mapping to remove. Not null.
remove
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateTable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateTable.java
Apache-2.0
public S removeAndGetOld(N namespace) { return removeAndGetOld( keyContext.getCurrentKey(), keyContext.getCurrentKeyGroupIndex(), namespace); }
Removes the mapping for the composite of active key and given namespace, returning the state that was found under the entry. @param namespace the namespace of the mapping to remove. Not null. @return the state of the removed mapping or {@code null} if no mapping for the specified key was found.
removeAndGetOld
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateTable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateTable.java
Apache-2.0
public <T> void transform( N namespace, T value, StateTransformationFunction<S, T> transformation) throws Exception { K key = keyContext.getCurrentKey(); checkKeyNamespacePreconditions(key, namespace); int keyGroup = keyContext.getCurrentKeyGroupIndex(); getMapForKeyGroup(keyGroup).transform(key, namespace, value, transformation); }
Applies the given {@link StateTransformationFunction} to the state (1st input argument), using the given value as second input argument. The result of {@link StateTransformationFunction#apply(Object, Object)} is then stored as the new state. This function is basically an optimization for get-update-put pattern. @param namespace the namespace. Not null. @param value the value to use in transforming the state. Can be null. @param transformation the transformation function. @throws Exception if some exception happens in the transformation function.
transform
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateTable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateTable.java
Apache-2.0
public byte[] closeAndGetBytes() throws IOException { if (closed.compareAndSet(false, true)) { checkSize(os.size(), maxSize); byte[] bytes = os.toByteArray(); closeInternal(); return bytes; } else { throw new IOException("stream has already been closed"); } }
Closes the stream and returns the byte array containing the stream's data. @return The byte array containing the stream's data. @throws IOException Thrown if the size of the data exceeds the maximal
closeAndGetBytes
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/memory/MemCheckpointStreamFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/memory/MemCheckpointStreamFactory.java
Apache-2.0
@Nullable public TypeSerializer<?> getTypeSerializer(@Nonnull String key) { return serializers.get(key); }
TODO this method should be removed once the serializer map is removed.
getTypeSerializer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/metainfo/StateMetaInfoSnapshot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/metainfo/StateMetaInfoSnapshot.java
Apache-2.0
@Nonnull public static StateMetaInfoWriter getWriter() { return CurrentWriterImpl.INSTANCE; }
Returns the writer for {@link StateMetaInfoSnapshot}.
getWriter
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/metainfo/StateMetaInfoSnapshotReadersWriters.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/metainfo/StateMetaInfoSnapshotReadersWriters.java
Apache-2.0
@Nonnull public static StateMetaInfoReader getReader(int readVersion) { checkArgument( readVersion <= CURRENT_STATE_META_INFO_SNAPSHOT_VERSION, "Unsupported read version for state meta info [%s]", readVersion); if (readVersion < 6) { // versions before 5 still had different state meta info formats between keyed / // operator state throw new UnsupportedOperationException( String.format( "No longer supported version [%d]. Please upgrade first to Flink 1.16. ", readVersion)); } return CurrentReaderImpl.INSTANCE; }
Returns a reader for {@link StateMetaInfoSnapshot} with the requested state type and version number. @param readVersion the format version to read. @return the requested reader.
getReader
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/metainfo/StateMetaInfoSnapshotReadersWriters.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/metainfo/StateMetaInfoSnapshotReadersWriters.java
Apache-2.0
@Override public void writeStateMetaInfoSnapshot( @Nonnull StateMetaInfoSnapshot snapshot, @Nonnull DataOutputView outputView) throws IOException { final Map<String, String> optionsMap = snapshot.getOptionsImmutable(); final Map<String, TypeSerializerSnapshot<?>> serializerConfigSnapshotsMap = snapshot.getSerializerSnapshotsImmutable(); outputView.writeUTF(snapshot.getName()); outputView.writeInt(snapshot.getBackendStateType().ordinal()); outputView.writeInt(optionsMap.size()); for (Map.Entry<String, String> entry : optionsMap.entrySet()) { outputView.writeUTF(entry.getKey()); outputView.writeUTF(entry.getValue()); } outputView.writeInt(serializerConfigSnapshotsMap.size()); for (Map.Entry<String, TypeSerializerSnapshot<?>> entry : serializerConfigSnapshotsMap.entrySet()) { final String key = entry.getKey(); outputView.writeUTF(entry.getKey()); TypeSerializerSnapshotSerializationUtil.writeSerializerSnapshot( outputView, (TypeSerializerSnapshot) entry.getValue()); } }
Implementation of {@link StateMetaInfoWriter} for current implementation. The serialization format is as follows: <ul> <li>1. State name (UDF) <li>2. State backend type enum ordinal (int) <li>3. Meta info options map, consisting of the map size (int) followed by the key value pairs (String, String) <li>4. Serializer configuration map, consisting of the map size (int) followed by the key value pairs (String, TypeSerializerSnapshot) </ul>
writeStateMetaInfoSnapshot
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/metainfo/StateMetaInfoSnapshotReadersWriters.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/metainfo/StateMetaInfoSnapshotReadersWriters.java
Apache-2.0
@Nonnull @Override public StateMetaInfoSnapshot readStateMetaInfoSnapshot( @Nonnull DataInputView inputView, @Nonnull ClassLoader userCodeClassLoader) throws IOException { final String stateName = inputView.readUTF(); final StateMetaInfoSnapshot.BackendStateType stateType = StateMetaInfoSnapshot.BackendStateType.values()[inputView.readInt()]; final int numOptions = inputView.readInt(); HashMap<String, String> optionsMap = CollectionUtil.newHashMapWithExpectedSize(numOptions); for (int i = 0; i < numOptions; ++i) { String key = inputView.readUTF(); String value = inputView.readUTF(); optionsMap.put(key, value); } final int numSerializerConfigSnapshots = inputView.readInt(); final HashMap<String, TypeSerializerSnapshot<?>> serializerConfigsMap = CollectionUtil.newHashMapWithExpectedSize(numSerializerConfigSnapshots); for (int i = 0; i < numSerializerConfigSnapshots; ++i) { serializerConfigsMap.put( inputView.readUTF(), TypeSerializerSnapshotSerializationUtil.readSerializerSnapshot( inputView, userCodeClassLoader)); } return new StateMetaInfoSnapshot( stateName, stateType, optionsMap, serializerConfigsMap); }
Implementation of {@link StateMetaInfoReader} for the current version and generic for all state types.
readStateMetaInfoSnapshot
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/metainfo/StateMetaInfoSnapshotReadersWriters.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/metainfo/StateMetaInfoSnapshotReadersWriters.java
Apache-2.0
@Nonnull public Path getCheckpointPath() { // we know that this can never be null by the way of constructor checks //noinspection ConstantConditions return location.getBaseCheckpointPath(); }
Gets the base directory where all the checkpoints are stored. The job-specific checkpoint directory is created inside this directory. @return The base directory for checkpoints.
getCheckpointPath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/storage/FileSystemCheckpointStorage.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/storage/FileSystemCheckpointStorage.java
Apache-2.0
public int getMinFileSizeThreshold() { return fileStateThreshold >= 0 ? fileStateThreshold : MathUtils.checkedDownCast(FS_SMALL_FILE_THRESHOLD.defaultValue().getBytes()); }
Gets the threshold below which state is stored as part of the metadata, rather than in files. This threshold ensures that the backend does not create a large amount of very small files, where potentially the file pointers are larger than the state itself. <p>If not explicitly configured, this is the default value of {@link CheckpointingOptions#FS_SMALL_FILE_THRESHOLD}. @return The file size threshold, in bytes.
getMinFileSizeThreshold
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/storage/FileSystemCheckpointStorage.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/storage/FileSystemCheckpointStorage.java
Apache-2.0
public int getMaxStateSize() { return maxStateSize; }
Gets the maximum size that an individual state can have, as configured in the constructor (by default {@value #DEFAULT_MAX_STATE_SIZE}). @return The maximum size that an individual state can have
getMaxStateSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/storage/JobManagerCheckpointStorage.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/storage/JobManagerCheckpointStorage.java
Apache-2.0
@Nullable public Path getCheckpointPath() { return location.getBaseCheckpointPath(); }
@return The location where checkpoints will be externalized if set.
getCheckpointPath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/storage/JobManagerCheckpointStorage.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/storage/JobManagerCheckpointStorage.java
Apache-2.0
@Override public JobManagerCheckpointStorage configure(ReadableConfig config, ClassLoader classLoader) { return new JobManagerCheckpointStorage(this, config); }
Creates a copy of this checkpoint storage that uses the values defined in the configuration for fields where that were not specified in this checkpoint storage. @param config The configuration @return The re-configured variant of the checkpoint storage
configure
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/storage/JobManagerCheckpointStorage.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/storage/JobManagerCheckpointStorage.java
Apache-2.0
@Override public StateIncrementalVisitor<K, N, SV> getStateIncrementalVisitor( int recommendedMaxNumberOfReturnedRecords) { throw new UnsupportedOperationException(); }
Check if state has expired or not and update it if it has partially expired. @return either non expired (possibly updated) state or null if the state has expired.
getStateIncrementalVisitor
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/ttl/AbstractTtlState.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/ttl/AbstractTtlState.java
Apache-2.0
@Override public TtlValue<T> reduce(TtlValue<T> value1, TtlValue<T> value2) throws Exception { T userValue1 = getUnexpired(value1); T userValue2 = getUnexpired(value2); if (userValue1 != null && userValue2 != null) { return wrapWithTs(original.reduce(userValue1, userValue2)); } else if (userValue1 != null) { return rewrapWithNewTs(value1); } else if (userValue2 != null) { return rewrapWithNewTs(value2); } else { return null; } }
This class wraps reducing function with TTL logic. @param <T> Type of the user value of state with TTL
reduce
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/ttl/TtlReduceFunction.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/state/ttl/TtlReduceFunction.java
Apache-2.0
public Collection<AccumulatorSnapshot> getAccumulatorSnapshots() { return accumulatorSnapshots; }
A report about the current values of all accumulators of the TaskExecutor for a given job.
getAccumulatorSnapshots
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/AccumulatorReport.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/AccumulatorReport.java
Apache-2.0
@Override protected CompletableFuture<RegistrationResponse> invokeRegistration( JobMasterGateway gateway, JobMasterId fencingToken, long timeoutMillis) { return gateway.registerTaskManager( jobId, taskManagerRegistrationInformation, Duration.ofMillis(timeoutMillis)); }
Retrying registration for the job manager <--> task manager connection.
invokeRegistration
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/DefaultJobLeaderService.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/DefaultJobLeaderService.java
Apache-2.0
@Override @VisibleForTesting public boolean containsJob(JobID jobId) { Preconditions.checkState( DefaultJobLeaderService.State.STARTED == state, "The service is currently not running."); return jobLeaderServices.containsKey(jobId); }
Check whether the service monitors the given job. @param jobId identifying the job @return True if the given job is monitored; otherwise false
containsJob
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/DefaultJobLeaderService.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/DefaultJobLeaderService.java
Apache-2.0
public static KvStateService fromConfiguration( TaskManagerServicesConfiguration taskManagerServicesConfiguration) { KvStateRegistry kvStateRegistry = new KvStateRegistry(); QueryableStateConfiguration qsConfig = taskManagerServicesConfiguration.getQueryableStateConfig(); KvStateClientProxy kvClientProxy = null; KvStateServer kvStateServer = null; if (qsConfig != null) { int numProxyServerNetworkThreads = qsConfig.numProxyServerThreads() == 0 ? taskManagerServicesConfiguration.getNumberOfSlots() : qsConfig.numProxyServerThreads(); int numProxyServerQueryThreads = qsConfig.numProxyQueryThreads() == 0 ? taskManagerServicesConfiguration.getNumberOfSlots() : qsConfig.numProxyQueryThreads(); kvClientProxy = QueryableStateUtils.createKvStateClientProxy( taskManagerServicesConfiguration.getExternalAddress(), qsConfig.getProxyPortRange(), numProxyServerNetworkThreads, numProxyServerQueryThreads, new DisabledKvStateRequestStats()); int numStateServerNetworkThreads = qsConfig.numStateServerThreads() == 0 ? taskManagerServicesConfiguration.getNumberOfSlots() : qsConfig.numStateServerThreads(); int numStateServerQueryThreads = qsConfig.numStateQueryThreads() == 0 ? taskManagerServicesConfiguration.getNumberOfSlots() : qsConfig.numStateQueryThreads(); kvStateServer = QueryableStateUtils.createKvStateServer( taskManagerServicesConfiguration.getExternalAddress(), qsConfig.getStateServerPortRange(), numStateServerNetworkThreads, numStateServerQueryThreads, kvStateRegistry, new DisabledKvStateRequestStats()); } return new KvStateService(kvStateRegistry, kvStateServer, kvClientProxy); }
Creates and returns the KvState service. @param taskManagerServicesConfiguration task manager configuration @return service for kvState related components
fromConfiguration
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/KvStateService.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/KvStateService.java
Apache-2.0
public int numProxyServerThreads() { return numProxyThreads; }
Returns the number of threads for the query proxy NIO event loop. These threads only process network events and dispatch query requests to the query threads.
numProxyServerThreads
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/QueryableStateConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/QueryableStateConfiguration.java
Apache-2.0
public int numProxyQueryThreads() { return numPQueryThreads; }
Returns the number of query threads for the queryable state client proxy.
numProxyQueryThreads
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/QueryableStateConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/QueryableStateConfiguration.java
Apache-2.0
public int numStateServerThreads() { return numServerThreads; }
Returns the number of threads for the query server NIO event loop. These threads only process network events and dispatch query requests to the query threads.
numStateServerThreads
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/QueryableStateConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/QueryableStateConfiguration.java
Apache-2.0
public static QueryableStateConfiguration disabled() { final Iterator<Integer> proxyPorts = NetUtils.getPortRangeFromString( QueryableStateOptions.PROXY_PORT_RANGE.defaultValue()); final Iterator<Integer> serverPorts = NetUtils.getPortRangeFromString( QueryableStateOptions.SERVER_PORT_RANGE.defaultValue()); return new QueryableStateConfiguration(proxyPorts, serverPorts, 0, 0, 0, 0); }
Gets the configuration describing the queryable state as deactivated.
disabled
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/QueryableStateConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/QueryableStateConfiguration.java
Apache-2.0
public static QueryableStateConfiguration fromConfiguration(Configuration config) { if (!config.get(QueryableStateOptions.ENABLE_QUERYABLE_STATE_PROXY_SERVER)) { return null; } final Iterator<Integer> proxyPorts = NetUtils.getPortRangeFromString(config.get(QueryableStateOptions.PROXY_PORT_RANGE)); final Iterator<Integer> serverPorts = NetUtils.getPortRangeFromString( config.get(QueryableStateOptions.SERVER_PORT_RANGE)); final int numProxyServerNetworkThreads = config.get(QueryableStateOptions.PROXY_NETWORK_THREADS); final int numProxyServerQueryThreads = config.get(QueryableStateOptions.PROXY_ASYNC_QUERY_THREADS); final int numStateServerNetworkThreads = config.get(QueryableStateOptions.SERVER_NETWORK_THREADS); final int numStateServerQueryThreads = config.get(QueryableStateOptions.SERVER_ASYNC_QUERY_THREADS); return new QueryableStateConfiguration( proxyPorts, serverPorts, numProxyServerNetworkThreads, numProxyServerQueryThreads, numStateServerNetworkThreads, numStateServerQueryThreads); }
Creates the {@link QueryableStateConfiguration} from the given Configuration.
fromConfiguration
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/QueryableStateConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/QueryableStateConfiguration.java
Apache-2.0
public SlotID getSlotID() { return slotID; }
Get the unique identification of this slot. @return The slot id
getSlotID
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/SlotStatus.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/SlotStatus.java
Apache-2.0
/** Returns the resource profile describing this slot's resources. */
public ResourceProfile getResourceProfile() {
    return this.resourceProfile;
}
Get the resource profile of this slot. @return The resource profile
getResourceProfile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/SlotStatus.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/SlotStatus.java
Apache-2.0
/**
 * Returns the allocation ID of this slot.
 *
 * @return the allocation ID if the slot is allocated, otherwise {@code null}
 */
public AllocationID getAllocationID() {
    return this.allocationID;
}
Get the allocation id of this slot. @return The allocation id if this slot is allocated, otherwise null
getAllocationID
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/SlotStatus.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/SlotStatus.java
Apache-2.0
/**
 * Gets and retains all partitions belonging to the given job that are located on this task
 * executor, together with their metrics (e.g. partition bytes).
 *
 * <p>The default implementation does not support this operation; implementations that do
 * must override this method.
 *
 * @param jobId ID of the target job
 * @return future of all partitions of the target job and their metrics
 * @throws UnsupportedOperationException if the implementation does not override this method
 */
default CompletableFuture<Collection<PartitionWithMetrics>> getAndRetainPartitionWithMetrics(
        JobID jobId) {
    // Fix: the exception previously carried no message; include the operation name so the
    // failure is diagnosable from the stack trace alone.
    throw new UnsupportedOperationException(
            "getAndRetainPartitionWithMetrics is not supported by this gateway implementation.");
}
Get and retain all partitions and their metrics located on this task executor; the metrics mainly include meta information about the partition (partition bytes, etc.). @param jobId ID of the target job @return All partitions belonging to the target job and their metrics
getAndRetainPartitionWithMetrics
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorGateway.java
Apache-2.0
/**
 * Reads the given memory-size option from the configuration and converts it to bytes.
 *
 * @param config the configuration to read from
 * @param option the memory-size option to look up
 * @return the configured size in bytes, or {@code null} if the option has no value
 */
private static Long getConfigurationValue(
        Configuration config, ConfigOption<? extends MemorySize> option) {
    final MemorySize size = config.get(option);
    if (size == null) {
        return null;
    }
    return size.getBytes();
}
TaskExecutorConfiguration collects the configuration of a TaskExecutor instance.
getConfigurationValue
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
Apache-2.0
/**
 * Creates a {@link TaskExecutorMemoryConfiguration} from the given configuration.
 *
 * <p>All component sizes are read in bytes; the Flink-memory and process-memory totals are
 * derived from the individual components.
 *
 * @param config the configuration to initialize the memory configuration from
 * @return the newly instantiated memory configuration
 */
public static TaskExecutorMemoryConfiguration create(Configuration config) {
    // Read each component in the same order as the constructor parameters.
    final Long frameworkHeap = getConfigurationValue(config, FRAMEWORK_HEAP_MEMORY);
    final Long taskHeap = getConfigurationValue(config, TASK_HEAP_MEMORY);
    final Long frameworkOffHeap = getConfigurationValue(config, FRAMEWORK_OFF_HEAP_MEMORY);
    final Long taskOffHeap = getConfigurationValue(config, TASK_OFF_HEAP_MEMORY);
    final Long networkMemory = getConfigurationValue(config, NETWORK_MEMORY_MAX);
    final Long managedMemory = getConfigurationValue(config, MANAGED_MEMORY_SIZE);
    final Long jvmMetaspace = getConfigurationValue(config, JVM_METASPACE);
    final Long jvmOverhead = getConfigurationValue(config, JVM_OVERHEAD_MAX);

    return new TaskExecutorMemoryConfiguration(
            frameworkHeap,
            taskHeap,
            frameworkOffHeap,
            taskOffHeap,
            networkMemory,
            managedMemory,
            jvmMetaspace,
            jvmOverhead,
            calculateTotalFlinkMemoryFromComponents(config),
            calculateTotalProcessMemoryFromComponents(config));
}
Factory method for initializing a TaskExecutorMemoryConfiguration based on the passed Configuration. @param config The Configuration used for initializing the TaskExecutorMemoryConfiguration. @return The newly instantiated TaskExecutorMemoryConfiguration.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
Apache-2.0
/** Returns the configured heap size, in bytes, used by the framework. */
public Long getFrameworkHeap() {
    return this.frameworkHeap;
}
Returns the configured heap size used by the framework.
getFrameworkHeap
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
Apache-2.0
/** Returns the configured heap size, in bytes, used by the tasks. */
public Long getTaskHeap() {
    return this.taskHeap;
}
Returns the configured heap size used by the tasks.
getTaskHeap
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
Apache-2.0
/** Returns the configured off-heap size, in bytes, used by the framework. */
public Long getFrameworkOffHeap() {
    return this.frameworkOffHeap;
}
Returns the configured off-heap size used by the framework.
getFrameworkOffHeap
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
Apache-2.0
/** Returns the configured off-heap size, in bytes, used by the tasks. */
public Long getTaskOffHeap() {
    return this.taskOffHeap;
}
Returns the configured off-heap size used by the tasks.
getTaskOffHeap
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
Apache-2.0
/** Returns the configured maximum network memory, in bytes. */
public Long getNetworkMemory() {
    return this.networkMemory;
}
Returns the configured maximum network memory.
getNetworkMemory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
Apache-2.0
/** Returns the total amount of memory, in bytes, reserved for the MemoryManager. */
public Long getManagedMemoryTotal() {
    return this.managedMemoryTotal;
}
Returns the total amount of memory reserved for use by the MemoryManager.
getManagedMemoryTotal
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
Apache-2.0
/** Returns the maximum Metaspace size, in bytes, allowed for the task manager. */
public Long getJvmMetaspace() {
    return this.jvmMetaspace;
}
Returns the maximum Metaspace size allowed for the task manager.
getJvmMetaspace
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
Apache-2.0
/** Returns the maximum amount of memory, in bytes, allowed for the JVM overhead. */
public Long getJvmOverhead() {
    return this.jvmOverhead;
}
Returns the threshold for defining the maximum amount of memory used for the JVM overhead.
getJvmOverhead
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
Apache-2.0
/**
 * Returns the amount of memory, in bytes, configured to be used by Flink itself, i.e.
 * excluding JVM Metaspace and other JVM overhead.
 */
public Long getTotalFlinkMemory() {
    return this.totalFlinkMemory;
}
Returns the amount of memory configured to be used by Flink excluding things like JVM Metaspace and other JVM overhead.
getTotalFlinkMemory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorMemoryConfiguration.java
Apache-2.0
/** Returns the instance ID the ResourceManager assigned to this registration. */
public InstanceID getRegistrationId() {
    return this.registrationId;
}
Gets the ID that the ResourceManager assigned the registration.
getRegistrationId
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorRegistrationSuccess.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorRegistrationSuccess.java
Apache-2.0
/** Returns the unique resource ID identifying the ResourceManager. */
public ResourceID getResourceManagerId() {
    return this.resourceManagerResourceId;
}
Gets the unique ID that identifies the ResourceManager.
getResourceManagerId
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorRegistrationSuccess.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorRegistrationSuccess.java
Apache-2.0
/**
 * Shuts the {@link TaskExecutor} services down.
 *
 * <p>Every service is shut down in turn even if an earlier one fails: the first exception is
 * kept and later ones are attached to it via {@link ExceptionUtils#firstOrSuppressed}. If any
 * service failed, the accumulated exception is rethrown wrapped in a {@link FlinkException}
 * after all services have been attempted.
 *
 * @throws FlinkException if one or more services could not be shut down properly
 */
public void shutDown() throws FlinkException {
    // Accumulator for the first failure; subsequent failures become suppressed exceptions.
    Exception exception = null;

    try {
        taskManagerStateStore.shutdown();
    } catch (Exception e) {
        exception = e;
    }

    try {
        ioManager.close();
    } catch (Exception e) {
        exception = ExceptionUtils.firstOrSuppressed(e, exception);
    }

    try {
        shuffleEnvironment.close();
    } catch (Exception e) {
        exception = ExceptionUtils.firstOrSuppressed(e, exception);
    }

    try {
        kvStateService.shutdown();
    } catch (Exception e) {
        exception = ExceptionUtils.firstOrSuppressed(e, exception);
    }

    try {
        taskSlotTable.close();
    } catch (Exception e) {
        exception = ExceptionUtils.firstOrSuppressed(e, exception);
    }

    try {
        jobLeaderService.stop();
    } catch (Exception e) {
        exception = ExceptionUtils.firstOrSuppressed(e, exception);
    }

    try {
        ioExecutor.shutdown();
    } catch (Exception e) {
        exception = ExceptionUtils.firstOrSuppressed(e, exception);
    }

    try {
        jobTable.close();
    } catch (Exception e) {
        exception = ExceptionUtils.firstOrSuppressed(e, exception);
    }

    try {
        libraryCacheManager.shutdown();
    } catch (Exception e) {
        exception = ExceptionUtils.firstOrSuppressed(e, exception);
    }

    // Clearing the event dispatcher is not expected to throw, so it runs outside the
    // try/catch accumulation above.
    taskEventDispatcher.clearAll();

    if (exception != null) {
        throw new FlinkException(
                "Could not properly shut down the TaskManager services.", exception);
    }
}
Shuts the {@link TaskExecutor} services down.
shutDown
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskManagerServices.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskManagerServices.java
Apache-2.0
/**
 * Extracts the TaskManager service configuration parameters from the given configuration.
 *
 * @param configuration the configuration to read from
 * @param resourceID resource ID of the task manager
 * @param externalAddress address under which the TaskManager will be accessible
 * @param localCommunicationOnly true if only local communication is possible (single task
 *     manager setups only)
 * @param taskExecutorResourceSpec resource specification of the TaskManager to start
 * @param workingDirectory working directory of the TaskManager
 * @return configuration of task manager services used to create them
 * @throws Exception if e.g. the bind address cannot be resolved
 */
public static TaskManagerServicesConfiguration fromConfiguration(
        Configuration configuration,
        ResourceID resourceID,
        String externalAddress,
        boolean localCommunicationOnly,
        TaskExecutorResourceSpec taskExecutorResourceSpec,
        WorkingDirectory workingDirectory)
        throws Exception {
    // Local state directories: fall back to the working directory when none are configured;
    // configured directories each get a resource-ID-specific subdirectory and are owned
    // (i.e. created/cleaned up) by this process, while the fallback is only borrowed.
    String[] localStateRootDirs = ConfigurationUtils.parseLocalStateDirectories(configuration);
    final Reference<File[]> localStateDirs;

    if (localStateRootDirs.length == 0) {
        localStateDirs =
                Reference.borrowed(new File[] {workingDirectory.getLocalStateDirectory()});
    } else {
        File[] createdLocalStateDirs = new File[localStateRootDirs.length];
        final String localStateDirectoryName = LOCAL_STATE_SUB_DIRECTORY_ROOT + resourceID;

        for (int i = 0; i < localStateRootDirs.length; i++) {
            createdLocalStateDirs[i] = new File(localStateRootDirs[i], localStateDirectoryName);
        }
        localStateDirs = Reference.owned(createdLocalStateDirs);
    }

    boolean localRecoveryEnabled = configuration.get(StateRecoveryOptions.LOCAL_RECOVERY);
    boolean localBackupEnabled = configuration.get(CheckpointingOptions.LOCAL_BACKUP_ENABLED);

    // May be null when the queryable-state proxy server is disabled.
    final QueryableStateConfiguration queryableStateConfig =
            QueryableStateConfiguration.fromConfiguration(configuration);

    long timerServiceShutdownTimeout =
            configuration.get(RpcOptions.ASK_TIMEOUT_DURATION).toMillis();

    final RetryingRegistrationConfiguration retryingRegistrationConfiguration =
            RetryingRegistrationConfiguration.fromConfiguration(configuration);

    final int externalDataPort = configuration.get(NettyShuffleEnvironmentOptions.DATA_PORT);

    // Bind address defaults to the wildcard address when BIND_HOST is not set.
    String bindAddr =
            configuration.get(TaskManagerOptions.BIND_HOST, NetUtils.getWildcardIPAddress());
    InetAddress bindAddress = InetAddress.getByName(bindAddr);

    final String classLoaderResolveOrder =
            configuration.get(CoreOptions.CLASSLOADER_RESOLVE_ORDER);

    final String[] alwaysParentFirstLoaderPatterns =
            CoreOptions.getParentFirstLoaderPatterns(configuration);

    final int numIoThreads = ClusterEntrypointUtils.getPoolSize(configuration);

    final String[] tmpDirs = ConfigurationUtils.parseTempDirectories(configuration);

    // If TaskManagerOptionsInternal.TASK_MANAGER_NODE_ID is not set, use the external address
    // as the node id.
    final String nodeId =
            configuration
                    .getOptional(TaskManagerOptionsInternal.TASK_MANAGER_NODE_ID)
                    .orElse(externalAddress);

    return new TaskManagerServicesConfiguration(
            configuration,
            resourceID,
            externalAddress,
            bindAddress,
            externalDataPort,
            localCommunicationOnly,
            tmpDirs,
            localStateDirs,
            localRecoveryEnabled,
            localBackupEnabled,
            queryableStateConfig,
            ConfigurationParserUtils.getSlot(configuration),
            ConfigurationParserUtils.getPageSize(configuration),
            taskExecutorResourceSpec,
            timerServiceShutdownTimeout,
            retryingRegistrationConfiguration,
            ConfigurationUtils.getSystemResourceMetricsProbingInterval(configuration),
            FlinkUserCodeClassLoaders.ResolveOrder.fromString(classLoaderResolveOrder),
            alwaysParentFirstLoaderPatterns,
            numIoThreads,
            nodeId);
}
Utility method to extract TaskManager config parameters from the configuration and to sanity check them. @param configuration The configuration. @param resourceID resource ID of the task manager @param externalAddress the address under which the TaskManager will be accessible @param localCommunicationOnly True if only local communication is possible. Use only in cases where only one task manager runs. @param taskExecutorResourceSpec resource specification of the TaskManager to start @param workingDirectory working directory of the TaskManager @return configuration of task manager services used to create them
fromConfiguration
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskManagerServicesConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskManagerServicesConfiguration.java
Apache-2.0
/**
 * Starts tracking the given partitions for the given key. A no-op when the collection of
 * new partition IDs is empty.
 *
 * @param key key to track the partitions under (must not be null)
 * @param newPartitionIds IDs of the partitions to start tracking (must not be null)
 */
public void startTrackingPartitions(K key, Collection<ResultPartitionID> newPartitionIds) {
    Preconditions.checkNotNull(key);
    Preconditions.checkNotNull(newPartitionIds);

    if (newPartitionIds.isEmpty()) {
        return;
    }

    // compute() keeps the read-modify-write as a single map operation; a fresh set is
    // created on first use for the key.
    trackedPartitionsPerKey.compute(
            key,
            (unused, existing) -> {
                final Set<ResultPartitionID> target =
                        existing == null
                                ? CollectionUtil.newHashSetWithExpectedSize(8)
                                : existing;
                target.addAll(newPartitionIds);
                return target;
            });
}
Starts the tracking of the given partitions for the given key.
startTrackingPartitions
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/partition/PartitionTable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/partition/PartitionTable.java
Apache-2.0
/**
 * Stops the tracking of every partition registered under the given key.
 *
 * @param key key whose partitions should no longer be tracked (must not be null)
 * @return the partitions that were tracked under the key, or an empty collection if none were
 */
public Collection<ResultPartitionID> stopTrackingPartitions(K key) {
    Preconditions.checkNotNull(key);

    final Set<ResultPartitionID> removed = trackedPartitionsPerKey.remove(key);
    if (removed == null) {
        return Collections.emptyList();
    }
    return removed;
}
Stops the tracking of all partitions for the given key.
stopTrackingPartitions
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/partition/PartitionTable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/partition/PartitionTable.java
Apache-2.0
public void stopTrackingPartitions(K key, Collection<ResultPartitionID> partitionIds) { Preconditions.checkNotNull(key); Preconditions.checkNotNull(partitionIds); // If the key is unknown we do not fail here, in line with // ShuffleEnvironment#releaseFinishedPartitions trackedPartitionsPerKey.computeIfPresent( key, (ignored, resultPartitionIDS) -> { resultPartitionIDS.removeAll(partitionIds); return resultPartitionIDS.isEmpty() ? null : resultPartitionIDS; }); }
Stops the tracking of the given set of partitions for the given key.
stopTrackingPartitions
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/partition/PartitionTable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/partition/PartitionTable.java
Apache-2.0