code stringlengths 25 201k | docstring stringlengths 19 96.2k | func_name stringlengths 0 235 | language stringclasses 1 value | repo stringlengths 8 51 | path stringlengths 11 314 | url stringlengths 62 377 | license stringclasses 7 values |
|---|---|---|---|---|---|---|---|
/** Returns the status of this checkpoint, which is always {@link CheckpointStatsStatus#FAILED}. */
@Override
public CheckpointStatsStatus getStatus() {
return CheckpointStatsStatus.FAILED;
} | Creates a tracker for a failed checkpoint.
@param checkpointId ID of the checkpoint.
@param triggerTimestamp Timestamp when the checkpoint was triggered.
@param props Checkpoint properties of the checkpoint.
@param totalSubtaskCount Total number of subtasks for the checkpoint.
@param taskStats Task stats for each involved operator.
@param numAcknowledgedSubtasks Number of acknowledged subtasks.
@param checkpointedSize Total persisted data size over all subtasks during the sync and async
phases of this checkpoint.
@param stateSize Total checkpoint state size over all subtasks.
@param processedData Processed data during the checkpoint.
@param persistedData Persisted data during the checkpoint.
@param unalignedCheckpoint Whether the checkpoint is unaligned.
@param failureTimestamp Timestamp when this checkpoint failed.
@param latestAcknowledgedSubtask The latest acknowledged subtask stats or <code>null</code>.
@param cause Cause of the checkpoint failure or <code>null</code>. | getStatus | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/FailedCheckpointStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/FailedCheckpointStats.java | Apache-2.0 |
/**
 * Returns the end-to-end duration from the trigger timestamp until this checkpoint failed,
 * floored at zero (clock skew could otherwise yield a negative value).
 */
@Override
public long getEndToEndDuration() {
final long elapsed = failureTimestamp - triggerTimestamp;
return elapsed > 0 ? elapsed : 0;
} | Returns the end to end duration until the checkpoint failure. | getEndToEndDuration | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/FailedCheckpointStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/FailedCheckpointStats.java | Apache-2.0 |
/**
 * Returns the timestamp when this checkpoint failed.
 *
 * @return Timestamp when the checkpoint failed.
 */
public long getFailureTimestamp() {
return failureTimestamp;
} | Returns the timestamp when this checkpoint failed.
@return Timestamp when the checkpoint failed. | getFailureTimestamp | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/FailedCheckpointStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/FailedCheckpointStats.java | Apache-2.0 |
/**
 * Returns the failure message, or {@code null} if no cause was provided.
 *
 * @return Failure message of the checkpoint failure or {@code null}.
 */
@Nullable
public String getFailureMessage() {
return failureMsg;
} | Returns the failure message or <code>null</code> if no cause was provided.
@return Failure message of the checkpoint failure or <code>null</code>. | getFailureMessage | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/FailedCheckpointStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/FailedCheckpointStats.java | Apache-2.0 |
/**
 * Acknowledges a master state (state generated on the checkpoint coordinator) for this
 * pending checkpoint. The state is only recorded if this hook was still unacknowledged and
 * the checkpoint has not been disposed.
 *
 * @param identifier the identifier of the master state hook
 * @param state the state to acknowledge, may be {@code null}
 */
public void acknowledgeMasterState(String identifier, @Nullable MasterState state) {
synchronized (lock) {
if (disposed) {
return;
}
final boolean wasPending = notYetAcknowledgedMasterStates.remove(identifier);
if (wasPending && state != null) {
masterStates.add(state);
}
}
} | Acknowledges a master state (state generated on the checkpoint coordinator) to the pending
checkpoint.
@param identifier The identifier of the master state
@param state The state to acknowledge | acknowledgeMasterState | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpoint.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpoint.java | Apache-2.0 |
/**
 * Returns an immutable list with all alternative snapshots to restore the managed operator
 * state, in the order in which we should attempt to restore.
 */
@Nonnull
public List<StateObjectCollection<OperatorStateHandle>> getPrioritizedManagedOperatorState() {
return prioritizedManagedOperatorState;
} | Returns an immutable list with all alternative snapshots to restore the managed operator
state, in the order in which we should attempt to restore. | getPrioritizedManagedOperatorState | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | Apache-2.0 |
/**
 * Returns an immutable list with all alternative snapshots to restore the raw operator
 * state, in the order in which we should attempt to restore.
 */
@Nonnull
public List<StateObjectCollection<OperatorStateHandle>> getPrioritizedRawOperatorState() {
return prioritizedRawOperatorState;
} | Returns an immutable list with all alternative snapshots to restore the raw operator state,
in the order in which we should attempt to restore. | getPrioritizedRawOperatorState | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | Apache-2.0 |
/**
 * Returns an immutable list with all alternative snapshots to restore the managed keyed
 * state, in the order in which we should attempt to restore.
 */
@Nonnull
public List<StateObjectCollection<KeyedStateHandle>> getPrioritizedManagedKeyedState() {
return prioritizedManagedKeyedState;
} | Returns an immutable list with all alternative snapshots to restore the managed keyed state,
in the order in which we should attempt to restore. | getPrioritizedManagedKeyedState | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | Apache-2.0 |
/**
 * Returns an immutable list with all alternative snapshots to restore the raw keyed state,
 * in the order in which we should attempt to restore.
 */
@Nonnull
public List<StateObjectCollection<KeyedStateHandle>> getPrioritizedRawKeyedState() {
return prioritizedRawKeyedState;
} | Returns an immutable list with all alternative snapshots to restore the raw keyed state, in
the order in which we should attempt to restore. | getPrioritizedRawKeyedState | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | Apache-2.0 |
/**
 * Returns the managed operator state from the job manager, which represents the ground truth
 * about what this state should represent. This is the alternative with lowest priority
 * (the last element of the prioritized list).
 */
@Nonnull
public StateObjectCollection<OperatorStateHandle> getJobManagerManagedOperatorState() {
return lastElement(prioritizedManagedOperatorState);
} | Returns the managed operator state from the job manager, which represents the ground truth
about what this state should represent. This is the alternative with lowest priority. | getJobManagerManagedOperatorState | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | Apache-2.0 |
/**
 * Returns the raw operator state from the job manager, which represents the ground truth
 * about what this state should represent. This is the alternative with lowest priority
 * (the last element of the prioritized list).
 */
@Nonnull
public StateObjectCollection<OperatorStateHandle> getJobManagerRawOperatorState() {
return lastElement(prioritizedRawOperatorState);
} | Returns the raw operator state from the job manager, which represents the ground truth about
what this state should represent. This is the alternative with lowest priority. | getJobManagerRawOperatorState | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | Apache-2.0 |
/**
 * Returns the managed keyed state from the job manager, which represents the ground truth
 * about what this state should represent. This is the alternative with lowest priority
 * (the last element of the prioritized list).
 */
@Nonnull
public StateObjectCollection<KeyedStateHandle> getJobManagerManagedKeyedState() {
return lastElement(prioritizedManagedKeyedState);
} | Returns the managed keyed state from the job manager, which represents the ground truth about
what this state should represent. This is the alternative with lowest priority. | getJobManagerManagedKeyedState | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | Apache-2.0 |
/**
 * Builds the {@link PrioritizedOperatorSubtaskState} from the job manager state and the
 * (local) alternatives collected by priority.
 *
 * <p>First, the per-state-type collections of all non-null alternatives are gathered; then
 * keyed state is merged via {@code computePrioritizedAlternatives} (matched by key-group
 * range), while operator and channel state go through {@code resolvePrioritizedAlternatives}
 * with type-specific approver functions.
 */
public PrioritizedOperatorSubtaskState build() {
int size = alternativesByPriority.size();
List<StateObjectCollection<OperatorStateHandle>> managedOperatorAlternatives =
new ArrayList<>(size);
List<StateObjectCollection<KeyedStateHandle>> managedKeyedAlternatives =
new ArrayList<>(size);
List<StateObjectCollection<OperatorStateHandle>> rawOperatorAlternatives =
new ArrayList<>(size);
List<StateObjectCollection<KeyedStateHandle>> rawKeyedAlternatives =
new ArrayList<>(size);
List<StateObjectCollection<InputStateHandle>> inputChannelStateAlternatives =
new ArrayList<>(size);
List<StateObjectCollection<OutputStateHandle>> resultSubpartitionStateAlternatives =
new ArrayList<>(size);
// Collect the per-type collections of every non-null alternative, preserving priority order.
for (OperatorSubtaskState subtaskState : alternativesByPriority) {
if (subtaskState != null) {
managedKeyedAlternatives.add(subtaskState.getManagedKeyedState());
rawKeyedAlternatives.add(subtaskState.getRawKeyedState());
managedOperatorAlternatives.add(subtaskState.getManagedOperatorState());
rawOperatorAlternatives.add(subtaskState.getRawOperatorState());
inputChannelStateAlternatives.add(subtaskState.getInputChannelState());
resultSubpartitionStateAlternatives.add(
subtaskState.getResultSubpartitionState());
}
}
return new PrioritizedOperatorSubtaskState(
computePrioritizedAlternatives(
jobManagerState.getManagedKeyedState(),
managedKeyedAlternatives,
KeyedStateHandle::getKeyGroupRange),
computePrioritizedAlternatives(
jobManagerState.getRawKeyedState(),
rawKeyedAlternatives,
KeyedStateHandle::getKeyGroupRange),
resolvePrioritizedAlternatives(
jobManagerState.getManagedOperatorState(),
managedOperatorAlternatives,
eqStateApprover(OperatorStateHandle::getStateNameToPartitionOffsets)),
resolvePrioritizedAlternatives(
jobManagerState.getRawOperatorState(),
rawOperatorAlternatives,
eqStateApprover(OperatorStateHandle::getStateNameToPartitionOffsets)),
resolvePrioritizedAlternatives(
jobManagerState.getInputChannelState(),
inputChannelStateAlternatives,
// Order input channels by gate index first, then channel index within the gate.
channelStateApprover(
(InputChannelInfo i1, InputChannelInfo i2) -> {
if (i1.getGateIdx() == i2.getGateIdx()) {
return Integer.compare(
i1.getInputChannelIdx(),
i2.getInputChannelIdx());
} else {
return Integer.compare(
i1.getGateIdx(), i2.getGateIdx());
}
})),
resolvePrioritizedAlternatives(
jobManagerState.getResultSubpartitionState(),
resultSubpartitionStateAlternatives,
// Order result subpartitions by partition index first, then subpartition index.
channelStateApprover(
(ResultSubpartitionInfo r1, ResultSubpartitionInfo r2) -> {
if (r1.getPartitionIdx() == r2.getPartitionIdx()) {
return Integer.compare(
r1.getSubPartitionIdx(),
r2.getSubPartitionIdx());
} else {
return Integer.compare(
r1.getPartitionIdx(), r2.getPartitionIdx());
}
})),
restoredCheckpointId);
} | Checkpoint id of the restored checkpoint or null if not restored. | build | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | Apache-2.0 |
/**
 * Creates an alternative recovery option by replacing as much job manager state with higher
 * prioritized (= local) alternatives as possible.
 *
 * @param jobManagerState the state that the task got assigned from the job manager (this
 *     state lives in remote storage)
 * @param alternativesByPriority local alternatives to the job manager state, ordered by
 *     priority
 * @param identityExtractor function to extract an identifier from a state object
 * @param <STATE_OBJ_TYPE> the type of the state objects we process
 * @param <ID_TYPE> the type of object that identifies a state object
 * @return prioritized state alternatives: the local/remote mix first and the pure remote
 *     state second, or just the job manager state if no mix could be built
 */
<STATE_OBJ_TYPE extends StateObject, ID_TYPE>
List<StateObjectCollection<STATE_OBJ_TYPE>> computePrioritizedAlternatives(
StateObjectCollection<STATE_OBJ_TYPE> jobManagerState,
List<StateObjectCollection<STATE_OBJ_TYPE>> alternativesByPriority,
Function<STATE_OBJ_TYPE, ID_TYPE> identityExtractor) {
if (alternativesByPriority != null
&& !alternativesByPriority.isEmpty()
&& jobManagerState.hasState()) {
Optional<StateObjectCollection<STATE_OBJ_TYPE>> mergedAlternative =
tryComputeMixedLocalAndRemoteAlternative(
jobManagerState, alternativesByPriority, identityExtractor);
// Return the mix of local/remote state as first and pure remote state as second
// alternative (in case that we fail to recover from the local state, e.g. because
// of corruption).
if (mergedAlternative.isPresent()) {
return Arrays.asList(mergedAlternative.get(), jobManagerState);
}
}
return Collections.singletonList(jobManagerState);
} | This method creates an alternative recovery option by replacing as much job manager state
with higher prioritized (=local) alternatives as possible.
@param jobManagerState the state that the task got assigned from the job manager (this
state lives in remote storage).
@param alternativesByPriority local alternatives to the job manager state, ordered by
priority.
@param identityExtractor function to extract an identifier from a state object.
@return prioritized state alternatives.
@param <STATE_OBJ_TYPE> the type of the state objects we process.
@param <ID_TYPE> the type of object that represents the id the state object type. | computePrioritizedAlternatives | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | Apache-2.0 |
/**
 * Builds a mixed local/remote alternative by replacing each job manager state object whose
 * id also appears in a (higher-priority) local alternative with that local object.
 *
 * @param jobManagerState the state assigned by the job manager (lives in remote storage)
 * @param alternativesByPriority local alternatives, ordered from high to low priority
 * @param identityExtractor function to extract an identifier from a state object
 * @param <STATE_OBJ_TYPE> the type of the state objects we process
 * @param <ID_TYPE> the type of object that identifies a state object
 * @return a collection where all replaceable JM handles are swapped for the best local
 *     alternative, or {@link Optional#empty()} if nothing could be replaced
 */
static <STATE_OBJ_TYPE extends StateObject, ID_TYPE>
Optional<StateObjectCollection<STATE_OBJ_TYPE>>
tryComputeMixedLocalAndRemoteAlternative(
StateObjectCollection<STATE_OBJ_TYPE> jobManagerState,
List<StateObjectCollection<STATE_OBJ_TYPE>> alternativesByPriority,
Function<STATE_OBJ_TYPE, ID_TYPE> identityExtractor) {
List<STATE_OBJ_TYPE> result = Collections.emptyList();
// Build hash index over ids of the JM state
Map<ID_TYPE, STATE_OBJ_TYPE> indexById =
jobManagerState.stream()
.collect(Collectors.toMap(identityExtractor, Function.identity()));
// Move through all alternative in order from high to low priority
for (StateObjectCollection<STATE_OBJ_TYPE> alternative : alternativesByPriority) {
// Check all the state objects in the alternative if they can replace JM state
for (STATE_OBJ_TYPE stateHandle : alternative) {
// Remove the current state object's id from the index to check for a match
if (indexById.remove(identityExtractor.apply(stateHandle)) != null) {
if (result.isEmpty()) {
// Lazy init result collection
result = new ArrayList<>(jobManagerState.size());
}
// If the id was still in the index, replace with higher prio alternative
result.add(stateHandle);
// If the index is empty we are already done, all JM state was replaces with
// the best alternative.
if (indexById.isEmpty()) {
return Optional.of(new StateObjectCollection<>(result));
}
}
}
}
// Nothing useful to return
if (result.isEmpty()) {
return Optional.empty();
}
// Add all remaining JM state objects that we could not replace from the index to the
// final result
result.addAll(indexById.values());
return Optional.of(new StateObjectCollection<>(result));
} | This method creates an alternative recovery option by replacing as much job manager state
with higher prioritized (=local) alternatives as possible. Returns empty Optional if the
JM state is empty or nothing could be replaced.
@param jobManagerState the state that the task got assigned from the job manager (this
state lives in remote storage).
@param alternativesByPriority local alternatives to the job manager state, ordered by
priority.
@param identityExtractor function to extract an identifier from a state object.
@return A state collection where all JM state handles for which we could find local *
alternatives are replaced by the alternative with the highest priority. Empty
optional if no state could be replaced.
@param <STATE_OBJ_TYPE> the type of the state objects we process.
@param <ID_TYPE> the type of object that represents the id the state object type. | tryComputeMixedLocalAndRemoteAlternative | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PrioritizedOperatorSubtaskState.java | Apache-2.0 |
/**
 * Returns the timestamp when the checkpoint was restored.
 *
 * @return Timestamp when the checkpoint was restored.
 */
public long getRestoreTimestamp() {
return restoreTimestamp;
} | Returns the timestamp when the checkpoint was restored.
@return Timestamp when the checkpoint was restored. | getRestoreTimestamp | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/RestoredCheckpointStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/RestoredCheckpointStats.java | Apache-2.0 |
/**
 * Redistributes the operator state of {@code oldParallelism} subtasks to {@code
 * newParallelism} subtasks in round-robin fashion.
 *
 * <p>When the parallelism is unchanged, only UNION state and partly-finished BROADCAST
 * state are repartitioned (everything else is returned as-is); otherwise all named states
 * are grouped by distribution mode and redistributed.
 *
 * @param previousParallelSubtaskStates the state of the old subtasks, one list per subtask
 * @param oldParallelism parallelism with which the state is currently partitioned
 * @param newParallelism parallelism with which the state should be partitioned
 * @return one list of state handles per new subtask
 */
@Override
public List<List<OperatorStateHandle>> repartitionState(
List<List<OperatorStateHandle>> previousParallelSubtaskStates,
int oldParallelism,
int newParallelism) {
Preconditions.checkNotNull(previousParallelSubtaskStates);
Preconditions.checkArgument(newParallelism > 0);
Preconditions.checkArgument(
previousParallelSubtaskStates.size() == oldParallelism,
"This method still depends on the order of the new and old operators");
// Assemble result from all merge maps
List<List<OperatorStateHandle>> result = new ArrayList<>(newParallelism);
List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList;
// We only round-robin repartition UNION state if new parallelism equals to the old one.
if (newParallelism == oldParallelism) {
Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>>
unionStates = collectUnionStates(previousParallelSubtaskStates);
Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>>
partlyFinishedBroadcastStates =
collectPartlyFinishedBroadcastStates(previousParallelSubtaskStates);
// Fast path: nothing to redistribute, hand back the input unchanged.
if (unionStates.isEmpty() && partlyFinishedBroadcastStates.isEmpty()) {
return previousParallelSubtaskStates;
}
// Initialize
mergeMapList = initMergeMapList(previousParallelSubtaskStates);
repartitionUnionState(unionStates, mergeMapList);
// TODO: Currently if some tasks is finished, we would rescale the
// remaining state. A better solution would be not touch the non-empty
// subtask state and only fix the empty ones.
repartitionBroadcastState(partlyFinishedBroadcastStates, mergeMapList);
} else {
// Reorganize: group by (State Name -> StreamStateHandle + Offsets)
GroupByStateNameResults nameToStateByMode =
groupByStateMode(previousParallelSubtaskStates);
if (OPTIMIZE_MEMORY_USE) {
previousParallelSubtaskStates
.clear(); // free for GC at to cost that old handles are no longer available
}
// Do the actual repartitioning for all named states
mergeMapList = repartition(nameToStateByMode, newParallelism);
}
for (int i = 0; i < mergeMapList.size(); ++i) {
result.add(i, new ArrayList<>(mergeMapList.get(i).values()));
}
return result;
} | Current default implementation of {@link OperatorStateRepartitioner} that redistributes state in
round robin fashion. | repartitionState | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/RoundRobinOperatorStateRepartitioner.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/RoundRobinOperatorStateRepartitioner.java | Apache-2.0 |
/**
 * Collects the states from the given parallel subtask states that use the specified
 * distribution {@code mode}, grouped by state name.
 *
 * @param parallelSubtaskStates the state of the old subtasks, one list per subtask
 * @param mode the distribution mode to filter for
 * @return map from state name to the collected entries across all subtasks
 */
private Map<String, StateEntry> collectStates(
List<List<OperatorStateHandle>> parallelSubtaskStates, OperatorStateHandle.Mode mode) {
Map<String, StateEntry> states =
CollectionUtil.newHashMapWithExpectedSize(parallelSubtaskStates.size());
for (int i = 0; i < parallelSubtaskStates.size(); ++i) {
// Effectively-final copy of the loop index for use inside the lambda below.
final int subtaskIndex = i;
List<OperatorStateHandle> subTaskState = parallelSubtaskStates.get(i);
for (OperatorStateHandle operatorStateHandle : subTaskState) {
if (operatorStateHandle == null) {
continue;
}
final Set<Map.Entry<String, OperatorStateHandle.StateMetaInfo>>
partitionOffsetEntries =
operatorStateHandle.getStateNameToPartitionOffsets().entrySet();
partitionOffsetEntries.stream()
.filter(entry -> entry.getValue().getDistributionMode().equals(mode))
.forEach(
entry -> {
StateEntry stateEntry =
states.computeIfAbsent(
entry.getKey(),
k ->
new StateEntry(
parallelSubtaskStates.size()
* partitionOffsetEntries
.size(),
parallelSubtaskStates.size()));
stateEntry.addEntry(
subtaskIndex,
Tuple2.of(
operatorStateHandle.getDelegateStateHandle(),
entry.getValue()));
});
}
}
return states;
} | Collect the states from given parallelSubtaskStates with the specific {@code mode}. | collectStates | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/RoundRobinOperatorStateRepartitioner.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/RoundRobinOperatorStateRepartitioner.java | Apache-2.0 |
/**
 * Groups the given subtask states by distribution mode and named state.
 *
 * <p>The result maps each {@link OperatorStateHandle.Mode} to a map from state name to the
 * list of (delegate stream handle, state meta info) pairs collected across all old subtasks.
 *
 * <p>Fix: the previous {@code @SuppressWarnings("unchecked, rawtype")} used a single,
 * unrecognized warning key, which javac silently ignores — it suppressed nothing. The
 * correct form lists the standard keys separately.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
private GroupByStateNameResults groupByStateMode(
List<List<OperatorStateHandle>> previousParallelSubtaskStates) {
// Reorganize: group by (State Name -> StreamStateHandle + StateMetaInfo)
EnumMap<
OperatorStateHandle.Mode,
Map<
String,
List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>>>
nameToStateByMode = new EnumMap<>(OperatorStateHandle.Mode.class);
for (OperatorStateHandle.Mode mode : OperatorStateHandle.Mode.values()) {
nameToStateByMode.put(mode, new HashMap<>());
}
for (List<OperatorStateHandle> previousParallelSubtaskState :
previousParallelSubtaskStates) {
for (OperatorStateHandle operatorStateHandle : previousParallelSubtaskState) {
if (operatorStateHandle == null) {
continue;
}
final Set<Map.Entry<String, OperatorStateHandle.StateMetaInfo>>
partitionOffsetEntries =
operatorStateHandle.getStateNameToPartitionOffsets().entrySet();
for (Map.Entry<String, OperatorStateHandle.StateMetaInfo> e :
partitionOffsetEntries) {
OperatorStateHandle.StateMetaInfo metaInfo = e.getValue();
Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>>
nameToState = nameToStateByMode.get(metaInfo.getDistributionMode());
List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>
stateLocations =
nameToState.computeIfAbsent(
e.getKey(),
k ->
new ArrayList<>(
previousParallelSubtaskStates.size()
* partitionOffsetEntries
.size()));
stateLocations.add(
Tuple2.of(operatorStateHandle.getDelegateStateHandle(), e.getValue()));
}
}
}
return new GroupByStateNameResults(nameToStateByMode);
} | Group by the different named states. | groupByStateMode | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/RoundRobinOperatorStateRepartitioner.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/RoundRobinOperatorStateRepartitioner.java | Apache-2.0 |
/** Returns the last issued checkpoint ID, i.e. the current counter value minus one. */
public long getLast() {
final long nextCheckpointId = checkpointIdCounter.get();
return nextCheckpointId - 1;
} | Returns the last checkpoint ID (current - 1).
@return Last checkpoint ID. | getLast | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StandaloneCheckpointIDCounter.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StandaloneCheckpointIDCounter.java | Apache-2.0 |
/**
 * Collects the managed keyed state handles of {@code operatorState} that intersect with the
 * given {@link KeyGroupRange}.
 *
 * @param operatorState all state handles of an operator
 * @param subtaskKeyGroupRange the KeyGroupRange of a subtask
 * @return all managed keyed state handles which intersect with the given KeyGroupRange,
 *     or an empty list if no subtask had state
 */
public static List<KeyedStateHandle> getManagedKeyedStateHandles(
OperatorState operatorState, KeyGroupRange subtaskKeyGroupRange) {
final int parallelism = operatorState.getParallelism();
// Allocated lazily so that operators without any state return the shared empty list.
List<KeyedStateHandle> subtaskKeyedStateHandles = null;
for (int i = 0; i < parallelism; i++) {
if (operatorState.getState(i) != null) {
Collection<KeyedStateHandle> keyedStateHandles =
operatorState.getState(i).getManagedKeyedState();
if (subtaskKeyedStateHandles == null) {
subtaskKeyedStateHandles =
new ArrayList<>(parallelism * keyedStateHandles.size());
}
extractIntersectingState(
keyedStateHandles, subtaskKeyGroupRange, subtaskKeyedStateHandles);
}
}
return subtaskKeyedStateHandles != null ? subtaskKeyedStateHandles : emptyList();
} | Collect {@link KeyGroupsStateHandle managedKeyedStateHandles} which have intersection with
given {@link KeyGroupRange} from {@link TaskState operatorState}.
@param operatorState all state handles of a operator
@param subtaskKeyGroupRange the KeyGroupRange of a subtask
@return all managedKeyedStateHandles which have intersection with given KeyGroupRange | getManagedKeyedStateHandles | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java | Apache-2.0 |
/**
 * Collects the raw keyed state handles of {@code operatorState} that intersect with the
 * given {@link KeyGroupRange}.
 *
 * @param operatorState all state handles of an operator
 * @param subtaskKeyGroupRange the KeyGroupRange of a subtask
 * @return all raw keyed state handles which intersect with the given KeyGroupRange,
 *     or an empty list if no subtask had state
 */
public static List<KeyedStateHandle> getRawKeyedStateHandles(
OperatorState operatorState, KeyGroupRange subtaskKeyGroupRange) {
final int parallelism = operatorState.getParallelism();
// Allocated lazily so that operators without any state return the shared empty list.
List<KeyedStateHandle> extractedKeyedStateHandles = null;
for (int i = 0; i < parallelism; i++) {
if (operatorState.getState(i) != null) {
Collection<KeyedStateHandle> rawKeyedState =
operatorState.getState(i).getRawKeyedState();
if (extractedKeyedStateHandles == null) {
extractedKeyedStateHandles =
new ArrayList<>(parallelism * rawKeyedState.size());
}
extractIntersectingState(
rawKeyedState, subtaskKeyGroupRange, extractedKeyedStateHandles);
}
}
return extractedKeyedStateHandles != null ? extractedKeyedStateHandles : emptyList();
} | Collect {@link KeyGroupsStateHandle rawKeyedStateHandles} which have intersection with given
{@link KeyGroupRange} from {@link TaskState operatorState}.
@param operatorState all state handles of a operator
@param subtaskKeyGroupRange the KeyGroupRange of a subtask
@return all rawKeyedStateHandles which have intersection with given KeyGroupRange | getRawKeyedStateHandles | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java | Apache-2.0 |
/**
 * Extracts the parts of the given state handles that intersect with {@code rangeToExtract}
 * and appends them to {@code extractedStateCollector}. Null handles and handles without an
 * intersection are skipped.
 */
@VisibleForTesting
public static void extractIntersectingState(
Collection<? extends KeyedStateHandle> originalSubtaskStateHandles,
KeyGroupRange rangeToExtract,
List<KeyedStateHandle> extractedStateCollector) {
for (KeyedStateHandle handle : originalSubtaskStateHandles) {
if (handle == null) {
continue;
}
final KeyedStateHandle intersection = handle.getIntersection(rangeToExtract);
if (intersection != null) {
extractedStateCollector.add(intersection);
}
}
} | Extracts certain key group ranges from the given state handles and adds them to the
collector. | extractIntersectingState | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java | Apache-2.0 |
/**
 * Groups the available set of key groups into key group partitions. A key group partition
 * is the set of key groups assigned to the same task; each entry of the returned list is
 * one such partition.
 *
 * <p><b>IMPORTANT</b>: The assignment of key groups to partitions has to be in sync with
 * the KeyGroupStreamPartitioner.
 *
 * @param numberKeyGroups number of available key groups (indexed 0 to numberKeyGroups - 1)
 * @param parallelism parallelism to generate the key group partitioning for
 * @return list of key group partitions, one per subtask index
 */
public static List<KeyGroupRange> createKeyGroupPartitions(
int numberKeyGroups, int parallelism) {
Preconditions.checkArgument(numberKeyGroups >= parallelism);
List<KeyGroupRange> result = new ArrayList<>(parallelism);
for (int i = 0; i < parallelism; ++i) {
result.add(
KeyGroupRangeAssignment.computeKeyGroupRangeForOperatorIndex(
numberKeyGroups, parallelism, i));
}
return result;
} | Groups the available set of key groups into key group partitions. A key group partition is
the set of key groups which is assigned to the same task. Each set of the returned list
constitutes a key group partition.
<p><b>IMPORTANT</b>: The assignment of key groups to partitions has to be in sync with the
KeyGroupStreamPartitioner.
@param numberKeyGroups Number of available key groups (indexed from 0 to numberKeyGroups - 1)
@param parallelism Parallelism to generate the key group partitioning for
@return List of key group partitions | createKeyGroupPartitions | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java | Apache-2.0 |
/**
 * Verifies parallelism/maxParallelism preconditions that must hold when restoring state:
 * the restored max parallelism must not be below the configured parallelism, and if the
 * restored max parallelism differs from the vertex's, the vertex must allow rescaling it
 * (otherwise restore fails).
 *
 * @param operatorState state to restore
 * @param executionJobVertex task for which the state should be restored
 * @throws IllegalStateException if either precondition is violated
 */
private static void checkParallelismPreconditions(
OperatorState operatorState, ExecutionJobVertex executionJobVertex) {
// ----------------------------------------max parallelism
// preconditions-------------------------------------
if (operatorState.getMaxParallelism() < executionJobVertex.getParallelism()) {
throw new IllegalStateException(
"The state for task "
+ executionJobVertex.getJobVertexId()
+ " can not be restored. The maximum parallelism ("
+ operatorState.getMaxParallelism()
+ ") of the restored state is lower than the configured parallelism ("
+ executionJobVertex.getParallelism()
+ "). Please reduce the parallelism of the task to be lower or equal to the maximum parallelism.");
}
// check that the number of key groups have not changed or if we need to override it to
// satisfy the restored state
if (operatorState.getMaxParallelism() != executionJobVertex.getMaxParallelism()) {
if (executionJobVertex.canRescaleMaxParallelism(operatorState.getMaxParallelism())) {
LOG.debug(
"Rescaling maximum parallelism for JobVertex {} from {} to {}",
executionJobVertex.getJobVertexId(),
executionJobVertex.getMaxParallelism(),
operatorState.getMaxParallelism());
executionJobVertex.setMaxParallelism(operatorState.getMaxParallelism());
} else {
// if the max parallelism cannot be rescaled, we complain on mismatch
throw new IllegalStateException(
"The maximum parallelism ("
+ operatorState.getMaxParallelism()
+ ") with which the latest "
+ "checkpoint of the execution job vertex "
+ executionJobVertex
+ " has been taken and the current maximum parallelism ("
+ executionJobVertex.getMaxParallelism()
+ ") changed. This "
+ "is currently not supported.");
}
}
} | Verifies conditions in regards to parallelism and maxParallelism that must be met when
restoring state.
@param operatorState state to restore
@param executionJobVertex task for which the state should be restored | checkParallelismPreconditions | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java | Apache-2.0 |
/**
 * Repartitions the given operator state using the given {@link OperatorStateRepartitioner}
 * with respect to the new parallelism. A {@code null} input yields an empty list.
 *
 * @param opStateRepartitioner partitioner to use
 * @param chainOpParallelStates state to repartition, may be {@code null}
 * @param oldParallelism parallelism with which the state is currently partitioned
 * @param newParallelism parallelism with which the state should be partitioned
 * @return repartitioned state
 */
public static <T> List<List<T>> applyRepartitioner(
OperatorStateRepartitioner<T> opStateRepartitioner,
List<List<T>> chainOpParallelStates,
int oldParallelism,
int newParallelism) {
return chainOpParallelStates == null
? emptyList()
: opStateRepartitioner.repartitionState(
chainOpParallelStates, oldParallelism, newParallelism);
} | Repartitions the given operator state using the given {@link OperatorStateRepartitioner} with
respect to the new parallelism.
@param opStateRepartitioner partitioner to use
@param chainOpParallelStates state to repartition
@param oldParallelism parallelism with which the state is currently partitioned
@param newParallelism parallelism with which the state should be partitioned
@return repartitioned state | applyRepartitioner | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java | Apache-2.0 |
/**
 * Records a value into the min/max/sum/count aggregates and, if a histogram is configured,
 * into the histogram as well. Negative values are ignored.
 *
 * @param value value to add for min/max/avg stats; skipped if negative
 */
void add(long value) {
if (value < 0) {
return;
}
// On the first sample both min and max are seeded with the value itself.
min = count == 0 ? value : Math.min(min, value);
max = count == 0 ? value : Math.max(max, value);
count++;
sum += value;
if (histogram != null) {
histogram.update(value);
}
} | Adds the value to the stats if it is >= 0.
@param value Value to add for min/max/avg stats.. | add | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StatsSummary.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StatsSummary.java | Apache-2.0 |
/**
 * Returns a snapshot of the current state. The histogram statistics are {@code null} if no
 * histogram was built.
 *
 * @return A snapshot of the current state.
 */
public StatsSummarySnapshot createSnapshot() {
return new StatsSummarySnapshot(
min, max, sum, count, histogram == null ? null : histogram.getStatistics());
} | Returns a snapshot of the current state.
@return A snapshot of the current state. | createSnapshot | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StatsSummary.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StatsSummary.java | Apache-2.0 |
/**
 * Looks up the value for the given quantile in the underlying histogram statistics.
 *
 * @param quantile quantile to calculate the value for
 * @return the quantile value, or {@link Double#NaN} when no histogram was built
 */
public double getQuantile(double quantile) {
    if (histogram == null) {
        return Double.NaN;
    }
    return histogram.getQuantile(quantile);
} | Returns the value for the given quantile based on the represented histogram statistics or
{@link Double#NaN} if the histogram was not built.
@param quantile Quantile to calculate the value for
@return Value for the given quantile | getQuantile | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StatsSummarySnapshot.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StatsSummarySnapshot.java | Apache-2.0 |
/** Returns the timestamp at which subtask initialization started. */
public long getInitializationStartTs() {
    return initializationStartTs;
} | A builder for {@link SubTaskInitializationMetrics}. Class is {@link ThreadSafe} to allow using it
from async threads when used by state backends. | getInitializationStartTs | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubTaskInitializationMetricsBuilder.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubTaskInitializationMetricsBuilder.java | Apache-2.0 |
/** Returns the index of the subtask these stats belong to. */
public int getSubtaskIndex() {
    return subtaskIndex;
} | Is the checkpoint completed by this subtask. | getSubtaskIndex | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | Apache-2.0 |
/**
 * Returns the size of the checkpointed state at this subtask.
 *
 * @return checkpoint state size of the subtask
 */
public long getStateSize() {
    return stateSize;
} | Returns the size of the checkpointed state at this subtask.
@return Checkpoint state size of the sub task. | getStateSize | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | Apache-2.0 |
/**
 * Returns the incremental state size of this subtask's checkpoint.
 *
 * @return the incremental state size
 */
public long getCheckpointedSize() {
    return checkpointedSize;
} | Returns the incremental state size.
@return The incremental state size. | getCheckpointedSize | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | Apache-2.0 |
/**
 * Returns the timestamp when the acknowledgement of this subtask was received at the
 * coordinator.
 *
 * @return ACK timestamp at the coordinator
 */
public long getAckTimestamp() {
    return ackTimestamp;
} | Returns the timestamp when the acknowledgement of this subtask was received at the
coordinator.
@return ACK timestamp at the coordinator. | getAckTimestamp | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | Apache-2.0 |
/**
 * Computes the duration from the given trigger timestamp until this subtask's ACK,
 * clamped at zero so a trigger timestamp after the ACK never yields a negative value.
 *
 * @param triggerTimestamp trigger timestamp of the checkpoint
 * @return duration since the given trigger timestamp, never negative
 */
public long getEndToEndDuration(long triggerTimestamp) {
    final long duration = ackTimestamp - triggerTimestamp;
    return duration > 0 ? duration : 0;
} | Computes the duration since the given trigger timestamp.
<p>If the trigger timestamp is greater than the ACK timestamp, this returns <code>0</code>.
@param triggerTimestamp Trigger timestamp of the checkpoint.
@return Duration since the given trigger timestamp. | getEndToEndDuration | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | Apache-2.0 |
/**
 * Returns the duration of the synchronous part of the checkpoint, or {@code -1} if the
 * runtime did not report it.
 */
public long getSyncCheckpointDuration() {
    return syncCheckpointDuration;
} | @return Duration of the synchronous part of the checkpoint or <code>-1</code> if the runtime
did not report this. | getSyncCheckpointDuration | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | Apache-2.0 |
/**
 * Returns the duration of the asynchronous part of the checkpoint, or {@code -1} if the
 * runtime did not report it.
 */
public long getAsyncCheckpointDuration() {
    return asyncCheckpointDuration;
} | @return Duration of the asynchronous part of the checkpoint or <code>-1</code> if the runtime
did not report this. | getAsyncCheckpointDuration | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | Apache-2.0 |
/** Returns the total number of processed bytes during the checkpoint. */
public long getProcessedData() {
    return processedData;
} | @return the total number of processed bytes during the checkpoint. | getProcessedData | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | Apache-2.0 |
/** Returns the total number of persisted bytes during the checkpoint. */
public long getPersistedData() {
    return persistedData;
} | @return the total number of persisted bytes during the checkpoint. | getPersistedData | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SubtaskStateStats.java | Apache-2.0 |
/** Returns whether all the operators of the task were already finished on restoring. */
public boolean isTaskDeployedAsFinished() {
    return isTaskDeployedAsFinished;
} | Returns whether all the operators of the task are already finished on restoring. | isTaskDeployedAsFinished | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java | Apache-2.0 |
/** Returns whether all the operators of the task have called their finish methods. */
public boolean isTaskFinished() {
    return isTaskFinished;
} | Returns whether all the operators of the task have called finished methods. | isTaskFinished | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java | Apache-2.0 |
/**
 * Returns the subtask state for the given operator id, or {@code null} if this snapshot
 * contains no state for that operator.
 */
@Nullable
public OperatorSubtaskState getSubtaskStateByOperatorID(OperatorID operatorID) {
    return subtaskStatesByOperatorID.get(operatorID);
} | Returns the subtask state for the given operator id (or null if not contained). | getSubtaskStateByOperatorID | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java | Apache-2.0 |
/**
 * Associates the given operator id with the given subtask state.
 *
 * @param operatorID id of the operator the state belongs to
 * @param state non-null subtask state to register
 * @return the subtask state of a previous mapping, or {@code null} if there was none
 */
public OperatorSubtaskState putSubtaskStateByOperatorID(
        @Nonnull OperatorID operatorID, @Nonnull OperatorSubtaskState state) {
    final OperatorSubtaskState checkedState = Preconditions.checkNotNull(state);
    return subtaskStatesByOperatorID.put(operatorID, checkedState);
} | Maps the given operator id to the given subtask state. Returns the subtask state of a
previous mapping, if such a mapping existed or null otherwise. | putSubtaskStateByOperatorID | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java | Apache-2.0 |
/** Returns the set of all mappings from operator id to the corresponding subtask state. */
public Set<Map.Entry<OperatorID, OperatorSubtaskState>> getSubtaskStateMappings() {
    return subtaskStatesByOperatorID.entrySet();
} | Returns the set of all mappings from operator id to the corresponding subtask state. | getSubtaskStateMappings | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java | Apache-2.0 |
/**
 * Returns whether this snapshot carries any state: {@code true} if at least one registered
 * {@link OperatorSubtaskState} reports state, or if the task was deployed as finished.
 */
public boolean hasState() {
    boolean anyOperatorHasState = false;
    for (OperatorSubtaskState subtaskState : subtaskStatesByOperatorID.values()) {
        if (subtaskState != null && subtaskState.hasState()) {
            anyOperatorHasState = true;
            break;
        }
    }
    return anyOperatorHasState || isTaskDeployedAsFinished;
} | Returns true if at least one {@link OperatorSubtaskState} in subtaskStatesByOperatorID has
state. | hasState | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java | Apache-2.0 |
/**
 * Returns the input channel mapping for rescaling with in-flight data, as extracted from
 * the contained operator subtask states.
 */
public InflightDataRescalingDescriptor getInputRescalingDescriptor() {
    return getMapping(OperatorSubtaskState::getInputRescalingDescriptor);
} | Returns the input channel mapping for rescaling with in-flight data or {@link
InflightDataRescalingDescriptor#NO_RESCALE}. | getInputRescalingDescriptor | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java | Apache-2.0 |
/**
 * Returns the latest acknowledged subtask stats, or {@code null} if none was acknowledged
 * yet.
 */
@Nullable
public SubtaskStateStats getLatestAcknowledgedSubtaskStats() {
    return latestAckedSubtaskStats;
} | @return The latest acknowledged subtask stats or <code>null</code> if none was acknowledged
yet. | getLatestAcknowledgedSubtaskStats | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateStats.java | Apache-2.0 |
/**
 * Returns the ACK timestamp of the latest acknowledged subtask.
 *
 * @return ack timestamp of the latest acknowledged subtask, or {@code -1} if none was
 *     acknowledged yet
 */
public long getLatestAckTimestamp() {
    final SubtaskStateStats latest = latestAckedSubtaskStats;
    return latest == null ? -1 : latest.getAckTimestamp();
} | @return Ack timestamp of the latest acknowledged subtask or <code>-1</code> if none was
acknowledged yet. | getLatestAckTimestamp | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateStats.java | Apache-2.0
/** Returns the total persisted size over all subtasks of this checkpoint. */
public long getCheckpointedSize() {
    return summaryStats.getCheckpointedSize().getSum();
} | @return Total persisted size over all subtasks of this checkpoint. | getCheckpointedSize | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateStats.java | Apache-2.0 |
/**
 * Returns the duration of this checkpoint at the task/operator, measured from the trigger
 * timestamp to the latest acknowledged subtask's ACK and clamped at zero.
 *
 * @param triggerTimestamp trigger timestamp of the checkpoint
 * @return duration at this task/operator, or {@code -1} if no subtask was acknowledged yet
 */
public long getEndToEndDuration(long triggerTimestamp) {
    final SubtaskStateStats latest = getLatestAcknowledgedSubtaskStats();
    if (latest == null) {
        return -1;
    }
    return Math.max(0, latest.getAckTimestamp() - triggerTimestamp);
} | Returns the duration of this checkpoint at the task/operator calculated as the time since
triggering until the latest acknowledged subtask or <code>-1</code> if no subtask was
acknowledged yet.
@return Duration of this checkpoint at the task/operator or <code>-1</code> if no subtask was
acknowledged yet. | getEndToEndDuration | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateStats.java | Apache-2.0 |
/**
 * Returns the stats for all subtasks. Elements are {@code null} if no stats are available
 * yet for the respective subtask.
 *
 * <p>Note: the returned array is the internal one and must not be modified.
 */
public SubtaskStateStats[] getSubtaskStats() {
    return subtaskStats;
} | Returns the stats for all subtasks.
<p>Elements of the returned array are <code>null</code> if no stats are available yet for the
respective subtask.
<p>Note: The returned array must not be modified.
@return Array of subtask stats (elements are <code>null</code> if no stats available yet). | getSubtaskStats | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateStats.java | Apache-2.0 |
/** Returns the summary of the subtask stats. */
public TaskStateStatsSummary getSummaryStats() {
    return summaryStats;
} | @return Summary of the subtask stats. | getSummaryStats | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateStats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateStats.java | Apache-2.0 |
/**
 * Converts a checkpoint id into a ZooKeeper path: a leading slash followed by the id
 * zero-padded to 19 digits (so lexicographic order matches numeric order).
 *
 * @param checkpointId checkpoint id to convert to a path
 * @return path created from the given checkpoint id
 */
@Override
public String checkpointIDToName(long checkpointId) {
    return '/' + String.format("%019d", checkpointId);
} | Convert a checkpoint id into a ZooKeeper path.
@param checkpointId to convert to the path
@return Path created from the given checkpoint id | checkpointIDToName | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/ZooKeeperCheckpointStoreUtil.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/ZooKeeperCheckpointStoreUtil.java | Apache-2.0 |
/**
 * Converts a ZooKeeper path back to the checkpoint id it encodes.
 *
 * @param path node name in ZooKeeper, with or without a leading slash
 * @return the parsed checkpoint id, or {@code INVALID_CHECKPOINT_ID} if the path cannot
 *     be parsed (including an empty path)
 */
@Override
public long nameToCheckpointID(String path) {
    try {
        // Strip the leading slash produced by checkpointIDToName, if present. Using
        // startsWith instead of charAt(0) avoids a StringIndexOutOfBoundsException on
        // an empty path; an empty remainder fails parseLong and is handled below.
        final String numberString = path.startsWith("/") ? path.substring(1) : path;
        return Long.parseLong(numberString);
    } catch (NumberFormatException e) {
        // Include the cause so the actual parse failure is visible in the logs.
        LOG.warn(
                "Could not parse checkpoint id from {}. This indicates that the "
                        + "checkpoint id to path conversion has changed.",
                path,
                e);
        return INVALID_CHECKPOINT_ID;
    }
} | Converts a path to the checkpoint id.
@param path in ZooKeeper
@return Checkpoint id parsed from the path | nameToCheckpointID | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/ZooKeeperCheckpointStoreUtil.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/ZooKeeperCheckpointStoreUtil.java | Apache-2.0 |
/**
 * Returns the shared channel-state write executor, creating (and optionally starting) it
 * on first use, and registers the given subtask with it.
 *
 * @param jobVertexID vertex of the subtask to register
 * @param subtaskIndex index of the subtask to register
 * @param checkpointStorageWorkerViewSupplier supplies the checkpoint storage view used by
 *     the request dispatcher
 * @param maxSubtasksPerChannelStateFile maximum number of subtasks sharing one channel
 *     state file
 * @param startExecutor whether to start the executor immediately; tests pass {@code false}
 *     to avoid creating too many threads when executors are created frequently
 * @return the (possibly newly created) executor, with the subtask registered
 */
ChannelStateWriteRequestExecutor getOrCreateExecutor(
        JobVertexID jobVertexID,
        int subtaskIndex,
        SupplierWithException<CheckpointStorageWorkerView, ? extends IOException>
                checkpointStorageWorkerViewSupplier,
        int maxSubtasksPerChannelStateFile,
        boolean startExecutor) {
    synchronized (lock) {
        if (executor == null) {
            ChannelStateWriteRequestDispatcher dispatcher =
                    new ChannelStateWriteRequestDispatcherImpl(
                            checkpointStorageWorkerViewSupplier,
                            new ChannelStateSerializerImpl());
            executor =
                    new ChannelStateWriteRequestExecutorImpl(
                            dispatcher,
                            maxSubtasksPerChannelStateFile,
                            // Release callback: clears the cached executor (under the same
                            // lock) so a fresh one is created on the next call.
                            executor -> {
                                assert Thread.holdsLock(lock);
                                checkState(this.executor == executor);
                                this.executor = null;
                            },
                            lock,
                            jobID);
            if (startExecutor) {
                executor.start();
            }
        }
        ChannelStateWriteRequestExecutor currentExecutor = executor;
        currentExecutor.registerSubtask(jobVertexID, subtaskIndex);
        return currentExecutor;
    }
} | @param startExecutor It is for test to prevent create too many threads when some unit tests
create executor frequently. | getOrCreateExecutor | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/channel/ChannelStateWriteRequestExecutorFactory.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/channel/ChannelStateWriteRequestExecutorFactory.java | Apache-2.0 |
/**
 * Registers gauges exposing the given file-merging space statistics (logical/physical
 * file counts and sizes) on this metric group.
 *
 * @param spaceStat the space statistics to expose
 */
public void registerMetrics(FileMergingSnapshotManager.SpaceStat spaceStat) {
    gauge(LOGICAL_FILE_COUNT, spaceStat.logicalFileCount::get);
    gauge(LOGICAL_FILE_SIZE, spaceStat.logicalFileSize::get);
    gauge(PHYSICAL_FILE_COUNT, spaceStat.physicalFileCount::get);
    gauge(PHYSICAL_FILE_SIZE, spaceStat.physicalFileSize::get);
} | Metrics related to the file merging snapshot manager. Thread-safety is required because it is
used by multiple task threads. | registerMetrics | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/FileMergingMetricGroup.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/FileMergingMetricGroup.java | Apache-2.0 |
/**
 * Creates and registers a logical file occupying the given range of a physical file.
 *
 * @param physicalFile the underlying physical file
 * @param startOffset the offset in the physical file where the logical file starts
 * @param length the length of the logical file
 * @param subtaskKey the id of the subtask that the logical file belongs to
 * @return the created logical file
 */
protected LogicalFile createLogicalFile(
        @Nonnull PhysicalFile physicalFile,
        long startOffset,
        long length,
        @Nonnull SubtaskKey subtaskKey) {
    final LogicalFileId logicalFileId = LogicalFileId.generateRandomId();
    final LogicalFile logicalFile =
            new LogicalFile(logicalFileId, physicalFile, startOffset, length, subtaskKey);
    knownLogicalFiles.put(logicalFileId, logicalFile);
    // Space statistics only track physical files owned by this manager.
    if (physicalFile.isOwned()) {
        spaceStat.onLogicalFileCreate(length);
        spaceStat.onPhysicalFileUpdate(length);
    }
    return logicalFile;
} | Create a logical file on a physical file.
@param physicalFile the underlying physical file.
@param startOffset the offset in the physical file that the logical file starts from.
@param length the length of the logical file.
@param subtaskKey the id of the subtask that the logical file belongs to.
@return the created logical file. | createLogicalFile | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/FileMergingSnapshotManagerBase.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/FileMergingSnapshotManagerBase.java | Apache-2.0 |
/**
 * Asynchronously deletes the physical file at the given path via the I/O executor and
 * updates the space statistics on success.
 *
 * @param filePath the file path to delete
 * @param size the size of the file, used to update the space statistics
 */
protected final void deletePhysicalFile(Path filePath, long size) {
    ioExecutor.execute(
            () -> {
                try {
                    fs.delete(filePath, false);
                    spaceStat.onPhysicalFileDelete(size);
                    LOG.debug("Physical file deleted: {}.", filePath);
                } catch (IOException e) {
                    // Best-effort cleanup: include the cause so the failure reason is
                    // visible in the logs instead of being silently dropped.
                    LOG.warn("Fail to delete file: {}", filePath, e);
                }
            });
} | Delete a physical file by given file path. Use the io executor to do the deletion.
@param filePath the given file path to delete. | deletePhysicalFile | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/FileMergingSnapshotManagerBase.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/FileMergingSnapshotManagerBase.java | Apache-2.0 |
/**
 * Creates the physical file pool matching the configured {@code filePoolType}.
 *
 * @return a newly created physical file pool
 * @throws UnsupportedOperationException if the pool type is not recognized
 */
protected final PhysicalFilePool createPhysicalPool() {
    switch (filePoolType) {
        case BLOCKING:
            return new BlockingPhysicalFilePool(maxPhysicalFileSize, this::createPhysicalFile);
        case NON_BLOCKING:
            return new NonBlockingPhysicalFilePool(
                    maxPhysicalFileSize, this::createPhysicalFile);
        default:
            throw new UnsupportedOperationException(
                    "Unsupported type of physical file pool: " + filePoolType);
    }
} | Create physical pool by filePoolType.
@return physical file pool. | createPhysicalPool | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/FileMergingSnapshotManagerBase.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/FileMergingSnapshotManagerBase.java | Apache-2.0 |
/**
 * Creates the file-merging snapshot manager matching the configured merging type.
 *
 * <p>Both branches differ only in the concrete manager class; the id is derived from the
 * job id and task manager resource id, and missing executor/metric group fall back to a
 * direct executor and an unregistered metric group respectively.
 *
 * @return the created manager
 * @throws UnsupportedOperationException if the merging type is not recognized
 */
public FileMergingSnapshotManager build() {
    switch (fileMergingType) {
        case MERGE_WITHIN_CHECKPOINT:
            return new WithinCheckpointFileMergingSnapshotManager(
                    String.format(ID_FORMAT, jobId, tmResourceId),
                    maxFileSize,
                    filePoolType,
                    maxSpaceAmplification,
                    ioExecutor == null ? Runnable::run : ioExecutor,
                    metricGroup == null
                            ? new UnregisteredMetricGroups
                                    .UnregisteredTaskManagerJobMetricGroup()
                            : metricGroup);
        case MERGE_ACROSS_CHECKPOINT:
            return new AcrossCheckpointFileMergingSnapshotManager(
                    String.format(ID_FORMAT, jobId, tmResourceId),
                    maxFileSize,
                    filePoolType,
                    maxSpaceAmplification,
                    ioExecutor == null ? Runnable::run : ioExecutor,
                    metricGroup == null
                            ? new UnregisteredMetricGroups
                                    .UnregisteredTaskManagerJobMetricGroup()
                            : metricGroup);
        default:
            throw new UnsupportedOperationException(
                    String.format(
                            "Unsupported type %s when creating file merging manager",
                            fileMergingType));
    }
} | Create file-merging snapshot manager based on configuration.
@return the created manager. | build | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/FileMergingSnapshotManagerBuilder.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/FileMergingSnapshotManagerBuilder.java | Apache-2.0 |
/** Generates a random, unique id for a {@link LogicalFile}, backed by a random UUID. */
public static LogicalFileId generateRandomId() {
    return new LogicalFileId(UUID.randomUUID().toString());
} | ID for {@link LogicalFile}. It should be unique for each file. | generateRandomId | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/LogicalFile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/LogicalFile.java | Apache-2.0 |
/** Returns the id of this logical file. */
public LogicalFileId getFileId() {
    return fileId;
} | The id of the subtask that this logical file belongs to. | getFileId | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/LogicalFile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/LogicalFile.java | Apache-2.0 |
/**
 * Records that the given checkpoint uses (or reuses) this logical file by advancing the
 * last-used checkpoint id. The id only ever moves forward; older checkpoint ids are
 * ignored.
 *
 * @param checkpointId the checkpoint that uses this logical file
 */
public void advanceLastCheckpointId(long checkpointId) {
    this.lastUsedCheckpointID = Math.max(this.lastUsedCheckpointID, checkpointId);
} | A logical file may share across checkpoints (especially for shared state). When this logical
file is used/reused by a checkpoint, update the last checkpoint id that uses this logical
file.
@param checkpointId the checkpoint that uses this logical file. | advanceLastCheckpointId | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/LogicalFile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/LogicalFile.java | Apache-2.0 |
/**
 * Closes the physical file's output stream and marks the file closed so it is not reused
 * for further writes. Idempotent: subsequent calls are no-ops.
 *
 * @throws IOException if closing the underlying output stream fails
 */
private void innerClose() throws IOException {
    synchronized (this) {
        if (closed) {
            return;
        }
        closed = true;
        if (outputStream != null) {
            outputStream.close();
            // Drop the reference so the stream cannot be used after close.
            outputStream = null;
        }
    }
} | Close the physical file, stop reusing.
@throws IOException if anything goes wrong with file system. | innerClose | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/PhysicalFile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/PhysicalFile.java | Apache-2.0 |
/**
 * Gets (or lazily creates) the physical file queue for the given subtask and checkpoint
 * scope. Shared-scope files are pooled per subtask; all other scopes share one exclusive
 * queue.
 *
 * @param subtaskKey the key of the current subtask
 * @param scope the scope of the checkpoint
 * @return an existing or newly created file queue
 */
protected Queue<PhysicalFile> getFileQueue(
        FileMergingSnapshotManager.SubtaskKey subtaskKey, CheckpointedStateScope scope) {
    if (CheckpointedStateScope.SHARED.equals(scope)) {
        return sharedPhysicalFilePoolBySubtask.computeIfAbsent(
                subtaskKey, key -> createFileQueue());
    }
    return exclusivePhysicalFilePool;
} | Get or create a file queue for specific subtaskKey and checkpoint scope.
@param subtaskKey the key of current subtask.
@param scope the scope of the checkpoint.
@return an existing or created file queue. | getFileQueue | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/PhysicalFilePool.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/PhysicalFilePool.java | Apache-2.0 |
/**
 * Returns whether this pool holds no physical files, i.e. both the per-subtask shared
 * pools and the exclusive pool are empty.
 */
public boolean isEmpty() {
    return sharedPhysicalFilePoolBySubtask.isEmpty() && exclusivePhysicalFilePool.isEmpty();
} | Return whether the pool is empty or not.
@return whether the pool is empty or not. | isEmpty | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/PhysicalFilePool.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/PhysicalFilePool.java | Apache-2.0 |
/**
 * Resets all given master hooks, wrapping any non-fatal failure into a
 * {@link FlinkException}.
 *
 * @param hooks the hooks to reset
 * @param log unused; kept for signature compatibility
 * @throws FlinkException if any hook throws while being reset
 */
public static void reset(
        final Collection<MasterTriggerRestoreHook<?>> hooks,
        @SuppressWarnings("unused") final Logger log)
        throws FlinkException {
    for (MasterTriggerRestoreHook<?> hook : hooks) {
        final String identifier = hook.getIdentifier();
        try {
            hook.reset();
        } catch (Throwable t) {
            // Fatal errors are re-thrown immediately; everything else is wrapped.
            ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
            throw new FlinkException(
                    "Error while resetting checkpoint master hook '" + identifier + '\'', t);
        }
    }
} | Resets the master hooks.
@param hooks The hooks to reset
@throws FlinkException Thrown, if the hooks throw an exception. | reset | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/hooks/MasterHooks.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/hooks/MasterHooks.java | Apache-2.0 |
/**
 * Closes all given master hooks, logging (but not propagating) failures so every hook
 * gets a chance to close.
 *
 * @param hooks the hooks to close
 * @param log logger used to report close failures
 */
public static void close(
        final Collection<MasterTriggerRestoreHook<?>> hooks, final Logger log) {
    for (MasterTriggerRestoreHook<?> hook : hooks) {
        try {
            hook.close();
        } catch (Throwable t) {
            final String identifier = hook.getIdentifier();
            log.warn(
                    "Failed to cleanly close a checkpoint master hook ("
                            + identifier
                            + ")",
                    t);
        }
    }
} | Closes the master hooks.
@param hooks The hooks to close | close | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/hooks/MasterHooks.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/hooks/MasterHooks.java | Apache-2.0 |
/**
 * Triggers the given master hook for a checkpoint and returns a future with its
 * serialized state.
 *
 * <p>The returned future completes with {@code null} when the hook produced no state,
 * with a {@link MasterState} wrapping the serialized result otherwise, and exceptionally
 * (as a {@link FlinkException}) when triggering, the hook's own future, or serialization
 * fails.
 *
 * @param hook the master hook to trigger
 * @param checkpointId the checkpoint ID of the triggering checkpoint
 * @param timestamp the (informational) timestamp of the triggering checkpoint
 * @param executor an executor that can be used for asynchronous I/O calls
 * @param <T> the type of data produced by the hook
 * @return future completing with the hook's serialized state (or {@code null})
 */
public static <T> CompletableFuture<MasterState> triggerHook(
        MasterTriggerRestoreHook<T> hook,
        long checkpointId,
        long timestamp,
        Executor executor) {
    final String id = hook.getIdentifier();
    final SimpleVersionedSerializer<T> serializer = hook.createCheckpointDataSerializer();
    try {
        // call the hook!
        final CompletableFuture<T> resultFuture =
                hook.triggerCheckpoint(checkpointId, timestamp, executor);
        // A null future means the hook is stateless for this checkpoint.
        if (resultFuture == null) {
            return CompletableFuture.completedFuture(null);
        }
        return resultFuture
                .thenApply(
                        result -> {
                            // if the result of the future is not null, return it as state
                            if (result == null) {
                                return null;
                            } else if (serializer != null) {
                                try {
                                    final int version = serializer.getVersion();
                                    final byte[] bytes = serializer.serialize(result);
                                    return new MasterState(id, bytes, version);
                                } catch (Throwable t) {
                                    ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
                                    throw new CompletionException(
                                            new FlinkException(
                                                    "Failed to serialize state of master hook '"
                                                            + id
                                                            + '\'',
                                                    t));
                                }
                            } else {
                                // A hook that produces state must also provide a serializer.
                                throw new CompletionException(
                                        new FlinkException(
                                                "Checkpoint hook '"
                                                        + id
                                                        + " is stateful but creates no serializer"));
                            }
                        })
                .exceptionally(
                        (throwable) -> {
                            throw new CompletionException(
                                    new FlinkException(
                                            "Checkpoint master hook '"
                                                    + id
                                                    + "' produced an exception",
                                            throwable.getCause()));
                        });
    } catch (Throwable t) {
        // Synchronous failure of triggerCheckpoint itself.
        return FutureUtils.completedExceptionally(
                new FlinkException(
                        "Error while triggering checkpoint master hook '" + id + '\'', t));
    }
} | Trigger master hook and return a completable future with state.
@param hook The master hook given
@param checkpointId The checkpoint ID of the triggering checkpoint
@param timestamp The (informational) timestamp for the triggering checkpoint
@param executor An executor that can be used for asynchronous I/O calls
@param <T> The type of data produced by the hook
@return the completable future with state | triggerHook | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/hooks/MasterHooks.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/hooks/MasterHooks.java | Apache-2.0 |
/** Registers the given serializer under its format version for lookup by version. */
private static void registerSerializer(MetadataSerializer serializer) {
    SERIALIZERS.put(serializer.getVersion(), serializer);
} | Helper to access {@link MetadataSerializer}s for specific format versions.
<p>The serializer for a specific version can be obtained via {@link #getSerializer(int)}. | registerSerializer | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/metadata/MetadataSerializers.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/metadata/MetadataSerializers.java | Apache-2.0 |
/**
 * Returns the {@link MetadataSerializer} registered for the given savepoint version.
 *
 * @param version savepoint version to get the serializer for
 * @return serializer for the given version
 * @throws IllegalArgumentException if no serializer is registered for the version
 */
public static MetadataSerializer getSerializer(int version) {
    final MetadataSerializer serializer = SERIALIZERS.get(version);
    if (serializer == null) {
        throw new IllegalArgumentException(
                "Unrecognized checkpoint version number: " + version);
    }
    return serializer;
} | Returns the {@link MetadataSerializer} for the given savepoint version.
@param version Savepoint version to get serializer for
@return Savepoint for the given version
@throws IllegalArgumentException If unknown savepoint version | getSerializer | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/metadata/MetadataSerializers.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/metadata/MetadataSerializers.java | Apache-2.0 |
/**
 * Extracts all user jars and distributed-cache artifacts referenced by the given
 * {@link ExecutionPlan} and uploads them using a {@link BlobClient} obtained from the
 * given supplier.
 *
 * @param executionPlan execution plan whose files should be uploaded
 * @param clientSupplier supplier of the blob client used for the upload
 * @throws FlinkException if the upload fails
 */
public static void extractAndUploadExecutionPlanFiles(
        ExecutionPlan executionPlan,
        SupplierWithException<BlobClient, IOException> clientSupplier)
        throws FlinkException {
    List<Path> userJars = executionPlan.getUserJars();
    // Convert the artifact entries (name -> cache entry) into (name, path) tuples.
    Collection<Tuple2<String, Path>> userArtifacts =
            executionPlan.getUserArtifacts().entrySet().stream()
                    .map(
                            entry ->
                                    Tuple2.of(
                                            entry.getKey(),
                                            new Path(entry.getValue().filePath)))
                    .collect(Collectors.toList());
    uploadExecutionPlanFiles(executionPlan, userJars, userArtifacts, clientSupplier);
} | Extracts all files required for the execution from the given {@link ExecutionPlan} and
uploads them using the {@link BlobClient} from the given {@link Supplier}.
@param executionPlan executionPlan requiring files
@param clientSupplier supplier of blob client to upload files with
@throws FlinkException if the upload fails | extractAndUploadExecutionPlanFiles | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/client/ClientUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/client/ClientUtils.java | Apache-2.0 |
/**
 * Uploads the given jars and artifacts required for the execution of the given
 * {@link ExecutionPlan} using a {@link BlobClient} from the given supplier.
 *
 * @param executionPlan execution plan requiring the files
 * @param userJars jars to upload
 * @param userArtifacts artifacts to upload
 * @param clientSupplier supplier of the blob client used for the upload
 * @throws FlinkException if the upload fails
 */
public static void uploadExecutionPlanFiles(
        ExecutionPlan executionPlan,
        Collection<Path> userJars,
        Collection<Tuple2<String, org.apache.flink.core.fs.Path>> userArtifacts,
        SupplierWithException<BlobClient, IOException> clientSupplier)
        throws FlinkException {
    if (!userJars.isEmpty() || !userArtifacts.isEmpty()) {
        try (BlobClient client = clientSupplier.get()) {
            uploadAndSetUserJars(executionPlan, userJars, client);
            uploadAndSetUserArtifacts(executionPlan, userArtifacts, client);
        } catch (IOException ioe) {
            throw new FlinkException("Could not upload job files.", ioe);
        }
    }
    // Always write the artifact entries, even when nothing had to be uploaded.
    executionPlan.writeUserArtifactEntriesToConfiguration();
} | Uploads the given jars and artifacts required for the execution of the given {@link
ExecutionPlan} using the {@link BlobClient} from the given {@link Supplier}.
@param executionPlan executionPlan requiring files
@param userJars jars to upload
@param userArtifacts artifacts to upload
@param clientSupplier supplier of blob client to upload files with
@throws FlinkException if the upload fails | uploadExecutionPlanFiles | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/client/ClientUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/client/ClientUtils.java | Apache-2.0 |
/**
 * Checks whether the duplicate job has already been finished, either successfully or as a
 * failure.
 */
public boolean isGloballyTerminated() {
    return globallyTerminated;
} | Checks whether the duplicate job has already been finished.
@return true if the job has already finished, either successfully or as a failure | isGloballyTerminated | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/client/DuplicateJobSubmissionException.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/client/DuplicateJobSubmissionException.java | Apache-2.0 |
/** Returns the process exit code associated with this application status. */
public int processExitCode() {
    return processExitCode;
} | Gets the process exit code associated with this status.
@return The associated process exit code. | processExitCode | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/ApplicationStatus.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/ApplicationStatus.java | Apache-2.0 |
/**
 * Derives the {@code ApplicationStatus} for a job that resulted in the given job status.
 * Job statuses without a mapping yield {@link #UNKNOWN}.
 */
public static ApplicationStatus fromJobStatus(JobStatus jobStatus) {
    return JOB_STATUS_APPLICATION_STATUS_BI_MAP.getOrDefault(jobStatus, UNKNOWN);
} | Derives the ApplicationStatus that should be used for a job that resulted in the given job
status. If the job is not yet in a globally terminal state, this method returns {@link
#UNKNOWN}. | fromJobStatus | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/ApplicationStatus.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/ApplicationStatus.java | Apache-2.0 |
public JobStatus deriveJobStatus() {
if (!JOB_STATUS_APPLICATION_STATUS_BI_MAP.inverse().containsKey(this)) {
throw new UnsupportedOperationException(
this.name() + " cannot be mapped to a JobStatus.");
}
return JOB_STATUS_APPLICATION_STATUS_BI_MAP.inverse().get(this);
} | Derives the {@link JobStatus} from the {@code ApplicationStatus}.
@return The corresponding {@code JobStatus}.
@throws UnsupportedOperationException for {@link #UNKNOWN}. | deriveJobStatus | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/ApplicationStatus.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/ApplicationStatus.java | Apache-2.0 |
public static void writeConfiguration(Configuration cfg, File file) throws IOException {
try (FileWriter fwrt = new FileWriter(file);
PrintWriter out = new PrintWriter(fwrt)) {
for (String s : ConfigurationUtils.convertConfigToWritableLines(cfg, false)) {
out.println(s);
}
}
} | Writes a Flink YAML config file from a Flink Configuration object.
@param cfg The Flink config
@param file The File to write to
@throws IOException if writing the configuration to the file fails | writeConfiguration | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | Apache-2.0
public static void substituteDeprecatedConfigKey(
Configuration config, String deprecated, String designated) {
// set the designated key only if it is not set already
if (!config.containsKey(designated)) {
final String valueForDeprecated = config.getString(deprecated, null);
if (valueForDeprecated != null) {
config.setString(designated, valueForDeprecated);
}
}
} | Sets the value of a new config key to the value of a deprecated config key.
@param config Config to write
@param deprecated The old config key
@param designated The new config key | substituteDeprecatedConfigKey | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | Apache-2.0 |
public static void substituteDeprecatedConfigPrefix(
Configuration config, String deprecatedPrefix, String designatedPrefix) {
// set the designated key only if it is not set already
final int prefixLen = deprecatedPrefix.length();
Configuration replacement = new Configuration();
for (String key : config.keySet()) {
if (key.startsWith(deprecatedPrefix)) {
String newKey = designatedPrefix + key.substring(prefixLen);
if (!config.containsKey(newKey)) {
replacement.setString(newKey, config.getString(key, null));
}
}
}
config.addAll(replacement);
} | Sets the value of a new config key to the value of a deprecated config key, taking the
changed key prefix into account.
@param config Config to write
@param deprecatedPrefix Old prefix of key
@param designatedPrefix New prefix of key | substituteDeprecatedConfigPrefix | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | Apache-2.0 |
public static Option newDynamicPropertiesOption() {
return new Option(DYNAMIC_PROPERTIES_OPT, true, "Dynamic properties");
} | Get an instance of the dynamic properties option.
<p>Dynamic properties allow the user to specify additional configuration values with -D, such
as <tt> -Dfs.overwrite-files=true -Dtaskmanager.memory.network.min=536346624</tt> | newDynamicPropertiesOption | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | Apache-2.0 |
public static Configuration parseDynamicProperties(CommandLine cmd) {
final Configuration config = new Configuration();
String[] values = cmd.getOptionValues(DYNAMIC_PROPERTIES_OPT);
if (values != null) {
for (String value : values) {
String[] pair = value.split("=", 2);
if (pair.length == 1) {
config.setString(pair[0], Boolean.TRUE.toString());
} else if (pair.length == 2) {
config.setString(pair[0], pair[1]);
}
}
}
return config;
} | Parse the dynamic properties (passed on the command line). | parseDynamicProperties | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | Apache-2.0 |
public static void updateTmpDirectoriesInConfiguration(
Configuration configuration, @Nullable String defaultDirs) {
if (configuration.contains(CoreOptions.TMP_DIRS)) {
LOG.info(
"Overriding Flink's temporary file directories with those "
+ "specified in the Flink config: {}",
configuration.getValue(CoreOptions.TMP_DIRS));
} else if (defaultDirs != null) {
LOG.info("Setting directories for temporary files to: {}", defaultDirs);
configuration.set(CoreOptions.TMP_DIRS, defaultDirs);
configuration.set(USE_LOCAL_DEFAULT_TMP_DIRS, true);
}
} | Set temporary configuration directories if necessary.
@param configuration flink config to patch
@param defaultDirs in case no tmp directories is set, next directories will be applied | updateTmpDirectoriesInConfiguration | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | Apache-2.0 |
public static Configuration cloneConfiguration(Configuration configuration) {
final Configuration clonedConfiguration = new Configuration(configuration);
if (clonedConfiguration.get(USE_LOCAL_DEFAULT_TMP_DIRS)) {
clonedConfiguration.removeConfig(CoreOptions.TMP_DIRS);
clonedConfiguration.removeConfig(USE_LOCAL_DEFAULT_TMP_DIRS);
}
return clonedConfiguration;
} | Clones the given configuration and resets instance specific config options.
@param configuration to clone
@return Cloned configuration with reset instance specific config options | cloneConfiguration | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | Apache-2.0 |
public static String getDynamicPropertiesAsString(
Configuration baseConfig, Configuration targetConfig) {
String[] newAddedConfigs =
targetConfig.keySet().stream()
.flatMap(
(String key) -> {
final String baseValue =
baseConfig.get(
ConfigOptions.key(key)
.stringType()
.noDefaultValue());
final String targetValue =
targetConfig.get(
ConfigOptions.key(key)
.stringType()
.noDefaultValue());
if (!baseConfig.keySet().contains(key)
|| !baseValue.equals(targetValue)) {
return Stream.of(
"-"
+ CommandLineOptions.DYNAMIC_PROPERTY_OPTION
.getOpt()
+ key
+ CommandLineOptions.DYNAMIC_PROPERTY_OPTION
.getValueSeparator()
+ escapeForDifferentOS(targetValue));
} else {
return Stream.empty();
}
})
.toArray(String[]::new);
return String.join(" ", newAddedConfigs);
} | Get dynamic properties based on two Flink configurations. A key is added to the result if it
is present in the target config but absent from the base config, or if its value differs
between the two configs. A key that is present only in the base config is ignored.
@param baseConfig The base configuration.
@param targetConfig The target configuration.
@return Dynamic properties as string, separated by whitespace. | getDynamicPropertiesAsString | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java | Apache-2.0 |
public static ContaineredTaskManagerParameters create(
Configuration config, TaskExecutorProcessSpec taskExecutorProcessSpec) {
// obtain the additional environment variables from the configuration
final HashMap<String, String> envVars = new HashMap<>();
final String prefix = ResourceManagerOptions.CONTAINERIZED_TASK_MANAGER_ENV_PREFIX;
for (String key : config.keySet()) {
if (key.startsWith(prefix) && key.length() > prefix.length()) {
// remove prefix
String envVarKey = key.substring(prefix.length());
envVars.put(envVarKey, config.getString(key, null));
}
}
// set JAVA_HOME
config.getOptional(CoreOptions.FLINK_JAVA_HOME)
.ifPresent(javaHome -> envVars.put(ENV_JAVA_HOME, javaHome));
// done
return new ContaineredTaskManagerParameters(taskExecutorProcessSpec, envVars);
} | Computes the parameters to be used to start a TaskManager Java process.
@param config The Flink configuration.
@param taskExecutorProcessSpec The resource specifics of the task executor.
@return The parameters to start the TaskManager processes with. | create | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/ContaineredTaskManagerParameters.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/ContaineredTaskManagerParameters.java | Apache-2.0 |
public static AllocationID fromHexString(String hexString) {
return new AllocationID(StringUtils.hexStringToByte(hexString));
} | Creates a new AllocationID from the given hex string representation.
@param hexString the hex string encoding of the AllocationID
@return the AllocationID parsed from the hex string | fromHexString | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/AllocationID.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/AllocationID.java | Apache-2.0
public static ResourceID generate() {
return new ResourceID(new AbstractID().toString());
} | Generate a random resource id.
@return A random resource id. | generate | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceID.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceID.java | Apache-2.0 |
public CPUResource getCpuCores() {
throwUnsupportedOperationExceptionIfUnknown();
return cpuCores;
} | Get the cpu cores needed.
@return The cpu cores, 1.0 means a full cpu thread | getCpuCores | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | Apache-2.0 |
public MemorySize getTaskHeapMemory() {
throwUnsupportedOperationExceptionIfUnknown();
return taskHeapMemory;
} | Get the task heap memory needed.
@return The task heap memory | getTaskHeapMemory | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | Apache-2.0 |
public MemorySize getTaskOffHeapMemory() {
throwUnsupportedOperationExceptionIfUnknown();
return taskOffHeapMemory;
} | Get the task off-heap memory needed.
@return The task off-heap memory | getTaskOffHeapMemory | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | Apache-2.0 |
public MemorySize getManagedMemory() {
throwUnsupportedOperationExceptionIfUnknown();
return managedMemory;
} | Get the managed memory needed.
@return The managed memory | getManagedMemory | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | Apache-2.0 |
public MemorySize getNetworkMemory() {
throwUnsupportedOperationExceptionIfUnknown();
return networkMemory;
} | Get the network memory needed.
@return The network memory | getNetworkMemory | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | Apache-2.0 |
public MemorySize getTotalMemory() {
throwUnsupportedOperationExceptionIfUnknown();
return getOperatorsMemory().add(networkMemory);
} | Get the total memory needed.
@return The total memory | getTotalMemory | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | Apache-2.0 |
public MemorySize getOperatorsMemory() {
throwUnsupportedOperationExceptionIfUnknown();
return taskHeapMemory.add(taskOffHeapMemory).add(managedMemory);
} | Get the memory the operators needed.
@return The operator memory | getOperatorsMemory | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | Apache-2.0 |
public boolean isMatching(final ResourceProfile required) {
checkNotNull(required, "Cannot check matching with null resources");
throwUnsupportedOperationExceptionIfUnknown();
if (this.equals(ANY)) {
return true;
}
if (this.equals(required)) {
return true;
}
if (required.equals(UNKNOWN)) {
return true;
}
return false;
} | Check whether required resource profile can be matched.
@param required the required resource profile
@return true if the requirement is matched, otherwise false | isMatching | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | Apache-2.0 |
@Nonnull
public ResourceProfile merge(final ResourceProfile other) {
checkNotNull(other, "Cannot merge with null resources");
if (equals(ANY) || other.equals(ANY)) {
return ANY;
}
if (this.equals(UNKNOWN) || other.equals(UNKNOWN)) {
return UNKNOWN;
}
Map<String, ExternalResource> resultExtendedResource = new HashMap<>(extendedResources);
other.extendedResources.forEach(
(String name, ExternalResource resource) -> {
resultExtendedResource.compute(
name,
(ignored, oldResource) ->
oldResource == null ? resource : oldResource.merge(resource));
});
return new ResourceProfile(
cpuCores.merge(other.cpuCores),
taskHeapMemory.add(other.taskHeapMemory),
taskOffHeapMemory.add(other.taskOffHeapMemory),
managedMemory.add(other.managedMemory),
networkMemory.add(other.networkMemory),
resultExtendedResource);
} | Calculates the sum of two resource profiles.
@param other The other resource profile to add.
@return The merged resource profile. | merge | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java | Apache-2.0 |
public static SlotID getDynamicSlotID(ResourceID resourceID) {
return new SlotID(resourceID);
} | Get a SlotID without actual slot index for dynamic slot allocation. | getDynamicSlotID | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/SlotID.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/SlotID.java | Apache-2.0 |
public ResourceProfile getTaskResourceProfile() {
return taskResourceProfile;
} | Returns the desired resource profile for the task slot. | getTaskResourceProfile | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/SlotProfile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/SlotProfile.java | Apache-2.0 |
public ResourceProfile getPhysicalSlotResourceProfile() {
return physicalSlotResourceProfile;
} | Returns the desired resource profile for the physical slot to host this task slot. | getPhysicalSlotResourceProfile | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/SlotProfile.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/SlotProfile.java | Apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.