code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/**
 * Derives an {@link OperatorID} from a {@link JobVertexID} by feeding the vertex id's
 * lower and upper 64-bit halves into the operator id constructor.
 */
public static OperatorID fromJobVertexID(JobVertexID id) {
    final long lowerPart = id.getLowerPart();
    final long upperPart = id.getUpperPart();
    return new OperatorID(lowerPart, upperPart);
}
A class for statistically unique operator IDs.
fromJobVertexID
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/OperatorID.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/OperatorID.java
Apache-2.0
/**
 * Tells whether a savepoint restore has been configured.
 *
 * @return {@code true} iff a restore path is set.
 */
public boolean restoreSavepoint() {
    return this.restorePath != null;
}
Returns whether to restore from savepoint. @return <code>true</code> if should restore from savepoint.
restoreSavepoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/SavepointRestoreSettings.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/SavepointRestoreSettings.java
Apache-2.0
/**
 * Returns the savepoint path to restore from, or {@code null} when no restore
 * is configured.
 */
public String getRestorePath() {
    return this.restorePath;
}
Returns the path to the savepoint to restore from. @return Path to the savepoint to restore from or <code>null</code> if should not restore.
getRestorePath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/SavepointRestoreSettings.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/SavepointRestoreSettings.java
Apache-2.0
/**
 * Tells whether savepoint state that cannot be mapped back to the job may be
 * silently skipped during restore.
 */
public boolean allowNonRestoredState() {
    return this.allowNonRestoredState;
}
Returns whether non restored state is allowed if the savepoint contains state that cannot be mapped back to the job. @return <code>true</code> if non restored state is allowed if the savepoint contains state that cannot be mapped back to the job.
allowNonRestoredState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/SavepointRestoreSettings.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/SavepointRestoreSettings.java
Apache-2.0
/** Returns the {@link Environment} this task runs in. */
public final Environment getEnvironment() {
    return environment;
}
Returns the environment of this task. @return The environment of this task.
getEnvironment
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/tasks/AbstractInvokable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/tasks/AbstractInvokable.java
Apache-2.0
/** Returns the user-code class loader, unwrapped from the task's environment. */
public final ClassLoader getUserCodeClassLoader() {
    return getEnvironment()
            .getUserCodeClassLoader()
            .asClassLoader();
}
Returns the user code class loader of this invokable. @return user code class loader of this invokable.
getUserCodeClassLoader
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/tasks/AbstractInvokable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/tasks/AbstractInvokable.java
Apache-2.0
/** Returns the parallelism of this task, i.e. how many subtasks it is split into. */
public int getCurrentNumberOfSubtasks() {
    return environment.getTaskInfo().getNumberOfParallelSubtasks();
}
Returns the current number of subtasks the respective task is split into. @return the current number of subtasks the respective task is split into
getCurrentNumberOfSubtasks
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/tasks/AbstractInvokable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/tasks/AbstractInvokable.java
Apache-2.0
/** Returns the zero-based index of this subtask within its parallel subtask group. */
public int getIndexInSubtaskGroup() {
    return environment.getTaskInfo().getIndexOfThisSubtask();
}
Returns the index of this subtask in the subtask group. @return the index of this subtask in the subtask group
getIndexInSubtaskGroup
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/tasks/AbstractInvokable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/tasks/AbstractInvokable.java
Apache-2.0
/** Asserts that the store has been started; fails fast otherwise. */
private void verifyIsRunning() {
    checkState(running, "Not running. Forgot to call start()?");
}
Verifies that the state is running.
verifyIsRunning
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/DefaultExecutionPlanStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/DefaultExecutionPlanStore.java
Apache-2.0
public static HighAvailabilityMode fromConfig(Configuration config) { String haMode = config.getValue(HighAvailabilityOptions.HA_MODE); if (haMode == null) { return HighAvailabilityMode.NONE; } else if (haMode.equalsIgnoreCase("standalone")) { // Map old default to new default return HighAvailabilityMode.NONE; } else { try { return HighAvailabilityMode.valueOf(haMode.toUpperCase()); } catch (IllegalArgumentException e) { return FACTORY_CLASS; } } }
Return the configured {@link HighAvailabilityMode}. @param config The config to parse @return Configured recovery mode or {@link HighAvailabilityMode#NONE} if not configured.
fromConfig
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/HighAvailabilityMode.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/HighAvailabilityMode.java
Apache-2.0
/**
 * Checks whether the configured recovery mode provides high availability.
 *
 * @param configuration configuration containing the recovery mode
 * @return {@code true} if the configured mode is highly available
 */
public static boolean isHighAvailabilityModeActivated(Configuration configuration) {
    return fromConfig(configuration).haActive;
}
Returns true if the defined recovery mode supports high availability. @param configuration Configuration which contains the recovery mode @return true if high availability is supported by the recovery mode, otherwise false
isHighAvailabilityModeActivated
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/HighAvailabilityMode.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/HighAvailabilityMode.java
Apache-2.0
@Override public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) { if (LOG.isDebugEnabled()) { if (event.getData() != null) { LOG.debug( "Received {} event (path: {})", event.getType(), event.getData().getPath()); } else { LOG.debug("Received {} event", event.getType()); } } switch (event.getType()) { case CHILD_ADDED: { JobID jobId = fromEvent(event); LOG.debug("Received CHILD_ADDED event notification for job {}", jobId); executionPlanListener.onAddedExecutionPlan(jobId); } break; case CHILD_UPDATED: { // Nothing to do } break; case CHILD_REMOVED: { JobID jobId = fromEvent(event); LOG.debug("Received CHILD_REMOVED event notification for job {}", jobId); executionPlanListener.onRemovedExecutionPlan(jobId); } break; case CONNECTION_SUSPENDED: { LOG.warn( "ZooKeeper connection SUSPENDING. Changes to the submitted job " + "graphs are not monitored (temporarily)."); } break; case CONNECTION_LOST: { LOG.warn( "ZooKeeper connection LOST. Changes to the submitted job " + "graphs are not monitored (permanently)."); } break; case CONNECTION_RECONNECTED: { LOG.info( "ZooKeeper connection RECONNECTED. Changes to the submitted job " + "graphs are monitored again."); } break; case INITIALIZED: { LOG.info("ExecutionPlansPathCacheListener initialized"); } break; } }
Monitors ZooKeeper for changes. <p>Detects modifications from other job managers in corner situations. The event notifications fire for changes from this job manager as well.
childEvent
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/ZooKeeperExecutionPlanStoreWatcher.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/ZooKeeperExecutionPlanStoreWatcher.java
Apache-2.0
/** Extracts the {@link JobID} encoded in the last segment of the event's ZNode path. */
private JobID fromEvent(PathChildrenCacheEvent event) {
    final String nodeName = ZKPaths.getNodeFromPath(event.getData().getPath());
    return JobID.fromHexString(nodeName);
}
Returns a JobID for the event's path.
fromEvent
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/ZooKeeperExecutionPlanStoreWatcher.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/ZooKeeperExecutionPlanStoreWatcher.java
Apache-2.0
/**
 * Compares the executions the task executor reports against the executions the job
 * master expects, and hands unknown/missing deployments to the configured handler.
 * PENDING expected executions are not treated as missing.
 */
@Override
public void reconcileExecutionDeployments(
        ResourceID taskExecutorHost,
        ExecutionDeploymentReport executionDeploymentReport,
        Map<ExecutionAttemptID, ExecutionDeploymentState> expectedDeployedExecutions) {
    // Start with everything the TM reported; whatever remains at the end is unknown.
    final Set<ExecutionAttemptID> unknownExecutions =
            new HashSet<>(executionDeploymentReport.getExecutions());
    final Set<ExecutionAttemptID> missingExecutions = new HashSet<>();

    expectedDeployedExecutions.forEach(
            (attemptId, deploymentState) -> {
                final boolean reportedByTaskExecutor = unknownExecutions.remove(attemptId);
                if (!reportedByTaskExecutor
                        && deploymentState != ExecutionDeploymentState.PENDING) {
                    missingExecutions.add(attemptId);
                }
            });

    if (!unknownExecutions.isEmpty()) {
        handler.onUnknownDeploymentsOf(unknownExecutions, taskExecutorHost);
    }
    if (!missingExecutions.isEmpty()) {
        handler.onMissingDeploymentsOf(missingExecutions, taskExecutorHost);
    }
}
Default {@link ExecutionDeploymentReconciler} implementation. Detects missing/unknown deployments, and defers to a provided {@link ExecutionDeploymentReconciliationHandler} to resolve them.
reconcileExecutionDeployments
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/DefaultExecutionDeploymentReconciler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/DefaultExecutionDeploymentReconciler.java
Apache-2.0
/** Returns the resource id carried by this registration-success message. */
public ResourceID getResourceID() {
    return this.resourceID;
}
Message indicating a successful {@link JobMaster} and {@link TaskExecutor} registration.
getResourceID
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JMTMRegistrationSuccess.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JMTMRegistrationSuccess.java
Apache-2.0
/**
 * Triggers a checkpoint of the default type for the running job.
 *
 * @param timeout for the rpc call
 * @return future completed with the external checkpoint path once done
 */
default CompletableFuture<String> triggerCheckpoint(@RpcTimeout final Duration timeout) {
    final CompletableFuture<CompletedCheckpoint> checkpointFuture =
            triggerCheckpoint(CheckpointType.DEFAULT, timeout);
    return checkpointFuture.thenApply(CompletedCheckpoint::getExternalPointer);
}
Triggers taking a checkpoint of the executed job. @param timeout for the rpc call @return Future which is completed with the checkpoint path once completed
triggerCheckpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobMasterGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobMasterGateway.java
Apache-2.0
/**
 * Fetches the given result partitions together with their metrics (e.g. sub-partition
 * sizes). This default implementation reports no partitions at all.
 *
 * @param timeout timeout for retrieving the partitions
 * @param expectedPartitions identifiers of the result partitions to fetch metrics for
 * @return future holding whichever of the expected partitions (with metrics) could be
 *     retrieved within the timeout
 */
default CompletableFuture<Collection<PartitionWithMetrics>> getPartitionWithMetrics(
        Duration timeout, Set<ResultPartitionID> expectedPartitions) {
    // Gateways that do not track partition metrics simply answer with an empty result.
    final Collection<PartitionWithMetrics> nothing = Collections.emptyList();
    return CompletableFuture.completedFuture(nothing);
}
Get specified partitions and their metrics (identified by {@code expectedPartitions}), the metrics include sizes of sub-partitions in a result partition. @param timeout The timeout used to retrieve the specified partitions. @param expectedPartitions The set of identifiers for the result partitions whose metrics are to be fetched. @return A future which will contain a collection of the partitions with their metrics that could be retrieved from the expected partitions within the specified timeout period.
getPartitionWithMetrics
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobMasterGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobMasterGateway.java
Apache-2.0
/**
 * Wraps the given UUID into a {@link JobMasterId}, passing {@code null} through
 * unchanged.
 */
public static JobMasterId fromUuidOrNull(@Nullable UUID uuid) {
    if (uuid == null) {
        return null;
    }
    return new JobMasterId(uuid);
}
If the given uuid is null, this returns null, otherwise a JobMasterId that corresponds to the UUID, via {@link #JobMasterId(UUID)}.
fromUuidOrNull
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobMasterId.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobMasterId.java
Apache-2.0
/** Returns the shared process instance representing "waiting for leadership". */
static JobMasterServiceProcess waitingForLeadership() {
    return WaitingForLeadership.INSTANCE;
}
JobMasterServiceProcess is responsible for running a {@link JobMasterService}.
waitingForLeadership
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobMasterServiceProcess.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobMasterServiceProcess.java
Apache-2.0
/**
 * Returns the failure cause, or an empty {@code Optional} when the job finished
 * successfully.
 */
public Optional<SerializedThrowable> getSerializedThrowable() {
    return Optional.ofNullable(this.serializedThrowable);
}
Returns an empty {@code Optional} if the job finished successfully, otherwise the {@code Optional} will carry the failure cause.
getSerializedThrowable
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobResult.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobResult.java
Apache-2.0
/**
 * Converts this {@link JobResult} into a {@link JobExecutionResult}.
 *
 * @param classLoader class loader used to deserialize accumulators and the failure cause
 * @return the job execution result on success
 * @throws JobCancellationException if the job was cancelled
 * @throws JobExecutionException if the job execution did not succeed
 * @throws IOException if the accumulators could not be deserialized
 * @throws ClassNotFoundException if the accumulators could not be deserialized
 */
public JobExecutionResult toJobExecutionResult(ClassLoader classLoader)
        throws JobExecutionException, IOException, ClassNotFoundException {
    if (applicationStatus == ApplicationStatus.SUCCEEDED) {
        return new JobExecutionResult(
                jobId,
                netRuntime,
                AccumulatorHelper.deserializeAccumulators(accumulatorResults, classLoader));
    }

    final Throwable cause =
            serializedThrowable == null
                    ? null
                    : serializedThrowable.deserializeError(classLoader);

    if (applicationStatus == ApplicationStatus.FAILED) {
        throw new JobExecutionException(jobId, "Job execution failed.", cause);
    } else if (applicationStatus == ApplicationStatus.CANCELED) {
        throw new JobCancellationException(jobId, "Job was cancelled.", cause);
    } else {
        throw new JobExecutionException(
                jobId,
                "Job completed with illegal application status: " + applicationStatus + '.',
                cause);
    }
}
Converts the {@link JobResult} to a {@link JobExecutionResult}. @param classLoader to use for deserialization @return JobExecutionResult @throws JobCancellationException if the job was cancelled @throws JobExecutionException if the job execution did not succeed @throws IOException if the accumulator could not be deserialized @throws ClassNotFoundException if the accumulator could not be deserialized
toJobExecutionResult
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobResult.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobResult.java
Apache-2.0
public static JobResult createFrom(AccessExecutionGraph accessExecutionGraph) { final JobID jobId = accessExecutionGraph.getJobID(); final JobStatus jobStatus = accessExecutionGraph.getState(); checkArgument( jobStatus.isTerminalState(), "The job " + accessExecutionGraph.getJobName() + '(' + jobId + ") is not in a " + "terminal state. It is in state " + jobStatus + '.'); final JobResult.Builder builder = new JobResult.Builder(); builder.jobId(jobId); builder.applicationStatus(ApplicationStatus.fromJobStatus(accessExecutionGraph.getState())); final long netRuntime = accessExecutionGraph.getStatusTimestamp(jobStatus) - accessExecutionGraph.getStatusTimestamp(JobStatus.INITIALIZING); // guard against clock changes final long guardedNetRuntime = Math.max(netRuntime, 0L); builder.netRuntime(guardedNetRuntime); builder.accumulatorResults(accessExecutionGraph.getAccumulatorsSerialized()); if (jobStatus == JobStatus.FAILED) { final ErrorInfo errorInfo = accessExecutionGraph.getFailureInfo(); checkNotNull(errorInfo, "No root cause is found for the job failure."); builder.serializedThrowable(errorInfo.getException()); } return builder.build(); }
Creates the {@link JobResult} from the given {@link AccessExecutionGraph} which must be in a globally terminal state. @param accessExecutionGraph to create the JobResult from @return JobResult of the given AccessExecutionGraph
createFrom
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobResult.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobResult.java
Apache-2.0
/**
 * Releases this slot without a stated cause.
 *
 * @return future completed once the slot has been released; completed exceptionally on
 *     failure
 * @deprecated Added because extended the actual releaseSlot method with cause parameter.
 */
default CompletableFuture<?> releaseSlot() {
    return releaseSlot(null);
}
Releases this slot. @return Future which is completed once the slot has been released, in case of a failure it is completed exceptionally @deprecated Added because extended the actual releaseSlot method with cause parameter.
releaseSlot
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/LogicalSlot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/LogicalSlot.java
Apache-2.0
/**
 * Lazily opens the next job-event file when no input stream is currently open.
 *
 * @return the current (possibly freshly opened) stream, or {@code null} if all files
 *     have been consumed
 * @throws IOException if opening the next file fails
 */
private DataInputStream tryGetNewInputStream() throws IOException {
    if (inputStream == null && readIndex < readFiles.size()) {
        final Path nextFile = readFiles.get(readIndex++);
        inputStream = new DataInputStream(fileSystem.open(nextFile));
        LOG.info("Start reading job event file {}", nextFile.getPath());
    }
    return inputStream;
}
If current inputStream is null, try to get a new one.
tryGetNewInputStream
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/FileSystemJobEventStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/FileSystemJobEventStore.java
Apache-2.0
private void openNewOutputStream() throws IOException { // get write file. writeFile = new Path(workingDir, FILE_PREFIX + writeIndex); outputStream = new FsBatchFlushOutputStream( fileSystem, writeFile, FileSystem.WriteMode.NO_OVERWRITE, writeBufferSize); LOG.info("Job events will be written to {}.", writeFile); writeIndex++; }
Try to open a new output stream.
openNewOutputStream
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/FileSystemJobEventStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/FileSystemJobEventStore.java
Apache-2.0
/**
 * Closes the current output stream, if any. On failure the store is marked corrupted and
 * stops recording events. Must run on the {@code eventWriterExecutor}.
 */
@VisibleForTesting
void closeOutputStream() {
    if (outputStream == null) {
        return;
    }
    try {
        outputStream.close();
    } catch (IOException exception) {
        LOG.warn(
                "Error happens when closing the output stream for {}. Do not record events any more.",
                writeFile,
                exception);
        corrupted = true;
    } finally {
        // Drop the reference regardless of the close outcome.
        outputStream = null;
    }
}
Close output stream. Should be invoked in {@link #eventWriterExecutor}.
closeOutputStream
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/FileSystemJobEventStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/FileSystemJobEventStore.java
Apache-2.0
/**
 * Returns the unique type id registered for this event's concrete class.
 *
 * @return integer type id of this job event
 */
default int getType() {
    return JobEvents.getTypeID(getClass());
}
Retrieves the type id of this job event. The type id is a unique identifier based on the class of the specific event object. @return An integer representing the unique type id of this job event.
getType
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/JobEvent.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/JobEvent.java
Apache-2.0
/**
 * Stops the job event manager; safe to call repeatedly (subsequent calls are no-ops).
 *
 * @param clear whether the underlying store should also clear its recorded events
 */
public void stop(boolean clear) {
    if (!running) {
        return;
    }
    jobEventStore.stop(clear);
    running = false;
}
Stop the job event manager. <p>NOTE: This method may be invoked multiple times.
stop
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/JobEventManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/JobEventManager.java
Apache-2.0
/**
 * Asynchronously records a job event; the manager must have been started.
 *
 * @param event the job event to record
 * @param cutBlock whether to start a new event block after writing this event
 */
public void writeEvent(JobEvent event, boolean cutBlock) {
    checkState(running);
    jobEventStore.writeEvent(event, cutBlock);
}
Write a job event asynchronously. @param event The job event that will be recorded. @param cutBlock whether start a new event block after write this event.
writeEvent
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/JobEventManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/JobEventManager.java
Apache-2.0
/**
 * Replays every recorded job event through the given handler.
 *
 * @param replayHandler handler that processes each replayed event
 * @return {@code true} on a complete successful replay, {@code false} if any step failed
 */
public boolean replay(JobEventReplayHandler replayHandler) {
    checkState(running);
    try {
        replaying = true;
        replayHandler.startReplay();

        for (JobEvent event = jobEventStore.readEvent();
                event != null;
                event = jobEventStore.readEvent()) {
            replayHandler.replayOneEvent(event);
        }

        replayHandler.finalizeReplay();
    } catch (Throwable throwable) {
        // Replay is best-effort: log and report failure instead of propagating.
        LOG.warn("Replay job event failed.", throwable);
        return false;
    } finally {
        replaying = false;
    }
    return true;
}
Replay all job events that have been record. @param replayHandler handler which will process the job event. @return <code>true</code> if replay successfully, <code>false</code> otherwise.
replay
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/JobEventManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/JobEventManager.java
Apache-2.0
/**
 * Looks up the registered type id for the given event class; fails if the class was
 * never registered.
 */
public static int getTypeID(Class<? extends JobEvent> clazz) {
    final Integer typeId = jobEventTypeIdMapping.get(clazz);
    return checkNotNull(typeId);
}
A class hosts all the type ids of events.
getTypeID
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/JobEvents.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/event/JobEvents.java
Apache-2.0
/** Registers and returns a job metric group for the given execution plan. */
@Override
public JobManagerJobMetricGroup create(@Nonnull ExecutionPlan executionPlan) {
    return jobManagerMetricGroup.addJob(
            executionPlan.getJobID(), executionPlan.getName());
}
Default implementation of {@link JobManagerJobMetricGroupFactory} which creates for a given {@link ExecutionPlan} a {@link JobManagerJobMetricGroup}.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/factories/DefaultJobManagerJobMetricGroupFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/factories/DefaultJobManagerJobMetricGroupFactory.java
Apache-2.0
/** Returns an unregistered job metric group, ignoring the given execution plan. */
@Override
public JobManagerJobMetricGroup create(@Nonnull ExecutionPlan executionPlan) {
    return UnregisteredMetricGroups.createUnregisteredJobManagerJobMetricGroup();
}
{@link JobManagerJobMetricGroupFactory} which returns an unregistered {@link JobManagerJobMetricGroup}.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/factories/UnregisteredJobManagerJobMetricGroupFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/factories/UnregisteredJobManagerJobMetricGroupFactory.java
Apache-2.0
/**
 * Releases the currently assigned payload (if any) and detaches it from this slot.
 *
 * @param cause of the release operation
 */
public void releasePayload(Throwable cause) {
    final Payload payload = payloadReference.get();
    if (payload == null) {
        return;
    }
    // Release first, then detach — matching the original ordering.
    payload.release(cause);
    payloadReference.set(null);
}
Triggers the release of the assigned payload. If the payload could be released, then it is removed from the slot. @param cause of the release operation
releasePayload
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/AllocatedSlot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/AllocatedSlot.java
Apache-2.0
/** Identity-based hash code: intentionally delegates to {@link Object#hashCode()}. */
@Override
public final int hashCode() {
    return super.hashCode();
}
This always returns a reference hash code.
hashCode
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/AllocatedSlot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/AllocatedSlot.java
Apache-2.0
/** Returns the allocation id of this slot, taken from its {@code SlotInfo} view. */
default AllocationID getAllocationId() {
    return asSlotInfo().getAllocationId();
}
Returns the allocation id of this slot. @return the allocation id of this slot
getAllocationId
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/AllocatedSlotPool.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/AllocatedSlotPool.java
Apache-2.0
/**
 * Checks a slot request bulk and decides whether it is fulfilled, timed out (because it
 * has been unfulfillable for longer than the timeout), or still pending.
 *
 * @param slotRequestBulk bulk of slot requests
 * @param slotRequestTimeout how long a pending request may stay unfulfillable
 * @return the outcome of the check
 */
@VisibleForTesting
TimeoutCheckResult checkPhysicalSlotRequestBulkTimeout(
        final PhysicalSlotRequestBulkWithTimestamp slotRequestBulk,
        final Duration slotRequestTimeout) {
    if (slotRequestBulk.getPendingRequests().isEmpty()) {
        return TimeoutCheckResult.FULFILLED;
    }

    if (isSlotRequestBulkFulfillable(slotRequestBulk, slotsRetriever)) {
        slotRequestBulk.markFulfillable();
        return TimeoutCheckResult.PENDING;
    }

    final long currentTimestamp = clock.relativeTimeMillis();
    slotRequestBulk.markUnfulfillable(currentTimestamp);

    final boolean timedOut =
            slotRequestBulk.getUnfulfillableSince() + slotRequestTimeout.toMillis()
                    <= currentTimestamp;
    return timedOut ? TimeoutCheckResult.TIMEOUT : TimeoutCheckResult.PENDING;
}
Check the slot request bulk and timeout its requests if it has been unfulfillable for too long. @param slotRequestBulk bulk of slot requests @param slotRequestTimeout indicates how long a pending request can be unfulfillable @return result of the check, indicating the bulk is fulfilled, still pending, or timed out
checkPhysicalSlotRequestBulkTimeout
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/PhysicalSlotRequestBulkCheckerImpl.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/PhysicalSlotRequestBulkCheckerImpl.java
Apache-2.0
/**
 * Requests a brand-new slot allocation from the resource manager (bypassing the pool's
 * already-available slots), with no preferred allocations.
 *
 * @param slotRequestId identifying the requested slot
 * @param resourceProfile resource requirements for the requested slot
 * @param timeout timeout for the allocation procedure
 * @return a newly allocated slot that was previously not available
 */
default CompletableFuture<PhysicalSlot> requestNewAllocatedSlot(
        SlotRequestId slotRequestId,
        ResourceProfile resourceProfile,
        @Nullable Duration timeout) {
    return requestNewAllocatedSlot(
            slotRequestId, resourceProfile, Collections.emptyList(), timeout);
}
Request the allocation of a new slot from the resource manager. This method will not return a slot from the already available slots from the pool, but instead will add a new slot to that pool that is immediately allocated and returned. @param slotRequestId identifying the requested slot @param resourceProfile resource profile that specifies the resource requirements for the requested slot @param timeout timeout for the allocation procedure @return a newly allocated slot that was previously not available.
requestNewAllocatedSlot
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPool.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPool.java
Apache-2.0
/**
 * Requests a new batch slot from the resource manager with no preferred allocations.
 * Batch slots only time out when the pool has no suitable slot and ignore resource
 * manager failure signals.
 *
 * @param slotRequestId identifying the requested slot
 * @param resourceProfile resource requirements for the requested batch slot
 * @return future completed with the newly allocated batch slot
 */
default CompletableFuture<PhysicalSlot> requestNewAllocatedBatchSlot(
        SlotRequestId slotRequestId, ResourceProfile resourceProfile) {
    return requestNewAllocatedBatchSlot(
            slotRequestId, resourceProfile, Collections.emptyList());
}
Requests the allocation of a new batch slot from the resource manager. Unlike the normal slot, a batch slot will only time out if the slot pool does not contain a suitable slot. Moreover, it won't react to failure signals from the resource manager. @param slotRequestId identifying the requested slot @param resourceProfile resource profile that specifies the resource requirements for the requested batch slot @return a future which is completed with newly allocated batch slot
requestNewAllocatedBatchSlot
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPool.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPool.java
Apache-2.0
/**
 * Attempts to view this slot pool service as the given type.
 *
 * @param clazz target type
 * @param <T> type of clazz
 * @return {@link Optional#of} this instance cast to the target type, or
 *     {@link Optional#empty()} if the cast is not possible
 */
default <T> Optional<T> castInto(Class<T> clazz) {
    return clazz.isAssignableFrom(getClass())
            ? Optional.of(clazz.cast(this))
            : Optional.empty();
}
Tries to cast this slot pool service into the given clazz. @param clazz to cast the slot pool service into @param <T> type of clazz @return {@link Optional#of} the target type if it can be cast; otherwise {@link Optional#empty()}
castInto
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolService.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolService.java
Apache-2.0
/**
 * Notifies that the acquired resources cannot fulfill the resource requirements.
 *
 * @param acquiredResources the resources that have been acquired
 */
default void notifyNotEnoughResourcesAvailable(
        Collection<ResourceRequirement> acquiredResources) {
    // Intentionally a no-op; implementations may override to react to shortages.
}
Notifies that not enough resources are available to fulfill the resource requirements. @param acquiredResources the resources that have been acquired
notifyNotEnoughResourcesAvailable
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolService.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolService.java
Apache-2.0
/** Returns the shared register instance that holds no leader information at all. */
public static LeaderInformationRegister empty() {
    return EMPTY_REGISTER;
}
A register containing the {@link LeaderInformation} for multiple contenders based on their {@code componentId}. No empty {@code LeaderInformation} is stored physically. No entry and an entry with an empty {@code LeaderInformation} are, therefore, semantically the same.
empty
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
Apache-2.0
/** Creates a register holding exactly the one given component-id/leader-info entry. */
public static LeaderInformationRegister of(
        String componentId, LeaderInformation leaderInformation) {
    final Map<String, LeaderInformation> singleEntry =
            Collections.singletonMap(componentId, leaderInformation);
    return new LeaderInformationRegister(singleEntry);
}
Creates a single-entry instance containing only the passed information.
of
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
Apache-2.0
/**
 * Produces a new register that equals the given one (treating {@code null} as empty)
 * with the entry for {@code componentId} overwritten. An empty {@code LeaderInformation}
 * removes the entry instead of storing it.
 */
public static LeaderInformationRegister merge(
        @Nullable LeaderInformationRegister leaderInformationRegister,
        String componentId,
        LeaderInformation leaderInformation) {
    final Map<String, LeaderInformation> updatedEntries = new HashMap<>();
    if (leaderInformationRegister != null) {
        updatedEntries.putAll(leaderInformationRegister.leaderInformationPerComponentId);
    }

    if (leaderInformation.isEmpty()) {
        // Empty leader information is never stored physically.
        updatedEntries.remove(componentId);
    } else {
        updatedEntries.put(componentId, leaderInformation);
    }

    return new LeaderInformationRegister(updatedEntries);
}
Merges another {@code LeaderInformationRegister} with additional leader information into a new {@code LeaderInformationRegister} instance. Any existing {@link LeaderInformation} for the passed {@code componentId} will be overwritten. <p>Empty {@code LeaderInformation} results in the removal of the corresponding entry (if it exists).
merge
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
Apache-2.0
/**
 * Returns a register identical to the given one except that the entry for
 * {@code componentId} is removed (if present). A {@code null} or entry-less register
 * yields the shared empty register.
 */
public static LeaderInformationRegister clear(
        @Nullable LeaderInformationRegister leaderInformationRegister, String componentId) {
    final boolean hasNoEntries =
            leaderInformationRegister == null
                    || !leaderInformationRegister
                            .getRegisteredComponentIds()
                            .iterator()
                            .hasNext();
    if (hasNoEntries) {
        return LeaderInformationRegister.empty();
    }
    return merge(leaderInformationRegister, componentId, LeaderInformation.empty());
}
Creates a new {@code LeaderInformationRegister} that matches the passed {@code LeaderInformationRegister} except for the entry of {@code componentId} which is removed if it existed.
clear
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
Apache-2.0
/**
 * Looks up the stored {@link LeaderInformation} for the given {@code componentId}.
 *
 * @return the stored information, or an empty {@code Optional} if no entry exists
 */
public Optional<LeaderInformation> forComponentId(String componentId) {
    final LeaderInformation storedInformation =
            leaderInformationPerComponentId.get(componentId);
    return Optional.ofNullable(storedInformation);
}
Returns the {@link LeaderInformation} that is stored or an empty {@code Optional} if no entry exists for the passed {@code componentId}.
forComponentId
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
Apache-2.0
/**
 * Returns the stored {@link LeaderInformation} for {@code componentId}, or an empty one if
 * nothing is registered for that id.
 */
public LeaderInformation forComponentIdOrEmpty(String componentId) {
    return forComponentId(componentId).orElseGet(LeaderInformation::empty);
}
Returns a {@link LeaderInformation} which is empty if no {@code LeaderInformation} is stored for the passed {@code componentId}.
forComponentIdOrEmpty
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
Apache-2.0
/** Returns all {@code componentId}s for which leader information is currently stored. */
public Iterable<String> getRegisteredComponentIds() {
    return leaderInformationPerComponentId.keySet();
}
Returns the {@code componentId}s for which leader information is stored.
getRegisteredComponentIds
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
Apache-2.0
/** Returns whether an entry is registered for the given {@code componentId}. */
public boolean hasLeaderInformation(String componentId) {
    return leaderInformationPerComponentId.containsKey(componentId);
}
Checks whether the register holds non-empty {@link LeaderInformation} for the passed {@code componentId}.
hasLeaderInformation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
Apache-2.0
/** Returns {@code true} iff this register holds no leader information entries at all. */
public boolean hasNoLeaderInformation() {
    return leaderInformationPerComponentId.isEmpty();
}
Checks that no non-empty {@link LeaderInformation} is stored. @return {@code true}, if there is no entry that refers to a non-empty {@code LeaderInformation}; otherwise {@code false} (i.e. either no information is stored under any {@code componentId} or there are entries for certain {@code componentId}s that refer to an empty {@code LeaderInformation} record).
hasNoLeaderInformation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/LeaderInformationRegister.java
Apache-2.0
/**
 * Returns the memory segment the next bytes will be read from. When the segment is exactly
 * exhausted, the follow-up segment is obtained via {@code nextSegment(MemorySegment)} instead.
 */
public MemorySegment getCurrentSegment() {
    return currentSegment;
}
Gets the memory segment that will be used to read the next bytes from. If the segment is exactly exhausted, meaning that the last byte read was the last byte available in the segment, then this segment will not serve the next bytes. The segment to serve the next bytes will be obtained through the {@link #nextSegment(MemorySegment)} method. @return The current memory segment.
getCurrentSegment
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/AbstractPagedInputView.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/AbstractPagedInputView.java
Apache-2.0
/** Returns the offset within the current segment at which the next bytes will be written. */
public int getCurrentPositionInSegment() {
    return positionInSegment;
}
Gets the current write position (the position where the next bytes will be written) in the current memory segment. @return The current write offset in the current memory segment.
getCurrentPositionInSegment
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/AbstractPagedOutputView.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/AbstractPagedOutputView.java
Apache-2.0
/** Returns the size of the memory segments used by this view. */
public int getSegmentSize() {
    return segmentSize;
}
Gets the size of the segments used by this view. @return The memory segment size.
getSegmentSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/AbstractPagedOutputView.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/AbstractPagedOutputView.java
Apache-2.0
/**
 * Moves the view to the next page: hands the current segment (and write position) to
 * {@code nextSegment(MemorySegment, int)} and continues writing after the header of the
 * segment it returns.
 *
 * @throws IOException if the current segment could not be processed or no new segment could
 *     be obtained
 */
public void advance() throws IOException {
    final MemorySegment next = nextSegment(this.currentSegment, this.positionInSegment);
    this.currentSegment = next;
    this.positionInSegment = this.headerLength;
}
Moves the output view to the next page. This method invokes internally the {@link #nextSegment(MemorySegment, int)} method to give the current memory segment to the concrete subclass' implementation and obtain the next segment to write to. Writing will continue inside the new segment after the header. @throws IOException Thrown, if the current segment could not be processed or a new segment could not be obtained.
advance
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/AbstractPagedOutputView.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/AbstractPagedOutputView.java
Apache-2.0
/**
 * Repositions the view so that subsequent writes go to the given segment at the given offset.
 *
 * @param seg the memory segment to write the next bytes to
 * @param position the offset at which writing continues
 */
protected void seekOutput(MemorySegment seg, int position) {
    currentSegment = seg;
    positionInSegment = position;
}
Sets the internal state to the given memory segment and the given position within the segment. @param seg The memory segment to write the next bytes to. @param position The position to start writing the next bytes to.
seekOutput
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/AbstractPagedOutputView.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/AbstractPagedOutputView.java
Apache-2.0
/** Draws the next segment from the backing list, or returns {@code null} when it is empty. */
@Override
public MemorySegment nextSegment() {
    if (this.segments.isEmpty()) {
        return null;
    }
    // take from the tail to avoid shifting the remaining elements
    return this.segments.remove(this.segments.size() - 1);
}
Simple memory segment source that draws segments from a list.
nextSegment
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/ListMemorySegmentSource.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/ListMemorySegmentSource.java
Apache-2.0
/**
 * Shuts the memory manager down: marks it shut down, clears the reserved-memory bookkeeping,
 * and frees every memory segment that was allocated through this manager. Only the first
 * call has an effect.
 *
 * <p>NOTE(review): no locking is visible in this block — confirm shutdown cannot race with
 * concurrent allocations before relying on it.
 */
public void shutdown() {
    if (!isShutDown) {
        // mark as shutdown and release memory
        isShutDown = true;
        reservedMemory.clear();
        // go over all allocated segments and release them
        for (Set<MemorySegment> segments : allocatedSegments.values()) {
            for (MemorySegment seg : segments) {
                seg.free();
            }
            segments.clear();
        }
        allocatedSegments.clear();
    }
}
Shuts the memory manager down, trying to release all the memory it managed. Depending on implementation details, the memory does not necessarily become reclaimable by the garbage collector, because there might still be references to allocated segments in the code that allocated them from the memory manager.
shutdown
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
Apache-2.0
/** Returns whether this memory manager has been shut down. */
@VisibleForTesting
public boolean isShutdown() {
    return isShutDown;
}
Checks whether the MemoryManager has been shut down. @return True, if the memory manager is shut down, false otherwise.
isShutdown
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
Apache-2.0
/**
 * Checks that nothing is currently allocated from the memory budget, i.e. the manager's
 * memory is completely available.
 */
public boolean verifyEmpty() {
    return memoryBudget.verifyEmpty();
}
Checks if the memory manager's memory is completely available (nothing allocated at the moment). @return True, if the memory manager is empty and valid, false if it is not empty or corrupted.
verifyEmpty
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
Apache-2.0
/**
 * Releases and frees all memory segments that were allocated for the given owner.
 *
 * @param owner the owner whose segments are to be released; {@code null} is a no-op
 * @throws IllegalStateException if the memory manager has been shut down
 */
public void releaseAll(Object owner) {
    if (owner == null) {
        return;
    }
    Preconditions.checkState(!isShutDown, "Memory manager has been shut down.");
    // get all segments
    Set<MemorySegment> segments = allocatedSegments.remove(owner);
    // all segments may have been freed previously individually
    if (segments == null || segments.isEmpty()) {
        return;
    }
    // free each segment
    for (MemorySegment segment : segments) {
        segment.free();
    }
    segments.clear();
}
Releases all memory segments for the given owner. @param owner The owner memory segments are to be released.
releaseAll
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
Apache-2.0
/**
 * Reserves a memory chunk of {@code size} bytes for the given owner.
 *
 * @param owner owner to associate with the reservation, for the fallback release
 * @param size number of bytes to reserve; {@code 0} is a no-op
 * @throws MemoryReservationException if the budget cannot satisfy the requested amount
 */
public void reserveMemory(Object owner, long size) throws MemoryReservationException {
    checkMemoryReservationPreconditions(owner, size);
    if (size == 0L) {
        return;
    }
    memoryBudget.reserveMemory(size);
    // accumulate the owner's total reservation
    reservedMemory.compute(
            owner,
            (o, memoryReservedForOwner) ->
                    memoryReservedForOwner == null ? size : memoryReservedForOwner + size);
    // re-check after the reservation to detect a shutdown racing with this call
    Preconditions.checkState(!isShutDown, "Memory manager has been concurrently shut down.");
}
Reserves a memory chunk of a certain size for an owner from this memory manager. @param owner The owner to associate with the memory reservation, for the fallback release. @param size size of memory to reserve. @throws MemoryReservationException Thrown, if this memory manager does not have the requested amount of memory any more.
reserveMemory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
Apache-2.0
/**
 * Releases every memory chunk previously reserved by the given owner back to the budget.
 *
 * @param owner the owner whose reservations are to be released
 */
public void releaseAllMemory(Object owner) {
    checkMemoryReservationPreconditions(owner, 0L);
    final Long totalReservedByOwner = reservedMemory.remove(owner);
    if (totalReservedByOwner != null) {
        memoryBudget.releaseMemory(totalReservedByOwner);
    }
}
Releases all reserved memory chunks from an owner to this memory manager. @param owner The owner to associate with the memory reservation, for the fallback release.
releaseAllMemory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
Apache-2.0
public <T extends AutoCloseable> OpaqueMemoryResource<T> getSharedMemoryResourceForManagedMemory( String type, LongFunctionWithException<T, Exception> initializer, double fractionToInitializeWith) throws Exception { // if we need to allocate the resource (no shared resource allocated, yet), this would be // the size to use final long numBytes = computeMemorySize(fractionToInitializeWith); // initializer and releaser as functions that are pushed into the SharedResources, // so that the SharedResources can decide in (thread-safely execute) when initialization // and release should happen final LongFunctionWithException<T, Exception> reserveAndInitialize = (size) -> { try { reserveMemory(type, size); } catch (MemoryReservationException e) { throw new MemoryAllocationException( "Could not created the shared memory resource of size " + size + ". Not enough memory left to reserve from the slot's managed memory.", e); } try { return initializer.apply(size); } catch (Throwable t) { releaseMemory(type, size); throw t; } }; final LongConsumer releaser = (size) -> releaseMemory(type, size); // This object identifies the lease in this request. It is used only to identify the release // operation. // Using the object to represent the lease is a bit nicer safer than just using a reference // counter. final Object leaseHolder = new Object(); final SharedResources.ResourceAndSize<T> resource = sharedResources.getOrAllocateSharedResource( type, leaseHolder, reserveAndInitialize, numBytes); // the actual size may theoretically be different from what we requested, if allocated it // was by // someone else before with a different value for fraction (should not happen in practice, // though). final long size = resource.size(); final ThrowingRunnable<Exception> disposer = () -> sharedResources.release(type, leaseHolder, releaser); return new OpaqueMemoryResource<>(resource.resourceHandle(), size, disposer); }
Acquires a shared memory resource, identified by a type string. If the resource already exists, this returns a descriptor to the resource. If the resource does not yet exist, the given memory fraction is reserved and the resource is initialized with that size. <p>The memory for the resource is reserved from the memory budget of this memory manager (thus determining the size of the resource), but resource itself is opaque, meaning the memory manager does not understand its structure. <p>The OpaqueMemoryResource object returned from this method must be closed once not used any further. Once all acquisitions have closed the object, the resource itself is closed. <p><b>Important:</b> The failure semantics are as follows: If the memory manager fails to reserve the memory, the external resource initializer will not be called. If an exception is thrown when the opaque resource is closed (last lease is released), the memory manager will still un-reserve the memory to make sure its own accounting is clean. The exception will need to be handled by the caller of {@link OpaqueMemoryResource#close()}. For example, if this indicates that native memory was not released and the process might thus have a memory leak, the caller can decide to kill the process as a result.
getSharedMemoryResourceForManagedMemory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
Apache-2.0
/**
 * Acquires a shared resource identified by {@code type} and registers a lease for it. If the
 * resource does not yet exist, it is created via {@code initializer} with the given size.
 * The resource is opaque to this memory manager.
 *
 * <p>The returned {@link OpaqueMemoryResource} must be closed once no longer used; the
 * resource itself is disposed once all acquisitions have been closed.
 *
 * @param type identifier of the shared resource
 * @param initializer creates the resource on first acquisition
 * @param numBytes size used when initializing a new resource
 */
public <T extends AutoCloseable> OpaqueMemoryResource<T> getExternalSharedMemoryResource(
        String type, LongFunctionWithException<T, Exception> initializer, long numBytes)
        throws Exception {
    // This object identifies the lease in this request. It is used only to identify the
    // release operation. Using an object to represent the lease is a bit nicer and safer
    // than just using a reference counter.
    final Object leaseHolder = new Object();
    final SharedResources.ResourceAndSize<T> resource =
            sharedResources.getOrAllocateSharedResource(
                    type, leaseHolder, initializer, numBytes);
    final ThrowingRunnable<Exception> disposer =
            () -> sharedResources.release(type, leaseHolder);
    return new OpaqueMemoryResource<>(resource.resourceHandle(), resource.size(), disposer);
}
Acquires a shared resource, identified by a type string. If the resource already exists, this returns a descriptor to the resource. If the resource does not yet exist, the method initializes a new resource using the initializer function and given size. <p>The resource is opaque, meaning the memory manager does not understand its structure. <p>The OpaqueMemoryResource object returned from this method must be closed once not used any further. Once all acquisitions have closed the object, the resource itself is closed.
getExternalSharedMemoryResource
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
Apache-2.0
/** Returns the size of the pages handled by this memory manager. */
public int getPageSize() {
    return (int) pageSize;
}
Gets the size of the pages handled by the memory manager. @return The size of the pages handled by the memory manager.
getPageSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
Apache-2.0
/** Returns the total amount of memory governed by this memory manager. */
public long getMemorySize() {
    return memoryBudget.getTotalMemorySize();
}
Returns the total size of memory handled by this memory manager. @return The total size of memory.
getMemorySize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
Apache-2.0
/** Returns the amount of memory currently available from this memory manager. */
public long availableMemory() {
    return memoryBudget.getAvailableMemorySize();
}
Returns the available amount of memory handled by this memory manager. @return The available amount of memory.
availableMemory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
Apache-2.0
/**
 * Translates a fraction of the total memory into a whole number of pages, rounding down so
 * that any remainder smaller than a page is not included.
 *
 * @param fraction the fraction of the total memory per slot
 * @return the number of full pages corresponding to the fraction
 */
public int computeNumberOfPages(double fraction) {
    validateFraction(fraction);
    final double exactPages = totalNumberOfPages * fraction;
    return (int) exactPages;
}
Computes how many pages the given fraction of the total memory corresponds to. If the resulting amount of memory is not an exact multiple of a page size, the result is rounded down, such that a portion of the memory (smaller than the page size) is not included. @param fraction the fraction of the total memory per slot @return The number of pages to which the given fraction of memory corresponds
computeNumberOfPages
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
Apache-2.0
/**
 * Computes the number of bytes corresponding to the given fraction of all memory governed by
 * this MemoryManager, rounded down.
 *
 * @param fraction the fraction of all memory governed by this MemoryManager
 * @return the memory size corresponding to the fraction
 */
public long computeMemorySize(double fraction) {
    validateFraction(fraction);
    final double exactSize = memoryBudget.getTotalMemorySize() * fraction;
    return (long) Math.floor(exactSize);
}
Computes the memory size corresponding to the fraction of all memory governed by this MemoryManager. @param fraction The fraction of all memory governed by this MemoryManager @return The memory size corresponding to the memory fraction
computeMemorySize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/MemoryManager.java
Apache-2.0
/** Returns the handle to the underlying resource. */
public T getResourceHandle() {
    return resourceHandle;
}
Gets the handle to the resource.
getResourceHandle
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/OpaqueMemoryResource.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/OpaqueMemoryResource.java
Apache-2.0
/**
 * Gets the shared resource registered under {@code type} and adds a lease for
 * {@code leaseHolder}. If no such resource exists yet, it is created via {@code initializer}
 * with the given size. The resource lives until all leases are released.
 *
 * @throws MemoryAllocationException if the thread is interrupted while waiting for the lock
 * @throws Exception if creating the resource fails
 */
public <T extends AutoCloseable> ResourceAndSize<T> getOrAllocateSharedResource(
        String type,
        Object leaseHolder,
        LongFunctionWithException<T, Exception> initializer,
        long sizeForInitialization)
        throws Exception {
    // We could be stuck on this lock for a while, in cases where another initialization is
    // currently happening and the initialization is expensive.
    // We lock interruptibly here to allow for faster exit in case of cancellation errors.
    try {
        lock.lockInterruptibly();
    } catch (InterruptedException e) {
        // restore the interrupt status before translating to a checked exception
        Thread.currentThread().interrupt();
        throw new MemoryAllocationException("Interrupted while acquiring memory");
    }
    try {
        // we cannot use "computeIfAbsent()" here because the computing function may throw an
        // exception.
        @SuppressWarnings("unchecked")
        LeasedResource<T> resource = (LeasedResource<T>) reservedResources.get(type);
        if (resource == null) {
            resource = createResource(initializer, sizeForInitialization);
            reservedResources.put(type, resource);
        }
        resource.addLeaseHolder(leaseHolder);
        return resource;
    } finally {
        lock.unlock();
    }
}
Gets the shared memory resource for the given owner and registers a lease. If the resource does not yet exist, it will be created via the given initializer function. <p>The resource must be released when no longer used. That releases the lease. When all leases are released, the resource is disposed.
getOrAllocateSharedResource
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/SharedResources.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/SharedResources.java
Apache-2.0
/**
 * Releases the lease identified by {@code leaseHolder} for the given resource type. If no
 * further leases exist, the resource is disposed.
 */
void release(String type, Object leaseHolder) throws Exception {
    // delegate with a no-op size releaser
    release(type, leaseHolder, (value) -> {});
}
Releases a lease (identified by the lease holder object) for the given type. If no further leases exist, the resource is disposed.
release
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/memory/SharedResources.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/memory/SharedResources.java
Apache-2.0
/** Returns the collected thread info samples, keyed by execution attempt (task). */
public Map<ExecutionAttemptID, Collection<ThreadInfoSample>> getSamples() {
    return samples;
}
Returns a collection of ThreadInfoSample. @return A collection of thread info samples for a particular execution attempt (Task)
getSamples
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/messages/TaskThreadInfoResponse.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/messages/TaskThreadInfoResponse.java
Apache-2.0
/**
 * Wraps the thread state and stack trace of the given {@link ThreadInfo} into a
 * {@link ThreadInfoSample}.
 *
 * @param threadInfo source of the copied data; may be {@code null}
 * @return a sample, or an empty {@code Optional} when {@code threadInfo} is {@code null}
 */
public static Optional<ThreadInfoSample> from(@Nullable ThreadInfo threadInfo) {
    if (threadInfo == null) {
        return Optional.empty();
    }
    return Optional.of(
            new ThreadInfoSample(threadInfo.getThreadState(), threadInfo.getStackTrace()));
}
Constructs a {@link ThreadInfoSample} from {@link ThreadInfo}. @param threadInfo {@link ThreadInfo} where the data will be copied from. @return an Optional containing the {@link ThreadInfoSample} if the {@code threadInfo} is not null and an empty Optional otherwise.
from
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/messages/ThreadInfoSample.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/messages/ThreadInfoSample.java
Apache-2.0
/**
 * Returns the rpc address of the {@code MetricQueryService} gateway, or {@code null} if none
 * is started (the default).
 */
@Nullable
default String getMetricQueryServiceGatewayRpcAddress() {
    return null;
}
Returns the gateway of the {@link MetricQueryService} or null, if none is started. @return Gateway of the MetricQueryService or null, if none is started
getMetricQueryServiceGatewayRpcAddress
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/MetricRegistry.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/MetricRegistry.java
Apache-2.0
/**
 * Creates a {@link MetricRegistryConfiguration} from the given {@link Configuration}. Parse
 * failures do not fail the call: the scope formats fall back to the defaults and the
 * delimiter falls back to {@code '.'}, each with a logged warning.
 *
 * @param configuration configuration to read scope formats and the delimiter from
 * @param maximumFrameSize the maximum message size that the RPC system supports
 * @return metric registry configuration generated from the configuration
 */
public static MetricRegistryConfiguration fromConfiguration(
        Configuration configuration, long maximumFrameSize) {
    ScopeFormats scopeFormats;
    try {
        scopeFormats = ScopeFormats.fromConfig(configuration);
    } catch (Exception e) {
        LOG.warn("Failed to parse scope format, using default scope formats", e);
        // an empty Configuration yields the default formats
        scopeFormats = ScopeFormats.fromConfig(new Configuration());
    }
    char delim;
    try {
        delim = configuration.get(MetricOptions.SCOPE_DELIMITER).charAt(0);
    } catch (Exception e) {
        LOG.warn("Failed to parse delimiter, using default delimiter.", e);
        delim = '.';
    }
    // padding to account for serialization overhead
    final long messageSizeLimitPadding = 256;
    return new MetricRegistryConfiguration(
            scopeFormats, delim, maximumFrameSize - messageSizeLimitPadding);
}
Create a metric registry configuration object from the given {@link Configuration}. @param configuration to generate the metric registry configuration from @param maximumFrameSize the maximum message size that the RPC system supports @return Metric registry configuration generated from the configuration
fromConfiguration
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/MetricRegistryConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/MetricRegistryConfiguration.java
Apache-2.0
/**
 * Initializes and starts the {@code MetricQueryService}. Startup failures are deliberately
 * logged and swallowed so that a broken metrics service does not prevent the process from
 * coming up.
 *
 * @param rpcService RpcService to create the MetricQueryService on
 * @param resourceID resource ID used to disambiguate the endpoint name
 * @throws IllegalStateException if the registry has already been shut down
 */
public void startQueryService(RpcService rpcService, ResourceID resourceID) {
    synchronized (lock) {
        Preconditions.checkState(
                !isShutdown(), "The metric registry has already been shut down.");
        try {
            metricQueryServiceRpcService = rpcService;
            queryService =
                    MetricQueryService.createMetricQueryService(
                            rpcService, resourceID, maximumFramesize);
            queryService.start();
        } catch (Exception e) {
            LOG.warn(
                    "Could not start MetricDumpActor. No metrics will be submitted to the WebInterface.",
                    e);
        }
    }
}
Initializes the MetricQueryService. @param rpcService RpcService to create the MetricQueryService on @param resourceID resource ID used to disambiguate the actor name
startQueryService
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/MetricRegistryImpl.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/MetricRegistryImpl.java
Apache-2.0
/**
 * Returns the rpc service that the {@code MetricQueryService} runs in, or {@code null} if it
 * was never started.
 */
@Nullable
public RpcService getMetricQueryServiceRpcService() {
    return metricQueryServiceRpcService;
}
Returns the rpc service that the {@link MetricQueryService} runs in. @return Gateway of the MetricQueryService or null, if none is started @return rpc service of the MetricQueryService
getMetricQueryServiceRpcService
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/MetricRegistryImpl.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/MetricRegistryImpl.java
Apache-2.0
/** Returns whether this registry has been shut down (read under the registry lock). */
public boolean isShutdown() {
    synchronized (lock) {
        return isShutdown;
    }
}
Returns whether this registry has been shutdown. @return true, if this registry was shutdown, otherwise false
isShutdown
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/MetricRegistryImpl.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/MetricRegistryImpl.java
Apache-2.0
private static Pattern createReporterClassPattern(String prefix, String suffix) { // [\S&&[^.]] = intersection of non-whitespace and non-period character classes return Pattern.compile(prefix + "([\\S&&[^.]]*)\\." + suffix); }
Builder class for {@link Reporter}. @param <REPORTED> Generic type of what's reported. @param <REPORTER> Generic type of the reporter. @param <SETUP> Generic type of the created setup. @param <REPORTER_FACTORY> Generic type of the reporter factory.
createReporterClassPattern
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/ReporterSetupBuilder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/ReporterSetupBuilder.java
Apache-2.0
/**
 * Returns the longest single marked period measured by this gauge, for example the longest
 * consecutive back-pressured period.
 */
public synchronized long getMaxSingleMeasurement() {
    return previousMaxSingleMeasurement;
}
@return the longest marked period as measured by the given TimerGauge. For example, the longest consecutive back-pressured period.
getMaxSingleMeasurement
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/TimerGauge.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/TimerGauge.java
Apache-2.0
/**
 * Registers a view that should from now on be regularly updated; the addition takes effect
 * on the next update cycle.
 */
public void notifyOfAddedView(View view) {
    synchronized (lock) {
        toAdd.add(view);
    }
}
Notifies this ViewUpdater of a new metric that should be regularly updated. @param view metric that should be regularly updated
notifyOfAddedView
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/ViewUpdater.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/ViewUpdater.java
Apache-2.0
/**
 * Marks a view so that it is no longer regularly updated; the removal takes effect on the
 * next update cycle.
 */
public void notifyOfRemovedView(View view) {
    synchronized (lock) {
        toRemove.add(view);
    }
}
Notifies this ViewUpdater of a metric that should no longer be regularly updated. @param view metric that should no longer be regularly updated
notifyOfRemovedView
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/ViewUpdater.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/ViewUpdater.java
Apache-2.0
/**
 * Updates all currently registered views, then applies the pending additions and removals
 * under the lock — so notifications arriving during the update pass are picked up on the
 * next cycle rather than mutating the set mid-iteration.
 */
@Override
public void run() {
    for (View toUpdate : this.views) {
        toUpdate.update();
    }
    synchronized (lock) {
        views.addAll(toAdd);
        toAdd.clear();
        views.removeAll(toRemove);
        toRemove.clear();
    }
}
The TimerTask doing the actual updating.
run
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/ViewUpdater.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/ViewUpdater.java
Apache-2.0
/** Renders scope info, name, and category of this MetricDump for debugging output. */
@Override
public String toString() {
    final StringBuilder builder = new StringBuilder("MetricDump{");
    builder.append("scopeInfo=").append(scopeInfo);
    builder.append(", name='").append(name).append('\'');
    builder.append(", category='").append(getCategory()).append('\'');
    builder.append('}');
    return builder.toString();
}
Returns the category for this MetricDump. @return category
toString
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/dump/MetricDump.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/dump/MetricDump.java
Apache-2.0
/**
 * Replaces characters unsupported in metric names ({@code ' '}, {@code '.'}, {@code ':'},
 * {@code ','}) with {@code '_'} (underscore). Copy-on-write: if the string contains no
 * unsupported characters, it is returned unchanged and no new objects are allocated at all.
 */
private static String replaceInvalidChars(String str) {
    // backing array is only materialized once the first invalid character is found
    char[] chars = null;
    final int strLen = str.length();
    int pos = 0;
    for (int i = 0; i < strLen; i++) {
        final char c = str.charAt(i);
        switch (c) {
            case ' ':
            case '.':
            case ':':
            case ',':
                if (chars == null) {
                    // first replacement: copy the prefix processed so far
                    chars = str.toCharArray();
                }
                chars[pos++] = '_';
                break;
            default:
                if (chars != null) {
                    chars[pos] = c;
                }
                pos++;
        }
    }
    // chars == null means no replacement happened; return the original instance
    return chars == null ? str : new String(chars, 0, pos);
}
Lightweight method to replace unsupported characters. If the string does not contain any unsupported characters, this method creates no new string (and in fact no new objects at all). <p>Replacements: <ul> <li>{@code space : . ,} are replaced by {@code _} (underscore) </ul>
replaceInvalidChars
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/dump/MetricQueryService.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/dump/MetricQueryService.java
Apache-2.0
/**
 * Creates the MetricQueryService endpoint on the given rpc service.
 *
 * @param rpcService the rpc service running the MetricQueryService
 * @param resourceID resource ID to disambiguate the endpoint name; may be {@code null}
 * @param maximumFrameSize the maximum message size supported by the transport
 * @return the created MetricQueryService
 */
public static MetricQueryService createMetricQueryService(
        RpcService rpcService, ResourceID resourceID, long maximumFrameSize) {
    final String endpointId;
    if (resourceID == null) {
        endpointId = METRIC_QUERY_SERVICE_NAME;
    } else {
        endpointId = METRIC_QUERY_SERVICE_NAME + "_" + resourceID.getResourceIdString();
    }
    return new MetricQueryService(rpcService, endpointId, maximumFrameSize);
}
Starts the MetricQueryService actor in the given actor system. @param rpcService The rpcService running the MetricQueryService @param resourceID resource ID to disambiguate the actor name @return actor reference to the MetricQueryService
createMetricQueryService
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/dump/MetricQueryService.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/dump/MetricQueryService.java
Apache-2.0
/**
 * Returns the scope as an array of its components, e.g.
 * {@code ["host-7", "taskmanager-2", "window_word_count", "my-mapper"]}.
 *
 * <p>NOTE(review): the internal array is returned without a defensive copy — callers must
 * not mutate it.
 */
@Override
public String[] getScopeComponents() {
    return scopeComponents;
}
Gets the scope as an array of the scope components, for example {@code ["host-7", "taskmanager-2", "window_word_count", "my-mapper"]}. @see #getMetricIdentifier(String)
getScopeComponents
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/AbstractMetricGroup.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/AbstractMetricGroup.java
Apache-2.0
/**
 * Returns the metric query service scope for this group, creating and caching it on first
 * access.
 *
 * @param filter character filter; note that it only takes effect on the first call — later
 *     calls return the cached scope regardless of the filter passed
 * @return query service scope
 */
public QueryScopeInfo getQueryServiceMetricInfo(CharacterFilter filter) {
    if (queryServiceScopeInfo == null) {
        queryServiceScopeInfo = createQueryServiceMetricInfo(filter);
    }
    return queryServiceScopeInfo;
}
Returns the metric query service scope for this group. @param filter character filter @return query service scope
getQueryServiceMetricInfo
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/AbstractMetricGroup.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/AbstractMetricGroup.java
Apache-2.0
@Override public String getMetricIdentifier(String metricName) { return getMetricIdentifier(metricName, CharacterFilter.NO_OP_FILTER); }
Returns the fully qualified metric name, for example {@code "host-7.taskmanager-2.window_word_count.my-mapper.metricName"}. @param metricName metric name @return fully qualified metric name
getMetricIdentifier
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/AbstractMetricGroup.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/AbstractMetricGroup.java
Apache-2.0
@Override public String getMetricIdentifier(String metricName, CharacterFilter filter) { return getMetricIdentifier(metricName, filter, -1, registry.getDelimiter()); }
Returns the fully qualified metric name, for example {@code "host-7.taskmanager-2.window_word_count.my-mapper.metricName"}. @param metricName metric name @param filter character filter which is applied to the scope components if not null. @return fully qualified metric name
getMetricIdentifier
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/AbstractMetricGroup.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/AbstractMetricGroup.java
Apache-2.0
@Override public void close() { synchronized (this) { if (!isClosed()) { // remove all metrics and generic subgroups super.close(); // remove and close all subcomponent metrics for (ComponentMetricGroup<?> group : subComponents()) { group.close(); } } } }
Closes the component group by removing and closing all metrics and subgroups (inherited from {@link AbstractMetricGroup}), plus closing and removing all dedicated component subgroups.
close
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/ComponentMetricGroup.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/ComponentMetricGroup.java
Apache-2.0
public void reuseInputMetricsForTask() { TaskIOMetricGroup taskIO = parentMetricGroup.getTaskIOMetricGroup(); taskIO.reuseRecordsInputCounter(this.numRecordsIn); }
Causes the containing task to use this operators input record counter.
reuseInputMetricsForTask
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/InternalOperatorIOMetricGroup.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/InternalOperatorIOMetricGroup.java
Apache-2.0
public void reuseOutputMetricsForTask() { TaskIOMetricGroup taskIO = parentMetricGroup.getTaskIOMetricGroup(); taskIO.reuseRecordsOutputCounter(this.numRecordsOut); }
Causes the containing task to use this operators output record counter.
reuseOutputMetricsForTask
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/InternalOperatorIOMetricGroup.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/InternalOperatorIOMetricGroup.java
Apache-2.0
public void reuseBytesInputMetricsForTask() { TaskIOMetricGroup taskIO = parentMetricGroup.getTaskIOMetricGroup(); taskIO.reuseBytesInputCounter(this.numBytesIn); }
Causes the containing task to use this operators input bytes counter.
reuseBytesInputMetricsForTask
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/InternalOperatorIOMetricGroup.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/InternalOperatorIOMetricGroup.java
Apache-2.0
public void reuseBytesOutputMetricsForTask() { TaskIOMetricGroup taskIO = parentMetricGroup.getTaskIOMetricGroup(); taskIO.reuseBytesOutputCounter(this.numBytesOut); }
Causes the containing task to use this operators output bytes counter.
reuseBytesOutputMetricsForTask
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/InternalOperatorIOMetricGroup.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/InternalOperatorIOMetricGroup.java
Apache-2.0
public static ResourceManagerMetricGroup create( MetricRegistry metricRegistry, String hostname) { return new ResourceManagerMetricGroup(metricRegistry, hostname); }
Metric group which is used by the {@link ResourceManager} to register metrics.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/ResourceManagerMetricGroup.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/ResourceManagerMetricGroup.java
Apache-2.0
public static SlotManagerMetricGroup create(MetricRegistry metricRegistry, String hostname) { return new SlotManagerMetricGroup(metricRegistry, hostname); }
Metric group which is used by the {@link SlotManager} to register metrics.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/SlotManagerMetricGroup.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/SlotManagerMetricGroup.java
Apache-2.0
@Override public JobManagerJobMetricGroup addJob(JobID jobId, String jobName) { return createUnregisteredJobManagerJobMetricGroup(); }
A safe drop-in replacement for {@link JobManagerMetricGroup}s.
addJob
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/UnregisteredMetricGroups.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/UnregisteredMetricGroups.java
Apache-2.0
@Override public JobManagerOperatorMetricGroup getOrAddOperator( AbstractID vertexId, String taskName, OperatorID operatorID, String operatorName) { return createUnregisteredJobManagerOperatorMetricGroup(this); }
A safe drop-in replacement for {@link JobManagerJobMetricGroup}s.
getOrAddOperator
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/UnregisteredMetricGroups.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/UnregisteredMetricGroups.java
Apache-2.0