code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
public boolean add(T task) { // Check that this slot has been assigned to the job sending this task Preconditions.checkArgument( task.getJobID().equals(jobId), "The task's job id does not match the " + "job id for which the slot has been allocated."); Preconditions.checkArgument( task.getAllocationId().equals(allocationId), "The task's allocation " + "id does not match the allocation id for which the slot has been allocated."); Preconditions.checkState( TaskSlotState.ACTIVE == state, "The task slot is not in state active."); T oldTask = tasks.put(task.getExecutionId(), task); if (oldTask != null) { tasks.put(task.getExecutionId(), oldTask); return false; } else { return true; } }
Add the given task to the task slot. This is only possible if there is not already another task with the same execution attempt id added to the task slot. In this case, the method returns true. Otherwise the task slot is left unchanged and false is returned. <p>In case that the task slot state is not active an {@link IllegalStateException} is thrown. In case that the task's job id and allocation id don't match with the job id and allocation id for which the task slot has been allocated, an {@link IllegalArgumentException} is thrown. @param task to be added to the task slot @throws IllegalStateException if the task slot is not in state active @return true if the task was added to the task slot; otherwise false
add
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlot.java
Apache-2.0
/**
 * Removes the task registered under the given execution attempt id.
 *
 * @param executionAttemptId identifies the task to be removed
 * @return the removed task, or null if no task was registered under that id
 */
public T remove(ExecutionAttemptID executionAttemptId) {
    final T removedTask = tasks.remove(executionAttemptId);
    return removedTask;
}
Remove the task identified by the given execution attempt id. @param executionAttemptId identifying the task to be removed @return The removed task if there was any; otherwise null.
remove
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlot.java
Apache-2.0
/**
 * Marks this slot as active. This succeeds only when the slot is currently allocated or
 * already active; marking an active slot active again is an idempotent no-op.
 *
 * @return true if the slot is in state active after this call; false otherwise
 */
public boolean markActive() {
    if (state != TaskSlotState.ALLOCATED && state != TaskSlotState.ACTIVE) {
        return false;
    }
    state = TaskSlotState.ACTIVE;
    return true;
}
Mark this slot as active. A slot can only be marked active if it's in state allocated or already active (in which case the call is an idempotent no-op). <p>The method returns true if the slot was set to active. Otherwise it returns false. @return True if the new state of the slot is active; otherwise false
markActive
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlot.java
Apache-2.0
/**
 * Marks this slot as inactive/allocated. This succeeds only when the slot is currently
 * active or already allocated; marking an allocated slot again is an idempotent no-op.
 *
 * @return true if the slot is in state allocated after this call; false otherwise
 */
public boolean markInactive() {
    if (state != TaskSlotState.ACTIVE && state != TaskSlotState.ALLOCATED) {
        return false;
    }
    state = TaskSlotState.ALLOCATED;
    return true;
}
Mark the slot as inactive/allocated. A slot can only be marked as inactive/allocated if it's in state allocated or active. @return True if the new state of the slot is allocated; otherwise false
markInactive
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlot.java
Apache-2.0
/**
 * Tries to free the slot identified by the given allocation id, using a generic cause.
 * Delegates to the two-argument variant.
 *
 * @param allocationId identifying the task slot to be freed
 * @return index of the freed slot if the slot could be freed; otherwise -1
 * @throws SlotNotFoundException if there is no task slot for the given allocation id
 */
default int freeSlot(AllocationID allocationId) throws SlotNotFoundException {
    final Exception cause = new Exception("The task slot of this task is being freed.");
    return freeSlot(allocationId, cause);
}
Try to free the slot. If the slot is empty it will set the state of the task slot to free and return its index. If the slot is not empty, then it will set the state of the task slot to releasing, fail all tasks and return -1. @param allocationId identifying the task slot to be freed @throws SlotNotFoundException if there is no task slot for the given allocation id @return Index of the freed slot if the slot could be freed; otherwise -1
freeSlot
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTable.java
Apache-2.0
/**
 * Creates a new daemon thread in the configured group, carrying the configured name and a
 * fatal-exit uncaught-exception handler.
 *
 * @param runnable the runnable the thread will execute
 * @return the newly created (not yet started) thread
 */
@Override
public Thread newThread(Runnable runnable) {
    final Thread dispatcherThread = new Thread(group, runnable, threadName);
    dispatcherThread.setUncaughtExceptionHandler(FatalExitExceptionHandler.INSTANCE);
    dispatcherThread.setDaemon(true);
    return dispatcherThread;
}
Creates a new thread factory. @param group The group that the threads will be associated with. @param threadName The name for the threads.
newThread
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/DispatcherThreadFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/DispatcherThreadFactory.java
Apache-2.0
/**
 * Starts a periodic memory-usage logger thread when the debug memory log is enabled and
 * the given logger actually logs at INFO level; otherwise does nothing.
 *
 * @param logger logger the statistics are written to
 * @param configuration configuration holding the debug-memory-log options
 * @param taskManagerTerminationFuture future whose completion stops the logger
 */
public static void startIfConfigured(
        Logger logger,
        Configuration configuration,
        CompletableFuture<Void> taskManagerTerminationFuture) {
    // Without INFO logging there is nothing to emit; same short-circuit order as before.
    final boolean enabled =
            logger.isInfoEnabled() && configuration.get(TaskManagerOptions.DEBUG_MEMORY_LOG);
    if (!enabled) {
        return;
    }

    logger.info("Starting periodic memory usage logger");

    final long intervalMillis =
            configuration
                    .get(TaskManagerOptions.DEBUG_MEMORY_USAGE_LOG_INTERVAL_MS)
                    .toMillis();
    final MemoryLogger memoryLogger =
            new MemoryLogger(logger, intervalMillis, taskManagerTerminationFuture);
    memoryLogger.start();
}
A thread the periodically logs statistics about: <ul> <li>Heap and non-heap memory usage <li>Memory pools and pool usage <li>Garbage collection times and counts </ul>
startIfConfigured
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/MemoryLogger.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/MemoryLogger.java
Apache-2.0
/**
 * Renders the JVM's heap and non-heap memory usage as a human-readable string.
 *
 * @param memoryMXBean the memory bean to query
 * @return a string of the form "Memory usage stats: [HEAP: u/c/m MB, NON HEAP: u/c/m MB
 *     (used/committed/max)]" with all sizes in mebibytes
 */
public static String getMemoryUsageStatsAsString(MemoryMXBean memoryMXBean) {
    final MemoryUsage heapUsage = memoryMXBean.getHeapMemoryUsage();
    final MemoryUsage nonHeapUsage = memoryMXBean.getNonHeapMemoryUsage();

    // ">> 20" converts bytes to mebibytes
    return String.format(
            "Memory usage stats: [HEAP: %d/%d/%d MB, "
                    + "NON HEAP: %d/%d/%d MB (used/committed/max)]",
            heapUsage.getUsed() >> 20,
            heapUsage.getCommitted() >> 20,
            heapUsage.getMax() >> 20,
            nonHeapUsage.getUsed() >> 20,
            nonHeapUsage.getCommitted() >> 20,
            nonHeapUsage.getMax() >> 20);
}
Gets the memory footprint of the JVM in a string representation. @return A string describing how much heap memory and direct memory are allocated and used.
getMemoryUsageStatsAsString
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/MemoryLogger.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/MemoryLogger.java
Apache-2.0
/**
 * Returns a string with the <strong>direct</strong> memory footprint. These stats are not
 * part of the other memory beans.
 *
 * @param bufferPoolMxBean the direct buffer pool bean, or null if none is available
 * @return a string with the count, total capacity, and used direct memory
 */
public static String getDirectMemoryStatsAsString(BufferPoolMXBean bufferPoolMxBean) {
    if (bufferPoolMxBean == null) {
        return "Direct memory stats: unavailable";
    }
    return String.format(
            "Direct memory stats: Count: %d, Total Capacity: %d, Used Memory: %d",
            bufferPoolMxBean.getCount(),
            bufferPoolMxBean.getTotalCapacity(),
            bufferPoolMxBean.getMemoryUsed());
}
Returns a String with the <strong>direct</strong> memory footprint. <p>These stats are not part of the other memory beans. @param bufferPoolMxBean The direct buffer pool bean or <code>null</code> if none available. @return A string with the count, total capacity, and used direct memory.
getDirectMemoryStatsAsString
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/MemoryLogger.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/MemoryLogger.java
Apache-2.0
/**
 * Gets the statistics of all non-heap (off-heap) memory pools from the JVM.
 *
 * @param poolBeans the collection of memory pool beans; heap pools are skipped
 * @return a string denoting the names and used/committed/max sizes (in MB) of the
 *     off-heap memory pools
 */
public static String getMemoryPoolStatsAsString(List<MemoryPoolMXBean> poolBeans) {
    final StringBuilder builder = new StringBuilder("Off-heap pool stats: ");
    // separator-prefix technique replaces the original element counter
    String separator = "";

    for (MemoryPoolMXBean poolBean : poolBeans) {
        if (poolBean.getType() != MemoryType.NON_HEAP) {
            continue; // heap pools are covered by the general memory usage stats
        }
        final MemoryUsage usage = poolBean.getUsage();
        builder.append(separator)
                .append('[')
                .append(poolBean.getName())
                .append(": ")
                .append(usage.getUsed() >> 20)
                .append('/')
                .append(usage.getCommitted() >> 20)
                .append('/')
                .append(usage.getMax() >> 20)
                .append(" MB (used/committed/max)]");
        separator = ", ";
    }
    return builder.toString();
}
Gets the memory pool statistics from the JVM. @param poolBeans The collection of memory pool beans. @return A string denoting the names and sizes of the memory pools.
getMemoryPoolStatsAsString
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/MemoryLogger.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/MemoryLogger.java
Apache-2.0
/**
 * Gets the garbage collection statistics from the JVM.
 *
 * @param gcMXBeans the collection of garbage collector beans
 * @return a string denoting, per collector, the accumulated collection time (ms) and
 *     collection count
 */
public static String getGarbageCollectorStatsAsString(List<GarbageCollectorMXBean> gcMXBeans) {
    final StringBuilder builder = new StringBuilder("Garbage collector stats: ");
    // separator-prefix technique replaces the original append-then-trim of ", "
    String separator = "";

    for (GarbageCollectorMXBean gcBean : gcMXBeans) {
        builder.append(separator)
                .append('[')
                .append(gcBean.getName())
                .append(", GC TIME (ms): ")
                .append(gcBean.getCollectionTime())
                .append(", GC COUNT: ")
                .append(gcBean.getCollectionCount())
                .append(']');
        separator = ", ";
    }
    return builder.toString();
}
Gets the garbage collection statistics from the JVM. @param gcMXBeans The collection of garbage collector beans. @return A string denoting the number of times and total elapsed time in garbage collection.
getGarbageCollectorStatsAsString
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/MemoryLogger.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/MemoryLogger.java
Apache-2.0
/**
 * Utility method to extract network related parameters from the configuration and to
 * sanity check them.
 *
 * @param configuration configuration object
 * @param networkMemorySize the size of memory reserved for the shuffle environment
 * @param localTaskManagerCommunication true to skip initializing the network stack
 * @param taskManagerAddress the IP address under which the TaskManager will be accessible
 * @return the assembled {@link NettyShuffleEnvironmentConfiguration}
 */
public static NettyShuffleEnvironmentConfiguration fromConfiguration(
        Configuration configuration,
        MemorySize networkMemorySize,
        boolean localTaskManagerCommunication,
        InetAddress taskManagerAddress) {
    final PortRange dataBindPortRange = getDataBindPortRange(configuration);

    final int pageSize = ConfigurationParserUtils.getPageSize(configuration);

    // The starting segment size is only read when buffer debloating is enabled;
    // otherwise buffers start at the maximum size.
    int startingBufferSize = Integer.MAX_VALUE;
    if (configuration.get(TaskManagerOptions.BUFFER_DEBLOAT_ENABLED)) {
        startingBufferSize =
                (int)
                        configuration
                                .get(TaskManagerOptions.STARTING_MEMORY_SEGMENT_SIZE)
                                .getBytes();
    }

    final NettyConfig nettyConfig =
            createNettyConfig(
                    configuration,
                    localTaskManagerCommunication,
                    taskManagerAddress,
                    dataBindPortRange,
                    pageSize);

    final int numberOfNetworkBuffers =
            calculateNumberOfNetworkBuffers(networkMemorySize, pageSize);

    // Backoff bounds for repeated partition requests.
    int initialRequestBackoff =
            configuration.get(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_INITIAL);
    int maxRequestBackoff =
            configuration.get(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_MAX);

    int listenerTimeout =
            (int)
                    configuration
                            .get(
                                    NettyShuffleEnvironmentOptions
                                            .NETWORK_PARTITION_REQUEST_TIMEOUT)
                            .toMillis();

    // NOTE(review): these values are hard-coded rather than read from config options —
    // presumably intentional defaults; confirm against the corresponding option definitions.
    int buffersPerChannel = 2;
    int extraBuffersPerGate = 8;

    Optional<Integer> maxRequiredBuffersPerGate =
            configuration.getOptional(
                    NettyShuffleEnvironmentOptions.NETWORK_READ_MAX_REQUIRED_BUFFERS_PER_GATE);

    int maxBuffersPerChannel = 10;

    long batchShuffleReadMemoryBytes =
            configuration.get(TaskManagerOptions.NETWORK_BATCH_SHUFFLE_READ_MEMORY).getBytes();

    int sortShuffleMinBuffers =
            configuration.get(NettyShuffleEnvironmentOptions.NETWORK_SORT_SHUFFLE_MIN_BUFFERS);
    int sortShuffleMinParallelism = 1;

    boolean isNetworkDetailedMetrics =
            configuration.get(NettyShuffleEnvironmentOptions.NETWORK_DETAILED_METRICS);

    String[] tempDirs = ConfigurationUtils.parseTempDirectories(configuration);
    // Shuffle the data directories to make directory selection fairer between different
    // TaskManagers, which is good for load balance, especially with multiple disks.
    // (Arrays.asList is a fixed-size view backed by tempDirs, so the array is shuffled too.)
    List<String> shuffleDirs = Arrays.asList(tempDirs);
    Collections.shuffle(shuffleDirs);

    Duration requestSegmentsTimeout =
            configuration.get(NettyShuffleEnvironmentOptions.NETWORK_BUFFERS_REQUEST_TIMEOUT);

    BoundedBlockingSubpartitionType blockingSubpartitionType =
            BoundedBlockingSubpartitionType.FILE;

    CompressionCodec compressionCodec =
            configuration.get(NettyShuffleEnvironmentOptions.SHUFFLE_COMPRESSION_CODEC);
    // Batch shuffle compression is implied by any codec other than NONE.
    boolean batchShuffleCompressionEnabled = compressionCodec != CompressionCodec.NONE;

    boolean connectionReuseEnabled =
            configuration.get(
                    NettyShuffleEnvironmentOptions.TCP_CONNECTION_REUSE_ACROSS_JOBS_ENABLED);

    // Sanity check: each gate needs at least one buffer when a cap is configured.
    checkArgument(
            !maxRequiredBuffersPerGate.isPresent() || maxRequiredBuffersPerGate.get() >= 1,
            String.format(
                    "At least one buffer is required for each gate, please increase the value of %s.",
                    NettyShuffleEnvironmentOptions.NETWORK_READ_MAX_REQUIRED_BUFFERS_PER_GATE
                            .key()));

    // Tiered storage is only configured for the hybrid batch shuffle modes.
    TieredStorageConfiguration tieredStorageConfiguration = null;
    if ((configuration.get(BATCH_SHUFFLE_MODE) == ALL_EXCHANGES_HYBRID_FULL
            || configuration.get(BATCH_SHUFFLE_MODE) == ALL_EXCHANGES_HYBRID_SELECTIVE)) {
        tieredStorageConfiguration =
                TieredStorageConfiguration.fromConfiguration(configuration);
    }

    return new NettyShuffleEnvironmentConfiguration(
            numberOfNetworkBuffers,
            pageSize,
            startingBufferSize,
            initialRequestBackoff,
            maxRequestBackoff,
            listenerTimeout,
            buffersPerChannel,
            extraBuffersPerGate,
            maxRequiredBuffersPerGate,
            requestSegmentsTimeout,
            isNetworkDetailedMetrics,
            nettyConfig,
            shuffleDirs.toArray(tempDirs),
            blockingSubpartitionType,
            batchShuffleCompressionEnabled,
            compressionCodec,
            maxBuffersPerChannel,
            batchShuffleReadMemoryBytes,
            sortShuffleMinBuffers,
            sortShuffleMinParallelism,
            BufferDebloatConfiguration.fromConfiguration(configuration),
            connectionReuseEnabled,
            20,
            tieredStorageConfiguration);
}
Utility method to extract network related parameters from the configuration and to sanity check them. @param configuration configuration object @param networkMemorySize the size of memory reserved for shuffle environment @param localTaskManagerCommunication true, to skip initializing the network stack @param taskManagerAddress identifying the IP address under which the TaskManager will be accessible @return NettyShuffleEnvironmentConfiguration
fromConfiguration
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/NettyShuffleEnvironmentConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/NettyShuffleEnvironmentConfiguration.java
Apache-2.0
/**
 * Parses the port (range) for data exchange from the configuration. An explicitly
 * configured bind port wins over the plain data port.
 *
 * @param configuration configuration object
 * @return the data port range
 */
private static PortRange getDataBindPortRange(Configuration configuration) {
    if (configuration.contains(NettyShuffleEnvironmentOptions.DATA_BIND_PORT)) {
        return new PortRange(configuration.get(NettyShuffleEnvironmentOptions.DATA_BIND_PORT));
    }

    final int dataPort = configuration.get(NettyShuffleEnvironmentOptions.DATA_PORT);
    ConfigurationParserUtils.checkConfigParameter(
            dataPort >= 0,
            dataPort,
            NettyShuffleEnvironmentOptions.DATA_PORT.key(),
            "Leave config parameter empty or use 0 to let the system choose a port automatically.");
    return new PortRange(dataPort);
}
Parses the hosts / ports for communication and data exchange from configuration. @param configuration configuration object @return the data port
getDataBindPortRange
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/NettyShuffleEnvironmentConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/NettyShuffleEnvironmentConfiguration.java
Apache-2.0
private static int calculateNumberOfNetworkBuffers(MemorySize networkMemorySize, int pageSize) { // tolerate offcuts between intended and allocated memory due to segmentation (will be // available to the user-space memory) long numberOfNetworkBuffersLong = networkMemorySize.getBytes() / pageSize; if (numberOfNetworkBuffersLong > Integer.MAX_VALUE) { throw new IllegalArgumentException( "The given number of memory bytes (" + networkMemorySize.getBytes() + ") corresponds to more than MAX_INT pages."); } return (int) numberOfNetworkBuffersLong; }
Calculates the number of network buffers based on configuration and jvm heap size. @param networkMemorySize the size of memory reserved for shuffle environment @param pageSize size of memory segment @return the number of network buffers
calculateNumberOfNetworkBuffers
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/NettyShuffleEnvironmentConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/NettyShuffleEnvironmentConfiguration.java
Apache-2.0
/** No-op: this dummy implementation deliberately ignores external failure requests. */
@Override
public void failExternally(Throwable cause) {}
A dummy implementation of the {@link TaskActions} which is mainly used for tests.
failExternally
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/NoOpTaskActions.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/NoOpTaskActions.java
Apache-2.0
/**
 * Returns the current execution state of the task.
 *
 * @return the current execution state of the task
 */
public ExecutionState getExecutionState() {
    final ExecutionState currentState = executionState;
    return currentState;
}
Returns the current execution state of the task. @return The current execution state of the task.
getExecutionState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
Apache-2.0
/**
 * Checks whether the task has failed, is canceled, or is being canceled at the moment.
 *
 * @return true if the task is in state CANCELING, CANCELED, or FAILED; false otherwise
 */
public boolean isCanceledOrFailed() {
    // Snapshot the concurrently-updated state field once so the three comparisons all see
    // the same state; the original re-read the field for every comparison.
    final ExecutionState state = executionState;
    return state == ExecutionState.CANCELING
            || state == ExecutionState.CANCELED
            || state == ExecutionState.FAILED;
}
Checks whether the task has failed, is canceled, or is being canceled at the moment. @return True if the task is in state FAILED, CANCELING, or CANCELED; false otherwise.
isCanceledOrFailed
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
Apache-2.0
/**
 * Returns the exception that caused this task to fail, or null if the task has not failed.
 *
 * @return the failure cause, or null
 */
public Throwable getFailureCause() {
    final Throwable cause = failureCause;
    return cause;
}
If the task has failed, this method gets the exception that caused this task to fail. Otherwise this method returns null. @return The exception that caused the task to fail, or null, if the task has not failed.
getFailureCause
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
Apache-2.0
/**
 * Tries to transition the execution state from {@code currentState} to {@code newState}
 * without recording a failure cause.
 *
 * @param currentState expected current state of the execution
 * @param newState target state of the execution
 * @return true if the transition was successful, otherwise false
 */
private boolean transitionState(ExecutionState currentState, ExecutionState newState) {
    return transitionState(currentState, newState, null);
}
Try to transition the execution state from the current state to the new state. @param currentState of the execution @param newState of the execution @return true if the transition was successful, otherwise false
transitionState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
Apache-2.0
/**
 * Atomically tries to transition the execution state from {@code currentState} to
 * {@code newState}, logging the transition and — for proper (non-cancellation)
 * failures — recording {@code cause} as the task's failure cause.
 *
 * @param currentState expected current state of the execution
 * @param newState target state of the execution
 * @param cause cause of the transition, or null
 * @return true if the transition was successful, otherwise false
 */
private boolean transitionState(
        ExecutionState currentState, ExecutionState newState, Throwable cause) {
    // CAS so that concurrent transitions cannot both succeed from the same state.
    if (STATE_UPDATER.compareAndSet(this, currentState, newState)) {
        if (cause == null) {
            // plain transition without a cause
            LOG.info(
                    "{} ({}) switched from {} to {}.",
                    taskNameWithSubtask,
                    executionId,
                    currentState,
                    newState);
        } else if (ExceptionUtils.findThrowable(cause, CancelTaskException.class).isPresent()) {
            // cancellation is expected — log the stack trace only at DEBUG
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                        "{} ({}) switched from {} to {} due to CancelTaskException:",
                        taskNameWithSubtask,
                        executionId,
                        currentState,
                        newState,
                        cause);
            } else {
                LOG.info(
                        "{} ({}) switched from {} to {} due to CancelTaskException.",
                        taskNameWithSubtask,
                        executionId,
                        currentState,
                        newState);
            }
        } else {
            // proper failure of the task. record the exception as the root
            // cause
            failureCause = cause;
            LOG.warn(
                    "{} ({}) switched from {} to {} with failure cause:",
                    taskNameWithSubtask,
                    executionId,
                    currentState,
                    newState,
                    cause);
        }

        return true;
    } else {
        return false;
    }
}
Try to transition the execution state from the current state to the new state. @param currentState of the execution @param newState of the execution @param cause of the transition change or null @return true if the transition was successful, otherwise false
transitionState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
Apache-2.0
/**
 * Cancels the task execution by delegating to {@code cancelOrFailAndCancelInvokable} with
 * target state CANCELING. Runs with the job id installed in the MDC so log lines are
 * attributable to the job.
 */
public void cancelExecution() {
    try (MdcUtils.MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobId))) {
        LOG.info("Attempting to cancel task {} ({}).", taskNameWithSubtask, executionId);
        cancelOrFailAndCancelInvokable(ExecutionState.CANCELING, null);
    }
}
Cancels the task execution. If the task is already in a terminal state (such as FINISHED, CANCELED, FAILED), or if the task is already canceling this does nothing. Otherwise it sets the state to CANCELING, and, if the invokable code is running, starts an asynchronous thread that aborts that code. <p>This method never blocks.
cancelExecution
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
Apache-2.0
/**
 * Loads the given task invokable class through the supplied classloader and instantiates
 * it via its {@code (Environment)} constructor.
 *
 * @param classLoader the classloader to load the class through
 * @param className the name of the class to load
 * @param environment the task environment passed to the constructor
 * @return the instantiated invokable task object
 * @throws Throwable forwards exceptions from the task's eager initialization; also throws
 *     if the class cannot be loaded or misses the expected constructor
 */
private static TaskInvokable loadAndInstantiateInvokable(
        ClassLoader classLoader, String className, Environment environment) throws Throwable {
    final Class<? extends TaskInvokable> invokableClass;
    try {
        // load through the user-code classloader and verify it is a TaskInvokable subtype
        invokableClass =
                Class.forName(className, true, classLoader).asSubclass(TaskInvokable.class);
    } catch (Throwable t) {
        throw new Exception("Could not load the task's invokable class.", t);
    }

    Constructor<? extends TaskInvokable> statelessCtor;

    try {
        statelessCtor = invokableClass.getConstructor(Environment.class);
    } catch (NoSuchMethodException ee) {
        throw new FlinkException("Task misses proper constructor", ee);
    }

    // instantiate the class
    try {
        //noinspection ConstantConditions  --> cannot happen
        return statelessCtor.newInstance(environment);
    } catch (InvocationTargetException e) {
        // directly forward exceptions from the eager initialization
        throw e.getTargetException();
    } catch (Exception e) {
        throw new FlinkException("Could not instantiate the task's invokable class.", e);
    }
}
Instantiates the given task invokable class, passing the given environment (and possibly the initial task state) to the task's constructor. <p>The method will first try to instantiate the task via a constructor accepting both the Environment and the TaskStateSnapshot. If no such constructor exists, and there is no initial state, the method will fall back to the stateless convenience constructor that accepts only the Environment. @param classLoader The classloader to load the class through. @param className The name of the class to load. @param environment The task environment. @return The instantiated invokable task object. @throws Throwable Forwards all exceptions that happen during initialization of the task. Also throws an exception if the task class misses the necessary constructor.
loadAndInstantiateInvokable
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
Apache-2.0
/**
 * Calls {@code cancel()} on the invokable, releases input/output resources to unblock
 * asynchronous producer/consumer threads, and triggers the initial interrupt of the task
 * thread. All log output carries the job id via the MDC.
 */
@Override
public void run() {
    try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobId))) {
        // the user-defined cancel method may throw errors.
        // we need do continue despite that
        try {
            invokable.cancel();
        } catch (Throwable t) {
            ExceptionUtils.rethrowIfFatalError(t);
            logger.error("Error while canceling the task {}.", taskName, t);
        }

        // Early release of input and output buffer pools. We do this
        // in order to unblock async Threads, which produce/consume the
        // intermediate streams outside of the main Task Thread (like
        // the Kafka consumer).
        // Notes: 1) This does not mean to release all network resources,
        // the task thread itself will release them; 2) We can not close
        // ResultPartitions here because of possible race conditions with
        // Task thread so we just call the fail here.
        failAllResultPartitions();
        closeAllInputGates();

        invokable.maybeInterruptOnCancel(executor, null, null);
    } catch (Throwable t) {
        ExceptionUtils.rethrowIfFatalError(t);
        logger.error("Error in the task canceler for task {}.", taskName, t);
    }
}
This runner calls cancel() on the invokable, closes input-/output resources, and initially interrupts the task thread.
run
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
Apache-2.0
/**
 * Waits for the task's executor thread to terminate and, while it stays alive, keeps
 * requesting an interrupt once per interval (logging where the thread is stuck).
 */
@Override
public void run() {
    try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobID))) {
        // we initially wait for one interval
        // in most cases, the threads go away immediately (by the cancellation thread)
        // and we need not actually do anything
        executorThread.join(interruptIntervalMillis);

        // log stack trace where the executing thread is stuck and
        // interrupt the running thread periodically while it is still alive
        while (executorThread.isAlive()) {
            task.maybeInterruptOnCancel(executorThread, taskName, interruptIntervalMillis);
            try {
                executorThread.join(interruptIntervalMillis);
            } catch (InterruptedException e) {
                // we ignore this and fall through the loop
            }
        }
    } catch (Throwable t) {
        ExceptionUtils.rethrowIfFatalError(t);
        log.error("Error in the task canceler for task {}.", taskName, t);
    }
}
The interval in which we interrupt.
run
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
Apache-2.0
/**
 * Deserializes and returns the attached exception, or null if this status update carries
 * no failure.
 *
 * @param userCodeClassloader classloader able to resolve user-defined exception classes
 * @return the deserialized exception, or null if none is attached
 */
public Throwable getError(ClassLoader userCodeClassloader) {
    return throwable == null ? null : throwable.deserializeError(userCodeClassloader);
}
Gets the attached exception, which is in serialized form. Returns null, if the status update is no failure with an associated exception. @param userCodeClassloader The classloader that can resolve user-defined exceptions. @return The attached exception, or null, if none.
getError
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskExecutionState.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskExecutionState.java
Apache-2.0
/**
 * Returns the ID of the task this result belongs to.
 *
 * @return the execution attempt id of the task this result belongs to
 */
public ExecutionAttemptID getID() {
    final ExecutionAttemptID id = executionId;
    return id;
}
Returns the ID of the task this result belongs to. @return The ID of the task this result belongs to.
getID
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskExecutionState.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskExecutionState.java
Apache-2.0
/**
 * Creates a {@link TaskManagerLocation} from an unresolved location: resolves the
 * external address via DNS and attaches a hostname supplier chosen by the resolution
 * mode (hostname lookup vs. using the IP address as-is).
 *
 * @param unresolvedLocation the location whose external address gets resolved
 * @param resolutionMode decides how hostnames are derived from the resolved address
 * @return the resolved task manager location
 * @throws UnknownHostException if the external address cannot be resolved
 */
public static TaskManagerLocation fromUnresolvedLocation(
        final UnresolvedTaskManagerLocation unresolvedLocation,
        final ResolutionMode resolutionMode)
        throws UnknownHostException {
    InetAddress inetAddress = InetAddress.getByName(unresolvedLocation.getExternalAddress());
    switch (resolutionMode) {
        case RETRIEVE_HOST_NAME:
            // lazily retrieves (and caches) hostname / FQDN from the address
            return new TaskManagerLocation(
                    unresolvedLocation.getResourceID(),
                    inetAddress,
                    unresolvedLocation.getDataPort(),
                    new DefaultHostNameSupplier(inetAddress),
                    unresolvedLocation.getNodeId());
        case USE_IP_ONLY:
            // skips reverse DNS; the IP address serves as the hostname
            return new TaskManagerLocation(
                    unresolvedLocation.getResourceID(),
                    inetAddress,
                    unresolvedLocation.getDataPort(),
                    new IpOnlyHostNameSupplier(inetAddress),
                    unresolvedLocation.getNodeId());
        default:
            throw new UnsupportedOperationException("Unsupported resolution mode provided.");
    }
}
Constructs a new instance connection info object. The constructor will attempt to retrieve the instance's host name and domain name through the operating system's lookup mechanisms. @param inetAddress the network address the instance's task manager binds its sockets to @param dataPort the port instance's task manager expects to receive transfer envelopes on
fromUnresolvedLocation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
Apache-2.0
/**
 * Returns the port on which the instance's task manager expects to receive transfer
 * envelopes.
 *
 * @return the data transfer port of the task manager
 */
public int dataPort() {
    final int port = dataPort;
    return port;
}
Returns the port instance's task manager expects to receive transfer envelopes on. @return the port instance's task manager expects to receive transfer envelopes on
dataPort
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
Apache-2.0
/**
 * Returns the network address the instance's task manager binds its sockets to.
 *
 * @return the bind address of the task manager
 */
public InetAddress address() {
    final InetAddress boundAddress = inetAddress;
    return boundAddress;
}
Returns the network address the instance's task manager binds its sockets to. @return the network address the instance's task manager binds its sockets to
address
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
Apache-2.0
/**
 * Gets the textual representation of the address where the TaskManager operates.
 *
 * @return the address as a string (via {@link InetAddress#toString()})
 */
public String addressString() {
    final String textual = inetAddress.toString();
    return textual;
}
Gets the IP address where the TaskManager operates. @return The IP address.
addressString
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
Apache-2.0
/**
 * Returns the fully-qualified domain name of the TaskManager as provided by the
 * configured hostname supplier.
 *
 * @return the fully-qualified domain name of the TaskManager
 */
public String getFQDNHostname() {
    final String fqdn = hostNameSupplier.getFqdnHostName();
    return fqdn;
}
Returns the fully-qualified domain name of the TaskManager provided by {@link #hostNameSupplier}. @return The fully-qualified domain name of the TaskManager.
getFQDNHostname
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
Apache-2.0
/**
 * Gets the hostname of the TaskManager as provided by the configured hostname supplier.
 *
 * @return the hostname of the TaskManager
 */
public String getHostname() {
    final String host = hostNameSupplier.getHostName();
    return host;
}
Gets the hostname of the TaskManager from {@link #hostNameSupplier}. @return The hostname of the TaskManager.
getHostname
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
Apache-2.0
/**
 * Returns the ID of the node the task manager is located on.
 *
 * @return the node ID
 */
public String getNodeId() {
    final String id = nodeId;
    return id;
}
Return the ID of node where the task manager is located on. @return The ID of node where the task manager is located on.
getNodeId
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
Apache-2.0
/**
 * Gets the endpoint of the TaskManager in the format "$HOST:$PORT".
 *
 * @return the endpoint of the TaskManager
 */
public String getEndpoint() {
    // plain concatenation produces the same "$HOST:$PORT" text as String.format("%s:%d", ...)
    return getFQDNHostname() + ':' + dataPort;
}
Gets the endpoint of the TaskManager in the format of "$HOST:$PORT". @return The endpoint of the TaskManager.
getEndpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
Apache-2.0
private static String getFqdnHostName(InetAddress inetAddress) { String fqdnHostName; try { fqdnHostName = inetAddress.getCanonicalHostName(); } catch (Throwable t) { LOG.warn( "Unable to determine the canonical hostname. Input split assignment (such as " + "for HDFS files) may be non-local when the canonical hostname is missing."); LOG.debug("getCanonicalHostName() Exception:", t); fqdnHostName = inetAddress.getHostAddress(); } return fqdnHostName; }
Gets the fully qualified hostname of the TaskManager based on the network address. @param inetAddress the network address that the TaskManager binds its sockets to @return fully qualified hostname of the TaskManager
getFqdnHostName
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
Apache-2.0
@Override public String getHostName() { if (hostName == null) { hostName = TaskManagerLocation.getHostName(inetAddress); } return hostName; }
Gets the hostname of the TaskManager. The hostname derives from the fully qualified domain name (FQDN, see {@link #getFQDNHostname()}): <ul> <li>If the FQDN is the textual IP address, then the hostname is also the IP address <li>If the FQDN has only one segment (such as "localhost", or "host17"), then this is used as the hostname. <li>If the FQDN has multiple segments (such as "worker3.subgroup.company.net"), then the first segment (here "worker3") will be used as the hostname. </ul> @return The hostname of the TaskManager.
getHostName
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
Apache-2.0
@Override public String getFqdnHostName() { if (fqdnHostName == null) { fqdnHostName = TaskManagerLocation.getFqdnHostName(inetAddress); } return fqdnHostName; }
Returns the fully-qualified domain name the TaskManager. If the name could not be determined, the return value will be a textual representation of the TaskManager's IP address. @return The fully-qualified domain name of the TaskManager.
getFqdnHostName
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
Apache-2.0
@Override public String getHostName() { return inetAddress.getHostAddress(); }
Returns the textual representation of the TaskManager's IP address as host name. @return The textual representation of the TaskManager's IP address.
getHostName
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
Apache-2.0
@Override public String getFqdnHostName() { return inetAddress.getHostAddress(); }
Returns the textual representation of the TaskManager's IP address as FQDN host name. @return The textual representation of the TaskManager's IP address.
getFqdnHostName
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerLocation.java
Apache-2.0
default String getTaskManagerBindAddress() { return getConfiguration().get(TaskManagerOptions.BIND_HOST); }
Gets the bind address of the Taskmanager. @return The bind address of the TaskManager.
getTaskManagerBindAddress
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerRuntimeInfo.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskManagerRuntimeInfo.java
Apache-2.0
public int calculateBufferSize(long totalBufferSizeInBytes, int totalBuffers) { checkArgument(totalBufferSizeInBytes >= 0, "Size of buffer should be non negative"); checkArgument(totalBuffers > 0, "Number of buffers should be positive"); // Since the result value is always limited by max buffer size while the instant value is // potentially unlimited. It can lead to an instant change from min to max value in case // when the instant value is significantly larger than the possible max value. // The solution is to limit the instant buffer size by twice of current buffer size in order // to have the same growth and shrink speeds. for example if the instant value is equal to 0 // and the current value is 16000 we can decrease it at maximum by 1600(suppose alfa=0.1) . // The idea is to allow increase and decrease size by the same number. So if the instant // value would be large(for example 100000) it will be possible to increase the current // value by 1600(the same as decreasing) because the limit will be 2 * currentValue = 32000. // Example of change speed: // growing = 32768, 29647, 26823, 24268, 21956, 19864 // shrinking = 19864, 21755, 23826, 26095, 28580, 31301, 32768 double desirableBufferSize = Math.min(((double) totalBufferSizeInBytes) / totalBuffers, 2L * lastBufferSize); lastBufferSize += alpha * (desirableBufferSize - lastBufferSize); lastBufferSize = Math.max(minBufferSize, Math.min(lastBufferSize, maxBufferSize)); return (int) Math.round(lastBufferSize); }
Calculating the buffer size over total possible buffers size and number of buffers in use. @param totalBufferSizeInBytes Total buffers size. @param totalBuffers Total number of buffers in use. @return Throughput calculated according to implemented algorithm.
calculateBufferSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/throughput/BufferSizeEMA.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/throughput/BufferSizeEMA.java
Apache-2.0
public void incomingDataSize(long receivedDataSize) { // Force resuming measurement. if (measurementStartTime == NOT_TRACKED) { measurementStartTime = clock.absoluteTimeMillis(); } currentAccumulatedDataSize += receivedDataSize; }
Class for measuring the throughput based on incoming data size and measurement period.
incomingDataSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/throughput/ThroughputCalculator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/throughput/ThroughputCalculator.java
Apache-2.0
public void pauseMeasurement() { if (measurementStartTime != NOT_TRACKED) { currentMeasurementTime += clock.absoluteTimeMillis() - measurementStartTime; } measurementStartTime = NOT_TRACKED; }
Mark when the time should not be taken into account.
pauseMeasurement
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/throughput/ThroughputCalculator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/throughput/ThroughputCalculator.java
Apache-2.0
public void resumeMeasurement() { if (measurementStartTime == NOT_TRACKED) { measurementStartTime = clock.absoluteTimeMillis(); } }
Mark when the time should be included to the throughput calculation.
resumeMeasurement
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/throughput/ThroughputCalculator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/throughput/ThroughputCalculator.java
Apache-2.0
public static ThrowableType getThrowableType(Throwable cause) { final ThrowableAnnotation annotation = cause.getClass().getAnnotation(ThrowableAnnotation.class); return annotation == null ? ThrowableType.RecoverableError : annotation.value(); }
Classify the exceptions by extracting the {@link ThrowableType} from a potential {@link ThrowableAnnotation}. @param cause the {@link Throwable} to classify. @return The extracted {@link ThrowableType} or ThrowableType.RecoverableError if there is no such annotation.
getThrowableType
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/throwable/ThrowableClassifier.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/throwable/ThrowableClassifier.java
Apache-2.0
public static Optional<Throwable> findThrowableOfThrowableType( Throwable throwable, ThrowableType throwableType) { if (throwable == null || throwableType == null) { return Optional.empty(); } Throwable t = throwable; while (t != null) { final ThrowableAnnotation annotation = t.getClass().getAnnotation(ThrowableAnnotation.class); if (annotation != null && annotation.value() == throwableType) { return Optional.of(t); } else { t = t.getCause(); } } return Optional.empty(); }
Checks whether a throwable chain contains a specific throwable type and returns the corresponding throwable. @param throwable the throwable chain to check. @param throwableType the throwable type to search for in the chain. @return Optional throwable of the throwable type if available, otherwise empty
findThrowableOfThrowableType
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/throwable/ThrowableClassifier.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/throwable/ThrowableClassifier.java
Apache-2.0
default PR getPipelinedRegionOfVertex(VID vertexId) { throw new UnsupportedOperationException(); }
The pipelined region for a specified vertex. @param vertexId the vertex id identifying the vertex for which the pipelined region should be returned @return the pipelined region of the vertex @throws IllegalArgumentException if there is no vertex in this topology with the specified vertex id
getPipelinedRegionOfVertex
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/topology/Topology.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/topology/Topology.java
Apache-2.0
public int size() { return this.elements.size(); }
Returns the number of currently stored elements. @return The number of currently stored elements.
size
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/BoundedFIFOQueue.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/BoundedFIFOQueue.java
Apache-2.0
@Override public Iterator<T> iterator() { return elements.iterator(); }
Returns the {@code BoundedFIFOQueue}'s {@link Iterator}. @return The queue's {@code Iterator}.
iterator
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/BoundedFIFOQueue.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/BoundedFIFOQueue.java
Apache-2.0
@Override public void uncaughtException(Thread t, Throwable e) { if (handleMode == ClusterOptions.UncaughtExceptionHandleMode.LOG) { LOG.error( "WARNING: Thread '{}' produced an uncaught exception. If you want to fail on uncaught exceptions, then configure {} accordingly", t.getName(), ClusterOptions.UNCAUGHT_EXCEPTION_HANDLING.key(), e); } else { // by default, fail the job FatalExitExceptionHandler.INSTANCE.uncaughtException(t, e); } }
Utility for handling any uncaught exceptions <p>Handles any uncaught exceptions according to cluster configuration in {@link ClusterOptions} to either just log exception, or fail job.
uncaughtException
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ClusterUncaughtExceptionHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ClusterUncaughtExceptionHandler.java
Apache-2.0
public static int getSlot(Configuration configuration) { int slots = configuration.get(TaskManagerOptions.NUM_TASK_SLOTS, 1); // we need this because many configs have been written with a "-1" entry if (slots == -1) { slots = 1; } ConfigurationParserUtils.checkConfigParameter( slots >= 1, slots, TaskManagerOptions.NUM_TASK_SLOTS.key(), "Number of task slots must be at least one."); return slots; }
Parses the configuration to get the number of slots and validates the value. @param configuration configuration object @return the number of slots in task manager
getSlot
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ConfigurationParserUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ConfigurationParserUtils.java
Apache-2.0
public static void checkConfigParameter( boolean condition, Object parameter, String name, String errorMessage) throws IllegalConfigurationException { if (!condition) { throw new IllegalConfigurationException( "Invalid configuration value for " + name + " : " + parameter + " - " + errorMessage); } }
Validates a condition for a config parameter and displays a standard exception, if the condition does not hold. @param condition The condition that must hold. If the condition is false, an exception is thrown. @param parameter The parameter value. Will be shown in the exception message. @param name The name of the config parameter. Will be shown in the exception message. @param errorMessage The optional custom error message to append to the exception message. @throws IllegalConfigurationException if the condition does not hold
checkConfigParameter
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ConfigurationParserUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ConfigurationParserUtils.java
Apache-2.0
public static int getPageSize(Configuration configuration) { final int pageSize = checkedDownCast( configuration.get(TaskManagerOptions.MEMORY_SEGMENT_SIZE).getBytes()); // check page size of for minimum size checkConfigParameter( pageSize >= MemoryManager.MIN_PAGE_SIZE, pageSize, TaskManagerOptions.MEMORY_SEGMENT_SIZE.key(), "Minimum memory segment size is " + MemoryManager.MIN_PAGE_SIZE); // check page size for power of two checkConfigParameter( MathUtils.isPowerOf2(pageSize), pageSize, TaskManagerOptions.MEMORY_SEGMENT_SIZE.key(), "Memory segment size must be a power of 2."); return pageSize; }
Parses the configuration to get the page size and validates the value. @param configuration configuration object @return size of memory segment
getPageSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ConfigurationParserUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ConfigurationParserUtils.java
Apache-2.0
@Override public boolean hasNext() { return false; }
Always returns false, since this iterator is empty. @see java.util.Iterator#hasNext()
hasNext
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/EmptyIterator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/EmptyIterator.java
Apache-2.0
@Override public E next() { throw new NoSuchElementException(); }
Always throws a {@link java.util.NoSuchElementException}. @see java.util.Iterator#next()
next
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/EmptyIterator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/EmptyIterator.java
Apache-2.0
@Override public E next(E target) { return null; }
Always returns null. @see MutableObjectIterator#next(Object)
next
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/EmptyMutableObjectIterator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/EmptyMutableObjectIterator.java
Apache-2.0
@Override public E next() { return null; }
Always returns null. @see MutableObjectIterator#next()
next
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/EmptyMutableObjectIterator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/EmptyMutableObjectIterator.java
Apache-2.0
public static String getVersion() { return getVersionsInstance().projectVersion; }
Returns the version of the code as String. @return The project version string.
getVersion
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
Apache-2.0
public static String getScalaVersion() { return getVersionsInstance().scalaVersion; }
Returns the version of the used Scala compiler as String. @return The scala version string.
getScalaVersion
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
Apache-2.0
public static Instant getBuildTime() { return getVersionsInstance().gitBuildTime; }
@return The Instant this version of the software was built.
getBuildTime
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
Apache-2.0
public static String getBuildTimeString() { return getVersionsInstance().gitBuildTimeStr; }
@return The Instant this version of the software was built as a String using the Europe/Berlin timezone.
getBuildTimeString
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
Apache-2.0
public static String getHadoopUser() { try { Class<?> ugiClass = Class.forName( "org.apache.hadoop.security.UserGroupInformation", false, EnvironmentInformation.class.getClassLoader()); Method currentUserMethod = ugiClass.getMethod("getCurrentUser"); Method shortUserNameMethod = ugiClass.getMethod("getShortUserName"); Object ugi = currentUserMethod.invoke(null); return (String) shortUserNameMethod.invoke(ugi); } catch (ClassNotFoundException e) { return "<no hadoop dependency found>"; } catch (LinkageError e) { // hadoop classes are not in the classpath LOG.debug( "Cannot determine user/group information using Hadoop utils. " + "Hadoop classes not loaded or compatible", e); } catch (Throwable t) { // some other error occurred that we should log and make known LOG.warn("Error while accessing user/group information via Hadoop utils.", t); } return UNKNOWN; }
Gets the name of the user that is running the JVM. @return The name of the user that is running the JVM.
getHadoopUser
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
Apache-2.0
public static long getMaxJvmHeapMemory() { final long maxMemory = Runtime.getRuntime().maxMemory(); if (maxMemory != Long.MAX_VALUE) { // we have the proper max memory return maxMemory; } else { // max JVM heap size is not set - use the heuristic to use 1/4th of the physical memory final long physicalMemory = Hardware.getSizeOfPhysicalMemory(); if (physicalMemory != -1) { // got proper value for physical memory return physicalMemory / 4; } else { throw new RuntimeException( "Could not determine the amount of free memory.\n" + "Please set the maximum memory for the JVM, e.g. -Xmx512M for 512 megabytes."); } } }
The maximum JVM heap size, in bytes. <p>This method uses the <i>-Xmx</i> value of the JVM, if set. If not set, it returns (as a heuristic) 1/4th of the physical memory size. @return The maximum JVM heap size, in bytes.
getMaxJvmHeapMemory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
Apache-2.0
public static long getSizeOfFreeHeapMemoryWithDefrag() { // trigger a garbage collection, to reduce fragmentation System.gc(); return getSizeOfFreeHeapMemory(); }
Gets an estimate of the size of the free heap memory. <p>NOTE: This method is heavy-weight. It triggers a garbage collection to reduce fragmentation and get a better estimate at the size of free memory. It is typically more accurate than the plain version {@link #getSizeOfFreeHeapMemory()}. @return An estimate of the size of the free heap memory, in bytes.
getSizeOfFreeHeapMemoryWithDefrag
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
Apache-2.0
public static long getSizeOfFreeHeapMemory() { Runtime r = Runtime.getRuntime(); return getMaxJvmHeapMemory() - r.totalMemory() + r.freeMemory(); }
Gets an estimate of the size of the free heap memory. The estimate may vary, depending on the current level of memory fragmentation and the number of dead objects. For a better (but more heavy-weight) estimate, use {@link #getSizeOfFreeHeapMemoryWithDefrag()}. @return An estimate of the size of the free heap memory, in bytes.
getSizeOfFreeHeapMemory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
Apache-2.0
public static String getJvmVersion() { try { final RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean(); return bean.getVmName() + " - " + bean.getVmVendor() + " - " + bean.getSpecVersion() + '/' + bean.getVmVersion(); } catch (Throwable t) { return UNKNOWN; } }
Gets the version of the JVM in the form "VM_Name - Vendor - Spec/Version". @return The JVM version.
getJvmVersion
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
Apache-2.0
public static String getTemporaryFileDirectory() { return System.getProperty("java.io.tmpdir"); }
Gets the directory for temporary files, as returned by the JVM system property "java.io.tmpdir". @return The directory for temporary files.
getTemporaryFileDirectory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
Apache-2.0
public static int getNumberCPUCores() { return Runtime.getRuntime().availableProcessors(); }
Gets the number of CPU cores (hardware contexts) that the JVM has access to. @return The number of CPU cores.
getNumberCPUCores
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/Hardware.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/Hardware.java
Apache-2.0
public static long getSizeOfPhysicalMemory() { // first try if the JVM can directly tell us what the system memory is // this works only on Oracle JVMs try { Class<?> clazz = Class.forName("com.sun.management.OperatingSystemMXBean"); Method method = clazz.getMethod("getTotalPhysicalMemorySize"); OperatingSystemMXBean operatingSystemMXBean = ManagementFactory.getOperatingSystemMXBean(); // someone may install different beans, so we need to check whether the bean // is in fact the sun management bean if (clazz.isInstance(operatingSystemMXBean)) { return (Long) method.invoke(operatingSystemMXBean); } } catch (ClassNotFoundException e) { // this happens on non-Oracle JVMs, do nothing and use the alternative code paths } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { LOG.warn( "Access to physical memory size: " + "com.sun.management.OperatingSystemMXBean incompatibly changed.", e); } // we now try the OS specific access paths switch (OperatingSystem.getCurrentOperatingSystem()) { case LINUX: return getSizeOfPhysicalMemoryForLinux(); case WINDOWS: return getSizeOfPhysicalMemoryForWindows(); case MAC_OS: return getSizeOfPhysicalMemoryForMac(); case FREE_BSD: return getSizeOfPhysicalMemoryForFreeBSD(); case UNKNOWN: LOG.error("Cannot determine size of physical memory for unknown operating system"); return -1; default: LOG.error("Unrecognized OS: " + OperatingSystem.getCurrentOperatingSystem()); return -1; } }
Returns the size of the physical memory in bytes. @return the size of the physical memory in bytes or {@code -1}, if the size could not be determined.
getSizeOfPhysicalMemory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/Hardware.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/Hardware.java
Apache-2.0
private static long getSizeOfPhysicalMemoryForLinux() { try (BufferedReader lineReader = new BufferedReader(new FileReader(LINUX_MEMORY_INFO_PATH))) { String line; while ((line = lineReader.readLine()) != null) { Matcher matcher = LINUX_MEMORY_REGEX.matcher(line); if (matcher.matches()) { String totalMemory = matcher.group(1); return Long.parseLong(totalMemory) * 1024L; // Convert from kilobyte to byte } } // expected line did not come LOG.error( "Cannot determine the size of the physical memory for Linux host (using '/proc/meminfo'). " + "Unexpected format."); return -1; } catch (NumberFormatException e) { LOG.error( "Cannot determine the size of the physical memory for Linux host (using '/proc/meminfo'). " + "Unexpected format."); return -1; } catch (Throwable t) { LOG.error( "Cannot determine the size of the physical memory for Linux host (using '/proc/meminfo') ", t); return -1; } }
Returns the size of the physical memory in bytes on a Linux-based operating system. @return the size of the physical memory in bytes or {@code -1}, if the size could not be determined
getSizeOfPhysicalMemoryForLinux
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/Hardware.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/Hardware.java
Apache-2.0
private static long getSizeOfPhysicalMemoryForMac() { BufferedReader bi = null; try { Process proc = Runtime.getRuntime().exec("sysctl hw.memsize"); bi = new BufferedReader( new InputStreamReader(proc.getInputStream(), StandardCharsets.UTF_8)); String line; while ((line = bi.readLine()) != null) { if (line.startsWith("hw.memsize")) { long memsize = Long.parseLong(line.split(":")[1].trim()); bi.close(); proc.destroy(); return memsize; } } } catch (Throwable t) { LOG.error("Cannot determine physical memory of machine for MacOS host", t); return -1; } finally { if (bi != null) { try { bi.close(); } catch (IOException ignored) { } } } return -1; }
Returns the size of the physical memory in bytes on a Mac OS-based operating system @return the size of the physical memory in bytes or {@code -1}, if the size could not be determined
getSizeOfPhysicalMemoryForMac
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/Hardware.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/Hardware.java
Apache-2.0
private static long getSizeOfPhysicalMemoryForWindows() { BufferedReader bi = null; try { Process proc = Runtime.getRuntime().exec("wmic memorychip get capacity"); bi = new BufferedReader( new InputStreamReader(proc.getInputStream(), StandardCharsets.UTF_8)); String line = bi.readLine(); if (line == null) { return -1L; } if (!line.startsWith("Capacity")) { return -1L; } long sizeOfPhyiscalMemory = 0L; while ((line = bi.readLine()) != null) { if (line.isEmpty()) { continue; } line = line.replaceAll(" ", ""); sizeOfPhyiscalMemory += Long.parseLong(line); } return sizeOfPhyiscalMemory; } catch (Throwable t) { LOG.error( "Cannot determine the size of the physical memory for Windows host " + "(using 'wmic memorychip')", t); return -1L; } finally { if (bi != null) { try { bi.close(); } catch (Throwable ignored) { } } } }
Returns the size of the physical memory in bytes on Windows. @return the size of the physical memory in bytes or {@code -1}, if the size could not be determined
getSizeOfPhysicalMemoryForWindows
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/Hardware.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/Hardware.java
Apache-2.0
public static void installAsShutdownHook(Logger logger) { installAsShutdownHook(logger, DEFAULT_DELAY); }
Installs the safeguard shutdown hook. The maximum time that the JVM is allowed to spend on shutdown before being killed is five seconds. @param logger The logger to log errors to.
installAsShutdownHook
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/JvmShutdownSafeguard.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/JvmShutdownSafeguard.java
Apache-2.0
public static void installAsShutdownHook(Logger logger, long delayMillis) { checkArgument(delayMillis >= 0, "delay must be >= 0"); // install the blocking shutdown hook Thread shutdownHook = new JvmShutdownSafeguard(delayMillis); ShutdownHookUtil.addShutdownHookThread( shutdownHook, JvmShutdownSafeguard.class.getSimpleName(), logger); }
Installs the safeguard shutdown hook. The maximum time that the JVM is allowed to spend on shutdown before being killed is the given number of milliseconds. @param logger The logger to log errors to. @param delayMillis The delay (in milliseconds) to wait after clean shutdown was stared, before forcibly terminating the JVM.
installAsShutdownHook
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/JvmShutdownSafeguard.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/JvmShutdownSafeguard.java
Apache-2.0
public static Collection<ThreadInfo> createThreadDump() { ThreadMXBean threadMxBean = ManagementFactory.getThreadMXBean(); return Arrays.asList(threadMxBean.dumpAllThreads(true, true)); }
Creates a thread dump of the current JVM. @return the thread dump of current JVM
createThreadDump
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/JvmUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/JvmUtils.java
Apache-2.0
public static Optional<ThreadInfoSample> createThreadInfoSample( long threadId, int maxStackTraceDepth) { ThreadMXBean threadMxBean = ManagementFactory.getThreadMXBean(); return ThreadInfoSample.from(threadMxBean.getThreadInfo(threadId, maxStackTraceDepth)); }
Creates a {@link ThreadInfoSample} for a specific thread. Contains thread traces if maxStackTraceDepth > 0. @param threadId The ID of the thread to create the thread dump for. @param maxStackTraceDepth The maximum number of entries in the stack trace to be collected. @return The thread information of a specific thread.
createThreadInfoSample
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/JvmUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/JvmUtils.java
Apache-2.0
public static Map<Long, ThreadInfoSample> createThreadInfoSample( Collection<Long> threadIds, int maxStackTraceDepth) { ThreadMXBean threadMxBean = ManagementFactory.getThreadMXBean(); long[] threadIdsArray = threadIds.stream().mapToLong(l -> l).toArray(); ThreadInfo[] threadInfo = threadMxBean.getThreadInfo(threadIdsArray, maxStackTraceDepth); List<ThreadInfo> threadInfoNoNulls = IntStream.range(0, threadIdsArray.length) .filter( i -> { if (threadInfo[i] == null) { LOG.debug( "FlameGraphs: thread {} is not alive or does not exist.", threadIdsArray[i]); return false; } return true; }) .mapToObj(i -> threadInfo[i]) .collect(Collectors.toList()); return ThreadInfoSample.from(threadInfoNoNulls); }
Creates a {@link ThreadInfoSample} for a specific thread. Contains thread traces if maxStackTraceDepth > 0. @param threadIds The IDs of the threads to create the thread dump for. @param maxStackTraceDepth The maximum number of entries in the stack trace to be collected. @return The map key is the thread id, the map value is the thread information for the requested thread IDs.
createThreadInfoSample
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/JvmUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/JvmUtils.java
Apache-2.0
@Override public void close() throws IOException { // ignore }
Decorator for input streams that ignores calls to {@link InputStream#close()}.
close
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/NonClosingInputStreamDecorator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/NonClosingInputStreamDecorator.java
Apache-2.0
public int getResourceCount(ResourceProfile resourceProfile) { return resources.getOrDefault(resourceProfile, 0); }
Number of resources with the given {@link ResourceProfile}. @param resourceProfile resourceProfile for which to look up the count @return number of resources with the given resourceProfile or {@code 0} if the resource profile does not exist
getResourceCount
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
Apache-2.0
public int getTotalResourceCount() { return resources.isEmpty() ? 0 : resources.values().stream().reduce(0, Integer::sum); }
Computes the total number of resources in this counter. @return the total number of resources in this counter
getTotalResourceCount
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
Apache-2.0
public ResourceProfile getTotalResource() { return resources.entrySet().stream() .map(entry -> entry.getKey().multiply(entry.getValue())) .reduce(ResourceProfile.ZERO, ResourceProfile::merge); }
Computes the total resources in this counter. @return the total resources in this counter
getTotalResource
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
Apache-2.0
public ResourceCounter add(ResourceCounter increment) { return internalAdd(increment.getResourcesWithCount()); }
Adds increment to this resource counter value and returns the resulting value. @param increment increment to add to this resource counter value @return new ResourceCounter containing the result of the addition
add
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
Apache-2.0
public ResourceCounter add(Map<ResourceProfile, Integer> increment) { return internalAdd(increment.entrySet()); }
Adds the given increment to this resource counter value and returns the resulting value. @param increment increment ot add to this resource counter value @return new ResourceCounter containing the result of the addition
add
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
Apache-2.0
public Collection<Map.Entry<ResourceProfile, Integer>> getResourcesWithCount() { return resources.entrySet(); }
Gets the stored resources and their counts. The counts are guaranteed to be positive (> 0). @return collection of {@link ResourceProfile} and count pairs
getResourcesWithCount
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
Apache-2.0
public boolean containsResource(ResourceProfile resourceProfile) { return resources.containsKey(resourceProfile); }
Checks whether resourceProfile is contained in this counter. @param resourceProfile resourceProfile to check whether it is contained @return {@code true} if the counter has a positive count for the given resourceProfile; otherwise {@code false}
containsResource
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
Apache-2.0
public Set<ResourceProfile> getResources() { return resources.keySet(); }
Gets all stored {@link ResourceProfile ResourceProfiles}. @return collection of stored {@link ResourceProfile ResourceProfiles}
getResources
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
Apache-2.0
public boolean isEmpty() { return resources.isEmpty(); }
Checks whether the resource counter is empty. @return {@code true} if the counter does not contain any counts; otherwise {@code false}
isEmpty
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
Apache-2.0
public static ResourceCounter empty() { return new ResourceCounter(Collections.emptyMap()); }
Creates an empty resource counter. @return empty resource counter
empty
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
Apache-2.0
public static ResourceCounter withResources(Map<ResourceProfile, Integer> resources) { return new ResourceCounter(new HashMap<>(resources)); }
Creates a resource counter with the specified set of resources. @param resources resources with which to initialize the resource counter @return ResourceCounter which contains the specified set of resources
withResources
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceCounter.java
Apache-2.0
public static Integer parseRestBindPortFromWebInterfaceUrl(String webInterfaceUrl) { if (webInterfaceUrl != null) { final int lastColon = webInterfaceUrl.lastIndexOf(':'); if (lastColon == -1) { return -1; } else { try { return Integer.parseInt(webInterfaceUrl.substring(lastColon + 1)); } catch (NumberFormatException e) { return -1; } } } else { return -1; } }
Parse the port from the webInterfaceUrl. @param webInterfaceUrl The web interface url to be parsed @return the parsed rest port or -1 if failed
parseRestBindPortFromWebInterfaceUrl
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceManagerUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ResourceManagerUtils.java
Apache-2.0
public static void assertNoException(Runnable runnable) { withUncaughtExceptionHandler(runnable, FatalExitExceptionHandler.INSTANCE).run(); }
Asserts that the given {@link Runnable} does not throw exceptions. If the runnable throws exceptions, then it will call the {@link FatalExitExceptionHandler}. @param runnable to assert for no exceptions
assertNoException
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/Runnables.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/Runnables.java
Apache-2.0
public static Runnable withUncaughtExceptionHandler( Runnable runnable, Thread.UncaughtExceptionHandler uncaughtExceptionHandler) { return () -> { try { runnable.run(); } catch (Throwable t) { uncaughtExceptionHandler.uncaughtException(Thread.currentThread(), t); } }; }
Guard {@link Runnable} with uncaughtException handler, because {@link java.util.concurrent.ScheduledExecutorService} does not respect the one assigned to executing {@link Thread} instance. @param runnable Runnable future to guard. @param uncaughtExceptionHandler Handler to call in case of uncaught exception. @return Future with handler.
withUncaughtExceptionHandler
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/Runnables.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/Runnables.java
Apache-2.0
public static void register(final Logger LOG) { synchronized (SignalHandler.class) { if (registered) { return; } registered = true; final String[] SIGNALS = OperatingSystem.isWindows() ? new String[] {"TERM", "INT"} : new String[] {"TERM", "HUP", "INT"}; StringBuilder bld = new StringBuilder(); bld.append("Registered UNIX signal handlers for ["); String separator = ""; for (String signalName : SIGNALS) { try { new Handler(signalName, LOG); bld.append(separator); bld.append(signalName); separator = ", "; } catch (Exception e) { LOG.info("Error while registering signal handler", e); } } bld.append("]"); LOG.info(bld.toString()); } }
Register some signal handlers. @param LOG The slf4j logger
register
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/SignalHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/SignalHandler.java
Apache-2.0
public static SlotSelectionStrategy selectSlotSelectionStrategy( final JobType jobType, final Configuration configuration) { TaskManagerLoadBalanceMode taskManagerLoadBalanceMode = configuration.get(TaskManagerOptions.TASK_MANAGER_LOAD_BALANCE_MODE); final SlotSelectionStrategy locationPreferenceSlotSelectionStrategy; locationPreferenceSlotSelectionStrategy = taskManagerLoadBalanceMode == TaskManagerLoadBalanceMode.SLOTS ? LocationPreferenceSlotSelectionStrategy.createEvenlySpreadOut() : LocationPreferenceSlotSelectionStrategy.createDefault(); final boolean isLocalRecoveryEnabled = configuration.get(StateRecoveryOptions.LOCAL_RECOVERY); if (isLocalRecoveryEnabled) { if (jobType == JobType.STREAMING) { return PreviousAllocationSlotSelectionStrategy.create( locationPreferenceSlotSelectionStrategy); } else { LOG.warn( "Batch job does not support local recovery. Falling back to use " + locationPreferenceSlotSelectionStrategy.getClass()); return locationPreferenceSlotSelectionStrategy; } } else { return locationPreferenceSlotSelectionStrategy; } }
Utility class for selecting {@link SlotSelectionStrategy}.
selectSlotSelectionStrategy
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/SlotSelectionStrategyUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/SlotSelectionStrategyUtils.java
Apache-2.0
public static byte[] serializeOrDiscard(StateObject stateObject) throws Exception { try { return InstantiationUtil.serializeObject(stateObject); } catch (Exception e) { try { stateObject.discardState(); } catch (Exception discardException) { e.addSuppressed(discardException); } ExceptionUtils.rethrowException(e); } // will never happen but is added to please the compiler return new byte[0]; }
Serializes the passed {@link StateObject} and discards the state in case of failure. @param stateObject the {@code StateObject} that shall be serialized. @return The serialized version of the passed {@code StateObject}. @throws Exception if an error occurred during the serialization. The corresponding {@code StateObject} will be discarded in that case.
serializeOrDiscard
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/StateHandleStoreUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/StateHandleStoreUtils.java
Apache-2.0
public static <T extends Serializable> T deserialize(byte[] data) throws IOException, ClassNotFoundException { return InstantiationUtil.deserializeObject( data, Thread.currentThread().getContextClassLoader()); }
Deserializes the passed data into a {@link RetrievableStateHandle}. @param data The data that shall be deserialized. @param <T> The type of data handled by the deserialized {@code RetrievableStateHandle}. @return The {@code RetrievableStateHandle} instance. @throws IOException Any of the usual Input/Output related exceptions. @throws ClassNotFoundException If the data couldn't be deserialized into a {@code RetrievableStateHandle} referring to the expected type {@code <T>}.
deserialize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/StateHandleStoreUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/StateHandleStoreUtils.java
Apache-2.0
public static CuratorFrameworkWithUnhandledErrorListener startCuratorFramework( Configuration configuration, FatalErrorHandler fatalErrorHandler) { checkNotNull(configuration, "configuration"); String zkQuorum = configuration.getValue(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM); if (zkQuorum == null || StringUtils.isBlank(zkQuorum)) { throw new RuntimeException( "No valid ZooKeeper quorum has been specified. " + "You can specify the quorum via the configuration key '" + HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM.key() + "'."); } int sessionTimeout = Math.toIntExact( configuration .get(HighAvailabilityOptions.ZOOKEEPER_SESSION_TIMEOUT) .toMillis()); int connectionTimeout = Math.toIntExact( configuration .get(HighAvailabilityOptions.ZOOKEEPER_CONNECTION_TIMEOUT) .toMillis()); int retryWait = Math.toIntExact( configuration.get(HighAvailabilityOptions.ZOOKEEPER_RETRY_WAIT).toMillis()); int maxRetryAttempts = configuration.get(HighAvailabilityOptions.ZOOKEEPER_MAX_RETRY_ATTEMPTS); String root = configuration.getValue(HighAvailabilityOptions.HA_ZOOKEEPER_ROOT); String namespace = configuration.getValue(HighAvailabilityOptions.HA_CLUSTER_ID); boolean disableSaslClient = configuration.get(SecurityOptions.ZOOKEEPER_SASL_DISABLE); ACLProvider aclProvider; ZkClientACLMode aclMode = ZkClientACLMode.fromConfig(configuration); if (disableSaslClient && aclMode == ZkClientACLMode.CREATOR) { String errorMessage = "Cannot set ACL role to " + ZkClientACLMode.CREATOR + " since SASL authentication is " + "disabled through the " + SecurityOptions.ZOOKEEPER_SASL_DISABLE.key() + " property"; LOG.warn(errorMessage); throw new IllegalConfigurationException(errorMessage); } if (aclMode == ZkClientACLMode.CREATOR) { LOG.info("Enforcing creator for ZK connections"); aclProvider = new SecureAclProvider(); } else { LOG.info("Enforcing default ACL for ZK connections"); aclProvider = new DefaultACLProvider(); } String rootWithNamespace = generateZookeeperPath(root, namespace); LOG.info("Using 
'{}' as Zookeeper namespace.", rootWithNamespace); boolean ensembleTracking = configuration.get(HighAvailabilityOptions.ZOOKEEPER_ENSEMBLE_TRACKING); final CuratorFrameworkFactory.Builder curatorFrameworkBuilder = CuratorFrameworkFactory.builder() .connectString(zkQuorum) .sessionTimeoutMs(sessionTimeout) .connectionTimeoutMs(connectionTimeout) .retryPolicy(new ExponentialBackoffRetry(retryWait, maxRetryAttempts)) // Curator prepends a '/' manually and throws an Exception if the // namespace starts with a '/'. .namespace(trimStartingSlash(rootWithNamespace)) .ensembleTracker(ensembleTracking) .aclProvider(aclProvider); if (configuration.contains(HighAvailabilityOptions.ZOOKEEPER_CLIENT_AUTHORIZATION)) { Map<String, String> authMap = configuration.get(HighAvailabilityOptions.ZOOKEEPER_CLIENT_AUTHORIZATION); List<AuthInfo> authInfos = authMap.entrySet().stream() .map( entry -> new AuthInfo( entry.getKey(), entry.getValue() .getBytes( ConfigConstants .DEFAULT_CHARSET))) .collect(Collectors.toList()); curatorFrameworkBuilder.authorization(authInfos); } if (configuration.contains(HighAvailabilityOptions.ZOOKEEPER_MAX_CLOSE_WAIT)) { long maxCloseWait = configuration.get(HighAvailabilityOptions.ZOOKEEPER_MAX_CLOSE_WAIT).toMillis(); if (maxCloseWait < 0 || maxCloseWait > Integer.MAX_VALUE) { throw new IllegalConfigurationException( "The value (%d ms) is out-of-range for %s. 
The milliseconds timeout is expected to be between 0 and %d ms.", maxCloseWait, HighAvailabilityOptions.ZOOKEEPER_MAX_CLOSE_WAIT.key(), Integer.MAX_VALUE); } curatorFrameworkBuilder.maxCloseWaitMs((int) maxCloseWait); } if (configuration.contains( HighAvailabilityOptions.ZOOKEEPER_SIMULATED_SESSION_EXP_PERCENT)) { curatorFrameworkBuilder.simulatedSessionExpirationPercent( configuration.get( HighAvailabilityOptions.ZOOKEEPER_SIMULATED_SESSION_EXP_PERCENT)); } if (configuration.get(HighAvailabilityOptions.ZOOKEEPER_TOLERATE_SUSPENDED_CONNECTIONS)) { curatorFrameworkBuilder.connectionStateErrorPolicy( new SessionConnectionStateErrorPolicy()); } return startCuratorFramework(curatorFrameworkBuilder, fatalErrorHandler); }
Starts a {@link CuratorFramework} instance and connects it to the given ZooKeeper quorum. @param configuration {@link Configuration} object containing the configuration values @param fatalErrorHandler {@link FatalErrorHandler} fatalErrorHandler to handle unexpected errors of {@link CuratorFramework} @return {@link CuratorFrameworkWithUnhandledErrorListener} instance
startCuratorFramework
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0
@VisibleForTesting public static CuratorFrameworkWithUnhandledErrorListener startCuratorFramework( CuratorFrameworkFactory.Builder builder, FatalErrorHandler fatalErrorHandler) { CuratorFramework cf = builder.build(); UnhandledErrorListener unhandledErrorListener = (message, throwable) -> { LOG.error( "Unhandled error in curator framework, error message: {}", message, throwable); // The exception thrown in UnhandledErrorListener will be caught by // CuratorFramework. So we mostly trigger exit process or interact with main // thread to inform the failure in FatalErrorHandler. fatalErrorHandler.onFatalError(throwable); }; cf.getUnhandledErrorListenable().addListener(unhandledErrorListener); cf.start(); return new CuratorFrameworkWithUnhandledErrorListener(cf, unhandledErrorListener); }
Starts a {@link CuratorFramework} instance and connects it to the given ZooKeeper quorum from a builder. @param builder {@link CuratorFrameworkFactory.Builder} A builder for curatorFramework. @param fatalErrorHandler {@link FatalErrorHandler} fatalErrorHandler to handle unexpected errors of {@link CuratorFramework} @return {@link CuratorFrameworkWithUnhandledErrorListener} instance
startCuratorFramework
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0
public static boolean isZooKeeperRecoveryMode(Configuration flinkConf) { return HighAvailabilityMode.fromConfig(flinkConf).equals(HighAvailabilityMode.ZOOKEEPER); }
Returns whether {@link HighAvailabilityMode#ZOOKEEPER} is configured.
isZooKeeperRecoveryMode
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0
public static String getZooKeeperEnsemble(Configuration flinkConf) throws IllegalConfigurationException { String zkQuorum = flinkConf.getValue(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM); if (zkQuorum == null || StringUtils.isBlank(zkQuorum)) { throw new IllegalConfigurationException("No ZooKeeper quorum specified in config."); } // Remove all whitespace zkQuorum = zkQuorum.replaceAll("\\s+", ""); return zkQuorum; }
Returns the configured ZooKeeper quorum (and removes whitespace, because ZooKeeper does not tolerate it).
getZooKeeperEnsemble
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java
Apache-2.0