code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/** Sums the number of queued output buffers over all tracked {@link ResultPartition}s. */
@Override
public Integer getValue() {
    int queued = 0;
    for (ResultPartition partition : resultPartitions) {
        queued += partition.getNumberOfQueuedBuffers();
    }
    return queued;
}
Gauge metric measuring the number of queued output buffers for {@link ResultPartition}s.
getValue
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/metrics/OutputBuffersGauge.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/metrics/OutputBuffersGauge.java
Apache-2.0
/** Sums the byte size of queued output buffers over all tracked {@link ResultPartition}s. */
@Override
public Long getValue() {
    long queuedBytes = 0L;
    for (ResultPartition partition : resultPartitions) {
        queuedBytes += partition.getSizeOfQueuedBuffersUnsafe();
    }
    return queuedBytes;
}
Gauge metric measuring the size in bytes of queued output buffers for {@link ResultPartition}s.
getValue
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/metrics/OutputBuffersSizeGauge.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/metrics/OutputBuffersSizeGauge.java
Apache-2.0
/**
 * Collects the total number of queued buffers across all sub-partitions in a
 * best-effort way (the value may already be stale when it is reported).
 *
 * @return total number of queued buffers
 */
long refreshAndGetTotal() { return partition.getNumberOfQueuedBuffers(); }
Iterates over all sub-partitions and collects the total number of queued buffers in a best-effort way. @return total number of queued buffers
refreshAndGetTotal
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/metrics/ResultPartitionMetrics.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/metrics/ResultPartitionMetrics.java
Apache-2.0
int refreshAndGetMin() { int min = Integer.MAX_VALUE; int numSubpartitions = partition.getNumberOfSubpartitions(); if (numSubpartitions == 0) { // meaningful value when no channels exist: return 0; } for (int targetSubpartition = 0; targetSubpartition < numSubpartitions; ++targetSubpartition) { int size = partition.getNumberOfQueuedBuffers(targetSubpartition); min = Math.min(min, size); } return min; }
Iterates over all sub-partitions and collects the minimum number of queued buffers in a sub-partition in a best-effort way. @return minimum number of queued buffers per sub-partition (<tt>0</tt> if no sub-partitions exist)
refreshAndGetMin
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/metrics/ResultPartitionMetrics.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/metrics/ResultPartitionMetrics.java
Apache-2.0
/**
 * Computes the maximum number of queued buffers over all sub-partitions in a
 * best-effort way.
 *
 * @return largest per-sub-partition queue size (0 when no sub-partitions exist)
 */
int refreshAndGetMax() {
    int max = 0;
    final int numSubpartitions = partition.getNumberOfSubpartitions();
    for (int subpartition = 0; subpartition < numSubpartitions; subpartition++) {
        final int queued = partition.getNumberOfQueuedBuffers(subpartition);
        if (queued > max) {
            max = queued;
        }
    }
    return max;
}
Iterates over all sub-partitions and collects the maximum number of queued buffers in a sub-partition in a best-effort way. @return maximum number of queued buffers per sub-partition
refreshAndGetMax
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/metrics/ResultPartitionMetrics.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/metrics/ResultPartitionMetrics.java
Apache-2.0
/**
 * Returns the maximal estimated time-to-consume (in milliseconds) over all input gates.
 *
 * <p>NOTE: when no gates are registered this deliberately reports
 * {@code Long.MIN_VALUE}, matching the original behavior.
 */
@Override
public Long getValue() {
    long maxMillis = Long.MIN_VALUE;
    for (SingleInputGate gate : gates) {
        final long estimate = gate.getLastEstimatedTimeToConsume().toMillis();
        if (estimate > maxMillis) {
            maxMillis = estimate;
        }
    }
    return maxMillis;
}
Gauge metric measuring the maximal time to consume all buffers of all input gates.
getValue
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/metrics/TimeToConsumeGauge.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/metrics/TimeToConsumeGauge.java
Apache-2.0
private void decodePartialBufferSizes(ByteBuf data) { // If partial buffers are present and not all are processed yet if (bufferResponse.numOfPartialBuffers > 0 && bufferResponse.getPartialBufferSizes().size() < bufferResponse.numOfPartialBuffers) { // Continue completing the current partial buffer size if necessary accumulatePartialSizeBytes(data); // Process remaining partial buffer sizes when possible readRemainingBufferSizes(data); } }
Decodes the sizes of partial buffers from the provided ByteBuf. This function processes the incoming data and accumulates bytes until a full integer can be formed to represent the size of each buffer. @param data the ByteBuf containing the incoming data.
decodePartialBufferSizes
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/BufferResponseDecoder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/BufferResponseDecoder.java
Apache-2.0
/**
 * Continues accumulating the bytes of a size value that was split across frames.
 * Once {@code Integer.BYTES} bytes are gathered, the size is materialized, appended
 * to the response's partial-buffer sizes, and the accumulator is cleared.
 *
 * @param data the ByteBuf containing the incoming data
 */
private void accumulatePartialSizeBytes(ByteBuf data) {
    if (partialSizeBytes == null) {
        return; // no size value is currently split across frames
    }
    while (data.isReadable() && partialSizeBytes.size() < Integer.BYTES) {
        partialSizeBytes.add(data.readByte());
    }
    if (partialSizeBytes.size() == Integer.BYTES) {
        bufferResponse.getPartialBufferSizes().add(buildIntFromBytes(partialSizeBytes));
        partialSizeBytes = null;
    }
}
Accumulates bytes to form a complete integer size for a partial buffer. If enough bytes are accumulated, forms an integer and adds it to bufferResponse list. @param data the ByteBuf containing the incoming data.
accumulatePartialSizeBytes
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/BufferResponseDecoder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/BufferResponseDecoder.java
Apache-2.0
/**
 * Reads whole 4-byte size values directly from the data while enough bytes are
 * available; when fewer than {@code Integer.BYTES} bytes remain, stashes them in a
 * fresh accumulator so the size can be completed from the next frame.
 *
 * @param data the ByteBuf containing the incoming data
 */
private void readRemainingBufferSizes(ByteBuf data) {
    while (bufferResponse.getPartialBufferSizes().size() < bufferResponse.numOfPartialBuffers
            && data.isReadable()) {
        if (data.readableBytes() >= Integer.BYTES) {
            bufferResponse.getPartialBufferSizes().add(data.readInt());
        } else {
            // Not enough bytes for a full int: buffer the tail until the next frame.
            partialSizeBytes = new ArrayList<>();
            while (data.isReadable()) {
                partialSizeBytes.add(data.readByte());
            }
        }
    }
}
Reads remaining complete partial buffer sizes directly from the ByteBuf if possible. Prepares for partially available sizes by initializing byte accumulator. @param data the ByteBuf containing the incoming data.
readRemainingBufferSizes
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/BufferResponseDecoder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/BufferResponseDecoder.java
Apache-2.0
/**
 * Accumulates data from {@code source} into {@code target}.
 *
 * <p>Fast path: if nothing has been accumulated yet and {@code source} already holds
 * {@code targetAccumulationSize} readable bytes, {@code source} itself is returned.
 * Otherwise bytes are copied into {@code target}; {@code target} is returned once the
 * accumulation is complete, and {@code null} while more data is still required.
 *
 * @param target the target buffer
 * @param source the source buffer
 * @param targetAccumulationSize the total size of data to accumulate
 * @param accumulatedSize the size of data accumulated so far
 * @return the buffer holding the fully accumulated data, or {@code null} if incomplete
 */
@Nullable
public static ByteBuf accumulate(
        ByteBuf target, ByteBuf source, int targetAccumulationSize, int accumulatedSize) {
    if (accumulatedSize == 0 && source.readableBytes() >= targetAccumulationSize) {
        return source;
    }
    final int remaining = targetAccumulationSize - accumulatedSize;
    final int copyLength = Math.min(source.readableBytes(), remaining);
    if (copyLength > 0) {
        target.writeBytes(source, copyLength);
    }
    return copyLength == remaining ? target : null;
}
Accumulates data from <tt>source</tt> to <tt>target</tt>. If no data has been accumulated yet and <tt>source</tt> has enough data, <tt>source</tt> will be returned directly. Otherwise, data will be copied into <tt>target</tt>. If the size of data copied after this operation has reached <tt>targetAccumulationSize</tt>, <tt>target</tt> will be returned, otherwise <tt>null</tt> will be returned to indicate more data is required. @param target The target buffer. @param source The source buffer. @param targetAccumulationSize The target size of data to accumulate. @param accumulatedSize The size of data accumulated so far. @return The ByteBuf containing accumulated data. If not enough data has been accumulated, <tt>null</tt> will be returned.
accumulate
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/ByteBufUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/ByteBufUtils.java
Apache-2.0
/**
 * Creates a {@link NetworkBuffer} by wrapping the memory behind the given portion of
 * the received buffer (direct memory is wrapped off-heap; heap memory is wrapped via
 * its backing array) instead of slicing the buffer itself.
 *
 * @param bufferOrEvent the received response whose data is wrapped
 * @param recycler the recycler managing the resulting buffer's lifecycle
 * @param offset the offset within the received buffer where the data begins
 * @param size the size of the data to wrap
 * @return a new {@link NetworkBuffer} over the requested region
 */
private static NetworkBuffer sliceBuffer(
        NettyMessage.BufferResponse bufferOrEvent,
        BufferRecycler recycler,
        int offset,
        int size) {
    ByteBuffer nioBuffer = bufferOrEvent.getBuffer().getNioBuffer(offset, size);
    MemorySegment segment;
    if (nioBuffer.isDirect()) {
        segment = MemorySegmentFactory.wrapOffHeapMemory(nioBuffer);
    } else {
        // NOTE(review): array() returns the WHOLE backing array and ignores
        // arrayOffset()/position(); this is only correct if the heap buffer's data
        // region starts at array index 0 — confirm against getNioBuffer(offset, size).
        byte[] bytes = nioBuffer.array();
        segment = MemorySegmentFactory.wrap(bytes);
    }
    return new NetworkBuffer(
            segment, recycler, bufferOrEvent.dataType, bufferOrEvent.isCompressed, size);
}
Creates a {@link NetworkBuffer} by wrapping the specified portion of a given buffer's underlying memory segment rather than creating a slice of the buffer. <p>Currently, there is an assumption that each buffer received from a {@link RemoteInputChannel} exclusively holds a single memory segment object. <p>If this assumption were violated and multiple buffers were allowed to share a single segment, it could introduce instability and unpredictable behavior. <p>For instance, the BufferManager releases buffers by directly operating on their underlying memory segments and adding them to a list designated for release. If buffers share the same segment, the segment might be added to the buffer pool multiple times, and subsequent buffers may inadvertently be allocated to the same segment for reading and writing. <p>Therefore, to avoid introducing potential risks, this method operates directly on the segment instead of slicing the buffer. @param bufferOrEvent the buffer or event containing the data to be wrapped into a network buffer @param recycler the buffer recycler used to manage the lifecycle of the network buffer @param offset the offset within the buffer where the data begins @param size the size of the data to be wrapped @return a new {@link NetworkBuffer} wrapping the specified portion of the buffer's memory segment
sliceBuffer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandler.java
Apache-2.0
/**
 * Delegates to the subpartition view's availability check, passing in whether this
 * reader currently holds credits ({@code numCreditsAvailable > 0}) so the view can
 * factor credit availability into the result.
 */
@Override
public ResultSubpartitionView.AvailabilityWithBacklog getAvailabilityAndBacklog() {
    return subpartitionView.getAvailabilityAndBacklog(numCreditsAvailable > 0);
}
Returns true only if the next buffer is an event or the reader has both available credits and buffers. @implSpec BEWARE: this must be in sync with {@link #getNextDataType(BufferAndBacklog)}, such that {@code getNextDataType(bufferAndBacklog) != NONE <=> AvailabilityWithBacklog#isAvailable()}!
getAvailabilityAndBacklog
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/CreditBasedSequenceNumberingViewReader.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/CreditBasedSequenceNumberingViewReader.java
Apache-2.0
/** Returns the number of arenas configured for this pool. */
int getNumberOfArenas() { return numberOfArenas; }
Returns the number of arenas. @return Number of arenas.
getNumberOfArenas
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyBufferPool.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyBufferPool.java
Apache-2.0
/** Returns the chunk size (in bytes) used by each arena. */
int getChunkSize() { return chunkSize; }
Returns the chunk size. @return Chunk size.
getChunkSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyBufferPool.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyBufferPool.java
Apache-2.0
/**
 * Returns the number of currently allocated bytes by summing, via reflection, the
 * allocated chunks in every direct arena's chunk lists and multiplying by the chunk
 * size. Mostly useful for debugging.
 *
 * @return number of currently allocated bytes, or empty if no direct arenas exist
 * @throws NoSuchFieldException if the arena internals changed (Netty version mismatch)
 * @throws IllegalAccessException if the arena internals are inaccessible
 */
public Optional<Long> getNumberOfAllocatedBytes()
        throws NoSuchFieldException, IllegalAccessException {
    if (directArenas == null) {
        return Optional.empty();
    }
    // Names of the arena's internal chunk lists, covering all usage levels.
    final String[] chunkLists = {"qInit", "q000", "q025", "q050", "q075", "q100"};
    long numChunks = 0;
    for (Object arena : directArenas) {
        for (String chunkList : chunkLists) {
            numChunks += getNumberOfAllocatedChunks(arena, chunkList);
        }
    }
    return Optional.of(numChunks * chunkSize);
}
Returns the number of currently allocated bytes. <p>The stats are gathered via Reflection and are mostly relevant for debugging purposes. @return Number of currently allocated bytes. @throws NoSuchFieldException Error getting the statistics (should not happen when the Netty version stays the same). @throws IllegalAccessException Error getting the statistics (should not happen when the Netty version stays the same).
getNumberOfAllocatedBytes
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyBufferPool.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyBufferPool.java
Apache-2.0
/**
 * Allocates a header-prepared buffer with unknown content length ({@code -1}); the
 * caller must write the actual length as an integer to position 0 before sending.
 *
 * @param allocator byte buffer allocator to use
 * @param id {@link NettyMessage} subclass ID
 * @return a newly allocated buffer with header data written
 */
private static ByteBuf allocateBuffer(ByteBufAllocator allocator, byte id) { return allocateBuffer(allocator, id, -1); }
Allocates a new (header and contents) buffer and adds some header information for the frame decoder. <p>Before sending the buffer, you must write the actual length after adding the contents as an integer to position <tt>0</tt>! @param allocator byte buffer allocator to use @param id {@link NettyMessage} subclass ID @return a newly allocated direct buffer with header data written for {@link NettyMessageEncoder}
allocateBuffer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyMessage.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyMessage.java
Apache-2.0
/**
 * Allocates a header-prepared buffer for the given content length ({@code -1} if
 * unknown, in which case the caller must write the real length to position 0 later).
 *
 * <p>NOTE(review): delegates with extra arguments {@code 0} and {@code true} —
 * presumably an additional header length and an allocation flag; confirm against the
 * five-argument overload.
 *
 * @param allocator byte buffer allocator to use
 * @param id {@link NettyMessage} subclass ID
 * @param contentLength content length (or {@code -1} if unknown)
 * @return a newly allocated buffer with header data written
 */
private static ByteBuf allocateBuffer(ByteBufAllocator allocator, byte id, int contentLength) { return allocateBuffer(allocator, id, 0, contentLength, true); }
Allocates a new (header and contents) buffer and adds some header information for the frame decoder. <p>If the <tt>contentLength</tt> is unknown, you must write the actual length after adding the contents as an integer to position <tt>0</tt>! @param allocator byte buffer allocator to use @param id {@link NettyMessage} subclass ID @param contentLength content length (or <tt>-1</tt> if unknown) @return a newly allocated direct buffer with header data written for {@link NettyMessageEncoder}
allocateBuffer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyMessage.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyMessage.java
Apache-2.0
/**
 * Prepares this delegate when the channel becomes active: forwards the activation to
 * the buffer and non-buffer response decoders, allocates the direct buffer used to
 * accumulate frame headers, then continues the pipeline's activation.
 */
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
    bufferResponseDecoder.onChannelActive(ctx);
    nonBufferResponseDecoder.onChannelActive(ctx);
    frameHeaderBuffer = ctx.alloc().directBuffer(FRAME_HEADER_LENGTH);
    super.channelActive(ctx);
}
Forwards channel activation to the buffer and non-buffer response decoders and allocates the direct buffer used to accumulate frame headers.
channelActive
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyMessageClientDecoderDelegate.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyMessageClientDecoderDelegate.java
Apache-2.0
/**
 * Records the id and total length of the message that is about to be decoded.
 *
 * @param msgId the type of the message to be decoded
 * @param messageLength the length of the message to be decoded
 */
void onNewMessageReceived(int msgId, int messageLength) { this.msgId = msgId; this.messageLength = messageLength; }
Notifies that a new message is to be decoded. @param msgId The type of the message to be decoded. @param messageLength The length of the message to be decoded.
onNewMessageReceived
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyMessageDecoder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyMessageDecoder.java
Apache-2.0
/**
 * Returns whether this client may be disposed: the close reference counter is zero
 * (no one holds a reference) and the client cannot be reused for new requests, in
 * which case the underlying TCP channel can be safely closed.
 */
boolean canBeDisposed() { return closeReferenceCounter.get() == 0 && !canBeReused(); }
Returns true when the reference counter is zero and the client cannot be reused, i.e. the underlying TCP channel can be safely closed.
canBeDisposed
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyPartitionRequestClient.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyPartitionRequestClient.java
Apache-2.0
/**
 * Validates the client and increments the reference counter.
 *
 * <p>Note: the counter is incremented before the client is handed out so the closing
 * logic cannot race ahead of a new user.
 *
 * @return whether this client can be used
 */
boolean validateClientAndIncrementReferenceCounter() {
    if (clientHandler.hasChannelError()) {
        return false;
    }
    return closeReferenceCounter.incrementAndGet() > 0;
}
Validate the client and increment the reference counter. <p>Note: the reference counter has to be incremented before returning the instance of this client to ensure correct closing logic. @return whether this client can be used.
validateClientAndIncrementReferenceCounter
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyPartitionRequestClient.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyPartitionRequestClient.java
Apache-2.0
/**
 * Requests a remote intermediate result partition queue from the producer this client
 * was created for. Registers the input channel with the client handler, builds a
 * {@link PartitionRequest} and writes it to the TCP channel — immediately when
 * {@code delayMs == 0}, otherwise scheduled on the channel's event loop. On a failed
 * write, the channel is deregistered and notified of the transport error.
 *
 * @param partitionId the partition to request
 * @param subpartitionIndexSet the sub-partitions to consume
 * @param inputChannel the requesting input channel
 * @param delayMs delay in milliseconds before sending the request (0 = immediate)
 * @throws IOException if the client is already closed
 */
@Override
public void requestSubpartition(
        final ResultPartitionID partitionId,
        final ResultSubpartitionIndexSet subpartitionIndexSet,
        final RemoteInputChannel inputChannel,
        int delayMs)
        throws IOException {
    checkNotClosed();
    LOG.debug(
            "Requesting subpartition {} of partition {} with {} ms delay.",
            subpartitionIndexSet,
            partitionId,
            delayMs);
    clientHandler.addInputChannel(inputChannel);
    final PartitionRequest request =
            new PartitionRequest(
                    partitionId,
                    subpartitionIndexSet,
                    inputChannel.getInputChannelId(),
                    inputChannel.getInitialCredit());
    // Failure path: undo the channel registration and surface the transport error both
    // to the input channel and to the remote side.
    final ChannelFutureListener listener =
            future -> {
                if (!future.isSuccess()) {
                    clientHandler.removeInputChannel(inputChannel);
                    inputChannel.onError(
                            new LocalTransportException(
                                    String.format(
                                            "Sending the partition request to '%s [%s] (#%d)' failed.",
                                            connectionId.getAddress(),
                                            connectionId
                                                    .getResourceID()
                                                    .getStringWithMetadata(),
                                            connectionId.getConnectionIndex()),
                                    future.channel().localAddress(),
                                    future.cause()));
                    sendToChannel(
                            new ConnectionErrorMessage(
                                    future.cause() == null
                                            ? new RuntimeException(
                                                    "Cannot send partition request.")
                                            : future.cause()));
                }
            };
    if (delayMs == 0) {
        ChannelFuture f = tcpChannel.writeAndFlush(request);
        f.addListener(listener);
    } else {
        // Delayed request: schedule the write on the event loop so it still runs on
        // the channel's thread.
        final ChannelFuture[] f = new ChannelFuture[1];
        tcpChannel
                .eventLoop()
                .schedule(
                        () -> {
                            f[0] = tcpChannel.writeAndFlush(request);
                            f[0].addListener(listener);
                        },
                        delayMs,
                        TimeUnit.MILLISECONDS);
    }
}
Requests a remote intermediate result partition queue. <p>The request goes to the remote producer, for which this partition request client instance has been created.
requestSubpartition
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyPartitionRequestClient.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyPartitionRequestClient.java
Apache-2.0
/**
 * Builds the server-side channel pipeline: message encoder, message+frame decoder,
 * partition request handler, and the queue of partition queues that performs the
 * actual writes.
 *
 * @return channel handlers in pipeline order
 */
public ChannelHandler[] getServerChannelHandlers() {
    final PartitionRequestQueue partitionQueues = new PartitionRequestQueue();
    final PartitionRequestServerHandler requestHandler =
            new PartitionRequestServerHandler(
                    partitionProvider, taskEventPublisher, partitionQueues);
    return new ChannelHandler[] {
        messageEncoder, new NettyMessage.NettyMessageDecoder(), requestHandler, partitionQueues
    };
}
Returns the server channel handlers. <pre> +-------------------------------------------------------------------+ | SERVER CHANNEL PIPELINE | | | | +----------+----------+ (3) write +----------------------+ | | | Queue of queues +----------->| Message encoder | | | +----------+----------+ +-----------+----------+ | | /|\ \|/ | | | (2) enqueue | | | +----------+----------+ | | | | Request handler | | | | +----------+----------+ | | | /|\ | | | | | | | +-----------+-----------+ | | | | Message+Frame decoder | | | | +-----------+-----------+ | | | /|\ | | +---------------+-----------------------------------+---------------+ | | (1) client request \|/ +---------------+-----------------------------------+---------------+ | | | | | [ Socket.read() ] [ Socket.write() ] | | | | Netty Internal I/O Threads (Transport Implementation) | +-------------------------------------------------------------------+ </pre> @return channel handlers
getServerChannelHandlers
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyProtocol.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyProtocol.java
Apache-2.0
/**
 * Builds the client-side channel pipeline: message encoder, decoder delegate, and the
 * credit-based request handler (which is also handed to the decoder delegate).
 *
 * @return channel handlers in pipeline order
 */
public ChannelHandler[] getClientChannelHandlers() {
    final NetworkClientHandler clientHandler = new CreditBasedPartitionRequestClientHandler();
    return new ChannelHandler[] {
        messageEncoder, new NettyMessageClientDecoderDelegate(clientHandler), clientHandler
    };
}
Returns the client channel handlers. <pre> +-----------+----------+ +----------------------+ | Remote input channel | | request client | +-----------+----------+ +-----------+----------+ | | (1) write +---------------+-----------------------------------+---------------+ | | CLIENT CHANNEL PIPELINE | | | | \|/ | | +----------+----------+ +----------------------+ | | | Request handler + | Message encoder | | | +----------+----------+ +-----------+----------+ | | /|\ \|/ | | | | | | +----------+------------+ | | | | Message+Frame decoder | | | | +----------+------------+ | | | /|\ | | +---------------+-----------------------------------+---------------+ | | (3) server response \|/ (2) client request +---------------+-----------------------------------+---------------+ | | | | | [ Socket.read() ] [ Socket.write() ] | | | | Netty Internal I/O Threads (Transport Implementation) | +-------------------------------------------------------------------+ </pre> @return channel handlers
getClientChannelHandlers
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyProtocol.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyProtocol.java
Apache-2.0
@Nullable Buffer allocatePooledNetworkBuffer(InputChannelID receiverId) { Buffer buffer = null; RemoteInputChannel inputChannel = networkClientHandler.getInputChannel(receiverId); // If the input channel has been released, we cannot allocate buffer and the received // message // will be discarded. if (inputChannel != null) { buffer = inputChannel.requestBuffer(); } return buffer; }
Allocates a pooled network buffer for the specific input channel. @param receiverId The id of the requested input channel. @return The pooled network buffer.
allocatePooledNetworkBuffer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NetworkBufferAllocator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NetworkBufferAllocator.java
Apache-2.0
/**
 * Allocates an un-pooled, heap-backed network buffer of the given size.
 *
 * @param size the requested buffer size; must be positive
 * @param dataType the data type this buffer represents
 * @return the un-pooled network buffer
 */
Buffer allocateUnPooledNetworkBuffer(int size, Buffer.DataType dataType) {
    checkArgument(size > 0, "Illegal buffer size, must be positive.");
    final MemorySegment segment = MemorySegmentFactory.wrap(new byte[size]);
    return new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE, dataType);
}
Allocates an un-pooled network buffer with the specific size. @param size The requested buffer size. @param dataType The data type this buffer represents. @return The un-pooled network buffer.
allocateUnPooledNetworkBuffer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NetworkBufferAllocator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NetworkBufferAllocator.java
Apache-2.0
/**
 * Removes the cached client for the given {@link ConnectionID}, but only if the
 * cached (completed) future resolved to exactly the given client instance.
 */
void destroyPartitionRequestClient(ConnectionID connectionId, PartitionRequestClient client) {
    final CompletableFuture<NettyPartitionRequestClient> future = clients.get(connectionId);
    if (future == null || !future.isDone()) {
        return;
    }
    future.thenAccept(
            createdClient -> {
                if (client.equals(createdClient)) {
                    clients.remove(connectionId, future);
                }
            });
}
Removes the client for the given {@link ConnectionID}.
destroyPartitionRequestClient
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestClientFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestClientFactory.java
Apache-2.0
private void enqueueAvailableReader(final NetworkSequenceViewReader reader) throws Exception { if (reader.isRegisteredAsAvailable()) { return; } ResultSubpartitionView.AvailabilityWithBacklog availabilityWithBacklog = reader.getAvailabilityAndBacklog(); if (!availabilityWithBacklog.isAvailable()) { int backlog = availabilityWithBacklog.getBacklog(); if (backlog > 0 && reader.needAnnounceBacklog()) { announceBacklog(reader, backlog); } return; } // Queue an available reader for consumption. If the queue is empty, // we try trigger the actual write. Otherwise this will be handled by // the writeAndFlushNextMessageIfPossible calls. boolean triggerWrite = availableReaders.isEmpty(); registerAvailableReader(reader); if (triggerWrite) { writeAndFlushNextMessageIfPossible(ctx.channel()); } }
Try to enqueue the reader once receiving credit notification from the consumer or receiving non-empty reader notification from the producer. <p>NOTE: Only one thread would trigger the actual enqueue after checking the reader's availability, so there is no race condition here.
enqueueAvailableReader
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueue.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueue.java
Apache-2.0
/**
 * Creates a BoundedBlockingSubpartition that stores the partition data in a file:
 * data is eagerly spilled and readers read directly from the file channel.
 */
public static BoundedBlockingSubpartition createWithFileChannel(
        int index, ResultPartition parent, File tempFile, int readBufferSize, boolean sslEnabled)
        throws IOException {
    final FileChannelBoundedData boundedData =
            FileChannelBoundedData.create(tempFile.toPath(), readBufferSize);
    // NOTE(review): the last constructor flag is the negation of sslEnabled —
    // presumably enabling an optimization incompatible with SSL; confirm against the
    // constructor's parameter name.
    return new BoundedBlockingSubpartition(index, parent, boundedData, !sslEnabled);
}
Creates a BoundedBlockingSubpartition that simply stores the partition data in a file. Data is eagerly spilled (written to disk) and readers directly read from the file.
createWithFileChannel
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartition.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartition.java
Apache-2.0
/**
 * Creates a BoundedBlockingSubpartition backed by a memory-mapped file: data is
 * written to and read from the mapped region, and spilling happens lazily when the OS
 * pages the mapping out.
 */
public static BoundedBlockingSubpartition createWithMemoryMappedFile(
        int index, ResultPartition parent, File tempFile) throws IOException {
    final MemoryMappedBoundedData boundedData = MemoryMappedBoundedData.create(tempFile.toPath());
    return new BoundedBlockingSubpartition(index, parent, boundedData, false);
}
Creates a BoundedBlockingSubpartition that stores the partition data in memory mapped file. Data is written to and read from the mapped memory region. Disk spilling happens lazily, when the OS swaps out the pages from the memory mapped file.
createWithMemoryMappedFile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartition.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartition.java
Apache-2.0
/**
 * Creates a BoundedBlockingSubpartition that eagerly spills data to a file and memory
 * maps that file for reading, so evicting mapped pages requires no additional I/O
 * (unlike {@link #createWithMemoryMappedFile(int, ResultPartition, File)}).
 */
public static BoundedBlockingSubpartition createWithFileAndMemoryMappedReader(
        int index, ResultPartition parent, File tempFile) throws IOException {
    final FileChannelMemoryMappedBoundedData boundedData =
            FileChannelMemoryMappedBoundedData.create(tempFile.toPath());
    return new BoundedBlockingSubpartition(index, parent, boundedData, false);
}
Creates a BoundedBlockingSubpartition that stores the partition data in a file and memory maps that file for reading. Data is eagerly spilled (written to disk) and then mapped into memory. The main difference to the {@link #createWithMemoryMappedFile(int, ResultPartition, File)} variant is that no I/O is necessary when pages from the memory mapped file are evicted.
createWithFileAndMemoryMappedReader
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartition.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartition.java
Apache-2.0
/**
 * Returns the current buffer together with backlog information and pre-fetches the
 * next one from the data reader, or {@code null} when the reader is exhausted (or
 * disposed). Decrements the data-buffer backlog for data buffers and advances the
 * sequence number.
 */
@Nullable
@Override
public BufferAndBacklog getNextBuffer() throws IOException {
    final Buffer current = nextBuffer; // copy reference to stack
    if (current == null) {
        // as per contract, we must return null when the reader is empty,
        // but also in case the reader is disposed (rather than throwing an exception)
        return null;
    }
    if (current.isBuffer()) {
        // only data buffers count towards the backlog, events do not
        dataBufferBacklog--;
    }
    assert dataReader != null;
    // pre-fetch so the next data type can be reported as look-ahead
    nextBuffer = dataReader.nextBuffer();
    Buffer.DataType nextDataType =
            nextBuffer != null ? nextBuffer.getDataType() : Buffer.DataType.NONE;
    return BufferAndBacklog.fromBufferAndLookahead(
            current, nextDataType, dataBufferBacklog, sequenceNumber++);
}
Returns the current buffer with backlog information and pre-fetches the next one, or <tt>null</tt> when the reader is empty or disposed.
getNextBuffer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartitionReader.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartitionReader.java
Apache-2.0
/** Delegates to the file-channel backed variant: data is eagerly spilled to the file. */
@Override
public BoundedBlockingSubpartition create(
        int index, ResultPartition parent, File tempFile, int readBufferSize, boolean sslEnabled)
        throws IOException {
    return BoundedBlockingSubpartition.createWithFileChannel(
            index, parent, tempFile, readBufferSize, sslEnabled);
}
A BoundedBlockingSubpartition type that simply stores the partition data in a file. Data is eagerly spilled (written to disk) and readers directly read from the file.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartitionType.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartitionType.java
Apache-2.0
/**
 * Delegates to the memory-mapped-file variant; {@code readBufferSize} and
 * {@code sslEnabled} are not used by that variant.
 */
@Override
public BoundedBlockingSubpartition create(
        int index, ResultPartition parent, File tempFile, int readBufferSize, boolean sslEnabled)
        throws IOException {
    return BoundedBlockingSubpartition.createWithMemoryMappedFile(index, parent, tempFile);
}
A BoundedBlockingSubpartition type that stores the partition data in memory mapped file. Data is written to and read from the mapped memory region. Disk spilling happens lazily, when the OS swaps out the pages from the memory mapped file.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartitionType.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartitionType.java
Apache-2.0
@Override
public BoundedBlockingSubpartition create(
        int index, ResultPartition parent, File tempFile, int readBufferSize, boolean sslEnabled)
        throws IOException {
    // FILE_MMAP variant: data is eagerly spilled to a file which is memory mapped for reading.
    final BoundedBlockingSubpartition subpartition =
            BoundedBlockingSubpartition.createWithFileAndMemoryMappedReader(
                    index, parent, tempFile);
    return subpartition;
}
Creates a BoundedBlockingSubpartition that stores the partition data in a file and memory maps that file for reading. Data is eagerly spilled (written to disk) and then mapped into memory. The main difference to the {@link BoundedBlockingSubpartitionType#MMAP} variant is that no I/O is necessary when pages from the memory mapped file are evicted.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartitionType.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartitionType.java
Apache-2.0
@Override
public BoundedBlockingSubpartition create(
        int index, ResultPartition parent, File tempFile, int readBufferSize, boolean sslEnabled)
        throws IOException {
    // AUTO variant: delegate to whichever type the factory picks for this platform.
    final BoundedBlockingSubpartitionType chosenType =
            ResultPartitionFactory.getBoundedBlockingType();
    return chosenType.create(index, parent, tempFile, readBufferSize, sslEnabled);
}
Selects the BoundedBlockingSubpartition type based on the current memory architecture. If 64-bit, the type of {@link BoundedBlockingSubpartitionType#FILE_MMAP} is recommended. Otherwise, the type of {@link BoundedBlockingSubpartitionType#FILE} is used by default.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartitionType.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartitionType.java
Apache-2.0
/**
 * Gets a reader for the bounded data, backed by a {@code NoOpResultSubpartitionView}.
 * Multiple readers may be created. This call only succeeds once the write phase was
 * finished via {@code finishWrite()}.
 */
default BoundedData.Reader createReader() throws IOException {
    return createReader(new NoOpResultSubpartitionView());
}
Gets a reader for the bounded data. Multiple readers may be created. This call only succeeds once the write phase was finished via {@link #finishWrite()}.
createReader
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedData.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedData.java
Apache-2.0
/**
 * Creates new FileChannelMemoryMappedBoundedData, creating a memory mapped file at the
 * given path. Uses a single region of the maximum addressable size.
 */
public static FileChannelMemoryMappedBoundedData create(Path memMappedFilePath)
        throws IOException {
    return createWithRegionSize(memMappedFilePath, Integer.MAX_VALUE);
}
Creates new FileChannelMemoryMappedBoundedData, creating a memory mapped file at the given path.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/FileChannelMemoryMappedBoundedData.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/FileChannelMemoryMappedBoundedData.java
Apache-2.0
/**
 * Creates new FileChannelMemoryMappedBoundedData, creating a memory mapped file at the
 * given path. Each mapped region (= ByteBuffer) will be of the given size.
 *
 * @param memMappedFilePath path of the file to create; must not exist yet
 * @param regionSize maximum size of a single mapped region, must be positive
 * @throws IOException if the file cannot be created
 */
public static FileChannelMemoryMappedBoundedData createWithRegionSize(
        Path memMappedFilePath, int regionSize) throws IOException {
    checkNotNull(memMappedFilePath, "memMappedFilePath");
    // fixed message typo: was "regions size most be > 0"
    checkArgument(regionSize > 0, "region size must be > 0");

    // CREATE_NEW guarantees we never clobber an existing file
    final FileChannel fileChannel =
            FileChannel.open(
                    memMappedFilePath,
                    StandardOpenOption.READ,
                    StandardOpenOption.WRITE,
                    StandardOpenOption.CREATE_NEW);

    return new FileChannelMemoryMappedBoundedData(memMappedFilePath, fileChannel, regionSize);
}
Creates new FileChannelMemoryMappedBoundedData, creating a memory mapped file at the given path. Each mapped region (= ByteBuffer) will be of the given size.
createWithRegionSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/FileChannelMemoryMappedBoundedData.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/FileChannelMemoryMappedBoundedData.java
Apache-2.0
/**
 * Releases the given partitions and stops tracking the partitions that were released.
 * Convenience overload that forwards with the boolean flag set to {@code true}.
 */
default void stopTrackingAndReleasePartitions(
        Collection<ResultPartitionID> resultPartitionIds) {
    stopTrackingAndReleasePartitions(resultPartitionIds, true);
}
Releases the given partitions and stop the tracking of partitions that were released.
stopTrackingAndReleasePartitions
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/JobMasterPartitionTracker.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/JobMasterPartitionTracker.java
Apache-2.0
/** Gets all tracked partitions whose partition type is not persistent. */
default Collection<ResultPartitionDeploymentDescriptor> getAllTrackedNonClusterPartitions() {
    return getAllTrackedPartitions().stream()
            .filter(d -> !d.getPartitionType().isPersistent())
            .collect(Collectors.toList());
}
Gets all the non-cluster partitions under tracking.
getAllTrackedNonClusterPartitions
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/JobMasterPartitionTracker.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/JobMasterPartitionTracker.java
Apache-2.0
/** Gets all tracked partitions whose partition type is persistent. */
default Collection<ResultPartitionDeploymentDescriptor> getAllTrackedClusterPartitions() {
    return getAllTrackedPartitions().stream()
            .filter(d -> d.getPartitionType().isPersistent())
            .collect(Collectors.toList());
}
Gets all the cluster partitions under tracking.
getAllTrackedClusterPartitions
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/JobMasterPartitionTracker.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/JobMasterPartitionTracker.java
Apache-2.0
/**
 * Frees all direct buffers, closes the file and deletes the backing file. After this
 * call, any ByteBuffer previously obtained from this instance must no longer be accessed.
 */
public void close() throws IOException {
    IOUtils.closeQuietly(file);

    // in case we dispose before finishing writes
    for (ByteBuffer bb : fullBuffers) {
        PlatformDependent.freeDirectBuffer(bb);
    }
    fullBuffers.clear();

    if (currentBuffer != null) {
        PlatformDependent.freeDirectBuffer(currentBuffer);
        currentBuffer = null;
    }

    // To make this compatible with all versions of Windows, we must wait with
    // deleting the file until it is unmapped.
    // See also
    // https://stackoverflow.com/questions/11099295/file-flag-delete-on-close-and-memory-mapped-files/51649618#51649618
    Files.delete(filePath);
}
Unmaps the file from memory and deletes the file. After calling this method, access to any ByteBuffer obtained from this instance will cause a segmentation fault.
close
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/MemoryMappedBoundedData.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/MemoryMappedBoundedData.java
Apache-2.0
/**
 * Rounds the given size down to the largest multiple of {@code PAGE_SIZE} that does not
 * exceed it. Rounding down (rather than up) keeps values such as {@code Integer.MAX_VALUE}
 * within the valid maximum size of a memory mapped region.
 */
private static int alignSize(int maxRegionSize) {
    checkArgument(maxRegionSize >= PAGE_SIZE);
    return (maxRegionSize / PAGE_SIZE) * PAGE_SIZE;
}
Rounds the size down to the next multiple of the {@link #PAGE_SIZE}. We need to round down here to not exceed the original maximum size value. Otherwise, values like INT_MAX would round up to overflow the valid maximum size of a memory mapping region in Java.
alignSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/MemoryMappedBoundedData.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/MemoryMappedBoundedData.java
Apache-2.0
/**
 * Creates new MemoryMappedBoundedData, creating a memory mapped file at the given path.
 * Uses a single region of the maximum addressable size.
 */
public static MemoryMappedBoundedData create(Path memMappedFilePath) throws IOException {
    return createWithRegionSize(memMappedFilePath, Integer.MAX_VALUE);
}
Creates new MemoryMappedBoundedData, creating a memory mapped file at the given path.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/MemoryMappedBoundedData.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/MemoryMappedBoundedData.java
Apache-2.0
/**
 * Creates new MemoryMappedBoundedData, creating a memory mapped file at the given path.
 * Each mapped region (= ByteBuffer) will be of the given size.
 */
public static MemoryMappedBoundedData createWithRegionSize(
        Path memMappedFilePath, int regionSize) throws IOException {
    // CREATE_NEW: fail rather than silently overwrite an existing file
    return new MemoryMappedBoundedData(
            memMappedFilePath,
            FileChannel.open(
                    memMappedFilePath,
                    StandardOpenOption.READ,
                    StandardOpenOption.WRITE,
                    StandardOpenOption.CREATE_NEW),
            regionSize);
}
Creates new MemoryMappedBoundedData, creating a memory mapped file at the given path. Each mapped region (= ByteBuffer) will be of the given size.
createWithRegionSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/MemoryMappedBoundedData.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/MemoryMappedBoundedData.java
Apache-2.0
/**
 * Tries to get the system page size. Returns {@code PAGE_SIZE_UNKNOWN} (-1) when the page
 * size cannot be determined; fatal errors are rethrown.
 */
public static int getSystemPageSize() {
    try {
        return PageSizeUtilInternal.getSystemPageSize();
    } catch (Throwable t) {
        // only truly fatal errors escape; any other failure means "unknown"
        ExceptionUtils.rethrowIfFatalError(t);
        return PAGE_SIZE_UNKNOWN;
    }
}
Tries to get the system page size. If the page size cannot be determined, this returns -1. <p>This internally relies on the presence of "unsafe" and the resolution via some Netty utilities.
getSystemPageSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PageSizeUtil.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PageSizeUtil.java
Apache-2.0
/** Returns the system page size, or {@code DEFAULT_PAGE_SIZE} if it cannot be determined. */
public static int getSystemPageSizeOrDefault() {
    final int pageSize = getSystemPageSize();
    if (pageSize == PAGE_SIZE_UNKNOWN) {
        return DEFAULT_PAGE_SIZE;
    }
    return pageSize;
}
Tries to get the system page size. If the page size cannot be determined, this returns the {@link #DEFAULT_PAGE_SIZE}.
getSystemPageSizeOrDefault
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PageSizeUtil.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PageSizeUtil.java
Apache-2.0
/**
 * Returns the system page size, or {@code CONSERVATIVE_PAGE_SIZE_MULTIPLE} if it cannot be
 * determined.
 */
public static int getSystemPageSizeOrConservativeMultiple() {
    final int pageSize = getSystemPageSize();
    if (pageSize == PAGE_SIZE_UNKNOWN) {
        return CONSERVATIVE_PAGE_SIZE_MULTIPLE;
    }
    return pageSize;
}
Tries to get the system page size. If the page size cannot be determined, this returns the {@link #CONSERVATIVE_PAGE_SIZE_MULTIPLE}.
getSystemPageSizeOrConservativeMultiple
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PageSizeUtil.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PageSizeUtil.java
Apache-2.0
/**
 * Computes the byte offset of the index entry for the given region and subpartition in the
 * index file. Entries are laid out region by region, {@code numSubpartitions} entries of
 * {@code INDEX_ENTRY_SIZE} bytes each; both indexes start from 0.
 */
private long getIndexEntryOffset(int region, int subpartition) {
    checkArgument(region >= 0 && region < getNumRegions(), "Illegal target region.");
    checkArgument(
            subpartition >= 0 && subpartition < numSubpartitions,
            "Subpartition index out of bound.");

    final long entryIndex = ((long) region) * numSubpartitions + subpartition;
    return entryIndex * INDEX_ENTRY_SIZE;
}
Returns the index entry offset of the target region and subpartition in the index file. Both region index and subpartition index start from 0.
getIndexEntryOffset
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PartitionedFile.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PartitionedFile.java
Apache-2.0
@VisibleForTesting
void updateReadableOffsetAndSize(
        ByteBuffer indexEntryBuf, Queue<BufferPositionDescriptor> readBufferPositions)
        throws IOException {
    int startSubpartition = subpartitionIndexSet.getStartIndex();
    int endSubpartition = subpartitionIndexSet.getEndIndex();
    if (startSubpartition >= subpartitionOrderRotationIndex
            || endSubpartition < subpartitionOrderRotationIndex) {
        // the range lies entirely on one side of the rotation point: a single pass suffices
        updateReadableOffsetAndSize(
                startSubpartition, endSubpartition, indexEntryBuf, readBufferPositions);
    } else {
        // the range wraps around the rotation point: process [rotation, end] first,
        // then [start, rotation - 1], preserving the rotated consumption order
        updateReadableOffsetAndSize(
                subpartitionOrderRotationIndex,
                endSubpartition,
                indexEntryBuf,
                readBufferPositions);
        updateReadableOffsetAndSize(
                startSubpartition,
                subpartitionOrderRotationIndex - 1,
                indexEntryBuf,
                readBufferPositions);
    }
}
Updates the readable offsets and sizes for subpartitions based on a given index buffer. This method handles cases where the subpartition range is split by a rotation index, ensuring that all necessary index entries are processed. <p>The method operates in the following way: <ol> <li>It checks if the range of subpartition indices requires handling of a wrap around the rotation index. <li>If no wrap is necessary (when the range does not cross the rotation point), it directly updates readable offsets and sizes for the entire range. <li>If a wrap is necessary, it splits the process into two updates: <ul> <li>Firstly, it updates from the rotation index to the end subpartition. <li>Secondly, it updates from the start subpartition to just before the rotation index. </ul> </ol> <p>This ensures that all relevant subpartitions are correctly processed and offsets and sizes are added to the queue for subsequent reading. @param indexEntryBuf A ByteBuffer containing index entries which provide offset and size information. @param readBufferPositions A queue to store the buffer position descriptors. @throws IOException If an I/O error occurs when accessing the index file channel.
updateReadableOffsetAndSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PartitionedFileReader.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PartitionedFileReader.java
Apache-2.0
/**
 * Finishes writing the {@link PartitionedFile}: flushes the region index, closes both file
 * channels and returns the corresponding {@link PartitionedFile}.
 *
 * <p>Note: The caller is responsible for releasing the failed {@link PartitionedFile} if
 * any exception occurs.
 */
public PartitionedFile finish() throws IOException {
    checkState(!isFinished, "File writer is already finished.");
    checkState(!isClosed, "File writer is already closed.");

    isFinished = true;

    // persist the last region's index and push any buffered index entries to disk
    writeRegionIndex();
    flushIndexBuffer();
    indexBuffer.rewind();

    long dataFileSize = dataFileChannel.size();
    long indexFileSize = indexFileChannel.size();
    close();

    // hand the in-memory index cache over only when it holds all entries
    ByteBuffer indexEntryCache = null;
    if (allIndexEntriesCached) {
        indexEntryCache = indexBuffer;
    }
    indexBuffer = null;
    return new PartitionedFile(
            numRegions,
            numSubpartitions,
            dataFilePath,
            indexFilePath,
            dataFileSize,
            indexFileSize,
            numBuffers,
            indexEntryCache);
}
Finishes writing the {@link PartitionedFile} which closes the file channel and returns the corresponding {@link PartitionedFile}. <p>Note: The caller is responsible for releasing the failed {@link PartitionedFile} if any exception occurs.
finish
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PartitionedFileWriter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PartitionedFileWriter.java
Apache-2.0
/** Returns the id of the result partition this exception refers to. */
public ResultPartitionID getPartitionId() {
    return partitionId;
}
Exception for covering all the scenarios of consuming partition failure which causes the consumer task failed, and the job master would decide whether to restart the producer based on this exception.
getPartitionId
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PartitionException.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PartitionException.java
Apache-2.0
/**
 * Removes every partition request listener whose age exceeds the timeout and adds it to
 * the given collection.
 *
 * @param now the current timestamp
 * @param timeout the timeout in milliseconds
 * @param timeoutListeners collects the expired partition request listeners
 */
public void removeExpiration(
        long now, long timeout, Collection<PartitionRequestListener> timeoutListeners) {
    listeners.values()
            .removeIf(
                    listener -> {
                        if ((now - listener.getCreateTimestamp()) > timeout) {
                            timeoutListeners.add(listener);
                            return true;
                        }
                        return false;
                    });
}
Removes the expired partition request listeners and adds them to the given timeoutListeners. @param now the current timestamp @param timeout the timeout in milliseconds @param timeoutListeners collects the expired partition request listeners
removeExpiration
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PartitionRequestListenerManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PartitionRequestListenerManager.java
Apache-2.0
/**
 * Marks this view as released. No view-owned resources need freeing here: approximate
 * pipelined partitions are released by the JobMaster's partition tracker after the job
 * finishes, not via their subpartition views.
 */
@Override
public void releaseAllResources() {
    // idempotent: flips the flag only on the first call
    isReleased.compareAndSet(false, true);
}
Pipelined ResultPartition relies on its subpartition view's release to decide whether the partition is ready to release. In contrast, Approximate Pipelined ResultPartition is put into the JobMaster's Partition Tracker and relies on the tracker to release partitions after the job is finished. Hence, in the approximate pipelined case, no resource related to view is needed to be released.
releaseAllResources
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PipelinedApproximateSubpartitionView.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PipelinedApproximateSubpartitionView.java
Apache-2.0
/**
 * Forwards the {@link ChannelStateWriter} to every subpartition that implements
 * {@link ChannelStateHolder}.
 *
 * @param channelStateWriter the writer used for persisting channel state
 */
@Override
public void setChannelStateWriter(ChannelStateWriter channelStateWriter) {
    for (final ResultSubpartition subpartition : subpartitions) {
        if (subpartition instanceof ChannelStateHolder) {
            // Cast to the interface that was actually checked above; the previous cast to
            // the concrete PipelinedSubpartition could throw ClassCastException for other
            // ChannelStateHolder implementations.
            ((ChannelStateHolder) subpartition).setChannelStateWriter(channelStateWriter);
        }
    }
}
The total number of references to subpartitions of this result. The result partition can be safely released, iff the reference count is zero. Every subpartition is a user of the result as well the {@link PipelinedResultPartition} is a user itself, as it's writing to those results. Even if all consumers are released, partition can not be released until writer releases the partition as well.
setChannelStateWriter
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PipelinedResultPartition.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PipelinedResultPartition.java
Apache-2.0
/** Bumps the backlog counter when a non-event (data) buffer is added to this subpartition. */
@GuardedBy("buffers")
private void increaseBuffersInBacklog(BufferConsumer buffer) {
    assert Thread.holdsLock(buffers);

    // events and null consumers do not count against the backlog
    if (buffer == null || !buffer.isBuffer()) {
        return;
    }
    buffersInBacklog++;
}
Increases the number of non-event buffers by one after adding a non-event buffer into this subpartition.
increaseBuffersInBacklog
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PipelinedSubpartition.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PipelinedSubpartition.java
Apache-2.0
/**
 * Adds a priority element to this deque, such that it will be polled after all existing
 * priority elements but before any non-priority element.
 *
 * @param element the element to add
 */
public void addPriorityElement(T element) {
    // priority elements are rather rare and short-lived, so most of the time there are none
    if (numPriorityElements == 0) {
        // fast path: no priority section yet, the new element becomes the head
        deque.addFirst(element);
    } else if (numPriorityElements == deque.size()) {
        // no non-priority elements
        deque.add(element);
    } else {
        // remove all priority elements
        final ArrayDeque<T> priorPriority = new ArrayDeque<>(numPriorityElements);
        for (int index = 0; index < numPriorityElements; index++) {
            priorPriority.addFirst(deque.poll());
        }
        deque.addFirst(element);
        // read them before the newly added element
        for (final T priorityEvent : priorPriority) {
            deque.addFirst(priorityEvent);
        }
    }
    numPriorityElements++;
}
Adds a priority element to this deque, such that it will be polled after all existing priority elements but before any non-priority element. @param element the element to add
addPriorityElement
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
Apache-2.0
/**
 * Convenience method for adding an element with optional priority and prior removal.
 *
 * @param element the element to add
 * @param priority flag indicating if it's a priority or non-priority element
 * @param prioritize hint that the element may already be in this deque as a non-priority
 *     element
 */
public void add(T element, boolean priority, boolean prioritize) {
    if (!priority) {
        add(element);
        return;
    }
    // priority element: either move an existing entry up or insert a fresh one
    if (prioritize) {
        prioritize(element);
    } else {
        addPriorityElement(element);
    }
}
Convenience method for adding an element with optional priority and prior removal. @param element the element to add @param priority flag indicating if it's a priority or non-priority element @param prioritize flag that hints that the element is already in this deque, potentially as non-priority element.
add
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
Apache-2.0
/**
 * Prioritizes an already existing element. Note that this method assumes identity.
 *
 * <p>In general the element is removed and reinserted at a priority position; the special
 * cases below avoid the reordering when possible.
 *
 * @param element the element to prioritize
 */
public void prioritize(T element) {
    final Iterator<T> iterator = deque.iterator();
    // Already prioritized? Then, do not reorder elements.
    for (int i = 0; i < numPriorityElements && iterator.hasNext(); i++) {
        if (iterator.next() == element) {
            return;
        }
    }
    // If the next non-priority element is the given element, we can simply include it in the
    // priority section
    if (iterator.hasNext() && iterator.next() == element) {
        numPriorityElements++;
        return;
    }
    // Remove the given element.
    while (iterator.hasNext()) {
        if (iterator.next() == element) {
            iterator.remove();
            break;
        }
    }
    addPriorityElement(element);
}
Prioritizes an already existing element. Note that this method assumes identity. <p>{@implNote Since this method removes the element and reinserts it in a priority position in general, some optimizations for special cases are used.} @param element the element to prioritize.
prioritize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
Apache-2.0
/**
 * Finds the first element matching the {@link Predicate}, removes it from this deque and
 * returns it.
 *
 * @return the removed element
 * @throws NoSuchElementException if no element matches
 */
public T getAndRemove(Predicate<T> preCondition) {
    Iterator<T> iterator = deque.iterator();
    for (int i = 0; iterator.hasNext(); i++) {
        T next = iterator.next();
        if (preCondition.test(next)) {
            // keep the priority-section counter consistent when removing from within it
            if (i < numPriorityElements) {
                numPriorityElements--;
            }
            iterator.remove();
            return next;
        }
    }
    throw new NoSuchElementException();
}
Find first element matching the {@link Predicate}, remove it from the {@link PrioritizedDeque} and return it. @return removed element
getAndRemove
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
Apache-2.0
/**
 * Polls the first priority element, or the first non-priority element if the former does
 * not exist.
 *
 * @return the first element or null when empty
 */
@Nullable
public T poll() {
    final T head = deque.poll();
    if (head == null) {
        return null;
    }
    // the head counted against the priority section, if one exists
    if (numPriorityElements > 0) {
        numPriorityElements--;
    }
    return head;
}
Polls the first priority element or non-priority element if the former does not exist. @return the first element or null.
poll
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
Apache-2.0
/**
 * Returns (without removing) the first priority element, or the first non-priority element
 * if the former does not exist.
 *
 * @return the first element or null when empty
 */
@Nullable
public T peek() {
    return deque.peek();
}
Returns the first priority element or non-priority element if the former does not exist. @return the first element or null.
peek
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
Apache-2.0
/** Returns the current number of priority elements, in the range [0; {@link #size()}]. */
public int getNumPriorityElements() {
    return numPriorityElements;
}
Returns the current number of priority elements ([0; {@link #size()}]).
getNumPriorityElements
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
Apache-2.0
/**
 * Returns whether the given element is a known priority element. The test is performed by
 * identity.
 */
public boolean containsPriorityElement(T element) {
    // fast path: nothing is prioritized
    if (numPriorityElements == 0) {
        return false;
    }
    // only the first numPriorityElements entries can match
    int remaining = numPriorityElements;
    for (T candidate : deque) {
        if (remaining-- <= 0) {
            break;
        }
        if (candidate == element) {
            return true;
        }
    }
    return false;
}
Returns whether the given element is a known priority element. Test is performed by identity.
containsPriorityElement
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
Apache-2.0
/** Returns the total number of elements, priority and non-priority combined. */
public int size() {
    return deque.size();
}
Returns the number of priority and non-priority elements.
size
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
Apache-2.0
/** Removes all priority and non-priority elements and resets the priority counter. */
public void clear() {
    numPriorityElements = 0;
    deque.clear();
}
Removes all priority and non-priority elements.
clear
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
Apache-2.0
/** Returns true if this deque contains no elements at all. */
public boolean isEmpty() {
    return deque.isEmpty();
}
Returns true if there are no elements.
isEmpty
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PrioritizedDeque.java
Apache-2.0
/**
 * Registers a buffer pool with this result partition and registers the partition with the
 * partition manager.
 *
 * <p>There is one pool for each result partition, which is shared by all its sub
 * partitions. The pool is registered with the partition *after* it has been constructed in
 * order to conform to the life-cycle of task registrations in the {@link TaskExecutor}.
 */
@Override
public void setup() throws IOException {
    checkState(
            this.bufferPool == null,
            "Bug in result partition setup logic: Already registered buffer pool.");

    this.bufferPool = checkNotNull(bufferPoolFactory.get());
    setupInternal();
    partitionManager.registerResultPartition(this);
}
Registers a buffer pool with this result partition. <p>There is one pool for each result partition, which is shared by all its sub partitions. <p>The pool is registered with the partition *after* it as been constructed in order to conform to the life-cycle of task registrations in the {@link TaskExecutor}.
setup
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultPartition.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultPartition.java
Apache-2.0
private static boolean isOverdraftBufferNeeded(ResultPartitionType resultPartitionType) { // Only pipelined / pipelined-bounded partition needs overdraft buffer. More // specifically, there is no reason to request more buffers for non-pipelined (i.e. // batch) shuffle. The reasons are as follows: // 1. For BoundedBlockingShuffle, each full buffer will be directly released. // 2. For SortMergeShuffle, the maximum capacity of buffer pool is 4 * numSubpartitions. It // is efficient enough to spill this part of memory to disk. // 3. For Hybrid Shuffle, the buffer pool is unbounded. If it can't get a normal buffer, it // also can't get an overdraft buffer. return resultPartitionType.isPipelinedOrPipelinedBoundedResultPartition(); }
Return whether this result partition need overdraft buffer.
isOverdraftBufferNeeded
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultPartitionFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultPartitionFactory.java
Apache-2.0
/**
 * Returns whether this partition's upstream and downstream must be scheduled at the same
 * time, i.e. whether the consuming constraint is {@code MUST_BE_PIPELINED}.
 */
public boolean mustBePipelinedConsumed() {
    return ConsumingConstraint.MUST_BE_PIPELINED == consumingConstraint;
}
Returns whether this partition's upstream and downstream must be scheduled at the same time.
mustBePipelinedConsumed
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultPartitionType.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultPartitionType.java
Apache-2.0
/**
 * Returns whether this is exactly the {@code BLOCKING} or {@code BLOCKING_PERSISTENT}
 * result partition type. Use this for checks tied to the concrete implementation; for
 * partition-release logic use {@code isReleaseByScheduler()}, and for consumption type use
 * {@code mustBePipelinedConsumed()} / {@code canBePipelinedConsumed()}.
 */
public boolean isBlockingOrBlockingPersistentResultPartition() {
    switch (this) {
        case BLOCKING:
        case BLOCKING_PERSISTENT:
            return true;
        default:
            return false;
    }
}
{@link #isBlockingOrBlockingPersistentResultPartition()} is used to judge whether it is the specified {@link #BLOCKING} or {@link #BLOCKING_PERSISTENT} resultPartitionType. <p>this method suitable for judgment conditions related to the specific implementation of {@link ResultPartitionType}. <p>this method not related to data consumption and partition release. As for the logic related to partition release, use {@link #isReleaseByScheduler()} instead, and as consume type, use {@link #mustBePipelinedConsumed()} or {@link #canBePipelinedConsumed()} instead.
isBlockingOrBlockingPersistentResultPartition
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultPartitionType.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultPartitionType.java
Apache-2.0
/**
 * Returns whether this is exactly the {@code HYBRID_FULL} or {@code HYBRID_SELECTIVE}
 * result partition type. Use this for checks tied to the concrete implementation; for
 * partition-release logic use {@code isReleaseByScheduler()}, and for consumption type use
 * {@code mustBePipelinedConsumed()} / {@code canBePipelinedConsumed()}.
 */
public boolean isHybridResultPartition() {
    switch (this) {
        case HYBRID_FULL:
        case HYBRID_SELECTIVE:
            return true;
        default:
            return false;
    }
}
{@link #isHybridResultPartition()} is used to judge whether it is the specified {@link #HYBRID_FULL} or {@link #HYBRID_SELECTIVE} resultPartitionType. <p>this method suitable for judgment conditions related to the specific implementation of {@link ResultPartitionType}. <p>this method not related to data consumption and partition release. As for the logic related to partition release, use {@link #isReleaseByScheduler()} instead, and as consume type, use {@link #mustBePipelinedConsumed()} or {@link #canBePipelinedConsumed()} instead.
isHybridResultPartition
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultPartitionType.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultPartitionType.java
Apache-2.0
/**
 * Returns whether this is exactly the {@code PIPELINED} or {@code PIPELINED_BOUNDED}
 * result partition type. Use this for checks tied to the concrete implementation; for
 * partition-release logic use {@code isReleaseByScheduler()}, and for consumption type use
 * {@code mustBePipelinedConsumed()} / {@code canBePipelinedConsumed()}.
 */
public boolean isPipelinedOrPipelinedBoundedResultPartition() {
    switch (this) {
        case PIPELINED:
        case PIPELINED_BOUNDED:
            return true;
        default:
            return false;
    }
}
{@link #isPipelinedOrPipelinedBoundedResultPartition()} is used to judge whether it is the specified {@link #PIPELINED} or {@link #PIPELINED_BOUNDED} resultPartitionType. <p>This method suitable for judgment conditions related to the specific implementation of {@link ResultPartitionType}. <p>This method not related to data consumption and partition release. As for the logic related to partition release, use {@link #isReleaseByScheduler()} instead, and as consume type, use {@link #mustBePipelinedConsumed()} or {@link #canBePipelinedConsumed()} instead.
isPipelinedOrPipelinedBoundedResultPartition
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultPartitionType.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultPartitionType.java
Apache-2.0
/** Returns the info identifying this subpartition. */
public ResultSubpartitionInfo getSubpartitionInfo() {
    return subpartitionInfo;
}
Returns the info identifying this subpartition.
getSubpartitionInfo
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultSubpartition.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultSubpartition.java
Apache-2.0
/** Returns the index of this subpartition. */
public int getSubPartitionIndex() {
    return subpartitionInfo.getSubPartitionIdx();
}
Gets the index of this subpartition.
getSubPartitionIndex
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultSubpartition.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultSubpartition.java
Apache-2.0
/**
 * Adds the given buffer consumer to this subpartition, delegating to the two-argument variant
 * with a partial-record length of zero.
 */
@VisibleForTesting
public final int add(BufferConsumer bufferConsumer) throws IOException {
    return this.add(bufferConsumer, 0);
}
Notifies the parent partition about a consumed {@link ResultSubpartitionView}.
add
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultSubpartition.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultSubpartition.java
Apache-2.0
/**
 * Copies the next chunk of buffered records (or a single event) into {@code transitBuffer} and
 * returns it together with the target subpartition index, or returns {@code null} when nothing
 * remains. May only be called after the sort buffer was finished and before it is released.
 *
 * <p>NOTE(review): the returned {@link NetworkBuffer} is created with a no-op recycler, so
 * ownership of {@code transitBuffer} appears to stay with the caller — confirm against callers.
 */
@Override
public BufferWithSubpartition getNextBuffer(MemorySegment transitBuffer) {
    checkState(isFinished, "Sort buffer is not ready to be read.");
    checkState(!isReleased, "Sort buffer is already released.");
    if (!hasRemaining()) {
        return null;
    }

    int numBytesCopied = 0;
    DataType bufferDataType = DataType.DATA_BUFFER;
    int subpartitionIndex = subpartitionReadOrder[readOrderIndex];

    do {
        // Each index entry points at (segment index, segment offset) of a record/event.
        int sourceSegmentIndex = getSegmentIndexFromPointer(readIndexEntryAddress);
        int sourceSegmentOffset = getSegmentOffsetFromPointer(readIndexEntryAddress);
        MemorySegment sourceSegment = segments.get(sourceSegmentIndex);

        // The first 8 bytes of the index entry pack the record length and the data type.
        long lengthAndDataType = sourceSegment.getLong(sourceSegmentOffset);
        int length = getSegmentIndexFromPointer(lengthAndDataType);
        DataType dataType = DataType.values()[getSegmentOffsetFromPointer(lengthAndDataType)];

        // return the data read directly if the next to read is an event
        if (dataType.isEvent() && numBytesCopied > 0) {
            break;
        }
        bufferDataType = dataType;

        // get the next index entry address and move the read position forward
        long nextReadIndexEntryAddress = sourceSegment.getLong(sourceSegmentOffset + 8);
        sourceSegmentOffset += INDEX_ENTRY_SIZE;

        // allocate a temp buffer for the event if the target buffer is not big enough
        if (bufferDataType.isEvent() && transitBuffer.size() < length) {
            transitBuffer = MemorySegmentFactory.allocateUnpooledSegment(length);
        }

        numBytesCopied +=
                copyRecordOrEvent(
                        transitBuffer,
                        numBytesCopied,
                        sourceSegmentIndex,
                        sourceSegmentOffset,
                        length);

        if (recordRemainingBytes == 0) {
            // move to next subpartition if the current subpartition has been finished
            if (readIndexEntryAddress == lastIndexEntryAddresses[subpartitionIndex]) {
                updateReadSubpartitionAndIndexEntryAddress();
                break;
            }
            readIndexEntryAddress = nextReadIndexEntryAddress;
        }
    } while (numBytesCopied < transitBuffer.size() && bufferDataType.isBuffer());

    numTotalBytesRead += numBytesCopied;
    Buffer buffer = new NetworkBuffer(transitBuffer, (buf) -> {}, bufferDataType, numBytesCopied);
    return new BufferWithSubpartition(buffer, subpartitionIndex);
}
When getting buffers, the {@link SortBasedDataBuffer} should recycle the read target buffer with the given {@link BufferRecycler}.
getNextBuffer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SortBasedDataBuffer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SortBasedDataBuffer.java
Apache-2.0
/**
 * Requests {@code numExclusiveBuffers} exclusive memory segments from the global pool and adds
 * each, wrapped as a {@link NetworkBuffer} recycled by this manager, to the buffer queue.
 *
 * @param numExclusiveBuffers number of exclusive buffers to request; must be non-negative
 * @throws IOException if the global pool cannot serve the request
 */
void requestExclusiveBuffers(int numExclusiveBuffers) throws IOException {
    checkArgument(numExclusiveBuffers >= 0, "Num exclusive buffers must be non-negative.");
    if (numExclusiveBuffers == 0) {
        // Nothing to allocate.
        return;
    }

    // Request segments outside the lock; only the queue mutation below is synchronized.
    Collection<MemorySegment> segments =
            globalPool.requestUnpooledMemorySegments(numExclusiveBuffers);
    synchronized (bufferQueue) {
        // AvailableBufferQueue::addExclusiveBuffer may release the previously allocated
        // floating buffer, which requires the caller to recycle these released floating
        // buffers. There should be no floating buffers that have been allocated before the
        // exclusive buffers are initialized, so here only a simple assertion is required
        checkState(
                unsynchronizedGetFloatingBuffersAvailable() == 0,
                "Bug in buffer allocation logic: floating buffer is allocated before exclusive buffers are initialized.");
        for (MemorySegment segment : segments) {
            bufferQueue.addExclusiveBuffer(
                    new NetworkBuffer(segment, this), numRequiredBuffers);
        }
    }
}
Requests exclusive buffers from the provider.
requestExclusiveBuffers
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/BufferManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/BufferManager.java
Apache-2.0
/**
 * Recycles all buffers from the given queue: exclusive buffers (recognized by having this
 * manager as their recycler) are returned to the global pool in one batch, other (floating)
 * buffers are recycled individually.
 *
 * <p>Errors from individual steps are chained via suppression so every buffer is attempted;
 * the first error is rethrown at the end.
 *
 * @param buffers queue of buffers to drain and recycle
 * @throws IOException if any recycle step failed
 */
void releaseAllBuffers(ArrayDeque<Buffer> buffers) throws IOException {
    // Gather all exclusive buffers and recycle them to global pool in batch, because
    // we do not want to trigger redistribution of buffers after each recycle.
    final List<MemorySegment> exclusiveRecyclingSegments = new ArrayList<>();

    Exception err = null;
    Buffer buffer;
    while ((buffer = buffers.poll()) != null) {
        try {
            if (buffer.getRecycler() == BufferManager.this) {
                // Exclusive buffer: collect its segment for the batched return below.
                exclusiveRecyclingSegments.add(buffer.getMemorySegment());
            } else {
                buffer.recycleBuffer();
            }
        } catch (Exception e) {
            err = firstOrSuppressed(e, err);
        }
    }
    try {
        synchronized (bufferQueue) {
            bufferQueue.releaseAll(exclusiveRecyclingSegments);
            // Wake any thread blocked on the queue monitor waiting for buffers.
            bufferQueue.notifyAll();
        }
    } catch (Exception e) {
        err = firstOrSuppressed(e, err);
    }
    try {
        if (exclusiveRecyclingSegments.size() > 0) {
            globalPool.recycleUnpooledMemorySegments(exclusiveRecyclingSegments);
        }
    } catch (Exception e) {
        err = firstOrSuppressed(e, err);
    }
    if (err != null) {
        // Preserve the original failure type where possible.
        throw err instanceof IOException ? (IOException) err : new IOException(err);
    }
}
Recycles all the exclusive and floating buffers from the given buffer queue.
releaseAllBuffers
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/BufferManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/BufferManager.java
Apache-2.0
/**
 * Adds an exclusive buffer (back) into the queue. If the queue then holds more available
 * buffers than required, one floating buffer is removed and returned; the caller must recycle
 * it. In that case the total number of available buffers is unchanged and no new buffers
 * become available.
 *
 * @param buffer the exclusive buffer to add
 * @param numRequiredBuffers the number of required buffers
 * @return a released floating buffer, or {@code null} if none was released
 */
@Nullable
Buffer addExclusiveBuffer(Buffer buffer, int numRequiredBuffers) {
    exclusiveBuffers.add(buffer);
    final boolean overAllocated = getAvailableBufferSize() > numRequiredBuffers;
    return overAllocated ? floatingBuffers.poll() : null;
}
Adds an exclusive buffer (back) into the queue and releases one floating buffer if the number of available buffers in the queue is more than the required amount. If a floating buffer is released, the total amount of available buffers after adding this exclusive buffer has not changed, and no new buffers are available. The caller is responsible for recycling the released/returned floating buffer. @param buffer The exclusive buffer to add @param numRequiredBuffers The number of required buffers @return A released floating buffer; may be null if the numRequiredBuffers is not met.
addExclusiveBuffer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/BufferManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/BufferManager.java
Apache-2.0
/**
 * Takes a floating buffer first, to make full use of floating buffers, and falls back to an
 * exclusive buffer otherwise.
 *
 * @return an available floating or exclusive buffer; {@code null} if neither queue has one
 */
@Nullable
Buffer takeBuffer() {
    final Buffer floating = floatingBuffers.poll();
    return floating != null ? floating : exclusiveBuffers.poll();
}
Takes the floating buffer first in order to make full use of floating buffers reasonably. @return An available floating or exclusive buffer, may be null if the channel is released.
takeBuffer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/BufferManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/BufferManager.java
Apache-2.0
/**
 * Completes any pending availability futures so blocked consumers wake up. The priority
 * future is completed before the regular availability future.
 */
@Override
public void close() {
    if (toNotifyPriority != null) {
        toNotifyPriority.complete(null);
    }
    if (toNotify != null) {
        toNotify.complete(null);
    }
}
Abstracts the notification of the availability futures of {@link InputGate}s. <p>Should be created and closed outside of the lock.
close
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/GateNotificationHelper.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/GateNotificationHelper.java
Apache-2.0
/**
 * Captures the gate's priority-availability future for completion on {@code close()}.
 * Must be called under lock to ensure integrity of priorityAvailabilityHelper.
 */
public void notifyPriority() {
    toNotifyPriority = inputGate.priorityAvailabilityHelper.getUnavailableToResetAvailable();
}
Must be called under lock to ensure integrity of priorityAvailabilityHelper.
notifyPriority
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/GateNotificationHelper.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/GateNotificationHelper.java
Apache-2.0
/**
 * Signals data availability: wakes all threads waiting on the availability monitor and
 * captures the gate's availability future for completion on {@code close()}. Must be called
 * under the monitor's lock (required for {@code notifyAll}) to ensure integrity of
 * availabilityHelper.
 */
public void notifyDataAvailable() {
    availabilityMonitor.notifyAll();
    toNotify = inputGate.availabilityHelper.getUnavailableToResetAvailable();
}
Must be called under lock to ensure integrity of availabilityHelper and allow notification.
notifyDataAvailable
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/GateNotificationHelper.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/GateNotificationHelper.java
Apache-2.0
/** Returns the index of this channel within its {@link SingleInputGate}. */
public int getChannelIndex() {
    return this.channelInfo.getInputChannelIdx();
}
Returns the index of this channel within its {@link SingleInputGate}.
getChannelIndex
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputChannel.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputChannel.java
Apache-2.0
/** Forwards a priority-event notification to the owning input gate. */
public void notifyPriorityEvent(int priorityBufferNumber) {
    this.inputGate.notifyPriorityEvent(this, priorityBufferNumber);
}
Notifies the owning {@link SingleInputGate} that this channel became non-empty. <p>This is guaranteed to be called only when a Buffer was added to a previously empty input channel. The notion of empty is atomically consistent with the flag {@link BufferAndAvailability#moreAvailable()} when polling the next buffer from this channel. <p><b>Note:</b> When the input channel observes an exception, this method is called regardless of whether the channel was empty before. That ensures that the parent InputGate will always be notified about the exception.
notifyPriorityEvent
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputChannel.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputChannel.java
Apache-2.0
/**
 * Returns the index of the subpartition where the next buffer locates, or -1 if there is no
 * buffer available and the subpartition to be consumed is not determined.
 */
public int peekNextBufferSubpartitionId() throws IOException {
    // A fixed, non-negative subpartition id short-circuits the internal lookup.
    return subpartitionId >= 0 ? subpartitionId : peekNextBufferSubpartitionIdInternal();
}
Returns the index of the subpartition where the next buffer locates, or -1 if there is no buffer available and the subpartition to be consumed is not determined.
peekNextBufferSubpartitionId
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputChannel.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputChannel.java
Apache-2.0
/**
 * Rethrows a previously reported error, if any. {@code CancelTaskException} and {@code
 * IOException} are rethrown as-is so that {@code PartitionException} instances stay visible in
 * the task failure cause; any other throwable is wrapped in an {@link IOException}.
 */
protected void checkError() throws IOException {
    final Throwable error = cause.get();
    if (error == null) {
        return;
    }
    if (error instanceof CancelTaskException) {
        throw (CancelTaskException) error;
    }
    if (error instanceof IOException) {
        throw (IOException) error;
    }
    throw new IOException(error);
}
Checks for an error and rethrows it if one was reported. <p>Note: Any {@link PartitionException} instances should not be transformed and make sure they are always visible in task failure cause.
checkError
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputChannel.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputChannel.java
Apache-2.0
protected void setError(Throwable cause) { if (this.cause.compareAndSet(null, checkNotNull(cause))) { // Notify the input gate. notifyChannelNonEmpty(); } }
Atomically sets an error for this channel and notifies the input gate about available data to trigger querying this channel by the task thread.
setError
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputChannel.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputChannel.java
Apache-2.0
/** Returns the current backoff in ms, clamped to be non-negative. */
protected int getCurrentBackoff() {
    return Math.max(currentBackoff, 0);
}
Returns the current backoff in ms.
getCurrentBackoff
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputChannel.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputChannel.java
Apache-2.0
/**
 * Propagates the given channel state writer to every input channel of this gate that holds
 * channel state (i.e. implements {@link ChannelStateHolder}).
 */
@Override
public void setChannelStateWriter(ChannelStateWriter channelStateWriter) {
    final int numChannels = getNumberOfInputChannels();
    for (int i = 0; i < numChannels; i++) {
        final InputChannel channel = getChannel(i);
        if (channel instanceof ChannelStateHolder) {
            ((ChannelStateHolder) channel).setChannelStateWriter(channelStateWriter);
        }
    }
}
An input gate consumes one or more partitions of a single produced intermediate result. <p>Each intermediate result is partitioned over its producing parallel subtasks; each of these partitions is furthermore partitioned into one or more subpartitions. <p>As an example, consider a map-reduce program, where the map operator produces data and the reduce operator consumes the produced data. <pre>{@code +-----+ +---------------------+ +--------+ | Map | = produce => | Intermediate Result | <= consume = | Reduce | +-----+ +---------------------+ +--------+ }</pre> <p>When deploying such a program in parallel, the intermediate result will be partitioned over its producing parallel subtasks; each of these partitions is furthermore partitioned into one or more subpartitions. <pre>{@code Intermediate result +-----------------------------------------+ | +----------------+ | +-----------------------+ +-------+ | +-------------+ +=> | Subpartition 1 | | <=======+=== | Input Gate | Reduce 1 | | Map 1 | ==> | | Partition 1 | =| +----------------+ | | +-----------------------+ +-------+ | +-------------+ +=> | Subpartition 2 | | <==+ | | +----------------+ | | | Subpartition request | | | | | +----------------+ | | | +-------+ | +-------------+ +=> | Subpartition 1 | | <==+====+ | Map 2 | ==> | | Partition 2 | =| +----------------+ | | +-----------------------+ +-------+ | +-------------+ +=> | Subpartition 2 | | <==+======== | Input Gate | Reduce 2 | | +----------------+ | +-----------------------+ +-----------------------------------------+ }</pre> <p>In the above example, two map subtasks produce the intermediate result in parallel, resulting in two partitions (Partition 1 and 2). Each of these partitions is further partitioned into two subpartitions -- one for each parallel reduce subtask. As shown in the Figure, each reduce task will have an input gate attached to it. 
This will provide its input, which will consist of one subpartition from each partition of the intermediate result.
setChannelStateWriter
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java
Apache-2.0
/**
 * Returns a future that is completed once more records are available; a previously returned
 * incomplete future becomes completed when records arrive.
 */
@Override
public CompletableFuture<?> getAvailableFuture() {
    return this.availabilityHelper.getAvailableFuture();
}
@return a future that is completed if there are more records available. If there are more records available immediately, {@link #AVAILABLE} should be returned. Previously returned not completed futures should become completed once there are more records available.
getAvailableFuture
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java
Apache-2.0
/** Returns the channel infos of all input channels of this gate, in channel-index order. */
public List<InputChannelInfo> getChannelInfos() {
    return IntStream.range(0, getNumberOfInputChannels())
            .mapToObj(this::getChannel)
            .map(InputChannel::getChannelInfo)
            .collect(Collectors.toList());
}
Returns the channel infos of this gate.
getChannelInfos
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java
Apache-2.0
/**
 * Returns a future completed when a priority event has been enqueued. If queried from the
 * task thread, a completed future guarantees a priority event is retrievable via {@code
 * getNext()}.
 */
public CompletableFuture<?> getPriorityEventAvailableFuture() {
    return this.priorityAvailabilityHelper.getAvailableFuture();
}
Notifies when a priority event has been enqueued. If this future is queried from task thread, it is guaranteed that a priority event is available and retrieved through {@link #getNext()}.
getPriorityEventAvailableFuture
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java
Apache-2.0
/** Renders all fields of this pojo for logging and debugging. */
@Override
public String toString() {
    return new StringBuilder("InputWithData{")
            .append("input=")
            .append(input)
            .append(", data=")
            .append(data)
            .append(", moreAvailable=")
            .append(moreAvailable)
            .append(", morePriorityEvents=")
            .append(morePriorityEvents)
            .append('}')
            .toString();
}
Simple pojo for INPUT, DATA and moreAvailable.
toString
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/InputGate.java
Apache-2.0
/**
 * Converts this recovered channel into a real {@link LocalInputChannel} that consumes the
 * local partition directly, carrying over the channel index, partition id, consumed
 * subpartition set, backoff settings, metrics counters, and channel state writer.
 */
@Override
protected InputChannel toInputChannelInternal() {
    return new LocalInputChannel(
            inputGate,
            getChannelIndex(),
            partitionId,
            consumedSubpartitionIndexSet,
            partitionManager,
            taskEventPublisher,
            initialBackoff,
            maxBackoff,
            numBytesIn,
            numBuffersIn,
            channelStateWriter);
}
An input channel reads recovered state from previous unaligned checkpoint snapshots and then converts into {@link LocalInputChannel} finally.
toInputChannelInternal
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/LocalRecoveredInputChannel.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/LocalRecoveredInputChannel.java
Apache-2.0
/** Test hook: overrides the next sequence number this channel expects from the producer. */
@VisibleForTesting
void setExpectedSequenceNumber(int expectedSequenceNumber) {
    this.expectedSequenceNumber = expectedSequenceNumber;
}
The number of available buffers that have not been announced to the producer yet.
setExpectedSequenceNumber
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannel.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannel.java
Apache-2.0
/**
 * Assigns {@code initialCredit} exclusive buffers to this channel. Must be called exactly
 * once after channel creation; asserts that no exclusive buffers were set before.
 *
 * @throws IOException if requesting buffers from the pool fails
 */
@Override
void setup() throws IOException {
    checkState(
            bufferManager.unsynchronizedGetAvailableExclusiveBuffers() == 0,
            "Bug in input channel setup logic: exclusive buffers have already been set for this input channel.");

    bufferManager.requestExclusiveBuffers(initialCredit);
}
Setup includes assigning exclusive buffers to this input channel, and this method should be called only once after this input channel is created.
setup
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannel.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannel.java
Apache-2.0
/** Returns the credit which was not yet announced to the sender. */
public int getUnannouncedCredit() {
    return this.unannouncedCredit.get();
}
Gets the currently unannounced credit. @return Credit which was not announced to the sender yet.
getUnannouncedCredit
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannel.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannel.java
Apache-2.0