code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
@ParameterizedTest(name = "{index} => isFullyFilled={0}, numOfPartialBuffers={1}")
@MethodSource("bufferDescriptors")
void testClientMessageDecodeWithReleasedInputChannel(
        boolean isFullyFilled, int numOfPartialBuffers) throws Exception {
    // Verifies the client-side decoder copes with buffers addressed to a
    // released input channel: the data part must be discarded before the
    // decoder moves on to the next message.
    setup(numOfPartialBuffers);
    // Flags: decodeAll=false, releaseInputChannel=true, deserialize=false
    // -- presumably; TODO confirm flag meaning against testNettyMessageClientDecoding.
    testNettyMessageClientDecoding(isFullyFilled, numOfPartialBuffers, false, true, false);
}
Verifies that the client side decoder works well with buffers sent to a released input channel. The data buffer part should be discarded before reading the next message.
testClientMessageDecodeWithReleasedInputChannel
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyMessageClientDecoderDelegateTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyMessageClientDecoderDelegateTest.java
Apache-2.0
private static void runAllScheduledPendingTasks(EmbeddedChannel channel, long deadline) throws InterruptedException { // NOTE: we don't have to be super fancy here; busy-polling with 1ms delays is enough while (channel.runScheduledPendingTasks() != -1 && System.currentTimeMillis() < deadline) { Thread.sleep(1); } }
Runs all pending scheduled tasks, waiting until all tasks have been run or the deadline has passed. @param channel the channel to execute tasks for @param deadline maximum timestamp in ms to stop waiting further @throws InterruptedException
runAllScheduledPendingTasks
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyPartitionRequestClientTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyPartitionRequestClientTest.java
Apache-2.0
@Test
public void testNotifyReaderPartitionTimeout() throws Exception {
    // Wire a PartitionRequestQueue into an embedded channel so outbound
    // messages can be inspected without real networking.
    PartitionRequestQueue queue = new PartitionRequestQueue();
    EmbeddedChannel channel = new EmbeddedChannel(queue);
    ResultPartitionManager resultPartitionManager = new ResultPartitionManager();
    ResultPartitionID resultPartitionId = new ResultPartitionID();
    CreditBasedSequenceNumberingViewReader reader =
            new CreditBasedSequenceNumberingViewReader(new InputChannelID(0, 0), 10, queue);

    // The partition is not registered yet, so the request is parked as a listener.
    reader.requestSubpartitionViewOrRegisterListener(
            resultPartitionManager, resultPartitionId, new ResultSubpartitionIndexSet(0));
    assertThat(
                    resultPartitionManager
                            .getListenerManagers()
                            .get(resultPartitionId)
                            .getPartitionRequestListeners())
            .hasSize(1);

    // Simulate the parked listener timing out before the partition shows up.
    reader.notifyPartitionRequestTimeout(
            resultPartitionManager
                    .getListenerManagers()
                    .get(resultPartitionId)
                    .getPartitionRequestListeners()
                    .iterator()
                    .next());

    channel.runPendingTasks();

    // The timeout must surface downstream as an ErrorResponse carrying a
    // PartitionNotFoundException.
    Object read = channel.readOutbound();
    assertThat(read)
            .isNotNull()
            .isInstanceOf(NettyMessage.ErrorResponse.class)
            .isInstanceOfSatisfying(
                    NettyMessage.ErrorResponse.class,
                    r -> assertThat(r.cause).isInstanceOf(PartitionNotFoundException.class));
}
Test that PartitionNotFound message will be sent to downstream in notifying timeout.
testNotifyReaderPartitionTimeout
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueueTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueueTest.java
Apache-2.0
@Test
void testNotifyReaderNonEmptyOnEmptyReaders() throws Exception {
    final int buffersToWrite = 5;
    PartitionRequestQueue queue = new PartitionRequestQueue();
    EmbeddedChannel channel = new EmbeddedChannel(queue);
    CreditBasedSequenceNumberingViewReader reader1 =
            new CreditBasedSequenceNumberingViewReader(new InputChannelID(0, 0), 10, queue);
    CreditBasedSequenceNumberingViewReader reader2 =
            new CreditBasedSequenceNumberingViewReader(new InputChannelID(1, 1), 10, queue);

    // reader1 is backed by a view that reports availability but never yields data.
    ResultSubpartitionView view1 = new EmptyAlwaysAvailableResultSubpartitionView();
    reader1.notifySubpartitionsCreated(
            TestingResultPartition.newBuilder()
                    .setCreateSubpartitionViewFunction((index, listener) -> view1)
                    .build(),
            new ResultSubpartitionIndexSet(0));
    reader1.notifyDataAvailable(view1);
    assertThat(reader1.getAvailabilityAndBacklog().isAvailable()).isTrue();
    assertThat(reader1.isRegisteredAsAvailable()).isFalse();

    // Make the channel non-writable so enqueued readers cannot be drained yet.
    channel.unsafe().outboundBuffer().setUserDefinedWritability(1, false);
    assertThat(channel.isWritable()).isFalse();
    reader1.notifyDataAvailable(view1);
    channel.runPendingTasks();

    // reader2 actually has buffersToWrite buffers to emit.
    ResultSubpartitionView view2 = new DefaultBufferResultSubpartitionView(buffersToWrite);
    reader2.notifyDataAvailable(view2);
    reader2.notifySubpartitionsCreated(
            TestingResultPartition.newBuilder()
                    .setCreateSubpartitionViewFunction((index, listener) -> view2)
                    .build(),
            new ResultSubpartitionIndexSet(0));
    assertThat(reader2.getAvailabilityAndBacklog().isAvailable()).isTrue();
    assertThat(reader2.isRegisteredAsAvailable()).isFalse();
    reader2.notifyDataAvailable(view2);

    // changing a channel writability should result in draining both reader1 and reader2
    channel.unsafe().outboundBuffer().setUserDefinedWritability(1, true);
    channel.runPendingTasks();
    assertThat(channel.outboundMessages()).hasSize(buffersToWrite);
}
In case of enqueuing an empty reader and a reader that actually has some buffers while the channel is not writable, a channel-writability change event should result in reading all of the messages.
testNotifyReaderNonEmptyOnEmptyReaders
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueueTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueueTest.java
Apache-2.0
/** Exercises {@link PartitionRequestQueue} buffer writing with a single default buffer. */
@Test
void testDefaultBufferWriting() throws Exception {
    final ResultSubpartitionView singleBufferView = new DefaultBufferResultSubpartitionView(1);
    testBufferWriting(singleBufferView);
}
Tests {@link PartitionRequestQueue} buffer writing with default buffers.
testDefaultBufferWriting
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueueTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueueTest.java
Apache-2.0
@Test
void testEnqueueReaderByNotifyingBufferAndCredit() throws Exception {
    // setup
    final ResultSubpartitionView view = new DefaultBufferResultSubpartitionView(10);
    ResultPartition partition =
            TestingResultPartition.newBuilder()
                    .setCreateSubpartitionViewFunction((index, listener) -> view)
                    .build();
    final InputChannelID receiverId = new InputChannelID();
    final PartitionRequestQueue queue = new PartitionRequestQueue();
    final CreditBasedSequenceNumberingViewReader reader =
            new CreditBasedSequenceNumberingViewReader(receiverId, 2, queue);
    final EmbeddedChannel channel = new EmbeddedChannel(queue);

    // Drain the reader's initial credit so that buffer availability alone
    // cannot make it eligible for scheduling.
    reader.addCredit(-2);
    reader.notifySubpartitionsCreated(partition, new ResultSubpartitionIndexSet(0));
    queue.notifyReaderCreated(reader);

    // block the channel so that we see an intermediate state in the test
    ByteBuf channelBlockingBuffer = blockChannel(channel);
    assertThat((Object) channel.readOutbound()).isNull();

    // Notify available buffers to trigger enqueue the reader
    final int notifyNumBuffers = 5;
    for (int i = 0; i < notifyNumBuffers; i++) {
        reader.notifyDataAvailable(view);
    }
    channel.runPendingTasks();

    // the reader is not enqueued in the pipeline because no credits are available
    // -> it should still have the same number of pending buffers
    assertThat(queue.getAvailableReaders()).isEmpty();
    assertThat(reader.hasBuffersAvailable().isAvailable()).isTrue();
    assertThat(reader.isRegisteredAsAvailable()).isFalse();
    assertThat(reader.getNumCreditsAvailable()).isZero();

    // Notify available credits to trigger enqueue the reader again
    final int notifyNumCredits = 3;
    for (int i = 1; i <= notifyNumCredits; i++) {
        queue.addCreditOrResumeConsumption(receiverId, viewReader -> viewReader.addCredit(1));

        // the reader is enqueued in the pipeline because it has both available buffers and
        // credits
        // since the channel is blocked though, we will not process anything and only enqueue
        // the
        // reader once
        assertThat(reader.isRegisteredAsAvailable()).isTrue();
        assertThat(queue.getAvailableReaders()).contains(reader); // contains only (this) one!
        assertThat(reader.getNumCreditsAvailable()).isEqualTo(i);
        assertThat(reader.hasBuffersAvailable().isAvailable()).isTrue();
    }

    // Flush the buffer to make the channel writable again and see the final results
    channel.flush();
    assertThat((ByteBuf) channel.readOutbound()).isSameAs(channelBlockingBuffer);

    // Once writable, exactly one BufferResponse per granted credit is emitted.
    assertThat(queue.getAvailableReaders()).isEmpty();
    assertThat(reader.getNumCreditsAvailable()).isZero();
    assertThat(reader.hasBuffersAvailable().isAvailable()).isTrue();
    assertThat(reader.isRegisteredAsAvailable()).isFalse();
    for (int i = 1; i <= notifyNumCredits; i++) {
        assertThat((Object) channel.readOutbound())
                .isInstanceOf(NettyMessage.BufferResponse.class);
    }
    assertThat((Object) channel.readOutbound()).isNull();
}
Tests {@link PartitionRequestQueue#enqueueAvailableReader(NetworkSequenceViewReader)}, verifying the reader would be enqueued in the pipeline iff it has both available credits and buffers.
testEnqueueReaderByNotifyingBufferAndCredit
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueueTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueueTest.java
Apache-2.0
static ByteBuf blockChannel(EmbeddedChannel channel) { final int highWaterMark = channel.config().getWriteBufferHighWaterMark(); // Set the writer index to the high water mark to ensure that all bytes are written // to the wire although the buffer is "empty". ByteBuf channelBlockingBuffer = Unpooled.buffer(highWaterMark).writerIndex(highWaterMark); channel.write(channelBlockingBuffer); assertThat(channel.isWritable()).isFalse(); return channelBlockingBuffer; }
Blocks the given channel by adding a buffer that is bigger than the high watermark. <p>The channel may be unblocked with: <pre> channel.flush(); assertSame(channelBlockingBuffer, channel.readOutbound()); </pre> @param channel the channel to block @return the created blocking buffer
blockChannel
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueueTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueueTest.java
Apache-2.0
@Test
void testRegisterResultPartitionBeforeRequest() throws Exception {
    final TestPooledBufferProvider outboundBuffers = new TestPooledBufferProvider(16);
    final CountDownLatch sync = new CountDownLatch(1);
    final ResultSubpartitionView view =
            new CancelPartitionRequestTest.InfiniteSubpartitionView(outboundBuffers, sync);
    ResultPartitionManager partitionManager = new ResultPartitionManager();
    ResultPartition resultPartition =
            TestingResultPartition.newBuilder()
                    .setResultPartitionManager(partitionManager)
                    .setCreateSubpartitionViewFunction((index, listener) -> view)
                    .build();

    // Register result partition before request
    partitionManager.registerResultPartition(resultPartition);

    NettyTestUtil.NettyServerAndClient serverAndClient = null;
    try {
        NettyProtocol protocol =
                new NettyProtocol(partitionManager, new NoOpTaskEventPublisher());
        serverAndClient = initServerAndClient(protocol);
        Channel ch = connect(serverAndClient);

        // Request for non-existing input channel => results in cancel request
        ch.writeAndFlush(
                        new NettyMessage.PartitionRequest(
                                resultPartition.getPartitionId(),
                                new ResultSubpartitionIndexSet(0),
                                new InputChannelID(),
                                Integer.MAX_VALUE))
                .await();

        // Wait for the notification
        if (!sync.await(TestingUtils.TESTING_DURATION.toMillis(), TimeUnit.MILLISECONDS)) {
            fail(
                    "Timed out after waiting for "
                            + TestingUtils.TESTING_DURATION.toMillis()
                            + " ms to be notified about cancelled partition.");
        }
    } finally {
        // Always tear down the netty server/client, even on assertion failure.
        shutdown(serverAndClient);
    }
}
Verifies that result partition manager registers partition before receive partition request.
testRegisterResultPartitionBeforeRequest
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestRegistrationTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestRegistrationTest.java
Apache-2.0
@Test
void testRegisterResultPartitionAfterRequest() throws Exception {
    final TestPooledBufferProvider outboundBuffers = new TestPooledBufferProvider(16);
    final CountDownLatch sync = new CountDownLatch(1);
    final ResultSubpartitionView view =
            new CancelPartitionRequestTest.InfiniteSubpartitionView(outboundBuffers, sync);
    ResultPartitionManager partitionManager = new ResultPartitionManager();
    ResultPartition resultPartition =
            TestingResultPartition.newBuilder()
                    .setResultPartitionManager(partitionManager)
                    .setCreateSubpartitionViewFunction((index, listener) -> view)
                    .build();

    NettyTestUtil.NettyServerAndClient serverAndClient = null;
    try {
        NettyProtocol protocol =
                new NettyProtocol(partitionManager, new NoOpTaskEventPublisher());
        serverAndClient = initServerAndClient(protocol);
        Channel ch = connect(serverAndClient);

        // Request for non-existing input channel => results in cancel request
        ch.writeAndFlush(
                        new NettyMessage.PartitionRequest(
                                resultPartition.getPartitionId(),
                                new ResultSubpartitionIndexSet(0),
                                new InputChannelID(),
                                Integer.MAX_VALUE))
                .await();

        // Register result partition after partition request
        partitionManager.registerResultPartition(resultPartition);

        // Wait for the notification
        if (!sync.await(TestingUtils.TESTING_DURATION.toMillis(), TimeUnit.MILLISECONDS)) {
            fail(
                    "Timed out after waiting for "
                            + TestingUtils.TESTING_DURATION.toMillis()
                            + " ms to be notified about cancelled partition.");
        }
    } finally {
        // Always tear down the netty server/client, even on assertion failure.
        shutdown(serverAndClient);
    }
}
Verifies that result partition manager registers partition after receive partition request.
testRegisterResultPartitionAfterRequest
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestRegistrationTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestRegistrationTest.java
Apache-2.0
@Override
public void onFailedPartitionRequest() {
    // Count down so the test waiting on this latch learns that the
    // failed-partition-request path was taken.
    latch.countDown();
}
The test remote input channel to count down the latch when it receives partition not found exception.
onFailedPartitionRequest
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestRegistrationTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestRegistrationTest.java
Apache-2.0
@Override
public boolean publish(ResultPartitionID partitionId, TaskEvent event) {
    // No-op publisher: report every event as successfully published
    // without doing any work.
    return true;
}
A testing implementation of {@link TaskEventPublisher} without operation.
publish
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestRegistrationTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestRegistrationTest.java
Apache-2.0
@Override
public void notifyDataAvailable(ResultSubpartitionView view) {
    // Record one more availability notification; tests assert on this counter.
    numNotifications++;
}
A simple BufferAvailabilityListener that counts the number of notifications.
notifyDataAvailable
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/CountingAvailabilityListener.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/CountingAvailabilityListener.java
Apache-2.0
/** Builds the Flink configuration for this test, using the testing default ask timeout. */
private static Configuration getFlinkConfiguration() {
    final Configuration configuration = new Configuration();
    configuration.set(RpcOptions.ASK_TIMEOUT_DURATION, TestingUtils.DEFAULT_ASK_TIMEOUT);
    return configuration;
}
Test for consuming a pipelined result only partially.
getFlinkConfiguration
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PartialConsumePipelinedResultTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PartialConsumePipelinedResultTest.java
Apache-2.0
/** Sends a fixed number of records, sleeping in-between sends. */
@Override
public void invoke() throws Exception {
    final ResultPartitionWriter partitionWriter = getEnvironment().getWriter(0);
    // Emit 8 records of 1 KiB each, pausing between sends so the consumer
    // can observe a partially produced result.
    for (int record = 0; record < 8; record++) {
        partitionWriter.emitRecord(ByteBuffer.allocate(1024), 0);
        Thread.sleep(50);
    }
}
Sends a fixed number of buffers and sleeps in-between sends.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PartialConsumePipelinedResultTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PartialConsumePipelinedResultTest.java
Apache-2.0
@Override
public void invoke() throws Exception {
    InputGate gate = getEnvironment().getInputGate(0);
    // Drain any recovered state first so live data can be consumed afterwards.
    gate.finishReadRecoveredState();
    while (!gate.getStateConsumedFuture().isDone()) {
        gate.pollNext();
    }
    gate.setChannelStateWriter(ChannelStateWriter.NO_OP);
    gate.requestPartitions();
    // Read exactly one buffer and recycle it; the rest of the result stays
    // deliberately unconsumed.
    Buffer buffer = gate.getNext().orElseThrow(IllegalStateException::new).getBuffer();
    if (buffer != null) {
        buffer.recycleBuffer();
    }
}
Reads a single buffer and recycles it.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PartialConsumePipelinedResultTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PartialConsumePipelinedResultTest.java
Apache-2.0
@Test
void testAddListener() {
    PartitionRequestListenerManager partitionRequestListenerManager =
            new PartitionRequestListenerManager();
    assertThat(partitionRequestListenerManager.isEmpty()).isTrue();

    // Register three listeners (distinct receivers / subpartition index sets)
    // and mirror them in a local list for comparison.
    List<PartitionRequestListener> listenerList = new ArrayList<>();
    NettyPartitionRequestListener listener1 =
            new NettyPartitionRequestListener(
                    TestingResultPartitionProvider.newBuilder().build(),
                    TestingSubpartitionCreatedViewReader.newBuilder()
                            .setReceiverId(new InputChannelID())
                            .build(),
                    new ResultSubpartitionIndexSet(0),
                    new ResultPartitionID());
    partitionRequestListenerManager.registerListener(listener1);
    listenerList.add(listener1);
    NettyPartitionRequestListener listener2 =
            new NettyPartitionRequestListener(
                    TestingResultPartitionProvider.newBuilder().build(),
                    TestingSubpartitionCreatedViewReader.newBuilder()
                            .setReceiverId(new InputChannelID())
                            .build(),
                    new ResultSubpartitionIndexSet(1),
                    new ResultPartitionID());
    partitionRequestListenerManager.registerListener(listener2);
    listenerList.add(listener2);
    NettyPartitionRequestListener listener3 =
            new NettyPartitionRequestListener(
                    TestingResultPartitionProvider.newBuilder().build(),
                    TestingSubpartitionCreatedViewReader.newBuilder()
                            .setReceiverId(new InputChannelID())
                            .build(),
                    new ResultSubpartitionIndexSet(2),
                    new ResultPartitionID());
    partitionRequestListenerManager.registerListener(listener3);
    listenerList.add(listener3);

    // The manager must hold exactly the registered listeners.
    assertThat(partitionRequestListenerManager.getPartitionRequestListeners())
            .hasSize(listenerList.size());
    assertThat(listenerList)
            .containsAll(partitionRequestListenerManager.getPartitionRequestListeners());
}
Test add listener to {@link PartitionRequestListenerManager}.
testAddListener
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PartitionRequestListenerManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PartitionRequestListenerManagerTest.java
Apache-2.0
@Test
void testRemoveListener() {
    PartitionRequestListenerManager partitionRequestListenerManager =
            new PartitionRequestListenerManager();
    assertThat(partitionRequestListenerManager.isEmpty()).isTrue();

    // Register three listeners; listener1 is intentionally NOT added to the
    // expected list because it will be removed below.
    List<PartitionRequestListener> listenerList = new ArrayList<>();
    NettyPartitionRequestListener listener1 =
            new NettyPartitionRequestListener(
                    TestingResultPartitionProvider.newBuilder().build(),
                    TestingSubpartitionCreatedViewReader.newBuilder()
                            .setReceiverId(new InputChannelID())
                            .build(),
                    new ResultSubpartitionIndexSet(0),
                    new ResultPartitionID());
    partitionRequestListenerManager.registerListener(listener1);
    NettyPartitionRequestListener listener2 =
            new NettyPartitionRequestListener(
                    TestingResultPartitionProvider.newBuilder().build(),
                    TestingSubpartitionCreatedViewReader.newBuilder()
                            .setReceiverId(new InputChannelID())
                            .build(),
                    new ResultSubpartitionIndexSet(1),
                    new ResultPartitionID());
    partitionRequestListenerManager.registerListener(listener2);
    listenerList.add(listener2);
    NettyPartitionRequestListener listener3 =
            new NettyPartitionRequestListener(
                    TestingResultPartitionProvider.newBuilder().build(),
                    TestingSubpartitionCreatedViewReader.newBuilder()
                            .setReceiverId(new InputChannelID())
                            .build(),
                    new ResultSubpartitionIndexSet(2),
                    new ResultPartitionID());
    partitionRequestListenerManager.registerListener(listener3);
    listenerList.add(listener3);

    // Remove by receiver id; only listener2 and listener3 must remain.
    partitionRequestListenerManager.remove(listener1.getReceiverId());
    assertThat(partitionRequestListenerManager.getPartitionRequestListeners())
            .hasSize(listenerList.size());
    assertThat(listenerList)
            .containsAll(partitionRequestListenerManager.getPartitionRequestListeners());
}
Test remove listener from {@link PartitionRequestListenerManager} by {@link InputChannelID}.
testRemoveListener
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PartitionRequestListenerManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PartitionRequestListenerManagerTest.java
Apache-2.0
@Test
void testRemoveExpiration() {
    PartitionRequestListenerManager partitionRequestListenerManager =
            new PartitionRequestListenerManager();
    assertThat(partitionRequestListenerManager.isEmpty()).isTrue();

    // listener1/listener2 are created with timestamp 0 and are expected to
    // expire; listener3 is created "now" and is expected to survive.
    List<PartitionRequestListener> listenerList = new ArrayList<>();
    List<PartitionRequestListener> expireListenerList = new ArrayList<>();
    NettyPartitionRequestListener listener1 =
            new NettyPartitionRequestListener(
                    TestingResultPartitionProvider.newBuilder().build(),
                    TestingSubpartitionCreatedViewReader.newBuilder()
                            .setReceiverId(new InputChannelID())
                            .build(),
                    new ResultSubpartitionIndexSet(0),
                    new ResultPartitionID(),
                    0L);
    partitionRequestListenerManager.registerListener(listener1);
    expireListenerList.add(listener1);
    NettyPartitionRequestListener listener2 =
            new NettyPartitionRequestListener(
                    TestingResultPartitionProvider.newBuilder().build(),
                    TestingSubpartitionCreatedViewReader.newBuilder()
                            .setReceiverId(new InputChannelID())
                            .build(),
                    new ResultSubpartitionIndexSet(1),
                    new ResultPartitionID(),
                    0L);
    partitionRequestListenerManager.registerListener(listener2);
    expireListenerList.add(listener2);
    long currentTimestamp = System.currentTimeMillis();
    NettyPartitionRequestListener listener3 =
            new NettyPartitionRequestListener(
                    TestingResultPartitionProvider.newBuilder().build(),
                    TestingSubpartitionCreatedViewReader.newBuilder()
                            .setReceiverId(new InputChannelID())
                            .build(),
                    new ResultSubpartitionIndexSet(2),
                    new ResultPartitionID(),
                    currentTimestamp);
    partitionRequestListenerManager.registerListener(listener3);
    listenerList.add(listener3);

    // Expire everything registered strictly before (currentTimestamp - 1ms).
    List<PartitionRequestListener> removeExpireListenerList = new ArrayList<>();
    partitionRequestListenerManager.removeExpiration(
            currentTimestamp, 1L, removeExpireListenerList);

    // Only listener3 remains registered ...
    assertThat(partitionRequestListenerManager.getPartitionRequestListeners())
            .hasSize(listenerList.size());
    assertThat(listenerList)
            .containsAll(partitionRequestListenerManager.getPartitionRequestListeners());
    // ... and exactly listener1/listener2 were reported as expired.
    assertThat(removeExpireListenerList).hasSize(expireListenerList.size());
    assertThat(expireListenerList).containsAll(removeExpireListenerList);
}
Test remove expire listeners from {@link PartitionRequestListenerManager}.
testRemoveExpiration
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PartitionRequestListenerManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PartitionRequestListenerManagerTest.java
Apache-2.0
/** Tests cleanup of {@link PipelinedSubpartition#release()} with no read view attached. */
@TestTemplate
void testCleanupReleasedPartitionNoView() throws Exception {
    final boolean createView = false;
    testCleanupReleasedPartition(createView);
}
Tests cleanup of {@link PipelinedSubpartition#release()} with no read view attached.
testCleanupReleasedPartitionNoView
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PipelinedSubpartitionTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PipelinedSubpartitionTest.java
Apache-2.0
/** Tests cleanup of {@link PipelinedSubpartition#release()} with a read view attached. */
@TestTemplate
void testCleanupReleasedPartitionWithView() throws Exception {
    final boolean createView = true;
    testCleanupReleasedPartition(createView);
}
Tests cleanup of {@link PipelinedSubpartition#release()} with a read view attached.
testCleanupReleasedPartitionWithView
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PipelinedSubpartitionTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PipelinedSubpartitionTest.java
Apache-2.0
@TestTemplate
void testUnfinishedBufferBehindFinished() throws Exception {
    subpartition.add(createFilledFinishedBufferConsumer(1025)); // finished
    subpartition.add(createFilledUnfinishedBufferConsumer(1024)); // not finished

    // Only the finished buffer counts towards the backlog.
    assertThat(subpartition.getBuffersInBacklogUnsafe()).isOne();
    assertThat(availablityListener.getNumNotifications()).isGreaterThan(0L);
    assertNextBuffer(readView, 1025, false, 0, false, true);
    // not notified, but we could still access the unfinished buffer
    assertNextBuffer(readView, 1024, false, 0, false, false);
    assertNoNextBuffer(readView);
}
Normally moreAvailable flag from InputChannel should ignore non finished BufferConsumers, otherwise we would busy loop on the unfinished BufferConsumers.
testUnfinishedBufferBehindFinished
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PipelinedSubpartitionWithReadViewTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PipelinedSubpartitionWithReadViewTest.java
Apache-2.0
@TestTemplate
void testFlushWithUnfinishedBufferBehindFinished() throws Exception {
    subpartition.add(createFilledFinishedBufferConsumer(1025)); // finished
    subpartition.add(createFilledUnfinishedBufferConsumer(1024)); // not finished
    long oldNumNotifications = availablityListener.getNumNotifications();
    assertThat(subpartition.getBuffersInBacklogUnsafe()).isOne();

    subpartition.flush();
    // buffer queue is > 1, should already be notified, no further notification necessary
    assertThat(oldNumNotifications).isGreaterThan(0L);
    assertThat(availablityListener.getNumNotifications()).isEqualTo(oldNumNotifications);
    // After the flush, the unfinished buffer is counted in the backlog too.
    assertThat(subpartition.getBuffersInBacklogUnsafe()).isEqualTo(2);
    assertNextBuffer(readView, 1025, true, 1, false, true);
    assertNextBuffer(readView, 1024, false, 0, false, false);
    assertNoNextBuffer(readView);
}
After flush call unfinished BufferConsumers should be reported as available, otherwise we might not flush some of the data.
testFlushWithUnfinishedBufferBehindFinished
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PipelinedSubpartitionWithReadViewTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PipelinedSubpartitionWithReadViewTest.java
Apache-2.0
@Test
void testReportProcessingWithPartitionLossOnSameTaskExecutor() {
    TestClusterPartitionReleaser partitionReleaser = new TestClusterPartitionReleaser();
    final ResourceManagerPartitionTracker tracker =
            new ResourceManagerPartitionTrackerImpl(partitionReleaser);

    // First report: the task executor hosts both partitions of the data set.
    report(tracker, TASK_EXECUTOR_ID_1, DATA_SET_ID, 2, PARTITION_ID_1, PARTITION_ID_2);
    // Second report: PARTITION_ID_1 is gone, i.e. a subset of the partitions was lost.
    report(tracker, TASK_EXECUTOR_ID_1, DATA_SET_ID, 2, PARTITION_ID_2);

    // Losing part of a data set must trigger a release call for the whole
    // data set on that task executor.
    assertThat(partitionReleaser.releaseCalls)
            .contains(Tuple2.of(TASK_EXECUTOR_ID_1, Collections.singleton(DATA_SET_ID)));
}
Verifies that a task executor hosting multiple partitions of a data set receives a release call if a subset of its partitions is lost.
testReportProcessingWithPartitionLossOnSameTaskExecutor
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResourceManagerPartitionTrackerImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResourceManagerPartitionTrackerImplTest.java
Apache-2.0
/**
 * Creating a subpartition view for a partition that was never registered with the
 * {@link ResultPartitionManager} must throw {@code PartitionNotFoundException}.
 */
@Test
void testThrowPartitionNotFoundException() {
    final ResultPartitionManager partitionManager = new ResultPartitionManager();
    // The partition is created but deliberately NOT registered with the manager.
    final ResultPartitionID unregisteredId = createPartition().getPartitionId();
    verifyCreateSubpartitionViewThrowsException(partitionManager, unregisteredId);
}
Tests that {@link ResultPartitionProvider#createSubpartitionView(ResultPartitionID, ResultSubpartitionIndexSet, BufferAvailabilityListener)} would throw {@link PartitionNotFoundException} if this partition was not registered before.
testThrowPartitionNotFoundException
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionManagerTest.java
Apache-2.0
@Test
void testCreateViewForRegisteredPartition() throws Exception {
    final ResultPartitionManager partitionManager = new ResultPartitionManager();
    final ResultPartition partition = createPartition();

    // Register first; creating the view afterwards must succeed (no exception).
    partitionManager.registerResultPartition(partition);
    partitionManager.createSubpartitionView(
            partition.getPartitionId(),
            new ResultSubpartitionIndexSet(0),
            new NoOpBufferAvailablityListener());
}
Tests {@link ResultPartitionProvider#createSubpartitionView(ResultPartitionID, ResultSubpartitionIndexSet, BufferAvailabilityListener)} successful if this partition was already registered before.
testCreateViewForRegisteredPartition
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionManagerTest.java
Apache-2.0
@Test
void testCreateSubpartitionViewAfterRegisteredPartition() throws Exception {
    final ResultPartitionManager partitionManager = new ResultPartitionManager();
    final ResultPartition partition = createPartition();
    assertThat(partitionManager.getListenerManagers().isEmpty()).isTrue();

    // Partition is registered up front, so the request can be served directly...
    partitionManager.registerResultPartition(partition);
    PartitionRequestListener partitionRequestListener =
            TestingPartitionRequestListener.newBuilder().build();
    assertThat(
                    partitionManager.createSubpartitionViewOrRegisterListener(
                            partition.getPartitionId(),
                            new ResultSubpartitionIndexSet(0),
                            new NoOpBufferAvailablityListener(),
                            partitionRequestListener))
            .isPresent();
    // ... and no listener needs to be parked.
    assertThat(partitionManager.getListenerManagers().isEmpty()).isTrue();
}
{@link ResultPartitionManager} creates subpartition view reader after the given partition is registered.
testCreateSubpartitionViewAfterRegisteredPartition
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionManagerTest.java
Apache-2.0
@Test void testRegisterPartitionListenerBeforeRegisteredPartition() throws Exception { final ResultPartitionManager partitionManager = new ResultPartitionManager(); final ResultPartition partition = createPartition(); assertThat(partitionManager.getListenerManagers().isEmpty()).isTrue(); final CompletableFuture<ResultPartition> notifySubpartitionCreatedFuture = new CompletableFuture<>(); PartitionRequestListener partitionRequestListener = TestingPartitionRequestListener.newBuilder() .setResultPartitionId(partition.getPartitionId()) .setNetworkSequenceViewReader( TestingSubpartitionCreatedViewReader.newBuilder() .setNotifySubpartitionCreatedConsumer( tuple -> notifySubpartitionCreatedFuture.complete( tuple.f0)) .build()) .build(); assertThat( partitionManager.createSubpartitionViewOrRegisterListener( partition.getPartitionId(), new ResultSubpartitionIndexSet(0), new NoOpBufferAvailablityListener(), partitionRequestListener)) .isNotPresent(); assertThat(partitionManager.getListenerManagers()).hasSize(1); // Check if the partition request listener is registered. PartitionRequestListenerManager listenerManager = partitionManager.getListenerManagers().get(partition.getPartitionId()); assertThat(listenerManager).isNotNull(); assertThat(listenerManager.isEmpty()).isFalse(); assertThat(listenerManager.getPartitionRequestListeners()).hasSize(1); PartitionRequestListener listener = listenerManager.getPartitionRequestListeners().iterator().next(); assertThat(listener.getResultPartitionId()).isEqualTo(partition.getPartitionId()); assertThat(notifySubpartitionCreatedFuture).isNotDone(); partitionManager.registerResultPartition(partition); // Check if the listener is notified. ResultPartition notifyPartition = notifySubpartitionCreatedFuture.get(10, TimeUnit.MILLISECONDS); assertThat(partition.getPartitionId()).isEqualTo(notifyPartition.getPartitionId()); assertThat(partitionManager.getListenerManagers().isEmpty()).isTrue(); }
The {@link ResultPartitionManager} registers a {@link PartitionRequestListener} before the specified {@link ResultPartition} is registered. When the {@link ResultPartition} is registered, the {@link ResultPartitionManager} will find the listener and create a partition view reader.
testRegisterPartitionListenerBeforeRegisteredPartition
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionManagerTest.java
Apache-2.0
@Test void testCreateViewReaderForNotifierTimeout() throws Exception { ManuallyTriggeredScheduledExecutor scheduledExecutor = new ManuallyTriggeredScheduledExecutor(); final ResultPartitionManager partitionManager = new ResultPartitionManager(1000000, scheduledExecutor); final ResultPartition partition1 = createPartition(); final ResultPartition partition2 = createPartition(); CompletableFuture<PartitionRequestListener> timeoutFuture1 = new CompletableFuture<>(); CompletableFuture<PartitionRequestListener> timeoutFuture2 = new CompletableFuture<>(); partitionManager.createSubpartitionViewOrRegisterListener( partition1.getPartitionId(), new ResultSubpartitionIndexSet(0), new NoOpBufferAvailablityListener(), new NettyPartitionRequestListener( TestingResultPartitionProvider.newBuilder().build(), TestingSubpartitionCreatedViewReader.newBuilder() .setReceiverId(new InputChannelID()) .setPartitionRequestListenerTimeoutConsumer( timeoutFuture1::complete) .build(), new ResultSubpartitionIndexSet(0), partition1.getPartitionId(), 0L)); partitionManager.createSubpartitionViewOrRegisterListener( partition2.getPartitionId(), new ResultSubpartitionIndexSet(0), new NoOpBufferAvailablityListener(), new NettyPartitionRequestListener( TestingResultPartitionProvider.newBuilder().build(), TestingSubpartitionCreatedViewReader.newBuilder() .setReceiverId(new InputChannelID()) .setPartitionRequestListenerTimeoutConsumer( timeoutFuture2::complete) .build(), new ResultSubpartitionIndexSet(0), partition2.getPartitionId())); scheduledExecutor.triggerScheduledTasks(); assertThat(timeoutFuture1.isDone()).isTrue(); assertThat(partition1.getPartitionId()) .isEqualTo(timeoutFuture1.get().getResultPartitionId()); assertThat(timeoutFuture2.isDone()).isFalse(); assertThat(partitionManager.getListenerManagers().get(partition1.getPartitionId())) .isNull(); assertThat(partitionManager.getListenerManagers().get(partition2.getPartitionId())) .isNotNull(); }
Test notifier timeout in {@link ResultPartitionManager}.
testCreateViewReaderForNotifierTimeout
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionManagerTest.java
Apache-2.0
@Test void testCreateSubpartitionOnFailingPartition() throws Exception { final ResultPartitionManager manager = new ResultPartitionManager(); final ResultPartition partition = new ResultPartitionBuilder().setResultPartitionManager(manager).build(); manager.registerResultPartition(partition); partition.fail(null); verifyCreateSubpartitionViewThrowsException(manager, partition.getPartitionId()); }
Tests {@link ResultPartitionProvider#createSubpartitionView(ResultPartitionID, ResultSubpartitionIndexSet, BufferAvailabilityListener)} would throw a {@link PartitionNotFoundException} if the registered partition was released from manager via {@link ResultPartition#fail(Throwable)} before.
testCreateSubpartitionOnFailingPartition
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionTest.java
Apache-2.0
@Test void testReleaseMemoryOnPipelinedPartition() throws Exception { final int numAllBuffers = 10; final NettyShuffleEnvironment network = new NettyShuffleEnvironmentBuilder() .setNumNetworkBuffers(numAllBuffers) .setBufferSize(bufferSize) .build(); final ResultPartition resultPartition = createPartition(network, ResultPartitionType.PIPELINED, 1); try { resultPartition.setup(); // take all buffers (more than the minimum required) for (int i = 0; i < numAllBuffers; ++i) { resultPartition.emitRecord(ByteBuffer.allocate(bufferSize - 1), 0); } assertThat(resultPartition.getBufferPool().getNumberOfAvailableMemorySegments()) .isZero(); resultPartition.close(); assertThat(resultPartition.getBufferPool().isDestroyed()).isTrue(); assertThat(network.getNetworkBufferPool().getNumberOfUsedMemorySegments()) .isEqualTo(numAllBuffers); resultPartition.release(); assertThat(network.getNetworkBufferPool().getNumberOfUsedMemorySegments()).isZero(); } finally { network.close(); } }
Tests {@link ResultPartition#close()} and {@link ResultPartition#release()} on a working pipelined partition.
testReleaseMemoryOnPipelinedPartition
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionTest.java
Apache-2.0
@Test void testPartitionNotFoundExceptionWhileRequestingPartition() throws Exception { final SingleInputGate inputGate = createSingleInputGate(1); final LocalInputChannel localChannel = createLocalInputChannel(inputGate, new ResultPartitionManager()); assertThatThrownBy(localChannel::requestSubpartitions) .isInstanceOfSatisfying( PartitionNotFoundException.class, notFound -> assertThat(localChannel.getPartitionId()) .isEqualTo(notFound.getPartitionId())); }
Tests that {@link LocalInputChannel#requestSubpartitions()} throws {@link PartitionNotFoundException} if the result partition was not registered in {@link ResultPartitionManager} and no backoff.
testPartitionNotFoundExceptionWhileRequestingPartition
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/LocalInputChannelTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/LocalInputChannelTest.java
Apache-2.0
@Test void testRetriggerPartitionRequestWhilePartitionNotFound() throws Exception { final SingleInputGate inputGate = createSingleInputGate(1); final LocalInputChannel localChannel = createLocalInputChannel(inputGate, new ResultPartitionManager(), 1, 1); inputGate.setInputChannels(localChannel); localChannel.requestSubpartitions(); // The timer should be initialized at the first time of retriggering partition request. assertThat(inputGate.getRetriggerLocalRequestTimer()).isNotNull(); }
Tests that {@link SingleInputGate#retriggerPartitionRequest(IntermediateResultPartitionID)} is triggered after {@link LocalInputChannel#requestSubpartitions()} throws {@link PartitionNotFoundException} within backoff.
testRetriggerPartitionRequestWhilePartitionNotFound
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/LocalInputChannelTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/LocalInputChannelTest.java
Apache-2.0
@Test void testConcurrentReleaseAndRetriggerPartitionRequest() throws Exception { final SingleInputGate gate = createSingleInputGate(1); ResultPartitionManager partitionManager = mock(ResultPartitionManager.class); when(partitionManager.createSubpartitionView( any(ResultPartitionID.class), any(ResultSubpartitionIndexSet.class), any(BufferAvailabilityListener.class))) .thenAnswer( (Answer<ResultSubpartitionView>) invocationOnMock -> { // Sleep here a little to give the releaser Thread // time to acquire the input gate lock. We throw // the Exception to retrigger the request. Thread.sleep(100); throw new PartitionNotFoundException(new ResultPartitionID()); }); final LocalInputChannel channel = createLocalInputChannel(gate, partitionManager, 1, 1); Thread releaser = new Thread( () -> { try { gate.close(); } catch (IOException ignored) { } }); Thread requester = new Thread( () -> { try { channel.requestSubpartitions(); } catch (IOException ignored) { } }); requester.start(); releaser.start(); releaser.join(); requester.join(); }
Verifies that concurrent release via the SingleInputGate and re-triggering of a partition request works smoothly. <ul> <li>SingleInputGate acquires its request lock and tries to release all registered channels. When releasing a channel, it needs to acquire the channel's shared request-release lock. <li>If a LocalInputChannel concurrently retriggers a partition request via a Timer Thread it acquires the channel's request-release lock and calls the retrigger callback on the SingleInputGate, which again tries to acquire the gate's request lock. </ul> <p>For certain timings this obviously leads to a deadlock. This test reliably reproduced such a timing (reported in FLINK-5228). This test is pretty much testing the buggy implementation and has not much more general value. If it becomes obsolete at some point (future greatness ;)), feel free to remove it. <p>The fix in the end was to not acquire the channels lock when releasing it and/or not doing any input gate callbacks while holding the channel's lock. I decided to do both.
testConcurrentReleaseAndRetriggerPartitionRequest
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/LocalInputChannelTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/LocalInputChannelTest.java
Apache-2.0
@Test void testGetNextAfterPartitionReleased() throws Exception { ResultSubpartitionView subpartitionView = InputChannelTestUtils.createResultSubpartitionView(false); TestingResultPartitionManager partitionManager = new TestingResultPartitionManager(subpartitionView); LocalInputChannel channel = createLocalInputChannel(new SingleInputGateBuilder().build(), partitionManager); channel.requestSubpartitions(); assertThat(channel.getNextBuffer()).isNotPresent(); // release the subpartition view subpartitionView.releaseAllResources(); assertThatThrownBy(channel::getNextBuffer).isInstanceOf(CancelTaskException.class); channel.releaseAllResources(); assertThat(channel.getNextBuffer()).isNotPresent(); }
Tests that reads from a channel after the partition has been released are handled gracefully and don't lead to NPEs.
testGetNextAfterPartitionReleased
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/LocalInputChannelTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/LocalInputChannelTest.java
Apache-2.0
@Override public BufferAndChannel getNextBuffer() throws Exception { if (channelIndexes.size() > 0) { final int channelIndex = channelIndexes.remove(0); return new BufferAndChannel(new byte[bufferSize], channelIndex); } return null; }
Returns the configured number of buffers for each channel in a random order.
getNextBuffer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/LocalInputChannelTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/LocalInputChannelTest.java
Apache-2.0
@Override public Void call() throws Exception { // One counter per input channel. Expect the same number of buffers from each channel. final int[] numberOfBuffersPerChannel = new int[numberOfInputChannels]; try { Optional<BufferOrEvent> boe; while ((boe = inputGate.getNext()).isPresent()) { if (boe.get().isBuffer()) { boe.get().getBuffer().recycleBuffer(); // Check that we don't receive too many buffers if (++numberOfBuffersPerChannel[ boe.get().getChannelInfo().getInputChannelIdx()] > numberOfExpectedBuffersPerChannel) { throw new IllegalStateException( "Received more buffers than expected " + "on channel " + boe.get().getChannelInfo() + "."); } } } // Verify that we received the expected number of buffers on each channel for (int i = 0; i < numberOfBuffersPerChannel.length; i++) { final int actualNumberOfReceivedBuffers = numberOfBuffersPerChannel[i]; if (actualNumberOfReceivedBuffers != numberOfExpectedBuffersPerChannel) { throw new IllegalStateException( "Received unexpected number of buffers " + "on channel " + i + " (" + actualNumberOfReceivedBuffers + " instead " + "of " + numberOfExpectedBuffersPerChannel + ")."); } } } finally { inputGate.close(); } return null; }
Consumed the configured result partitions and verifies that each channel receives the expected number of buffers.
call
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/LocalInputChannelTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/LocalInputChannelTest.java
Apache-2.0
@Test void testAvailableBuffersLessThanRequiredBuffers() throws Exception { // Setup final NetworkBufferPool networkBufferPool = new NetworkBufferPool(16, 32); final int numFloatingBuffers = 14; final SingleInputGate inputGate = createSingleInputGate(1, networkBufferPool); final RemoteInputChannel inputChannel = createRemoteInputChannel(inputGate); inputGate.setInputChannels(inputChannel); Throwable thrown = null; try { final BufferPool bufferPool = spy(networkBufferPool.createBufferPool(numFloatingBuffers, numFloatingBuffers)); inputGate.setBufferPool(bufferPool); inputGate.setupChannels(); inputChannel.requestSubpartitions(); // Prepare the exclusive and floating buffers to verify recycle logic later final Buffer exclusiveBuffer = inputChannel.requestBuffer(); assertThat(exclusiveBuffer).isNotNull(); final int numRecycleFloatingBuffers = 2; final ArrayDeque<Buffer> floatingBufferQueue = new ArrayDeque<>(numRecycleFloatingBuffers); for (int i = 0; i < numRecycleFloatingBuffers; i++) { Buffer floatingBuffer = bufferPool.requestBuffer(); assertThat(floatingBuffer).isNotNull(); floatingBufferQueue.add(floatingBuffer); } verify(bufferPool, times(numRecycleFloatingBuffers)).requestBuffer(); // Receive the producer's backlog more than the number of available floating buffers inputChannel.onSenderBacklog(14); // The channel requests (backlog + numExclusiveBuffers) floating buffers from local // pool. 
// It does not get enough floating buffers and register as buffer listener verify(bufferPool, times(15)).requestBuffer(); verify(bufferPool, times(1)).addBufferListener(inputChannel.getBufferManager()); assertThat(inputChannel.getNumberOfAvailableBuffers()) .withFailMessage("There should be 13 buffers available in the channel") .isEqualTo(13); assertThat(inputChannel.getNumberOfRequiredBuffers()) .withFailMessage("There should be 16 buffers required in the channel") .isEqualTo(16); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .withFailMessage("There should be 0 buffers available in local pool") .isZero(); assertThat(inputChannel.isWaitingForFloatingBuffers()).isTrue(); // Increase the backlog inputChannel.onSenderBacklog(16); // The channel is already in the status of waiting for buffers and will not request any // more verify(bufferPool, times(15)).requestBuffer(); verify(bufferPool, times(1)).addBufferListener(inputChannel.getBufferManager()); assertThat(inputChannel.getNumberOfAvailableBuffers()) .withFailMessage("There should be 13 buffers available in the channel") .isEqualTo(13); assertThat(inputChannel.getNumberOfRequiredBuffers()) .withFailMessage("There should be 18 buffers required in the channel") .isEqualTo(18); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .withFailMessage("There should be 0 buffers available in local pool") .isZero(); assertThat(inputChannel.isWaitingForFloatingBuffers()).isTrue(); // Recycle one exclusive buffer exclusiveBuffer.recycleBuffer(); // The exclusive buffer is returned to the channel directly verify(bufferPool, times(15)).requestBuffer(); verify(bufferPool, times(1)).addBufferListener(inputChannel.getBufferManager()); assertThat(inputChannel.getNumberOfAvailableBuffers()) .withFailMessage("There should be 14 buffers available in the channel") .isEqualTo(14); assertThat(inputChannel.getNumberOfRequiredBuffers()) .withFailMessage("There should be 18 buffers required in the channel") 
.isEqualTo(18); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .withFailMessage("There should be 0 buffers available in local pool") .isZero(); assertThat(inputChannel.isWaitingForFloatingBuffers()).isTrue(); // Recycle one floating buffer floatingBufferQueue.poll().recycleBuffer(); // Assign the floating buffer to the listener and the channel is still waiting for more // floating buffers verify(bufferPool, times(16)).requestBuffer(); verify(bufferPool, times(2)).addBufferListener(inputChannel.getBufferManager()); assertThat(inputChannel.getNumberOfAvailableBuffers()) .withFailMessage("There should be 15 buffers available in the channel") .isEqualTo(15); assertThat(inputChannel.getNumberOfRequiredBuffers()) .withFailMessage("There should be 18 buffers required in the channel") .isEqualTo(18); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .withFailMessage("There should be 0 buffers available in local pool") .isZero(); assertThat(inputChannel.isWaitingForFloatingBuffers()).isTrue(); // Decrease the backlog inputChannel.onSenderBacklog(13); // Only the number of required buffers is changed by (backlog + numExclusiveBuffers) verify(bufferPool, times(16)).requestBuffer(); verify(bufferPool, times(2)).addBufferListener(inputChannel.getBufferManager()); assertThat(inputChannel.getNumberOfAvailableBuffers()) .withFailMessage("There should be 15 buffers available in the channel") .isEqualTo(15); assertThat(inputChannel.getNumberOfRequiredBuffers()) .withFailMessage("There should be 15 buffers required in the channel") .isEqualTo(15); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .withFailMessage("There should be 0 buffers available in local pool") .isZero(); assertThat(inputChannel.isWaitingForFloatingBuffers()).isTrue(); // Recycle one more floating buffer floatingBufferQueue.poll().recycleBuffer(); // Return the floating buffer to the buffer pool and the channel is not waiting for more // floating buffers verify(bufferPool, 
times(16)).requestBuffer(); verify(bufferPool, times(2)).addBufferListener(inputChannel.getBufferManager()); assertThat(inputChannel.getNumberOfAvailableBuffers()) .withFailMessage("There should be 15 buffers available in the channel") .isEqualTo(15); assertThat(inputChannel.getNumberOfRequiredBuffers()) .withFailMessage("There should be 15 buffers required in the channel") .isEqualTo(15); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .withFailMessage("There should be 1 buffers available in local pool") .isOne(); assertThat(inputChannel.isWaitingForFloatingBuffers()).isFalse(); // Increase the backlog again inputChannel.onSenderBacklog(15); // The floating buffer is requested from the buffer pool and the channel is registered // as listener again. verify(bufferPool, times(18)).requestBuffer(); verify(bufferPool, times(3)).addBufferListener(inputChannel.getBufferManager()); assertThat(inputChannel.getNumberOfAvailableBuffers()) .withFailMessage("There should be 16 buffers available in the channel") .isEqualTo(16); assertThat(inputChannel.getNumberOfRequiredBuffers()) .withFailMessage("There should be 17 buffers required in the channel") .isEqualTo(17); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .withFailMessage("There should be 0 buffers available in local pool") .isZero(); assertThat(inputChannel.isWaitingForFloatingBuffers()).isTrue(); } catch (Throwable t) { thrown = t; } finally { cleanup(networkBufferPool, null, null, thrown, inputChannel); } }
Tests to verify the behaviours of three different processes if the number of available buffers is less than required buffers. <ol> <li>Recycle the floating buffer <li>Recycle the exclusive buffer <li>Decrease the sender's backlog </ol>
testAvailableBuffersLessThanRequiredBuffers
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannelTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannelTest.java
Apache-2.0
@Test void testAvailableBuffersEqualToRequiredBuffers() throws Exception { // Setup final NetworkBufferPool networkBufferPool = new NetworkBufferPool(16, 32); final int numFloatingBuffers = 14; final SingleInputGate inputGate = createSingleInputGate(1, networkBufferPool); final RemoteInputChannel inputChannel = createRemoteInputChannel(inputGate); inputGate.setInputChannels(inputChannel); Throwable thrown = null; try { final BufferPool bufferPool = spy(networkBufferPool.createBufferPool(numFloatingBuffers, numFloatingBuffers)); inputGate.setBufferPool(bufferPool); inputGate.setupChannels(); inputChannel.requestSubpartitions(); // Prepare the exclusive and floating buffers to verify recycle logic later final Buffer exclusiveBuffer = inputChannel.requestBuffer(); assertThat(exclusiveBuffer).isNotNull(); final Buffer floatingBuffer = bufferPool.requestBuffer(); assertThat(floatingBuffer).isNotNull(); verify(bufferPool, times(1)).requestBuffer(); // Receive the producer's backlog inputChannel.onSenderBacklog(12); // The channel requests (backlog + numExclusiveBuffers) floating buffers from local pool // and gets enough floating buffers verify(bufferPool, times(14)).requestBuffer(); verify(bufferPool, times(0)).addBufferListener(inputChannel.getBufferManager()); assertThat(inputChannel.getNumberOfAvailableBuffers()) .withFailMessage("There should be 14 buffers available in the channel") .isEqualTo(14); assertThat(inputChannel.getNumberOfRequiredBuffers()) .withFailMessage("There should be 14 buffers required in the channel") .isEqualTo(14); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .withFailMessage("There should be 0 buffers available in local pool") .isZero(); // Recycle one floating buffer floatingBuffer.recycleBuffer(); // The floating buffer is returned to local buffer directly because the channel is not // waiting // for floating buffers verify(bufferPool, times(14)).requestBuffer(); verify(bufferPool, 
times(0)).addBufferListener(inputChannel.getBufferManager()); assertThat(inputChannel.getNumberOfAvailableBuffers()) .withFailMessage("There should be 14 buffers available in the channel") .isEqualTo(14); assertThat(inputChannel.getNumberOfRequiredBuffers()) .withFailMessage("There should be 14 buffers required in the channel") .isEqualTo(14); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .withFailMessage("There should be 1 buffers available in local pool") .isOne(); // Recycle one exclusive buffer exclusiveBuffer.recycleBuffer(); // Return one extra floating buffer to the local pool because the number of available // buffers // already equals to required buffers verify(bufferPool, times(14)).requestBuffer(); verify(bufferPool, times(0)).addBufferListener(inputChannel.getBufferManager()); assertThat(inputChannel.getNumberOfAvailableBuffers()) .withFailMessage("There should be 14 buffers available in the channel") .isEqualTo(14); assertThat(inputChannel.getNumberOfRequiredBuffers()) .withFailMessage("There should be 14 buffers required in the channel") .isEqualTo(14); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .withFailMessage("There should be 2 buffers available in local pool") .isEqualTo(2); } catch (Throwable t) { thrown = t; } finally { cleanup(networkBufferPool, null, null, thrown, inputChannel); } }
Tests to verify the behaviours of recycling floating and exclusive buffers if the number of available buffers equals to required buffers.
testAvailableBuffersEqualToRequiredBuffers
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannelTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannelTest.java
Apache-2.0
@Test void testAvailableBuffersMoreThanRequiredBuffers() throws Exception { // Setup final NetworkBufferPool networkBufferPool = new NetworkBufferPool(16, 32); final int numFloatingBuffers = 14; final SingleInputGate inputGate = createSingleInputGate(1, networkBufferPool); final RemoteInputChannel inputChannel = createRemoteInputChannel(inputGate); inputGate.setInputChannels(inputChannel); Throwable thrown = null; try { final BufferPool bufferPool = spy(networkBufferPool.createBufferPool(numFloatingBuffers, numFloatingBuffers)); inputGate.setBufferPool(bufferPool); inputGate.setupChannels(); inputChannel.requestSubpartitions(); // Prepare the exclusive and floating buffers to verify recycle logic later final Buffer exclusiveBuffer = inputChannel.requestBuffer(); assertThat(exclusiveBuffer).isNotNull(); final Buffer floatingBuffer = bufferPool.requestBuffer(); assertThat(floatingBuffer).isNotNull(); verify(bufferPool, times(1)).requestBuffer(); // Receive the producer's backlog inputChannel.onSenderBacklog(12); // The channel gets enough floating buffers from local pool verify(bufferPool, times(14)).requestBuffer(); verify(bufferPool, times(0)).addBufferListener(inputChannel.getBufferManager()); assertThat(inputChannel.getNumberOfAvailableBuffers()) .withFailMessage("There should be 14 buffers available in the channel") .isEqualTo(14); assertThat(inputChannel.getNumberOfRequiredBuffers()) .withFailMessage("There should be 14 buffers required in the channel") .isEqualTo(14); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .withFailMessage("There should be 0 buffers available in local pool") .isZero(); // Decrease the backlog to make the number of available buffers more than required // buffers inputChannel.onSenderBacklog(10); // Only the number of required buffers is changed by (backlog + numExclusiveBuffers) verify(bufferPool, times(14)).requestBuffer(); verify(bufferPool, times(0)).addBufferListener(inputChannel.getBufferManager()); 
assertThat(inputChannel.getNumberOfAvailableBuffers()) .withFailMessage("There should be 14 buffers available in the channel") .isEqualTo(14); assertThat(inputChannel.getNumberOfRequiredBuffers()) .withFailMessage("There should be 12 buffers required in the channel") .isEqualTo(12); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .withFailMessage("There should be 0 buffers available in local pool") .isZero(); // Recycle one exclusive buffer exclusiveBuffer.recycleBuffer(); // Return one extra floating buffer to the local pool because the number of available // buffers // is more than required buffers verify(bufferPool, times(14)).requestBuffer(); verify(bufferPool, times(0)).addBufferListener(inputChannel.getBufferManager()); assertThat(inputChannel.getNumberOfAvailableBuffers()) .withFailMessage("There should be 14 buffers available in the channel") .isEqualTo(14); assertThat(inputChannel.getNumberOfRequiredBuffers()) .withFailMessage("There should be 12 buffers required in the channel") .isEqualTo(12); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .withFailMessage("There should be 1 buffers available in local pool") .isOne(); // Recycle one floating buffer floatingBuffer.recycleBuffer(); // The floating buffer is returned to local pool directly because the channel is not // waiting for // floating buffers verify(bufferPool, times(14)).requestBuffer(); verify(bufferPool, times(0)).addBufferListener(inputChannel.getBufferManager()); assertThat(inputChannel.getNumberOfAvailableBuffers()) .withFailMessage("There should be 14 buffers available in the channel") .isEqualTo(14); assertThat(inputChannel.getNumberOfRequiredBuffers()) .withFailMessage("There should be 12 buffers required in the channel") .isEqualTo(12); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .withFailMessage("There should be 2 buffers available in local pool") .isEqualTo(2); } catch (Throwable t) { thrown = t; } finally { cleanup(networkBufferPool, null, null, 
thrown, inputChannel); } }
Tests to verify the behaviours of recycling floating and exclusive buffers if the number of available buffers is more than required buffers by decreasing the sender's backlog.
testAvailableBuffersMoreThanRequiredBuffers
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannelTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannelTest.java
Apache-2.0
/**
 * Tests that the buffer pool distributes available floating buffers among all registered
 * channel listeners in a fair way (each channel ends up with the same number of buffers
 * and credit).
 */
@Test
void testFairDistributionFloatingBuffers() throws Exception {
    // Setup
    final int numExclusiveBuffers = 2;
    final NetworkBufferPool networkBufferPool = new NetworkBufferPool(12, 32);
    final int numFloatingBuffers = 3;

    final SingleInputGate inputGate = createSingleInputGate(3, networkBufferPool);
    final RemoteInputChannel[] inputChannels = new RemoteInputChannel[3];
    inputChannels[0] = createRemoteInputChannel(inputGate);
    inputChannels[1] = createRemoteInputChannel(inputGate);
    inputChannels[2] = createRemoteInputChannel(inputGate);
    inputGate.setInputChannels(inputChannels);
    Throwable thrown = null;
    try {
        final BufferPool bufferPool =
                spy(networkBufferPool.createBufferPool(numFloatingBuffers, numFloatingBuffers));
        inputGate.setBufferPool(bufferPool);
        inputGate.setupChannels();
        inputGate.requestPartitions();
        for (RemoteInputChannel inputChannel : inputChannels) {
            inputChannel.requestSubpartitions();
        }

        // Exhaust all the floating buffers
        final List<Buffer> floatingBuffers = new ArrayList<>(numFloatingBuffers);
        for (int i = 0; i < numFloatingBuffers; i++) {
            Buffer buffer = bufferPool.requestBuffer();
            assertThat(buffer).isNotNull();
            floatingBuffers.add(buffer);
        }

        // Receive the producer's backlog to trigger request floating buffers from pool
        // and register as listeners as a result
        for (RemoteInputChannel inputChannel : inputChannels) {
            inputChannel.onSenderBacklog(8);
            verify(bufferPool, times(1)).addBufferListener(inputChannel.getBufferManager());
            assertThat(inputChannel.getNumberOfAvailableBuffers())
                    .withFailMessage(
                            "There should be %d buffers available in the channel",
                            numExclusiveBuffers)
                    .isEqualTo(numExclusiveBuffers);
        }

        // Recycle three floating buffers to trigger notify buffer available
        for (Buffer buffer : floatingBuffers) {
            buffer.recycleBuffer();
        }

        // Each of the three listening channels should have received exactly one of the
        // three recycled floating buffers (fair distribution).
        for (RemoteInputChannel inputChannel : inputChannels) {
            assertThat(inputChannel.getNumberOfAvailableBuffers())
                    .withFailMessage("There should be 3 buffers available in the channel")
                    .isEqualTo(3);
            assertThat(inputChannel.getUnannouncedCredit())
                    .withFailMessage("There should be 1 unannounced credits in the channel")
                    .isOne();
        }
    } catch (Throwable t) {
        thrown = t;
    } finally {
        cleanup(networkBufferPool, null, null, thrown, inputChannels);
    }
}
Tests to verify that the buffer pool will distribute available floating buffers among all the channel listeners in a fair way.
testFairDistributionFloatingBuffers
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannelTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannelTest.java
Apache-2.0
/**
 * Tests that failures are propagated correctly if {@link
 * RemoteInputChannel#notifyBufferAvailable(int)} throws an exception. Also tests that a
 * second listener will be notified in this case.
 */
@Test
void testFailureInNotifyBufferAvailable() throws Exception {
    // Setup
    final int numExclusiveBuffers = 1;
    final int numFloatingBuffers = 1;
    final int numTotalBuffers = numExclusiveBuffers + numFloatingBuffers;
    final NetworkBufferPool networkBufferPool = new NetworkBufferPool(numTotalBuffers, 32);

    final SingleInputGate inputGate = createSingleInputGate(1);
    final RemoteInputChannel successfulRemoteIC = createRemoteInputChannel(inputGate);
    successfulRemoteIC.requestSubpartitions();

    // late creation -> no exclusive buffers, also no requested subpartition in
    // successfulRemoteIC
    // (to trigger a failure in RemoteInputChannel#notifyBufferAvailable())
    final RemoteInputChannel failingRemoteIC = createRemoteInputChannel(inputGate);

    Buffer buffer = null;
    Throwable thrown = null;
    try {
        final BufferPool bufferPool =
                networkBufferPool.createBufferPool(numFloatingBuffers, numFloatingBuffers);
        inputGate.setBufferPool(bufferPool);

        buffer = checkNotNull(bufferPool.requestBuffer());

        // trigger subscription to buffer pool
        failingRemoteIC.onSenderBacklog(1);
        successfulRemoteIC.onSenderBacklog(numExclusiveBuffers + 1);
        // recycling will call RemoteInputChannel#notifyBufferAvailable() which will fail and
        // this exception will be swallowed and set as an error in failingRemoteIC
        buffer.recycleBuffer();
        buffer = null;

        assertThatThrownBy(failingRemoteIC::checkError)
                .isInstanceOf(IOException.class)
                .hasCauseInstanceOf(IllegalStateException.class);

        // currently, the buffer is still enqueued in the bufferQueue of failingRemoteIC
        assertThat(bufferPool.getNumberOfAvailableMemorySegments()).isZero();
        buffer = successfulRemoteIC.requestBuffer();
        assertThat(buffer)
                .withFailMessage("buffer should still remain in failingRemoteIC")
                .isNull();

        // releasing resources in failingRemoteIC should free the buffer again and immediately
        // recycle it into successfulRemoteIC
        failingRemoteIC.releaseAllResources();
        assertThat(bufferPool.getNumberOfAvailableMemorySegments()).isZero();
        buffer = successfulRemoteIC.requestBuffer();
        assertThat(buffer).withFailMessage("no buffer given to successfulRemoteIC").isNotNull();
    } catch (Throwable t) {
        thrown = t;
    } finally {
        cleanup(networkBufferPool, null, buffer, thrown, failingRemoteIC, successfulRemoteIC);
    }
}
Tests that failures are propagated correctly if {@link RemoteInputChannel#notifyBufferAvailable(int)} throws an exception. Also tests that a second listener will be notified in this case.
testFailureInNotifyBufferAvailable
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannelTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannelTest.java
Apache-2.0
@Test void testPartitionNotFoundExceptionWhileRetriggeringRequest() throws Exception { final RemoteInputChannel inputChannel = InputChannelTestUtils.createRemoteInputChannel( createSingleInputGate(1), 0, new TestingConnectionManager()); // Request partition to initialize client to avoid illegal state after retriggering // partition inputChannel.requestSubpartitions(); // The default backoff is 0 then it would set PartitionNotFoundException on this channel inputChannel.retriggerSubpartitionRequest(); assertThatThrownBy(inputChannel::checkError) .isInstanceOfSatisfying( PartitionNotFoundException.class, notFound -> assertThat(inputChannel.getPartitionId()) .isEqualTo(notFound.getPartitionId())); }
Tests that {@link RemoteInputChannel#retriggerSubpartitionRequest()} would throw the {@link PartitionNotFoundException} if backoff is 0.
testPartitionNotFoundExceptionWhileRetriggeringRequest
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannelTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannelTest.java
Apache-2.0
@Test void testUpdateChannelBeforeRequest() throws Exception { SingleInputGate inputGate = createInputGate(1); TestingResultPartitionManager partitionManager = new TestingResultPartitionManager(new NoOpResultSubpartitionView()); InputChannel unknown = InputChannelBuilder.newBuilder() .setPartitionManager(partitionManager) .buildUnknownChannel(inputGate); inputGate.setInputChannels(unknown); // Update to a local channel and verify that no request is triggered ResultPartitionID resultPartitionID = unknown.getPartitionId(); ResourceID location = ResourceID.generate(); inputGate.updateInputChannel( location, createRemoteWithIdAndLocation(resultPartitionID.getPartitionId(), location)); assertThat(partitionManager.counter).isEqualTo(0); }
Tests that an update channel does not trigger a partition request before the UDF has requested any partitions. Otherwise, this can lead to races when registering a listener at the gate (e.g. in UnionInputGate), which can result in missed buffer notifications at the listener.
testUpdateChannelBeforeRequest
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
Apache-2.0
@Test void testUpdateLocalInputChannelWithNewPartitionId() throws Exception { SingleInputGate inputGate = createInputGate(1); TestingResultPartitionManager partitionManager = new TestingResultPartitionManager(new NoOpResultSubpartitionView()); ResultPartitionID oldPartitionId = new ResultPartitionID(); InputChannel unknown = InputChannelBuilder.newBuilder() .setPartitionManager(partitionManager) .setPartitionId(oldPartitionId) .buildUnknownChannel(inputGate); inputGate.setInputChannels(unknown); // Update to a local channel and verify that no request is triggered ResultPartitionID resultPartitionID = unknown.getPartitionId(); assertThat(resultPartitionID).isEqualTo(oldPartitionId); ResultPartitionID newPartitionId = new ResultPartitionID( // speculative execution have the same IntermediateResultPartitionID with // original, only executionAttemptID is different. oldPartitionId.getPartitionId(), ExecutionAttemptID.randomId()); ResourceID location = ResourceID.generate(); NettyShuffleDescriptor nettyShuffleDescriptor = NettyShuffleDescriptorBuilder.newBuilder() .setId(newPartitionId) .setProducerLocation(location) .buildLocal(); inputGate.updateInputChannel(location, nettyShuffleDescriptor); InputChannel newChannel = inputGate.getChannel(0); assertThat(newChannel).isInstanceOf(LocalInputChannel.class); assertThat(newChannel.partitionId).isEqualTo(newPartitionId); }
Test unknown input channel can set resultPartitionId correctly when update to local input channel, this occurs in the case of speculative execution that unknown input channel only carries original resultPartitionId.
testUpdateLocalInputChannelWithNewPartitionId
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
Apache-2.0
@Test void testUpdateRemoteInputChannelWithNewPartitionId() throws Exception { int bufferSize = 1024; SingleInputGate inputGate = createInputGate(1); TestingResultPartitionManager partitionManager = new TestingResultPartitionManager(new NoOpResultSubpartitionView()); ResultPartitionID oldPartitionId = new ResultPartitionID(); InputChannel unknown = InputChannelBuilder.newBuilder() .setPartitionManager(partitionManager) .setPartitionId(oldPartitionId) .buildUnknownChannel(inputGate); inputGate.setInputChannels(unknown); ResultPartitionID resultPartitionID = unknown.getPartitionId(); assertThat(resultPartitionID).isEqualTo(oldPartitionId); ResultPartitionID newPartitionId = new ResultPartitionID( // speculative execution have the same IntermediateResultPartitionID with // original, only executionAttemptID is different. oldPartitionId.getPartitionId(), ExecutionAttemptID.randomId()); NettyShuffleDescriptor nettyShuffleDescriptor = NettyShuffleDescriptorBuilder.newBuilder() .setId(newPartitionId) .setProducerLocation(ResourceID.generate()) .buildRemote(); inputGate.setBufferPool(new NoOpBufferPool()); inputGate.updateInputChannel(ResourceID.generate(), nettyShuffleDescriptor); InputChannel newChannel = inputGate.getChannel(0); assertThat(newChannel).isInstanceOf(RemoteInputChannel.class); assertThat(newChannel.partitionId).isEqualTo(newPartitionId); }
Test unknown input channel can set resultPartitionId correctly when update to remote input channel, this occurs in the case of speculative execution that unknown input channel only carries original resultPartitionId.
testUpdateRemoteInputChannelWithNewPartitionId
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
Apache-2.0
/**
 * Tests that the request back off configuration is correctly forwarded to the channels:
 * local/unknown channels double the backoff up to the maximum, remote channels increase it
 * linearly in steps of the partition request timeout.
 */
@Test
void testRequestBackoffConfiguration() throws Exception {
    IntermediateResultPartitionID[] partitionIds =
            new IntermediateResultPartitionID[] {
                new IntermediateResultPartitionID(),
                new IntermediateResultPartitionID(),
                new IntermediateResultPartitionID()
            };

    int initialBackoff = 137;
    int partitionRequestTimeout = 600;
    int maxBackoff = 1001;

    final NettyShuffleEnvironment netEnv =
            new NettyShuffleEnvironmentBuilder()
                    .setPartitionRequestInitialBackoff(initialBackoff)
                    .setPartitionRequestTimeout(partitionRequestTimeout)
                    .setPartitionRequestMaxBackoff(maxBackoff)
                    .build();

    SingleInputGate gate =
            createSingleInputGate(partitionIds, ResultPartitionType.PIPELINED, netEnv);

    gate.setChannelStateWriter(ChannelStateWriter.NO_OP);
    gate.finishReadRecoveredState();
    while (!gate.getStateConsumedFuture().isDone()) {
        gate.pollNext();
    }
    gate.convertRecoveredInputChannels();

    try (Closer closer = Closer.create()) {
        closer.register(netEnv::close);
        closer.register(gate::close);

        assertThat(gate.getConsumedPartitionType()).isEqualTo(ResultPartitionType.PIPELINED);

        Map<Tuple2<IntermediateResultPartitionID, InputChannelInfo>, InputChannel> channelMap =
                gate.getInputChannels();
        assertThat(channelMap.size()).isEqualTo(3);
        // none of the channels may have a pending error
        channelMap
                .values()
                .forEach(
                        channel -> {
                            try {
                                channel.checkError();
                            } catch (IOException e) {
                                throw new RuntimeException(e);
                            }
                        });

        InputChannel localChannel = getTheOnlyInputChannelInPartition(gate, partitionIds[0]);
        assertThat(localChannel.getClass()).isEqualTo(LocalInputChannel.class);

        InputChannel unknownChannel = getTheOnlyInputChannelInPartition(gate, partitionIds[2]);
        assertThat(unknownChannel.getClass()).isEqualTo(UnknownInputChannel.class);

        // local and unknown channels share the exponential backoff scheme:
        // initialBackoff, x2, x2, ... capped at maxBackoff
        InputChannel[] channels = new InputChannel[] {localChannel, unknownChannel};
        for (InputChannel ch : channels) {
            assertThat(ch.getCurrentBackoff()).isEqualTo(0);

            assertThat(ch.increaseBackoff()).isTrue();
            assertThat(ch.getCurrentBackoff()).isEqualTo(initialBackoff);

            assertThat(ch.increaseBackoff()).isTrue();
            assertThat(ch.getCurrentBackoff()).isEqualTo(initialBackoff * 2);

            assertThat(ch.increaseBackoff()).isTrue();
            assertThat(ch.getCurrentBackoff()).isEqualTo(initialBackoff * 2 * 2);

            assertThat(ch.increaseBackoff()).isTrue();
            assertThat(ch.getCurrentBackoff()).isEqualTo(maxBackoff);

            assertThat(ch.increaseBackoff()).isFalse();
        }

        // remote channels increase linearly in multiples of partitionRequestTimeout
        InputChannel remoteChannel = getTheOnlyInputChannelInPartition(gate, partitionIds[1]);
        assertThat(remoteChannel.getClass()).isEqualTo(RemoteInputChannel.class);
        assertThat(remoteChannel.getCurrentBackoff()).isEqualTo(0);

        assertThat(remoteChannel.increaseBackoff()).isTrue();
        assertThat(remoteChannel.getCurrentBackoff()).isEqualTo(partitionRequestTimeout);

        assertThat(remoteChannel.increaseBackoff()).isTrue();
        assertThat(remoteChannel.getCurrentBackoff()).isEqualTo(partitionRequestTimeout * 2);

        assertThat(remoteChannel.increaseBackoff()).isTrue();
        assertThat(remoteChannel.getCurrentBackoff()).isEqualTo(partitionRequestTimeout * 3);

        assertThat(remoteChannel.increaseBackoff()).isFalse();
    }
}
Tests request back off configuration is correctly forwarded to the channels.
testRequestBackoffConfiguration
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
Apache-2.0
@Test void testRequestBuffersWithRemoteInputChannel() throws Exception { final NettyShuffleEnvironment network = createNettyShuffleEnvironment(); final SingleInputGate inputGate = createInputGate(network, 1, ResultPartitionType.PIPELINED_BOUNDED); int buffersPerChannel = 2; int extraNetworkBuffersPerGate = 8; try (Closer closer = Closer.create()) { closer.register(network::close); closer.register(inputGate::close); RemoteInputChannel remote = InputChannelBuilder.newBuilder() .setupFromNettyShuffleEnvironment(network) .setConnectionManager(new TestingConnectionManager()) .buildRemoteChannel(inputGate); inputGate.setInputChannels(remote); inputGate.setup(); NetworkBufferPool bufferPool = network.getNetworkBufferPool(); // only the exclusive buffers should be assigned/available now assertThat(remote.getNumberOfAvailableBuffers()).isEqualTo(buffersPerChannel); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .isEqualTo(bufferPool.getTotalNumberOfMemorySegments() - buffersPerChannel - 1); // note: exclusive buffers are not handed out into LocalBufferPool and are thus not // counted assertThat(bufferPool.countBuffers()).isEqualTo(extraNetworkBuffersPerGate); } }
Tests that input gate requests and assigns network buffers for remote input channel.
testRequestBuffersWithRemoteInputChannel
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
Apache-2.0
@Test void testRequestBuffersWithUnknownInputChannel() throws Exception { final NettyShuffleEnvironment network = createNettyShuffleEnvironment(); final SingleInputGate inputGate = createInputGate(network, 1, ResultPartitionType.PIPELINED_BOUNDED); int buffersPerChannel = 2; int extraNetworkBuffersPerGate = 8; try (Closer closer = Closer.create()) { closer.register(network::close); closer.register(inputGate::close); final ResultPartitionID resultPartitionId = new ResultPartitionID(); InputChannel inputChannel = buildUnknownInputChannel(network, inputGate, resultPartitionId, 0); inputGate.setInputChannels(inputChannel); inputGate.setup(); NetworkBufferPool bufferPool = network.getNetworkBufferPool(); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .isEqualTo(bufferPool.getTotalNumberOfMemorySegments() - 1); // note: exclusive buffers are not handed out into LocalBufferPool and are thus not // counted assertThat(bufferPool.countBuffers()).isEqualTo(extraNetworkBuffersPerGate); // Trigger updates to remote input channel from unknown input channel inputGate.updateInputChannel( ResourceID.generate(), createRemoteWithIdAndLocation( resultPartitionId.getPartitionId(), ResourceID.generate())); RemoteInputChannel remote = (RemoteInputChannel) getTheOnlyInputChannelInPartition(inputGate, resultPartitionId); // only the exclusive buffers should be assigned/available now assertThat(remote.getNumberOfAvailableBuffers()).isEqualTo(buffersPerChannel); assertThat(bufferPool.getNumberOfAvailableMemorySegments()) .isEqualTo(bufferPool.getTotalNumberOfMemorySegments() - buffersPerChannel - 1); // note: exclusive buffers are not handed out into LocalBufferPool and are thus not // counted assertThat(bufferPool.countBuffers()).isEqualTo(extraNetworkBuffersPerGate); } }
Tests that input gate requests and assigns network buffers when unknown input channel updates to remote input channel.
testRequestBuffersWithUnknownInputChannel
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
Apache-2.0
@Override public ResultSubpartitionView createSubpartitionView( ResultPartitionID partitionId, ResultSubpartitionIndexSet subpartitionIndexSet, BufferAvailabilityListener availabilityListener) throws IOException { ++counter; return subpartitionView; }
A testing implementation of {@link ResultPartitionManager} which counts the number of {@link ResultSubpartitionView} created.
createSubpartitionView
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
Apache-2.0
@Override public boolean publish(ResultPartitionID partitionId, TaskEvent event) { ++counter; return true; }
A testing implementation of {@link TaskEventPublisher} which counts the number of publish times.
publish
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/SingleInputGateTest.java
Apache-2.0
public SingleInputGate getInputGate() { return inputGate; }
A test input gate to mock reading data.
getInputGate
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/TestSingleInputGate.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/TestSingleInputGate.java
Apache-2.0
public static ByteBuffer generateRandomData(int dataSize, Random random) { byte[] dataWritten = new byte[dataSize]; random.nextBytes(dataWritten); return ByteBuffer.wrap(dataWritten); }
Test utils for the tiered storage.
generateRandomData
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/hybrid/tiered/TieredStorageTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/hybrid/tiered/TieredStorageTestUtils.java
Apache-2.0
@Override @Nonnull public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) { if (super.isTerminated()) { throw new RejectedExecutionException(); } return super.schedule(command, delay, unit); }
This manually triggered executor service will throw {@link RejectedExecutionException} if the new job is added when the service shuts down.
schedule
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/hybrid/tiered/file/DiskIOSchedulerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/hybrid/tiered/file/DiskIOSchedulerTest.java
Apache-2.0
@Override public void setup(Configuration configuration) { // noop }
Testing implementation for {@link TierFactory} to init an external remote tier.
setup
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/hybrid/tiered/shuffle/TierFactoryInitializerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/hybrid/tiered/shuffle/TierFactoryInitializerTest.java
Apache-2.0
public static int deserializeRecords( ArrayDeque<SerializationTestType> records, RecordDeserializer<SerializationTestType> deserializer) throws Exception { int deserializedRecords = 0; while (!records.isEmpty()) { SerializationTestType expected = records.poll(); SerializationTestType actual = expected.getClass().newInstance(); final DeserializationResult result = deserializer.getNextRecord(actual); if (result.isFullRecord()) { assertThat(actual).isEqualTo(expected); deserializedRecords++; } else { records.addFirst(expected); } if (result.isBufferConsumed()) { break; } } return deserializedRecords; }
Iterates over the provided records to deserialize, verifies the results and stats the number of full records. @param records records to be deserialized @param deserializer the record deserializer @return the number of full deserialized records
deserializeRecords
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/util/DeserializationUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/util/DeserializationUtils.java
Apache-2.0
public static Buffer createBuffer(int dataSize) { return createBuffer(BUFFER_SIZE, dataSize); }
Creates a (network) buffer with default size, i.e. {@link #BUFFER_SIZE}, and unspecified data of the given size. @param dataSize size of the data in the buffer, i.e. the new writer index @return a new buffer instance
createBuffer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/util/TestBufferFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/util/TestBufferFactory.java
Apache-2.0
public static Buffer createBuffer(int bufferSize, int dataSize) { return createBuffer(bufferSize, dataSize, Buffer.DataType.DATA_BUFFER); }
Creates a (network) buffer with unspecified data of the given size. @param bufferSize size of the buffer @param dataSize size of the data in the buffer, i.e. the new writer index @return a new buffer instance
createBuffer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/util/TestBufferFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/util/TestBufferFactory.java
Apache-2.0
public int getNumberOfReadBuffers() { return numberOfReadBuffers.get(); }
Returns the number of read buffers.
getNumberOfReadBuffers
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/util/TestConsumerCallback.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/util/TestConsumerCallback.java
Apache-2.0
@Test public void testIsJavaSerializable() throws Exception { JobCheckpointingSettings settings = new JobCheckpointingSettings( new CheckpointCoordinatorConfiguration( 1231231, 1231, 112, 12, CheckpointRetentionPolicy.RETAIN_ON_FAILURE, false, false, 0, 0), new SerializedValue<>(new HashMapStateBackend())); JobCheckpointingSettings copy = CommonTestUtils.createCopySerializable(settings); assertEquals( settings.getCheckpointCoordinatorConfiguration(), copy.getCheckpointCoordinatorConfiguration()); assertNotNull(copy.getDefaultStateBackend()); assertTrue( copy.getDefaultStateBackend() .deserializeValue(this.getClass().getClassLoader()) .getClass() == HashMapStateBackend.class); }
Tests that the settings are actually serializable.
testIsJavaSerializable
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobgraph/tasks/JobCheckpointingSettingsTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobgraph/tasks/JobCheckpointingSettingsTest.java
Apache-2.0
@Test public void testBlobServerCleanupFinishedJob() throws Exception { testBlobServerCleanup(TestCase.JOB_FINISHES_SUCESSFULLY); }
Test cleanup for a job that finishes ordinarily.
testBlobServerCleanupFinishedJob
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/BlobsCleanupITCase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/BlobsCleanupITCase.java
Apache-2.0
@Test public void testBlobServerCleanupCancelledJob() throws Exception { testBlobServerCleanup(TestCase.JOB_IS_CANCELLED); }
Test cleanup for a job which is cancelled after submission.
testBlobServerCleanupCancelledJob
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/BlobsCleanupITCase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/BlobsCleanupITCase.java
Apache-2.0
@Override public void invoke() throws Exception { RecordWriter<IntValue> writer = new RecordWriterBuilder<IntValue>().build(getEnvironment().getWriter(0)); final int numberOfTimesToSend = getTaskConfiguration().get(getIntConfigOption(CONFIG_KEY), 0); final IntValue subtaskIndex = new IntValue(getEnvironment().getTaskInfo().getIndexOfThisSubtask()); try { for (int i = 0; i < numberOfTimesToSend; i++) { writer.emit(subtaskIndex); } writer.flushAll(); } finally { writer.close(); } }
Sends the subtask index a configurable number of times in a round-robin fashion.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/SlotCountExceedingParallelismTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/SlotCountExceedingParallelismTest.java
Apache-2.0
@Override public void invoke() throws Exception { RecordReader<IntValue> reader = new RecordReader<>( getEnvironment().getInputGate(0), IntValue.class, getEnvironment().getTaskManagerInfo().getTmpDirectories()); try { final int numberOfSubtaskIndexesToReceive = getTaskConfiguration().get(getIntConfigOption(CONFIG_KEY), 0); final BitSet receivedSubtaskIndexes = new BitSet(numberOfSubtaskIndexesToReceive); IntValue record; int numberOfReceivedSubtaskIndexes = 0; while ((record = reader.next()) != null) { // Check that we don't receive more than expected numberOfReceivedSubtaskIndexes++; if (numberOfReceivedSubtaskIndexes > numberOfSubtaskIndexesToReceive) { throw new IllegalStateException("Received more records than expected."); } int subtaskIndex = record.getValue(); // Check that we only receive each subtask index once if (receivedSubtaskIndexes.get(subtaskIndex)) { throw new IllegalStateException("Received expected subtask index twice."); } else { receivedSubtaskIndexes.set(subtaskIndex, true); } } // Check that we have received all expected subtask indexes if (receivedSubtaskIndexes.cardinality() != numberOfSubtaskIndexesToReceive) { throw new IllegalStateException( "Finished receive, but did not receive " + "all expected subtask indexes."); } } finally { reader.clearBuffers(); } }
Expects to receive the subtask index from a configurable number of sender tasks.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/SlotCountExceedingParallelismTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/SlotCountExceedingParallelismTest.java
Apache-2.0
@Test public void testNoOps() throws Exception { StandaloneExecutionPlanStore executionPlans = new StandaloneExecutionPlanStore(); ExecutionPlan executionPlan = JobGraphTestUtils.emptyJobGraph(); assertEquals(0, executionPlans.getJobIds().size()); executionPlans.putExecutionPlan(executionPlan); assertEquals(0, executionPlans.getJobIds().size()); executionPlans .globalCleanupAsync(executionPlan.getJobID(), Executors.directExecutor()) .join(); assertEquals(0, executionPlans.getJobIds().size()); assertNull(executionPlans.recoverExecutionPlan(new JobID())); }
Tests that all operations work and don't change the state.
testNoOps
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/StandaloneExecutionPlanStoreTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/StandaloneExecutionPlanStoreTest.java
Apache-2.0
@Override public void invoke() throws Exception { final RecordReader<IntValue> reader = new RecordReader<>( getEnvironment().getInputGate(0), IntValue.class, getEnvironment().getTaskManagerInfo().getTmpDirectories()); final RecordWriter<IntValue> writer = new RecordWriterBuilder<IntValue>().build(getEnvironment().getWriter(0)); try { while (true) { final IntValue record = reader.next(); if (record == null) { return; } writer.emit(record); } } finally { writer.close(); } }
An {@link AbstractInvokable} that forwards all incoming elements.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/Tasks.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/Tasks.java
Apache-2.0
@Override public void invoke() throws Exception { consumeInputs(1, this); }
An {@link AbstractInvokable} that consumes 1 input channel.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/Tasks.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/Tasks.java
Apache-2.0
@Override public void invoke() throws Exception { consumeInputs(2, this); }
An {@link AbstractInvokable} that consumes 2 input channels.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/Tasks.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/Tasks.java
Apache-2.0
/** Intentionally empty; the interesting failure for this invokable happens at instantiation. */
@Override
public void invoke() throws Exception {}
An {@link AbstractInvokable} that throws an exception when being instantiated.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/Tasks.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/Tasks.java
Apache-2.0
/** Builds the job graph under test before each test case. */
@Before
public void setUp() {
    buildJobGraphWithBlockingEdgeWithinRegion();
}
Tests for the updating of consumers depending on the producer's result.
setUp
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/scheduler/UpdatePartitionConsumersTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/scheduler/UpdatePartitionConsumersTest.java
Apache-2.0
/**
 * Builds the 4-vertex batch job graph used by this test.
 *
 * <p>v1 feeds v2 and v3 via PIPELINED edges; v4 consumes v2 via a PIPELINED edge and v3 via a
 * BLOCKING edge. The BLOCKING edge sits inside the pipelined region, which allows the consumer
 * v4 to be deployed before its blocking input v3 finishes.
 */
private void buildJobGraphWithBlockingEdgeWithinRegion() {
    v1 = new JobVertex("v1");
    v1.setInvokableClass(AbstractInvokable.class);
    v1.setParallelism(1);
    v2 = new JobVertex("v2");
    v2.setInvokableClass(AbstractInvokable.class);
    v2.setParallelism(1);
    v3 = new JobVertex("v3");
    v3.setInvokableClass(AbstractInvokable.class);
    v3.setParallelism(1);
    v4 = new JobVertex("v4");
    v4.setInvokableClass(AbstractInvokable.class);
    v4.setParallelism(1);
    connectNewDataSetAsInput(
            v2, v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    connectNewDataSetAsInput(
            v3, v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    connectNewDataSetAsInput(
            v4, v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    // the only BLOCKING edge: v3 -> v4
    connectNewDataSetAsInput(
            v4, v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
    jobGraph = JobGraphTestUtils.batchJobGraph(v1, v2, v3, v4);
}
Build a graph which allows consumer vertex v4 to be deployed before its BLOCKING input v3 finishes. <pre> +----+ +-- pipelined -> | v2 | -- pipelined -+ +----+ | +----+ | +----+ | v1 |-| | -> | v4 | +----+ | +----+ | +----+ +-- pipelined -> | v3 | -- blocking --+ +----+ </pre>
buildJobGraphWithBlockingEdgeWithinRegion
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/scheduler/UpdatePartitionConsumersTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/scheduler/UpdatePartitionConsumersTest.java
Apache-2.0
/** No-op slot owner: returned slots are simply discarded. */
@Override
public void returnLogicalSlot(LogicalSlot logicalSlot) {}
SlotOwner implementation used for testing purposes only.
returnLogicalSlot
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/slots/DummySlotOwner.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/slots/DummySlotOwner.java
Apache-2.0
/** Emits the 100 consecutive integers starting at this subtask's index, then flushes. */
@Override
public void invoke() throws Exception {
    final int subtaskIndex = getIndexInSubtaskGroup();
    final RecordWriter<IntValue> writer =
            new RecordWriterBuilder<IntValue>().build(getEnvironment().getWriter(0));
    try {
        for (int offset = 0; offset < 100; offset++) {
            final int value = subtaskIndex + offset;
            writer.emit(new IntValue(value));
            LOG.debug("Sender({}) emit {}", subtaskIndex, value);
        }
        writer.flushAll();
    } finally {
        writer.close();
    }
}
Basic sender {@link AbstractInvokable} which sends 100 record base on its index to down stream.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobIntermediateDatasetReuseTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobIntermediateDatasetReuseTest.java
Apache-2.0
/** Verifies that exactly the 100 values starting at this subtask's index arrive, in order. */
@Override
public void invoke() throws Exception {
    int index = getIndexInSubtaskGroup();
    final RecordReader<IntValue> reader =
            new RecordReader<>(
                    getEnvironment().getInputGate(0),
                    IntValue.class,
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
    for (int i = index; i < index + 100; ++i) {
        final int value = reader.next().getValue();
        LOG.debug("Receiver({}) received {}", index, value);
        // Records must arrive in the exact order the Sender emitted them.
        Assertions.assertThat(value).isEqualTo(i);
    }
    // After 100 records the input must be exhausted.
    Assertions.assertThat(reader.next()).isNull();
}
Basic receiver {@link AbstractInvokable} which verifies the sent elements from the {@link Sender}.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobIntermediateDatasetReuseTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobIntermediateDatasetReuseTest.java
Apache-2.0
/** Wires the testing HA services with standalone leader and checkpoint-recovery components. */
@BeforeEach
void setup() {
    // NOTE: JUnit 5 lifecycle methods must not be private, otherwise the Jupiter engine
    // rejects them; package-private visibility is sufficient and idiomatic for tests.
    haServices.setResourceManagerLeaderRetriever(resourceManagerLeaderRetriever);
    haServices.setResourceManagerLeaderElection(new TestingLeaderElection());
    haServices.setCheckpointRecoveryFactory(new StandaloneCheckpointRecoveryFactory());
}
Tests for the execution deployment-reconciliation logic in the {@link JobMaster}.
setup
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterExecutionDeploymentReconciliationTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterExecutionDeploymentReconciliationTest.java
Apache-2.0
/**
 * Verifies reconciliation of unknown/missing executions: when a TM heartbeat reports an
 * execution the JM never deployed (and omits the one it did), the JM cancels the unknown
 * deployment, stops tracking the deployed one, and fails the job.
 */
@Test
void testExecutionDeploymentReconciliation() throws Exception {
    JobMasterBuilder.TestingOnCompletionActions onCompletionActions =
            new JobMasterBuilder.TestingOnCompletionActions();
    TestingExecutionDeploymentTrackerWrapper deploymentTrackerWrapper =
            new TestingExecutionDeploymentTrackerWrapper();
    final JobGraph jobGraph = JobGraphTestUtils.singleNoOpJobGraph();
    try (JobMaster jobMaster =
            createAndStartJobMaster(onCompletionActions, deploymentTrackerWrapper, jobGraph)) {
        JobMasterGateway jobMasterGateway = jobMaster.getSelfGateway(JobMasterGateway.class);
        TESTING_RPC_SERVICE_EXTENSION
                .getTestingRpcService()
                .registerGateway(jobMasterGateway.getAddress(), jobMasterGateway);
        final CompletableFuture<ExecutionAttemptID> taskCancellationFuture =
                new CompletableFuture<>();
        TaskExecutorGateway taskExecutorGateway =
                createTaskExecutorGateway(taskCancellationFuture);
        LocalUnresolvedTaskManagerLocation localUnresolvedTaskManagerLocation =
                new LocalUnresolvedTaskManagerLocation();
        registerTaskExecutorAndOfferSlots(
                jobMasterGateway,
                jobGraph.getJobID(),
                taskExecutorGateway,
                localUnresolvedTaskManagerLocation);
        // wait until the JM has actually deployed a task before sending the bogus report
        ExecutionAttemptID deployedExecution =
                deploymentTrackerWrapper.getTaskDeploymentFuture().get();
        assertThatFuture(taskCancellationFuture).isNotDone();
        ExecutionAttemptID unknownDeployment = createExecutionAttemptId();
        // the deployment report is missing the just deployed task, but contains the ID of some
        // other unknown deployment
        // the job master should cancel the unknown deployment, and fail the job
        jobMasterGateway.heartbeatFromTaskManager(
                localUnresolvedTaskManagerLocation.getResourceID(),
                new TaskExecutorToJobManagerHeartbeatPayload(
                        new AccumulatorReport(Collections.emptyList()),
                        new ExecutionDeploymentReport(
                                Collections.singleton(unknownDeployment))));
        assertThatFuture(taskCancellationFuture)
                .eventuallySucceeds()
                .isEqualTo(unknownDeployment);
        assertThatFuture(deploymentTrackerWrapper.getStopFuture())
                .eventuallySucceeds()
                .isEqualTo(deployedExecution);
        // the missing deployment must fail the whole job
        assertThat(
                        onCompletionActions
                                .getJobReachedGloballyTerminalStateFuture()
                                .get()
                                .getArchivedExecutionGraph()
                                .getState())
                .isEqualTo(JobStatus.FAILED);
    }
}
Tests how the job master handles unknown/missing executions.
testExecutionDeploymentReconciliation
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterExecutionDeploymentReconciliationTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterExecutionDeploymentReconciliationTest.java
Apache-2.0
/**
 * Tests that a JobMaster restores the given JobGraph from its configured savepoint upon
 * initial submission: the savepoint must end up in the completed-checkpoint store.
 */
@Test
void testRestoringFromSavepoint() throws Exception {
    // create savepoint data
    final long savepointId = 42L;
    final File savepointFile = createSavepoint(savepointId);
    // set savepoint settings
    final SavepointRestoreSettings savepointRestoreSettings =
            SavepointRestoreSettings.forPath(savepointFile.getAbsolutePath(), true);
    final JobGraph jobGraph = createJobGraphWithCheckpointing(savepointRestoreSettings);
    final StandaloneCompletedCheckpointStore completedCheckpointStore =
            new StandaloneCompletedCheckpointStore(1);
    final CheckpointRecoveryFactory testingCheckpointRecoveryFactory =
            PerJobCheckpointRecoveryFactory.withoutCheckpointStoreRecovery(
                    maxCheckpoints -> completedCheckpointStore);
    haServices.setCheckpointRecoveryFactory(testingCheckpointRecoveryFactory);
    try (final JobMaster jobMaster =
            new JobMasterBuilder(jobGraph, rpcService)
                    .withHighAvailabilityServices(haServices)
                    .createJobMaster()) {
        // we need to start and register the required slots to let the adaptive scheduler
        // restore from the savepoint
        jobMaster.start();
        final OneShotLatch taskSubmitLatch = new OneShotLatch();
        registerSlotsAtJobMaster(
                1,
                jobMaster.getSelfGateway(JobMasterGateway.class),
                jobGraph.getJobID(),
                new TestingTaskExecutorGatewayBuilder()
                        .setSubmitTaskConsumer(
                                (taskDeploymentDescriptor, jobMasterId) -> {
                                    taskSubmitLatch.trigger();
                                    return CompletableFuture.completedFuture(Acknowledge.get());
                                })
                        .createTestingTaskExecutorGateway(),
                new LocalUnresolvedTaskManagerLocation());
        // wait until a task has submitted because this guarantees that the ExecutionGraph has
        // been created
        taskSubmitLatch.await();
        final CompletedCheckpoint savepointCheckpoint =
                completedCheckpointStore.getLatestCheckpoint();
        assertThat(savepointCheckpoint).isNotNull();
        // the restored checkpoint must carry the savepoint's id
        assertThat(savepointCheckpoint.getCheckpointID()).isEqualTo(savepointId);
    }
}
Tests that a JobMaster will restore the given JobGraph from its savepoint upon initial submission.
testRestoringFromSavepoint
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterTest.java
Apache-2.0
@Test void testCheckpointPrecedesSavepointRecovery() throws Exception { // create savepoint data final long savepointId = 42L; final File savepointFile = createSavepoint(savepointId); // set savepoint settings final SavepointRestoreSettings savepointRestoreSettings = SavepointRestoreSettings.forPath("" + savepointFile.getAbsolutePath(), true); final JobGraph jobGraph = createJobGraphWithCheckpointing(savepointRestoreSettings); final long checkpointId = 1L; final CompletedCheckpoint completedCheckpoint = new CompletedCheckpoint( jobGraph.getJobID(), checkpointId, 1L, 1L, Collections.emptyMap(), null, CheckpointProperties.forCheckpoint( CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION), new DummyCheckpointStorageLocation(), null); final StandaloneCompletedCheckpointStore completedCheckpointStore = new StandaloneCompletedCheckpointStore(1); completedCheckpointStore.addCheckpointAndSubsumeOldestOne( completedCheckpoint, new CheckpointsCleaner(), () -> {}); final CheckpointRecoveryFactory testingCheckpointRecoveryFactory = PerJobCheckpointRecoveryFactory.withoutCheckpointStoreRecovery( maxCheckpoints -> completedCheckpointStore); haServices.setCheckpointRecoveryFactory(testingCheckpointRecoveryFactory); try (final JobMaster jobMaster = new JobMasterBuilder(jobGraph, rpcService).createJobMaster()) { // starting the JobMaster should have read the savepoint final CompletedCheckpoint savepointCheckpoint = completedCheckpointStore.getLatestCheckpoint(); assertThat(savepointCheckpoint).isNotNull(); assertThat(savepointCheckpoint.getCheckpointID()).isEqualTo(checkpointId); } }
Tests that an existing checkpoint will have precedence over a savepoint.
testCheckpointPrecedesSavepointRecovery
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterTest.java
Apache-2.0
/**
 * Tests that an unestablished ResourceManager connection can be closed: after a new RM leader
 * is announced mid-registration, the JM must start registering at the second RM.
 */
@Test
void testCloseUnestablishedResourceManagerConnection() throws Exception {
    try (final JobMaster jobMaster =
            new JobMasterBuilder(jobGraph, rpcService)
                    .withConfiguration(configuration)
                    .withHighAvailabilityServices(haServices)
                    .createJobMaster()) {
        jobMaster.start();
        final TestingResourceManagerGateway firstResourceManagerGateway =
                createAndRegisterTestingResourceManagerGateway();
        final TestingResourceManagerGateway secondResourceManagerGateway =
                createAndRegisterTestingResourceManagerGateway();
        final OneShotLatch firstJobManagerRegistration = new OneShotLatch();
        final OneShotLatch secondJobManagerRegistration = new OneShotLatch();
        firstResourceManagerGateway.setRegisterJobManagerFunction(
                (jobMasterId, resourceID, s, jobID) -> {
                    firstJobManagerRegistration.trigger();
                    return CompletableFuture.completedFuture(
                            firstResourceManagerGateway.getJobMasterRegistrationSuccess());
                });
        secondResourceManagerGateway.setRegisterJobManagerFunction(
                (jobMasterId, resourceID, s, jobID) -> {
                    secondJobManagerRegistration.trigger();
                    return CompletableFuture.completedFuture(
                            secondResourceManagerGateway.getJobMasterRegistrationSuccess());
                });
        notifyResourceManagerLeaderListeners(firstResourceManagerGateway);
        // wait until we have seen the first registration attempt
        firstJobManagerRegistration.await();
        // this should stop the connection attempts towards the first RM
        notifyResourceManagerLeaderListeners(secondResourceManagerGateway);
        // check that we start registering at the second RM
        secondJobManagerRegistration.await();
    }
}
Tests that we can close an unestablished ResourceManager connection.
testCloseUnestablishedResourceManagerConnection
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterTest.java
Apache-2.0
/**
 * Tests that the JM keeps reconnecting to the latest known RM after receiving a disconnect
 * message: a second registration attempt must follow the explicit disconnect.
 */
@Test
void testReconnectionAfterDisconnect() throws Exception {
    try (final JobMaster jobMaster =
            new JobMasterBuilder(jobGraph, rpcService)
                    .withJobMasterId(jobMasterId)
                    .withConfiguration(configuration)
                    .withHighAvailabilityServices(haServices)
                    .withHeartbeatServices(heartbeatServices)
                    .createJobMaster()) {
        jobMaster.start();
        final JobMasterGateway jobMasterGateway =
                jobMaster.getSelfGateway(JobMasterGateway.class);
        final TestingResourceManagerGateway testingResourceManagerGateway =
                createAndRegisterTestingResourceManagerGateway();
        // capacity 1 is enough: each registration attempt is taken before the next arrives
        final BlockingQueue<JobMasterId> registrationsQueue = new ArrayBlockingQueue<>(1);
        testingResourceManagerGateway.setRegisterJobManagerFunction(
                (jobMasterId, resourceID, s, jobID) -> {
                    registrationsQueue.offer(jobMasterId);
                    return CompletableFuture.completedFuture(
                            testingResourceManagerGateway.getJobMasterRegistrationSuccess());
                });
        final ResourceManagerId resourceManagerId =
                testingResourceManagerGateway.getFencingToken();
        notifyResourceManagerLeaderListeners(testingResourceManagerGateway);
        // wait for first registration attempt
        final JobMasterId firstRegistrationAttempt = registrationsQueue.take();
        assertThat(firstRegistrationAttempt).isEqualTo(jobMasterId);
        assertThat(registrationsQueue).isEmpty();
        jobMasterGateway.disconnectResourceManager(
                resourceManagerId, new FlinkException("Test exception"));
        // wait for the second registration attempt after the disconnect call
        assertThat(registrationsQueue.take()).isEqualTo(jobMasterId);
    }
}
Tests that we continue reconnecting to the latest known RM after a disconnection message.
testReconnectionAfterDisconnect
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterTest.java
Apache-2.0
/**
 * Tests that a JM connects to the leading RM after it is started: the RM leader is announced
 * before {@code jobMaster.start()}, and a registration must still arrive.
 */
@Test
void testResourceManagerConnectionAfterStart() throws Exception {
    try (final JobMaster jobMaster =
            new JobMasterBuilder(jobGraph, rpcService)
                    .withJobMasterId(jobMasterId)
                    .withConfiguration(configuration)
                    .withHighAvailabilityServices(haServices)
                    .withHeartbeatServices(heartbeatServices)
                    .createJobMaster()) {
        final TestingResourceManagerGateway testingResourceManagerGateway =
                createAndRegisterTestingResourceManagerGateway();
        final BlockingQueue<JobMasterId> registrationQueue = new ArrayBlockingQueue<>(1);
        testingResourceManagerGateway.setRegisterJobManagerFunction(
                (jobMasterId, resourceID, s, jobID) -> {
                    registrationQueue.offer(jobMasterId);
                    return CompletableFuture.completedFuture(
                            testingResourceManagerGateway.getJobMasterRegistrationSuccess());
                });
        // announce the RM leader before the JM is started
        notifyResourceManagerLeaderListeners(testingResourceManagerGateway);
        jobMaster.start();
        final JobMasterId firstRegistrationAttempt = registrationQueue.take();
        assertThat(firstRegistrationAttempt).isEqualTo(jobMasterId);
    }
}
Tests that a JM connects to the leading RM after regaining leadership.
testResourceManagerConnectionAfterStart
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterTest.java
Apache-2.0
/**
 * Tests that the per-call timeout of {@code triggerSavepoint} is respected: a 1ms timeout
 * fails with a {@link TimeoutException} while an infinite timeout stays pending, given a
 * scheduler whose savepoint future never completes.
 */
@Test
void testTriggerSavepointTimeout() throws Exception {
    final TestingSchedulerNG testingSchedulerNG =
            TestingSchedulerNG.newBuilder()
                    .setTriggerSavepointFunction(
                            // never completes -> only the RPC-level timeout can fire
                            (ignoredA, ignoredB, formatType) -> new CompletableFuture<>())
                    .build();
    try (final JobMaster jobMaster =
            new JobMasterBuilder(jobGraph, rpcService)
                    .withFatalErrorHandler(testingFatalErrorHandler)
                    .withSlotPoolServiceSchedulerFactory(
                            DefaultSlotPoolServiceSchedulerFactory.create(
                                    TestingSlotPoolServiceBuilder.newBuilder(),
                                    new TestingSchedulerNGFactory(testingSchedulerNG)))
                    .createJobMaster()) {
        jobMaster.start();
        final JobMasterGateway jobMasterGateway =
                jobMaster.getSelfGateway(JobMasterGateway.class);
        final CompletableFuture<String> savepointFutureLowTimeout =
                jobMasterGateway.triggerSavepoint(
                        "/tmp", false, SavepointFormatType.CANONICAL, Duration.ofMillis(1));
        final CompletableFuture<String> savepointFutureHighTimeout =
                jobMasterGateway.triggerSavepoint(
                        "/tmp", false, SavepointFormatType.CANONICAL, RpcUtils.INF_TIMEOUT);
        assertThatThrownBy(
                        () ->
                                savepointFutureLowTimeout.get(
                                        testingTimeout.toMillis(), TimeUnit.MILLISECONDS))
                .hasRootCauseInstanceOf(TimeoutException.class);
        assertThat(savepointFutureHighTimeout).isNotDone();
    }
}
Tests that the timeout in {@link JobMasterGateway#triggerSavepoint(String, boolean, SavepointFormatType, Duration)} is respected.
testTriggerSavepointTimeout
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobMasterTest.java
Apache-2.0
/** Emits the two sentinel values 42 and 1337 downstream and flushes. */
@Override
public void invoke() throws Exception {
    final RecordWriter<IntValue> writer =
            new RecordWriterBuilder<IntValue>().build(getEnvironment().getWriter(0));
    try {
        for (int value : new int[] {42, 1337}) {
            writer.emit(new IntValue(value));
        }
        writer.flushAll();
    } finally {
        writer.close();
    }
}
Basic sender {@link AbstractInvokable} which sends 42 and 1337 down stream.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/TestingAbstractInvokables.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/TestingAbstractInvokables.java
Apache-2.0
/**
 * Expects exactly the two values 42 and 1337 from the {@link Sender}, followed by end of
 * input; anything else fails the task.
 */
@Override
public void invoke() throws Exception {
    final RecordReader<IntValue> reader =
            new RecordReader<>(
                    getEnvironment().getInputGate(0),
                    IntValue.class,
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
    final IntValue first = reader.next();
    final IntValue second = reader.next();
    final IntValue third = reader.next();
    final boolean dataCorrect =
            first.getValue() == 42 && second.getValue() == 1337 && third == null;
    if (!dataCorrect) {
        throw new Exception("Wrong data received.");
    }
}
Basic receiver {@link AbstractInvokable} which verifies the sent elements from the {@link Sender}. <p>This invokable must not run with a higher parallelism than {@link Sender}.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/TestingAbstractInvokables.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/TestingAbstractInvokables.java
Apache-2.0
/**
 * Verifies stop semantics of the event store: {@code stop(false)} keeps the written event
 * file, while {@code stop(true)} deletes the event directory entirely.
 */
@Test
void testStopWithClear() throws IOException {
    final Path rootPath = new Path(TempDirUtils.newFolder(temporaryFolder).getAbsolutePath());
    FileSystemJobEventStore store = new FileSystemJobEventStore(rootPath, new Configuration());
    store.registerJobEventSerializer(TestingJobEvent.TYPE_ID, new GenericJobEventSerializer());
    // the freshly created directory contains no event files yet
    assertThat(new File(rootPath.getPath()).listFiles().length).isZero();
    store.start();
    store.writeEvent(new TestingJobEvent(0));
    // stopping without clearing keeps the single written event file
    store.stop(false);
    assertThat(new File(rootPath.getPath()).listFiles().length).isEqualTo(1);
    store.start();
    // stopping with clearing removes the whole directory
    store.stop(true);
    assertThat(new File(rootPath.getPath()).exists()).isFalse();
}
Test stop with clear. The event files should be deleted.
testStopWithClear
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/event/FileSystemJobEventStoreTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/event/FileSystemJobEventStoreTest.java
Apache-2.0
/**
 * Delegates service creation to the configured test function; note that the leader session
 * id is ignored by this testing implementation.
 */
@Override
public CompletableFuture<JobMasterService> createJobMasterService(
        UUID leaderSessionId, OnCompletionActions onCompletionActions) {
    return jobMasterServiceFunction.apply(onCompletionActions);
}
Testing implementation of the {@link JobMasterServiceFactory}.
createJobMasterService
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/factories/TestingJobMasterServiceFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/factories/TestingJobMasterServiceFactory.java
Apache-2.0
/**
 * Tests that the {@link DeclarativeSlotPoolBridge} completes slot requests in request order;
 * no extra pool setup beyond creation is needed, hence the empty consumer.
 */
@TestTemplate
void testRequestsAreCompletedInRequestOrder() {
    runSlotRequestCompletionTest(
            CheckedSupplier.unchecked(() -> createAndSetUpSlotPool(slotBatchAllocatable)),
            slotPool -> {});
}
Tests that the {@link DeclarativeSlotPoolBridge} completes slots in request order.
testRequestsAreCompletedInRequestOrder
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/DeclarativeSlotPoolBridgeRequestCompletionTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/DeclarativeSlotPoolBridgeRequestCompletionTest.java
Apache-2.0
/**
 * Supplies the slot-request-max-interval values the parameterized tests run with: a small
 * positive interval and zero (i.e. batching disabled).
 */
@Parameters(name = "slotRequestMaxInterval: {0}")
static List<Duration> getParametersCouples() {
    return Lists.newArrayList(Duration.ofMillis(50L), Duration.ZERO);
}
Base test class for the {@link DefaultDeclarativeSlotPool}.
getParametersCouples
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/DefaultDeclarativeSlotPoolTestBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/DefaultDeclarativeSlotPoolTestBase.java
Apache-2.0
/**
 * Creates a {@link DefaultFreeSlotTracker} backed by the given map of free slots.
 *
 * @param freeSlots slots to track, keyed by allocation id
 * @return default free slot tracker over the given slots
 */
public static DefaultFreeSlotTracker createDefaultFreeSlotTracker(
        Map<AllocationID, PhysicalSlot> freeSlots) {
    return new DefaultFreeSlotTracker(
            freeSlots.keySet(),
            freeSlots::get,
            id -> new TestingFreeSlotTracker.TestingFreeSlotInfo(freeSlots.get(id)),
            // utilization is irrelevant for these tests, so report a constant 0
            ignored -> 0d);
}
Create default free slot tracker for provided slots. @param freeSlots slots to track @return default free slot tracker
createDefaultFreeSlotTracker
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/FreeSlotTrackerTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/FreeSlotTrackerTestUtils.java
Apache-2.0
/**
 * Ensures that new slots are matched against the preferred allocation ids of the pending
 * requests: each request prefers the other slot's allocation id, and every resulting match
 * must honor that preference.
 */
@Test
void testNewSlotsAreMatchedAgainstPreferredAllocationIDs() {
    final PreferredAllocationRequestSlotMatchingStrategy strategy =
            PreferredAllocationRequestSlotMatchingStrategy.INSTANCE;
    final AllocationID allocationId1 = new AllocationID();
    final AllocationID allocationId2 = new AllocationID();
    final Collection<TestingPhysicalSlot> slots =
            Arrays.asList(
                    TestingPhysicalSlot.builder().withAllocationID(allocationId1).build(),
                    TestingPhysicalSlot.builder().withAllocationID(allocationId2).build());
    // each pending request prefers the allocation of the *other* slot
    final Collection<PendingRequest> pendingRequests =
            Arrays.asList(
                    PendingRequest.createNormalRequest(
                            new SlotRequestId(),
                            ResourceProfile.UNKNOWN,
                            Collections.singleton(allocationId2)),
                    PendingRequest.createNormalRequest(
                            new SlotRequestId(),
                            ResourceProfile.UNKNOWN,
                            Collections.singleton(allocationId1)));
    final Collection<RequestSlotMatchingStrategy.RequestSlotMatch> requestSlotMatches =
            strategy.matchRequestsAndSlots(slots, pendingRequests);
    assertThat(requestSlotMatches).hasSize(2);
    // every match must pair a request with a slot whose allocation id it preferred
    for (RequestSlotMatchingStrategy.RequestSlotMatch requestSlotMatch : requestSlotMatches) {
        assertThat(requestSlotMatch.getPendingRequest().getPreferredAllocations())
                .contains(requestSlotMatch.getSlot().getAllocationId());
    }
}
This test ensures that new slots are matched against the preferred allocationIds of the pending requests.
testNewSlotsAreMatchedAgainstPreferredAllocationIDs
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/PreferredAllocationRequestSlotMatchingStrategyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/PreferredAllocationRequestSlotMatchingStrategyTest.java
Apache-2.0
/**
 * Tests that {@code release(Throwable)} fails the payload immediately without waiting for the
 * payload's terminal state, and does not return the slot to its owner.
 */
@Test
void testAllocatedSlotRelease() {
    final CompletableFuture<LogicalSlot> returnSlotFuture = new CompletableFuture<>();
    final WaitingSlotOwner waitingSlotOwner =
            new WaitingSlotOwner(returnSlotFuture, new CompletableFuture<>());
    final SingleLogicalSlot singleLogicalSlot = createSingleLogicalSlot(waitingSlotOwner);
    final CompletableFuture<?> terminalStateFuture = new CompletableFuture<>();
    final CompletableFuture<?> failFuture = new CompletableFuture<>();
    final ManualTestingPayload dummyPayload =
            new ManualTestingPayload(failFuture, terminalStateFuture);
    assertThat(singleLogicalSlot.tryAssignPayload(dummyPayload)).isTrue();
    singleLogicalSlot.release(new FlinkException("Test exception"));
    // the payload is failed right away, even though its terminal state is still pending
    assertThatFuture(failFuture).isDone();
    // we don't require the logical slot to return to the owner because
    // the release call should only come from the owner
    assertThatFuture(returnSlotFuture).isNotDone();
}
Tests that the {@link PhysicalSlot.Payload#release(Throwable)} does not wait for the payload to reach a terminal state.
testAllocatedSlotRelease
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/SingleLogicalSlotTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/SingleLogicalSlotTest.java
Apache-2.0
/**
 * Tests the slot release protocol ordering: the release future only completes after the
 * payload reached a terminal state AND the owner acknowledged taking the slot back.
 */
@Test
void testSlotRelease() {
    final CompletableFuture<LogicalSlot> returnedSlotFuture = new CompletableFuture<>();
    final CompletableFuture<Boolean> returnSlotResponseFuture = new CompletableFuture<>();
    final WaitingSlotOwner waitingSlotOwner =
            new WaitingSlotOwner(returnedSlotFuture, returnSlotResponseFuture);
    final CompletableFuture<?> terminalStateFuture = new CompletableFuture<>();
    final CompletableFuture<?> failFuture = new CompletableFuture<>();
    final ManualTestingPayload dummyPayload =
            new ManualTestingPayload(failFuture, terminalStateFuture);
    final SingleLogicalSlot singleLogicalSlot = createSingleLogicalSlot(waitingSlotOwner);
    assertThat(singleLogicalSlot.tryAssignPayload(dummyPayload)).isTrue();
    final CompletableFuture<?> releaseFuture =
            singleLogicalSlot.releaseSlot(new FlinkException("Test exception"));
    // release is not done before the payload terminates
    assertThatFuture(releaseFuture).isNotDone();
    assertThatFuture(returnedSlotFuture).isNotDone();
    assertThatFuture(failFuture).isDone();
    // payload reaching its terminal state returns the slot to the owner ...
    terminalStateFuture.complete(null);
    assertThatFuture(returnedSlotFuture).isDone();
    // ... and only the owner's acknowledgement completes the release
    returnSlotResponseFuture.complete(true);
    assertThatFuture(releaseFuture).isDone();
}
Tests that the slot release is only signaled after the owner has taken it back.
testSlotRelease
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/SingleLogicalSlotTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/SingleLogicalSlotTest.java
Apache-2.0
/**
 * Tests that a batch slot request times out (root cause {@link TimeoutException}) when no
 * slot can fulfill it within the configured 2ms batch slot timeout.
 */
@Test
void testPendingBatchSlotRequestTimeout() throws Exception {
    try (final SlotPool slotPool =
            createAndSetUpSlotPool(mainThreadExecutor, null, Duration.ofMillis(2L))) {
        final CompletableFuture<PhysicalSlot> slotFuture =
                SlotPoolUtils.requestNewAllocatedBatchSlot(
                        slotPool, mainThreadExecutor, ResourceProfile.UNKNOWN);
        assertThatThrownBy(slotFuture::get)
                .withFailMessage("Expected that slot future times out.")
                .isInstanceOf(ExecutionException.class)
                .hasRootCauseInstanceOf(TimeoutException.class);
    }
}
Tests that a batch slot request fails if there is no slot which can fulfill the slot request.
testPendingBatchSlotRequestTimeout
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolBatchSlotRequestTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolBatchSlotRequestTest.java
Apache-2.0
/**
 * Tests that pending batch slot requests do not time out as long as the pool contains a slot
 * that could fulfill them, even after the batch slot timeout has elapsed (time is driven by a
 * {@link ManualClock}).
 */
@Test
void testPendingBatchSlotRequestDoesNotTimeoutIfFulfillingSlotExists() throws Exception {
    final Duration batchSlotTimeout = Duration.ofMillis(2L);
    final ManualClock clock = new ManualClock();
    try (final DeclarativeSlotPoolBridge slotPool =
            createAndSetUpSlotPool(mainThreadExecutor, null, batchSlotTimeout, clock)) {
        // occupy the pool with one fulfilled request and one matching offered slot
        SlotPoolUtils.requestNewAllocatedBatchSlot(
                slotPool, mainThreadExecutor, resourceProfile);
        SlotPoolUtils.offerSlots(slotPool, mainThreadExecutor, Arrays.asList(resourceProfile));
        final CompletableFuture<PhysicalSlot> firstPendingSlotFuture =
                SlotPoolUtils.requestNewAllocatedBatchSlot(
                        slotPool, mainThreadExecutor, ResourceProfile.UNKNOWN);
        final CompletableFuture<PhysicalSlot> secondPendingSlotFuture =
                SlotPoolUtils.requestNewAllocatedBatchSlot(
                        slotPool, mainThreadExecutor, resourceProfile);
        final List<CompletableFuture<PhysicalSlot>> slotFutures =
                Arrays.asList(firstPendingSlotFuture, secondPendingSlotFuture);
        // advance past the batch slot timeout and trigger the timeout check
        advanceTimeAndTriggerCheckBatchSlotTimeout(
                slotPool, mainThreadExecutor, clock, batchSlotTimeout);
        // neither request may have been failed: a fulfilling slot exists in the pool
        for (CompletableFuture<PhysicalSlot> slotFuture : slotFutures) {
            assertThatFuture(slotFuture).isNotDone();
        }
    }
}
Tests that a batch slot request won't time out if there exists a slot in the SlotPool which fulfills the requested {@link ResourceProfile}.
testPendingBatchSlotRequestDoesNotTimeoutIfFulfillingSlotExists
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolBatchSlotRequestTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolBatchSlotRequestTest.java
Apache-2.0
/**
 * Tests that a batch slot request is not failed when the resource declaration towards the RM
 * fails with a generic exception (as opposed to an UnfulfillableSlotRequestException): the
 * request future must stay pending.
 */
@Test
void testPendingBatchSlotRequestDoesNotFailIfResourceDeclaringFails() throws Exception {
    final TestingResourceManagerGateway testingResourceManagerGateway =
            new TestingResourceManagerGateway();
    testingResourceManagerGateway.setDeclareRequiredResourcesFunction(
            (jobMasterId, resourceRequirements) ->
                    FutureUtils.completedExceptionally(new FlinkException("Failed request")));
    final Duration batchSlotTimeout = Duration.ofMillis(1000L);
    try (final SlotPool slotPool =
            createAndSetUpSlotPool(
                    mainThreadExecutor, testingResourceManagerGateway, batchSlotTimeout)) {
        final CompletableFuture<PhysicalSlot> slotFuture =
                SlotPoolUtils.requestNewAllocatedBatchSlot(
                        slotPool, mainThreadExecutor, resourceProfile);
        // the future must still be pending after a grace period
        assertThatFuture(slotFuture).willNotCompleteWithin(Duration.ofMillis(50L));
    }
}
Tests that a batch slot request won't fail if its resource manager request fails with exceptions other than {@link UnfulfillableSlotRequestException}.
testPendingBatchSlotRequestDoesNotFailIfResourceDeclaringFails
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolBatchSlotRequestTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolBatchSlotRequestTest.java
Apache-2.0
/** Blocks forever while blocking mode is enabled; otherwise returns immediately. */
@Override
public void invoke() throws Exception {
    if (!isBlocking) {
        // Nothing to do in non-blocking mode.
        return;
    }
    synchronized (this) {
        // Wait indefinitely; the loop guards against spurious wake-ups.
        while (true) {
            wait();
        }
    }
}
Blocking invokable which is controlled by a static field. This class needs to be {@code public} because it is going to be instantiated from outside this testing class.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/LeaderChangeClusterComponentsTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/LeaderChangeClusterComponentsTest.java
Apache-2.0
/**
 * Acquires the leadership with the given {@code leaderSessionID}.
 *
 * @param leaderSessionID the session ID of the newly granted leadership
 * @return the contender's {@link LeaderInformation} after the leadership was confirmed.
 *     Waiting for the {@code CompletableFuture} to complete will leave the test code in a
 *     state where the {@link LeaderContender} confirmed the leadership. This simulates the
 *     information being written to the HA backend.
 */
public synchronized CompletableFuture<LeaderInformation> isLeader(UUID leaderSessionID) {
    // A previously issued, not-yet-confirmed grant becomes obsolete; cancel its future.
    if (confirmationFuture != null) {
        confirmationFuture.cancel(false);
    }
    confirmationFuture = new CompletableFuture<>();
    // Record the session ID before notifying the contender so the confirmation can be matched.
    issuedLeaderSessionId = leaderSessionID;
    // contender may be null when no contender has registered yet; in that case only the
    // issued session ID is recorded here.
    if (contender != null) {
        contender.grantLeadership(leaderSessionID);
    }
    return confirmationFuture;
}
Acquires the leadership with the given {@code leaderSessionID}. @return the contender's {@link LeaderInformation} after the leadership was confirmed. Waiting for the {@code CompletableFuture} to complete will leave the test code in a state where the {@link LeaderContender} confirmed the leadership. This simulates the information being written to the HA backend.
isLeader
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/TestingLeaderElection.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/TestingLeaderElection.java
Apache-2.0
/**
 * Returns the start future indicating whether this leader election service has been started or
 * not.
 *
 * @return Future which is completed once this service has been started.
 * @see TestingLeaderElection#startLeaderElection(LeaderContender)
 */
public synchronized CompletableFuture<Void> getStartFuture() {
    return startFuture;
}
Returns the start future indicating whether this leader election service has been started or not. @return Future which is completed once this service has been started. @see TestingLeaderElection#startLeaderElection(LeaderContender)
getStartFuture
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/TestingLeaderElection.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/TestingLeaderElection.java
Apache-2.0
/**
 * Returns {@code true} if no contender is registered right now and the service is, therefore,
 * stopped; otherwise {@code false}.
 */
public synchronized boolean isStopped() {
    // A registered contender is the only marker of a running election.
    return contender == null;
}
Returns {@code true} if no contender is registered right now and the service is, therefore, stopped; otherwise {@code false}.
isStopped
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/TestingLeaderElection.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/TestingLeaderElection.java
Apache-2.0
/**
 * Returns a {@code Builder} that comes with a basic default implementation of the {@link
 * LeaderElectionDriver} contract using the passed parameters for information storage.
 *
 * <p>Every callback acquires the provided lock before touching the shared state. The lock is
 * acquired <em>outside</em> the {@code try} block so that the {@code finally} clause never
 * attempts to release a lock that was not successfully obtained (the canonical {@link
 * java.util.concurrent.locks.Lock} usage idiom).
 *
 * @param hasLeadership saves the current leadership state of the instance that is created from
 *     the {@code Builder}.
 * @param storedLeaderInformation saves the leader information that would be otherwise stored in
 *     some external storage.
 * @param isClosed saves the running state of the driver.
 */
public static Builder newBuilder(
        AtomicBoolean hasLeadership,
        AtomicReference<LeaderInformationRegister> storedLeaderInformation,
        AtomicBoolean isClosed) {
    // The backing stores are expected to start out empty and open.
    Preconditions.checkState(
            storedLeaderInformation.get() == null
                    || !storedLeaderInformation
                            .get()
                            .getRegisteredComponentIds()
                            .iterator()
                            .hasNext(),
            "Initial state check for storedLeaderInformation failed.");
    Preconditions.checkState(!isClosed.get(), "Initial state check for isClosed failed.");

    return newNoOpBuilder()
            .setHasLeadershipFunction(
                    lock -> {
                        lock.lock();
                        try {
                            return hasLeadership.get();
                        } finally {
                            lock.unlock();
                        }
                    })
            .setPublishLeaderInformationConsumer(
                    (lock, componentId, leaderInformation) -> {
                        lock.lock();
                        try {
                            // Only the current leader may publish leader information.
                            if (hasLeadership.get()) {
                                storedLeaderInformation.getAndUpdate(
                                        oldData ->
                                                LeaderInformationRegister.merge(
                                                        oldData,
                                                        componentId,
                                                        leaderInformation));
                            }
                        } finally {
                            lock.unlock();
                        }
                    })
            .setDeleteLeaderInformationConsumer(
                    (lock, componentId) -> {
                        lock.lock();
                        try {
                            // Only the current leader may clear leader information.
                            if (hasLeadership.get()) {
                                storedLeaderInformation.getAndUpdate(
                                        oldData ->
                                                LeaderInformationRegister.clear(
                                                        oldData, componentId));
                            }
                        } finally {
                            lock.unlock();
                        }
                    })
            .setCloseConsumer(
                    lock -> {
                        lock.lock();
                        try {
                            isClosed.set(true);
                        } finally {
                            lock.unlock();
                        }
                    });
}
Returns a {@code Builder} that comes with a basic default implementation of the {@link LeaderElectionDriver} contract using the passed parameters for information storage. @param hasLeadership saves the current leadership state of the instance that is created from the {@code Builder}. @param storedLeaderInformation saves the leader information that would be otherwise stored in some external storage. @param isClosed saves the running state of the driver.
newBuilder
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/TestingLeaderElectionDriver.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/TestingLeaderElectionDriver.java
Apache-2.0
/**
 * Throws an {@code AssertionError} if an {@link LeaderElectionEvent.ErrorEvent} was observed.
 * This ensures that any unexpectedly triggered error is surfaced within the test.
 */
public void failIfErrorEventHappened() {
    getNextErrorEvent()
            .ifPresent(
                    errorEvent -> {
                        throw new AssertionError(
                                "An error was reported that wasn't properly handled.",
                                errorEvent.getError());
                    });
}
Throws an {@code AssertionError} if an {@link LeaderElectionEvent.ErrorEvent} was observed. This method can be used to ensure that any error that was triggered unexpectedly is exposed within the test.
failIfErrorEventHappened
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/TestingLeaderElectionListener.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/TestingLeaderElectionListener.java
Apache-2.0