code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
@Test void testJobFinishes() throws Exception { JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph( ExecutionGraphTestUtils.createJobVertex("Task1", 2, NoOpInvokable.class), ExecutionGraphTestUtils.createJobVertex("Task2", 2, NoOpInvokable.class)); SchedulerBase scheduler = new DefaultSchedulerBuilder( jobGraph, ComponentMainThreadExecutorServiceAdapter.forMainThread(), EXECUTOR_RESOURCE.getExecutor()) .build(); ExecutionGraph eg = scheduler.getExecutionGraph(); scheduler.startScheduling(); ExecutionGraphTestUtils.switchAllVerticesToRunning(eg); Iterator<ExecutionJobVertex> jobVertices = eg.getVerticesTopologically().iterator(); ExecutionJobVertex sender = jobVertices.next(); ExecutionJobVertex receiver = jobVertices.next(); List<ExecutionVertex> senderVertices = Arrays.asList(sender.getTaskVertices()); List<ExecutionVertex> receiverVertices = Arrays.asList(receiver.getTaskVertices()); // test getNumExecutionVertexFinished senderVertices.get(0).getCurrentExecutionAttempt().markFinished(); assertThat(sender.getNumExecutionVertexFinished()).isOne(); assertThat(eg.getState()).isEqualTo(JobStatus.RUNNING); senderVertices.get(1).getCurrentExecutionAttempt().markFinished(); assertThat(sender.getNumExecutionVertexFinished()).isEqualTo(2); assertThat(eg.getState()).isEqualTo(JobStatus.RUNNING); // test job finishes receiverVertices.get(0).getCurrentExecutionAttempt().markFinished(); receiverVertices.get(1).getCurrentExecutionAttempt().markFinished(); assertThat(eg.getNumFinishedVertices()).isEqualTo(4); assertThat(eg.getState()).isEqualTo(JobStatus.FINISHED); }
Tests the finish behaviour of the {@link ExecutionGraph}.
testJobFinishes
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphFinishTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphFinishTest.java
Apache-2.0
@Test
void testFailingExecutionAfterRestart() throws Exception {
    JobVertex sender = ExecutionGraphTestUtils.createJobVertex("Task1", 1, NoOpInvokable.class);
    JobVertex receiver = ExecutionGraphTestUtils.createJobVertex("Task2", 1, NoOpInvokable.class);
    JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(sender, receiver);

    try (SlotPool slotPool = SlotPoolUtils.createDeclarativeSlotPoolBridge()) {
        SchedulerBase scheduler =
                new DefaultSchedulerBuilder(
                                jobGraph, mainThreadExecutor, EXECUTOR_RESOURCE.getExecutor())
                        .setExecutionSlotAllocatorFactory(
                                createExecutionSlotAllocatorFactory(slotPool))
                        .setRestartBackoffTimeStrategy(
                                new TestRestartBackoffTimeStrategy(true, Long.MAX_VALUE))
                        .setDelayExecutor(taskRestartExecutor)
                        .build();
        ExecutionGraph executionGraph = scheduler.getExecutionGraph();

        startScheduling(scheduler);
        offerSlots(slotPool, 2);

        Iterator<ExecutionVertex> vertexIterator =
                executionGraph.getAllExecutionVertices().iterator();
        Execution finishedExecution = vertexIterator.next().getCurrentExecutionAttempt();
        Execution failedExecution = vertexIterator.next().getCurrentExecutionAttempt();

        // One execution finishes, the other one fails and triggers a restart.
        finishedExecution.markFinished();

        failedExecution.fail(new Exception("Test Exception"));
        failedExecution.completeCancelling();

        taskRestartExecutor.triggerScheduledTasks();

        assertThat(executionGraph.getState()).isEqualTo(JobStatus.RUNNING);

        // After the restart all vertices have resources assigned again.
        for (ExecutionVertex vertex : executionGraph.getAllExecutionVertices()) {
            assertThat(vertex.getCurrentAssignedResource()).isNotNull();
            vertex.getCurrentExecutionAttempt().switchToInitializing();
            vertex.getCurrentExecutionAttempt().switchToRunning();
        }

        // Failing the old, already finished execution must not affect the restarted job.
        finishedExecution.fail(new Exception("This should have no effect"));

        for (ExecutionVertex vertex : executionGraph.getAllExecutionVertices()) {
            vertex.getCurrentExecutionAttempt().markFinished();
        }

        // FINISHED is terminal, so the old execution's state must be unchanged.
        assertThat(finishedExecution.getState()).isEqualTo(ExecutionState.FINISHED);
        assertThat(executionGraph.getState()).isEqualTo(JobStatus.FINISHED);
    }
}
Tests that a failing execution does not affect a restarted job. This is important if a callback handler fails an execution after it has already reached a final state and the job has been restarted.
testFailingExecutionAfterRestart
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphRestartTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphRestartTest.java
Apache-2.0
@Test void testSuspendedOutOfCreated() throws Exception { final InteractionsCountingTaskManagerGateway gateway = new InteractionsCountingTaskManagerGateway(); final int parallelism = 10; final SchedulerBase scheduler = createScheduler(gateway, parallelism); final ExecutionGraph eg = scheduler.getExecutionGraph(); assertThat(eg.getState()).isEqualTo(JobStatus.CREATED); // suspend scheduler.closeAsync(); assertThat(eg.getState()).isEqualTo(JobStatus.SUSPENDED); validateAllVerticesInState(eg, ExecutionState.CANCELED); validateCancelRpcCalls(gateway, 0); ensureCannotLeaveSuspendedState(scheduler, gateway); }
Going into SUSPENDED out of CREATED should immediately cancel everything and not send out RPC calls.
testSuspendedOutOfCreated
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java
Apache-2.0
@Test void testSuspendedOutOfDeploying() throws Exception { final int parallelism = 10; final InteractionsCountingTaskManagerGateway gateway = new InteractionsCountingTaskManagerGateway(parallelism); final SchedulerBase scheduler = createScheduler(gateway, parallelism); final ExecutionGraph eg = scheduler.getExecutionGraph(); scheduler.startScheduling(); assertThat(eg.getState()).isEqualTo(JobStatus.RUNNING); validateAllVerticesInState(eg, ExecutionState.DEPLOYING); // suspend scheduler.closeAsync(); assertThat(eg.getState()).isEqualTo(JobStatus.SUSPENDED); validateCancelRpcCalls(gateway, parallelism); ensureCannotLeaveSuspendedState(scheduler, gateway); }
Going into SUSPENDED out of DEPLOYING vertices should cancel all vertices once with RPC calls.
testSuspendedOutOfDeploying
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java
Apache-2.0
@Test void testSuspendedOutOfRunning() throws Exception { final int parallelism = 10; final InteractionsCountingTaskManagerGateway gateway = new InteractionsCountingTaskManagerGateway(parallelism); final SchedulerBase scheduler = createScheduler(gateway, parallelism); final ExecutionGraph eg = scheduler.getExecutionGraph(); scheduler.startScheduling(); ExecutionGraphTestUtils.switchAllVerticesToRunning(eg); assertThat(eg.getState()).isEqualTo(JobStatus.RUNNING); validateAllVerticesInState(eg, ExecutionState.RUNNING); // suspend scheduler.closeAsync(); assertThat(eg.getState()).isEqualTo(JobStatus.SUSPENDED); validateCancelRpcCalls(gateway, parallelism); ensureCannotLeaveSuspendedState(scheduler, gateway); }
Going into SUSPENDED out of RUNNING vertices should cancel all vertices once with RPC calls.
testSuspendedOutOfRunning
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java
Apache-2.0
@Test void testSuspendedOutOfFailing() throws Exception { final int parallelism = 10; final InteractionsCountingTaskManagerGateway gateway = new InteractionsCountingTaskManagerGateway(parallelism); final SchedulerBase scheduler = createScheduler(gateway, parallelism); final ExecutionGraph eg = scheduler.getExecutionGraph(); scheduler.startScheduling(); ExecutionGraphTestUtils.switchAllVerticesToRunning(eg); scheduler.handleGlobalFailure(new Exception("fail global")); assertThat(eg.getState()).isEqualTo(JobStatus.FAILING); validateCancelRpcCalls(gateway, parallelism); // suspend scheduler.closeAsync(); assertThat(eg.getState()).isEqualTo(JobStatus.SUSPENDED); ensureCannotLeaveSuspendedState(scheduler, gateway); }
Suspending from FAILING goes to SUSPENDED and sends no additional RPC calls.
testSuspendedOutOfFailing
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java
Apache-2.0
@Test void testSuspendedOutOfFailed() throws Exception { final InteractionsCountingTaskManagerGateway gateway = new InteractionsCountingTaskManagerGateway(); final int parallelism = 10; final SchedulerBase scheduler = createScheduler(gateway, parallelism); final ExecutionGraph eg = scheduler.getExecutionGraph(); scheduler.startScheduling(); ExecutionGraphTestUtils.switchAllVerticesToRunning(eg); scheduler.handleGlobalFailure(new Exception("fail global")); assertThat(eg.getState()).isEqualTo(JobStatus.FAILING); validateCancelRpcCalls(gateway, parallelism); ExecutionGraphTestUtils.completeCancellingForAllVertices(eg); assertThat(eg.getState()).isEqualTo(JobStatus.FAILED); // suspend scheduler.closeAsync(); // still in failed state assertThat(eg.getState()).isEqualTo(JobStatus.FAILED); validateCancelRpcCalls(gateway, parallelism); }
Suspending from FAILED should do nothing.
testSuspendedOutOfFailed
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java
Apache-2.0
@Test void testSuspendedOutOfCanceling() throws Exception { final int parallelism = 10; final InteractionsCountingTaskManagerGateway gateway = new InteractionsCountingTaskManagerGateway(parallelism); final SchedulerBase scheduler = createScheduler(gateway, parallelism); final ExecutionGraph eg = scheduler.getExecutionGraph(); scheduler.startScheduling(); ExecutionGraphTestUtils.switchAllVerticesToRunning(eg); scheduler.cancel(); assertThat(eg.getState()).isEqualTo(JobStatus.CANCELLING); validateCancelRpcCalls(gateway, parallelism); // suspend scheduler.closeAsync(); assertThat(eg.getState()).isEqualTo(JobStatus.SUSPENDED); ensureCannotLeaveSuspendedState(scheduler, gateway); }
Suspending from CANCELING goes to SUSPENDED and sends no additional RPC calls.
testSuspendedOutOfCanceling
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java
Apache-2.0
@Test void testSuspendedOutOfCanceled() throws Exception { final InteractionsCountingTaskManagerGateway gateway = new InteractionsCountingTaskManagerGateway(); final int parallelism = 10; final SchedulerBase scheduler = createScheduler(gateway, parallelism); final ExecutionGraph eg = scheduler.getExecutionGraph(); scheduler.startScheduling(); ExecutionGraphTestUtils.switchAllVerticesToRunning(eg); scheduler.cancel(); assertThat(eg.getState()).isEqualTo(JobStatus.CANCELLING); validateCancelRpcCalls(gateway, parallelism); ExecutionGraphTestUtils.completeCancellingForAllVertices(eg); FlinkAssertions.assertThatFuture(eg.getTerminationFuture()) .eventuallySucceeds() .isEqualTo(JobStatus.CANCELED); // suspend scheduler.closeAsync(); // still in failed state assertThat(eg.getState()).isEqualTo(JobStatus.CANCELED); validateCancelRpcCalls(gateway, parallelism); }
Suspending from CANCELED should do nothing.
testSuspendedOutOfCanceled
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java
Apache-2.0
public static void waitUntilJobStatus(ExecutionGraph eg, JobStatus status, long maxWaitMillis) throws TimeoutException { checkNotNull(eg); checkNotNull(status); checkArgument(maxWaitMillis >= 0); // this is a poor implementation - we may want to improve it eventually final long deadline = maxWaitMillis == 0 ? Long.MAX_VALUE : System.nanoTime() + (maxWaitMillis * 1_000_000); while (eg.getState() != status && System.nanoTime() < deadline) { try { Thread.sleep(2); } catch (InterruptedException ignored) { } } if (System.nanoTime() >= deadline) { throw new TimeoutException( String.format( "The job did not reach status %s in time. Current status is %s.", status, eg.getState())); } }
Waits until the Job has reached a certain state. <p>This method is based on polling and might miss very fast state transitions!
waitUntilJobStatus
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java
Apache-2.0
public static void waitUntilExecutionState( Execution execution, ExecutionState state, long maxWaitMillis) throws TimeoutException { checkNotNull(execution); checkNotNull(state); checkArgument(maxWaitMillis >= 0); // this is a poor implementation - we may want to improve it eventually final long deadline = maxWaitMillis == 0 ? Long.MAX_VALUE : System.nanoTime() + (maxWaitMillis * 1_000_000); while (execution.getState() != state && System.nanoTime() < deadline) { try { Thread.sleep(2); } catch (InterruptedException ignored) { } } if (System.nanoTime() >= deadline) { throw new TimeoutException( String.format( "The execution did not reach state %s in time. Current state is %s.", state, execution.getState())); } }
Waits until the Execution has reached a certain state. <p>This method is based on polling and might miss very fast state transitions!
waitUntilExecutionState
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java
Apache-2.0
public static void waitUntilExecutionVertexState( ExecutionVertex executionVertex, ExecutionState state, long maxWaitMillis) throws TimeoutException { checkNotNull(executionVertex); checkNotNull(state); checkArgument(maxWaitMillis >= 0); // this is a poor implementation - we may want to improve it eventually final long deadline = maxWaitMillis == 0 ? Long.MAX_VALUE : System.nanoTime() + (maxWaitMillis * 1_000_000); while (true) { Execution execution = executionVertex.getCurrentExecutionAttempt(); if (execution == null || (execution.getState() != state && System.nanoTime() < deadline)) { try { Thread.sleep(2); } catch (InterruptedException ignored) { } } else { break; } if (System.nanoTime() >= deadline) { if (execution != null) { throw new TimeoutException( String.format( "The execution vertex did not reach state %s in time. Current state is %s.", state, execution.getState())); } else { throw new TimeoutException( "Cannot get current execution attempt of " + executionVertex + '.'); } } } }
Waits until the ExecutionVertex has reached a certain state. <p>This method is based on polling and might miss very fast state transitions!
waitUntilExecutionVertexState
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java
Apache-2.0
/**
 * Switches the current execution attempt of every vertex in the given {@link ExecutionGraph}
 * to INITIALIZING.
 */
public static void switchAllVerticesToInitializing(ExecutionGraph eg) {
    for (ExecutionVertex executionVertex : eg.getAllExecutionVertices()) {
        Execution attempt = executionVertex.getCurrentExecutionAttempt();
        attempt.switchToInitializing();
    }
}
Takes all vertices in the given ExecutionGraph and switches their current execution to INITIALIZING.
switchAllVerticesToInitializing
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java
Apache-2.0
/**
 * Switches the current execution attempt of every vertex in the given {@link ExecutionGraph}
 * to RUNNING (passing through INITIALIZING first, as required by the state machine).
 */
public static void switchAllVerticesToRunning(ExecutionGraph eg) {
    for (ExecutionVertex executionVertex : eg.getAllExecutionVertices()) {
        Execution attempt = executionVertex.getCurrentExecutionAttempt();
        attempt.switchToInitializing();
        attempt.switchToRunning();
    }
}
Takes all vertices in the given ExecutionGraph and switches their current execution to RUNNING.
switchAllVerticesToRunning
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java
Apache-2.0
/**
 * Switches the current execution attempt of every vertex in the given {@link ExecutionGraph}
 * to FINISHED.
 */
public static void finishAllVertices(ExecutionGraph eg) {
    for (ExecutionVertex executionVertex : eg.getAllExecutionVertices()) {
        Execution attempt = executionVertex.getCurrentExecutionAttempt();
        attempt.markFinished();
    }
}
Takes all vertices in the given ExecutionGraph and switches their current execution to FINISHED.
finishAllVertices
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java
Apache-2.0
static void verifyGeneratedExecutionJobVertex( ExecutionGraph executionGraph, JobVertex originJobVertex, @Nullable List<JobVertex> inputJobVertices, @Nullable List<JobVertex> outputJobVertices) { ExecutionJobVertex ejv = executionGraph.getAllVertices().get(originJobVertex.getID()); assertThat(ejv).isNotNull(); // verify basic properties assertThat(originJobVertex.getParallelism()).isEqualTo(ejv.getParallelism()); assertThat(executionGraph.getJobID()).isEqualTo(ejv.getJobId()); assertThat(originJobVertex.getID()).isEqualTo(ejv.getJobVertexId()); assertThat(originJobVertex).isEqualTo(ejv.getJobVertex()); // verify produced data sets if (outputJobVertices == null) { assertThat(ejv.getProducedDataSets()).isEmpty(); } else { assertThat(outputJobVertices).hasSize(ejv.getProducedDataSets().length); for (int i = 0; i < outputJobVertices.size(); i++) { assertThat(originJobVertex.getProducedDataSets().get(i).getId()) .isEqualTo(ejv.getProducedDataSets()[i].getId()); assertThat(originJobVertex.getParallelism()) .isEqualTo(ejv.getProducedDataSets()[0].getPartitions().length); } } // verify task vertices for their basic properties and their inputs assertThat(originJobVertex.getParallelism()).isEqualTo(ejv.getTaskVertices().length); int subtaskIndex = 0; for (ExecutionVertex ev : ejv.getTaskVertices()) { assertThat(executionGraph.getJobID()).isEqualTo(ev.getJobId()); assertThat(originJobVertex.getID()).isEqualTo(ev.getJobvertexId()); assertThat(originJobVertex.getParallelism()) .isEqualTo(ev.getTotalNumberOfParallelSubtasks()); assertThat(subtaskIndex).isEqualTo(ev.getParallelSubtaskIndex()); if (inputJobVertices == null) { assertThat(ev.getNumberOfInputs()).isZero(); } else { assertThat(inputJobVertices).hasSize(ev.getNumberOfInputs()); for (int i = 0; i < inputJobVertices.size(); i++) { ConsumedPartitionGroup consumedPartitionGroup = ev.getConsumedPartitionGroup(i); assertThat(inputJobVertices.get(i).getParallelism()) .isEqualTo(consumedPartitionGroup.size()); int 
expectedPartitionNum = 0; for (IntermediateResultPartitionID consumedPartitionId : consumedPartitionGroup) { assertThat(consumedPartitionId.getPartitionNumber()) .isEqualTo(expectedPartitionNum); expectedPartitionNum++; } } } subtaskIndex++; } }
Verifies the generated {@link ExecutionJobVertex} for a given {@link JobVertex} in a {@link ExecutionGraph}. @param executionGraph the generated execution graph @param originJobVertex the vertex to verify for @param inputJobVertices upstream vertices of the verified vertex, used to check inputs of generated vertex @param outputJobVertices downstream vertices of the verified vertex, used to check produced data sets of generated vertex
verifyGeneratedExecutionJobVertex
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphTestUtils.java
Apache-2.0
/**
 * Verifies the connections between upstream result partitions and downstream vertices.
 *
 * @param sourceParallelism parallelism of the upstream vertex
 * @param targetParallelism parallelism of the downstream vertex
 * @param expectedConsumedPartitionNumber for each downstream subtask, the partition numbers
 *     it is expected to consume, in order
 */
private void testConnections(
        int sourceParallelism, int targetParallelism, int[][] expectedConsumedPartitionNumber)
        throws Exception {

    ExecutionJobVertex downstream =
            setUpExecutionGraphAndGetDownstreamVertex(sourceParallelism, targetParallelism);

    for (int subtask = 0; subtask < downstream.getTaskVertices().length; subtask++) {
        ExecutionVertex executionVertex = downstream.getTaskVertices()[subtask];
        ConsumedPartitionGroup consumedGroup = executionVertex.getConsumedPartitionGroup(0);

        assertThat(expectedConsumedPartitionNumber[subtask].length)
                .isEqualTo(consumedGroup.size());

        // the group must contain exactly the expected partition numbers, in order
        int position = 0;
        for (IntermediateResultPartitionID partitionId : consumedGroup) {
            assertThat(expectedConsumedPartitionNumber[subtask][position++])
                    .isEqualTo(partitionId.getPartitionNumber());
        }
    }
}
Verify the connections between upstream result partitions and downstream vertices.
testConnections
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/PointwisePatternTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/PointwisePatternTest.java
Apache-2.0
/**
 * Runs the given supplier in the main thread executor and blocks until it completes,
 * returning the result (or rethrowing any failure via {@code join}).
 */
public <U> U execute(@Nonnull SupplierWithException<U, Throwable> supplierWithException) {
    final CompletableFuture<U> resultFuture =
            CompletableFuture.supplyAsync(
                    FunctionUtils.uncheckedSupplier(supplierWithException), mainThreadExecutor);
    return resultFuture.join();
}
Executes the given supplier with the main thread executor until completion, returning the result or rethrowing an exception. This method blocks until the execution is complete.
execute
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/TestingComponentMainThreadExecutor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/TestingComponentMainThreadExecutor.java
Apache-2.0
@Test void testAssignSlotSharingGroup() { try { JobVertex v1 = new JobVertex("v1"); JobVertex v2 = new JobVertex("v2"); JobVertex v3 = new JobVertex("v3"); JobVertex v4 = new JobVertex("v4"); JobVertex v5 = new JobVertex("v5"); v1.setParallelism(4); v2.setParallelism(5); v3.setParallelism(7); v4.setParallelism(1); v5.setParallelism(11); v1.setInvokableClass(AbstractInvokable.class); v2.setInvokableClass(AbstractInvokable.class); v3.setInvokableClass(AbstractInvokable.class); v4.setInvokableClass(AbstractInvokable.class); v5.setInvokableClass(AbstractInvokable.class); connectNewDataSetAsInput( v2, v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED); connectNewDataSetAsInput( v5, v4, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED); SlotSharingGroup jg1 = new SlotSharingGroup(); v2.setSlotSharingGroup(jg1); v3.setSlotSharingGroup(jg1); SlotSharingGroup jg2 = new SlotSharingGroup(); v4.setSlotSharingGroup(jg2); v5.setSlotSharingGroup(jg2); List<JobVertex> vertices = new ArrayList<>(Arrays.asList(v1, v2, v3, v4, v5)); ExecutionGraph eg = TestingDefaultExecutionGraphBuilder.newBuilder() .setVertexParallelismStore( SchedulerBase.computeVertexParallelismStore(vertices)) .build(EXECUTOR_RESOURCE.getExecutor()); eg.attachJobGraph( vertices, UnregisteredMetricGroups.createUnregisteredJobManagerJobMetricGroup()); // verify that the vertices are all in the same slot sharing group SlotSharingGroup group1; SlotSharingGroup group2; // verify that v1 tasks are not in the same slot sharing group as v2 assertThat(eg.getJobVertex(v1.getID()).getSlotSharingGroup()) .isNotEqualTo(eg.getJobVertex(v2.getID()).getSlotSharingGroup()); // v2 and v3 are shared group1 = eg.getJobVertex(v2.getID()).getSlotSharingGroup(); assertThat(group1).isNotNull(); assertThat(eg.getJobVertex(v3.getID()).getSlotSharingGroup()).isEqualTo(group1); assertThat(group1.getJobVertexIds()).hasSize(2); assertThat(group1.getJobVertexIds().contains(v2.getID())).isTrue(); 
assertThat(group1.getJobVertexIds().contains(v3.getID())).isTrue(); // v4 and v5 are shared group2 = eg.getJobVertex(v4.getID()).getSlotSharingGroup(); assertThat(group2).isNotNull(); assertThat(eg.getJobVertex(v5.getID()).getSlotSharingGroup()).isEqualTo(group2); assertThat(group1.getJobVertexIds()).hasSize(2); assertThat(group2.getJobVertexIds().contains(v4.getID())).isTrue(); assertThat(group2.getJobVertexIds().contains(v5.getID())).isTrue(); } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); } }
Test setup: - v1 is isolated, no slot sharing. - v2 and v3 (not connected) share slots. - v4 and v5 (connected) share slots.
testAssignSlotSharingGroup
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/VertexSlotSharingTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/VertexSlotSharingTest.java
Apache-2.0
@Test void testNormalFailureHandling() throws Exception { final Set<ExecutionVertexID> tasksToRestart = Collections.singleton(new ExecutionVertexID(new JobVertexID(), 0)); failoverStrategy.setTasksToRestart(tasksToRestart); Execution execution = FailureHandlingResultTest.createExecution(EXECUTOR_RESOURCE.getExecutor()); Exception cause = new Exception("test failure"); long timestamp = System.currentTimeMillis(); // trigger a task failure final FailureHandlingResult result = executionFailureHandler.getFailureHandlingResult(execution, cause, timestamp); // verify results assertThat(result.canRestart()).isTrue(); assertThat(result.getFailedExecution()).isPresent(); assertThat(result.getFailedExecution().get()).isSameAs(execution); assertThat(result.getRestartDelayMS()).isEqualTo(RESTART_DELAY_MS); assertThat(result.getVerticesToRestart()).isEqualTo(tasksToRestart); assertThat(result.getError()).isSameAs(cause); assertThat(result.getTimestamp()).isEqualTo(timestamp); assertThat(testingFailureEnricher.getSeenThrowables()).containsExactly(cause); assertThat(result.getFailureLabels().get()) .isEqualTo(testingFailureEnricher.getFailureLabels()); assertThat(executionFailureHandler.getNumberOfRestarts()).isOne(); checkMetrics(spanCollector, false, true); }
Tests the case that task restarting is accepted.
testNormalFailureHandling
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/ExecutionFailureHandlerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/ExecutionFailureHandlerTest.java
Apache-2.0
@Test void testRestartingSuppressedFailureHandlingResult() throws Exception { // restart strategy suppresses restarting backoffTimeStrategy.setCanRestart(false); // trigger a task failure Execution execution = FailureHandlingResultTest.createExecution(EXECUTOR_RESOURCE.getExecutor()); final Throwable error = new Exception("expected test failure"); final long timestamp = System.currentTimeMillis(); final FailureHandlingResult result = executionFailureHandler.getFailureHandlingResult(execution, error, timestamp); // verify results assertThat(result.canRestart()).isFalse(); assertThat(result.getFailedExecution()).isPresent(); assertThat(result.getFailedExecution().get()).isSameAs(execution); assertThat(result.getError()).hasCause(error); assertThat(result.getTimestamp()).isEqualTo(timestamp); assertThat(testingFailureEnricher.getSeenThrowables()).containsExactly(error); assertThat(result.getFailureLabels().get()) .isEqualTo(testingFailureEnricher.getFailureLabels()); assertThat(ExecutionFailureHandler.isUnrecoverableError(result.getError())).isFalse(); assertThatThrownBy(result::getVerticesToRestart) .as("getVerticesToRestart is not allowed when restarting is suppressed") .isInstanceOf(IllegalStateException.class); assertThatThrownBy(result::getRestartDelayMS) .as("getRestartDelayMS is not allowed when restarting is suppressed") .isInstanceOf(IllegalStateException.class); assertThat(executionFailureHandler.getNumberOfRestarts()).isZero(); checkMetrics(spanCollector, false, false); }
Tests the case that task restarting is suppressed.
testRestartingSuppressedFailureHandlingResult
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/ExecutionFailureHandlerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/ExecutionFailureHandlerTest.java
Apache-2.0
/**
 * Tests the scenario where a task fails for its own error, in which case the region
 * containing the failed task and its consumer regions should be restarted.
 *
 * <pre>
 *     (v1) -+-> (v4)
 *           x
 *     (v2) -+-> (v5)
 *
 *     (v3) -+-> (v6)
 *
 *           ^
 *           |
 *       (blocking)
 * </pre>
 *
 * <p>Each vertex is in an individual region.
 */
@Test
void testRegionFailoverForRegionInternalErrors() {
    final TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex(ExecutionState.FINISHED);
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex(ExecutionState.FINISHED);
    TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex(ExecutionState.FINISHED);
    TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex(ExecutionState.FINISHED);
    TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex(ExecutionState.SCHEDULED);
    TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex(ExecutionState.RUNNING);

    // all edges are BLOCKING, so every vertex forms its own pipelined region
    topology.connect(v1, v4, ResultPartitionType.BLOCKING);
    topology.connect(v1, v5, ResultPartitionType.BLOCKING);
    topology.connect(v2, v4, ResultPartitionType.BLOCKING);
    topology.connect(v2, v5, ResultPartitionType.BLOCKING);
    topology.connect(v3, v6, ResultPartitionType.BLOCKING);

    RestartPipelinedRegionFailoverStrategy strategy =
            new RestartPipelinedRegionFailoverStrategy(topology);

    // a failed producer restarts itself plus all downstream consumer regions
    verifyThatFailedExecution(strategy, v1).restarts(v1, v4, v5);
    verifyThatFailedExecution(strategy, v2).restarts(v2, v4, v5);
    verifyThatFailedExecution(strategy, v3).restarts(v3, v6);

    // a failed consumer only restarts its own region
    verifyThatFailedExecution(strategy, v4).restarts(v4);
    verifyThatFailedExecution(strategy, v5).restarts(v5);
    verifyThatFailedExecution(strategy, v6).restarts(v6);
}
Tests for scenes that a task fails for its own error, in which case the region containing the failed task and its consumer regions should be restarted. <pre> (v1) -+-> (v4) x (v2) -+-> (v5) (v3) -+-> (v6) ^ | (blocking) </pre> Each vertex is in an individual region.
testRegionFailoverForRegionInternalErrors
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategyTest.java
Apache-2.0
@Test
void testRegionFailoverForDataConsumptionErrors() throws Exception {
    // Every vertex forms its own region; all connections are BLOCKING.
    TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex(ExecutionState.FINISHED);
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex(ExecutionState.FINISHED);
    TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex(ExecutionState.FINISHED);
    TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex(ExecutionState.RUNNING);
    TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex(ExecutionState.RUNNING);
    TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex(ExecutionState.RUNNING);

    topology.connect(v1, v4, ResultPartitionType.BLOCKING)
            .connect(v1, v5, ResultPartitionType.BLOCKING)
            .connect(v2, v4, ResultPartitionType.BLOCKING)
            .connect(v2, v5, ResultPartitionType.BLOCKING)
            .connect(v3, v6, ResultPartitionType.BLOCKING);

    RestartPipelinedRegionFailoverStrategy failoverStrategy =
            new RestartPipelinedRegionFailoverStrategy(topology);

    // v4 fails to consume each of its two inputs in turn: the producer's region
    // is restarted together with the failed region and all their consumers.
    Iterator<TestingSchedulingResultPartition> v4Inputs = v4.getConsumedResults().iterator();
    verifyThatFailedExecution(failoverStrategy, v4)
            .partitionConnectionCause(v4Inputs.next()) // v1's result partition
            .restarts(v1, v4, v5);
    verifyThatFailedExecution(failoverStrategy, v4)
            .partitionConnectionCause(v4Inputs.next()) // v2's result partition
            .restarts(v2, v4, v5);

    // Same pattern for v5's two inputs.
    Iterator<TestingSchedulingResultPartition> v5Inputs = v5.getConsumedResults().iterator();
    verifyThatFailedExecution(failoverStrategy, v5)
            .partitionConnectionCause(v5Inputs.next()) // v1's result partition
            .restarts(v1, v4, v5);
    verifyThatFailedExecution(failoverStrategy, v5)
            .partitionConnectionCause(v5Inputs.next()) // v2's result partition
            .restarts(v2, v4, v5);

    // v6 has a single input, produced by v3.
    verifyThatFailedExecution(failoverStrategy, v6)
            .partitionConnectionCause(v6.getConsumedResults().iterator().next())
            .restarts(v3, v6);
}
Tests scenarios where a task fails due to a data consumption error, in which case the region containing the failed task, the region containing the unavailable result partition, and all their consumer regions should be restarted. <pre> (v1) -+-> (v4) x (v2) -+-> (v5) (v3) -+-> (v6) ^ | (blocking) </pre> Each vertex is in an individual region.
testRegionFailoverForDataConsumptionErrors
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategyTest.java
Apache-2.0
@Test void testRegionFailoverForVariousResultPartitionAvailabilityCombinations() throws Exception { TestingSchedulingTopology topology = new TestingSchedulingTopology(); TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex(ExecutionState.FINISHED); TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex(ExecutionState.FINISHED); TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex(ExecutionState.RUNNING); topology.connect(v1, v3, ResultPartitionType.BLOCKING); topology.connect(v2, v3, ResultPartitionType.BLOCKING); TestResultPartitionAvailabilityChecker availabilityChecker = new TestResultPartitionAvailabilityChecker(); RestartPipelinedRegionFailoverStrategy strategy = new RestartPipelinedRegionFailoverStrategy(topology, availabilityChecker); IntermediateResultPartitionID rp1ID = v1.getProducedResults().iterator().next().getId(); IntermediateResultPartitionID rp2ID = v2.getProducedResults().iterator().next().getId(); // ------------------------------------------------- // Combination1: (rp1 == available, rp2 == available) // ------------------------------------------------- availabilityChecker.failedPartitions.clear(); verifyThatFailedExecution(strategy, v1).restarts(v1, v3); verifyThatFailedExecution(strategy, v2).restarts(v2, v3); verifyThatFailedExecution(strategy, v3).restarts(v3); // ------------------------------------------------- // Combination2: (rp1 == unavailable, rp2 == available) // ------------------------------------------------- availabilityChecker.failedPartitions.clear(); availabilityChecker.markResultPartitionFailed(rp1ID); verifyThatFailedExecution(strategy, v1).restarts(v1, v3); verifyThatFailedExecution(strategy, v2).restarts(v1, v2, v3); verifyThatFailedExecution(strategy, v3).restarts(v1, v3); // ------------------------------------------------- // Combination3: (rp1 == available, rp2 == unavailable) // ------------------------------------------------- availabilityChecker.failedPartitions.clear(); 
availabilityChecker.markResultPartitionFailed(rp2ID); verifyThatFailedExecution(strategy, v1).restarts(v1, v2, v3); verifyThatFailedExecution(strategy, v2).restarts(v2, v3); verifyThatFailedExecution(strategy, v3).restarts(v2, v3); // ------------------------------------------------- // Combination4: (rp1 == unavailable, rp == unavailable) // ------------------------------------------------- availabilityChecker.failedPartitions.clear(); availabilityChecker.markResultPartitionFailed(rp1ID); availabilityChecker.markResultPartitionFailed(rp2ID); verifyThatFailedExecution(strategy, v1).restarts(v1, v2, v3); verifyThatFailedExecution(strategy, v2).restarts(v1, v2, v3); verifyThatFailedExecution(strategy, v3).restarts(v1, v2, v3); }
Tests to verify region failover results regarding different input result partition availability combinations. <pre> (v1) --rp1--\ (v3) (v2) --rp2--/ ^ | (blocking) </pre> Each vertex is in an individual region. rp1, rp2 are result partitions.
testRegionFailoverForVariousResultPartitionAvailabilityCombinations
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategyTest.java
Apache-2.0
@Test
void testRegionFailoverForMultipleVerticesRegions() throws Exception {
    // Chain of three two-vertex pipelined regions split by BLOCKING edges:
    // {v1,v2} --|--> {v3,v4} --|--> {v5,v6}
    TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex(ExecutionState.FINISHED);
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex(ExecutionState.FINISHED);
    TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex(ExecutionState.RUNNING);
    TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex(ExecutionState.RUNNING);
    TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex(ExecutionState.FAILED);
    TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex(ExecutionState.CANCELED);

    topology.connect(v1, v2, ResultPartitionType.PIPELINED)
            .connect(v2, v3, ResultPartitionType.BLOCKING)
            .connect(v3, v4, ResultPartitionType.PIPELINED)
            .connect(v4, v5, ResultPartitionType.BLOCKING)
            .connect(v5, v6, ResultPartitionType.PIPELINED);

    RestartPipelinedRegionFailoverStrategy failoverStrategy =
            new RestartPipelinedRegionFailoverStrategy(topology);

    // An internal failure in the middle region restarts it and everything downstream.
    verifyThatFailedExecution(failoverStrategy, v3).restarts(v3, v4, v5, v6);

    // A consumption failure additionally pulls in the upstream producer region.
    TestingSchedulingResultPartition v2out = v3.getConsumedResults().iterator().next();
    verifyThatFailedExecution(failoverStrategy, v3)
            .partitionConnectionCause(v2out)
            .restarts(v1, v2, v3, v4, v5, v6);
}
Tests region failover scenarios for a topology with multiple vertices. <pre> (v1) ---> (v2) --|--> (v3) ---> (v4) --|--> (v5) ---> (v6) ^ ^ ^ ^ ^ | | | | | (pipelined) (blocking) (pipelined) (blocking) (pipelined) </pre> Component 1: 1,2; component 2: 3,4; component 3: 5,6
testRegionFailoverForMultipleVerticesRegions
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategyTest.java
Apache-2.0
@Test
void testRegionFailoverDoesNotRestartCreatedExecutions() {
    // Both vertices are still in the initial CREATED state, so the strategy
    // must propose no restarts at all.
    TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex(ExecutionState.CREATED);
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex(ExecutionState.CREATED);

    topology.connect(v1, v2, ResultPartitionType.BLOCKING);

    FailoverStrategy failoverStrategy = new RestartPipelinedRegionFailoverStrategy(topology);

    // Neither a plain failure ...
    verifyThatFailedExecution(failoverStrategy, v2).restarts();

    // ... nor a partition-connection failure triggers a restart of CREATED vertices.
    TestingSchedulingResultPartition v1out = v2.getConsumedResults().iterator().next();
    verifyThatFailedExecution(failoverStrategy, v2).partitionConnectionCause(v1out).restarts();
}
Tests that region failover does not restart vertices that are still in the initial CREATED state. <pre> (v1) --|--> (v2) ^ | (blocking) </pre> Component 1: 1; component 2: 2
testRegionFailoverDoesNotRestartCreatedExecutions
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategyTest.java
Apache-2.0
@Test
void testRegionFailoverForPipelinedApproximate() {
    // Diamond of PIPELINED_APPROXIMATE edges: a failure restarts only the
    // failed vertex and its transitive downstream vertices.
    final TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex(ExecutionState.RUNNING);
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex(ExecutionState.RUNNING);
    TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex(ExecutionState.RUNNING);
    TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex(ExecutionState.RUNNING);

    topology.connect(v1, v2, ResultPartitionType.PIPELINED_APPROXIMATE)
            .connect(v1, v3, ResultPartitionType.PIPELINED_APPROXIMATE)
            .connect(v2, v4, ResultPartitionType.PIPELINED_APPROXIMATE)
            .connect(v3, v4, ResultPartitionType.PIPELINED_APPROXIMATE);

    RestartPipelinedRegionFailoverStrategy failoverStrategy =
            new RestartPipelinedRegionFailoverStrategy(topology);

    verifyThatFailedExecution(failoverStrategy, v1).restarts(v1, v2, v3, v4);
    verifyThatFailedExecution(failoverStrategy, v2).restarts(v2, v4);
    verifyThatFailedExecution(failoverStrategy, v3).restarts(v3, v4);
    verifyThatFailedExecution(failoverStrategy, v4).restarts(v4);
}
Tests approximate local recovery downstream failover. <pre> (v1) -----> (v2) -----> (v4) | ^ |--------> (v3) --------| </pre>
testRegionFailoverForPipelinedApproximate
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategyTest.java
Apache-2.0
@Test
void testIndividualVertices() {
    // Three unconnected vertices: each must land in its own pipelined region.
    TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex();

    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> regions =
            computePipelinedRegionByVertex(topology);

    assertDistinctRegions(
            regions.get(v1.getId()), regions.get(v2.getId()), regions.get(v3.getId()));
}
Tests that validates that a graph with single unconnected vertices works correctly. <pre> (v1) (v2) (v3) </pre>
testIndividualVertices
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
Apache-2.0
@Test
void testEmbarrassinglyParallelCase() {
    // Three independent pipelined chains a->b: each chain is one region.
    TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex va1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex va2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex va3 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb3 = topology.newExecutionVertex();

    topology.connect(va1, vb1, ResultPartitionType.PIPELINED);
    topology.connect(va2, vb2, ResultPartitionType.PIPELINED);
    topology.connect(va3, vb3, ResultPartitionType.PIPELINED);

    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> regions =
            computePipelinedRegionByVertex(topology);

    Set<SchedulingExecutionVertex> ra1 = regions.get(va1.getId());
    Set<SchedulingExecutionVertex> ra2 = regions.get(va2.getId());
    Set<SchedulingExecutionVertex> ra3 = regions.get(va3.getId());
    Set<SchedulingExecutionVertex> rb1 = regions.get(vb1.getId());
    Set<SchedulingExecutionVertex> rb2 = regions.get(vb2.getId());
    Set<SchedulingExecutionVertex> rb3 = regions.get(vb3.getId());

    // Each producer shares a region with its own consumer only.
    assertSameRegion(ra1, rb1);
    assertSameRegion(ra2, rb2);
    assertSameRegion(ra3, rb3);
    assertDistinctRegions(ra1, ra2, ra3);
}
Tests that validates that embarrassingly parallel chains of vertices work correctly. <pre> (a1) --> (b1) (a2) --> (b2) (a3) --> (b3) </pre>
testEmbarrassinglyParallelCase
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
Apache-2.0
@Test
void testOneComponentViaTwoExchanges() {
    // Two all-to-all PIPELINED exchanges fuse all six vertices into one region.
    TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex va1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex va2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vc1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vc2 = topology.newExecutionVertex();

    // First all-to-all exchange: a -> b.
    topology.connect(va1, vb1, ResultPartitionType.PIPELINED);
    topology.connect(va1, vb2, ResultPartitionType.PIPELINED);
    topology.connect(va2, vb1, ResultPartitionType.PIPELINED);
    topology.connect(va2, vb2, ResultPartitionType.PIPELINED);
    // Second all-to-all exchange: b -> c.
    topology.connect(vb1, vc1, ResultPartitionType.PIPELINED);
    topology.connect(vb1, vc2, ResultPartitionType.PIPELINED);
    topology.connect(vb2, vc1, ResultPartitionType.PIPELINED);
    topology.connect(vb2, vc2, ResultPartitionType.PIPELINED);

    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> regions =
            computePipelinedRegionByVertex(topology);

    assertSameRegion(
            regions.get(va1.getId()),
            regions.get(va2.getId()),
            regions.get(vb1.getId()),
            regions.get(vb2.getId()),
            regions.get(vc1.getId()),
            regions.get(vc2.getId()));
}
Tests that validates that a single pipelined component via a sequence of all-to-all connections works correctly. <pre> (a1) -+-> (b1) -+-> (c1) X X (a2) -+-> (b2) -+-> (c2) </pre>
testOneComponentViaTwoExchanges
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
Apache-2.0
@Test
void testOneComponentViaCascadeOfJoins() {
    // A cascade of pipelined joins: all seven vertices belong to a single region.
    TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v7 = topology.newExecutionVertex();

    // First join level: (v1, v2) -> v5 and (v3, v4) -> v6.
    topology.connect(v1, v5, ResultPartitionType.PIPELINED);
    topology.connect(v2, v5, ResultPartitionType.PIPELINED);
    topology.connect(v3, v6, ResultPartitionType.PIPELINED);
    topology.connect(v4, v6, ResultPartitionType.PIPELINED);
    // Second join level: (v5, v6) -> v7.
    topology.connect(v5, v7, ResultPartitionType.PIPELINED);
    topology.connect(v6, v7, ResultPartitionType.PIPELINED);

    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> regions =
            computePipelinedRegionByVertex(topology);

    assertSameRegion(
            regions.get(v1.getId()),
            regions.get(v2.getId()),
            regions.get(v3.getId()),
            regions.get(v4.getId()),
            regions.get(v5.getId()),
            regions.get(v6.getId()),
            regions.get(v7.getId()));
}
Tests that validates that a single pipelined component via a cascade of joins works correctly. <pre> (v1)--+ +--(v5)-+ (v2)--+ | +--(v7) (v3)--+ | +--(v6)-+ (v4)--+ </pre>
testOneComponentViaCascadeOfJoins
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
Apache-2.0
@Test
void testOneComponentInstanceFromOneSource() {
    // A pipelined fan-out tree rooted at v1: the whole tree is one region.
    TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v7 = topology.newExecutionVertex();

    topology.connect(v1, v2, ResultPartitionType.PIPELINED);
    topology.connect(v1, v3, ResultPartitionType.PIPELINED);
    topology.connect(v2, v4, ResultPartitionType.PIPELINED);
    topology.connect(v2, v5, ResultPartitionType.PIPELINED);
    topology.connect(v3, v6, ResultPartitionType.PIPELINED);
    topology.connect(v3, v7, ResultPartitionType.PIPELINED);

    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> regions =
            computePipelinedRegionByVertex(topology);

    assertSameRegion(
            regions.get(v1.getId()),
            regions.get(v2.getId()),
            regions.get(v3.getId()),
            regions.get(v4.getId()),
            regions.get(v5.getId()),
            regions.get(v6.getId()),
            regions.get(v7.getId()));
}
Tests that validates that a single pipelined component instance from one source works correctly. <pre> +--(v4) +--(v2)-+ | +--(v5) (v1)--+ | +--(v6) +--(v3)-+ +--(v7) </pre>
testOneComponentInstanceFromOneSource
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
Apache-2.0
@Test
void testTwoComponentsViaBlockingExchange() {
    // The a->b exchange is pipelined (one region); the pointwise b->c exchange
    // is blocking, so c1 and c2 each form their own region.
    TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex va1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex va2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vc1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vc2 = topology.newExecutionVertex();

    topology.connect(va1, vb1, ResultPartitionType.PIPELINED);
    topology.connect(va1, vb2, ResultPartitionType.PIPELINED);
    topology.connect(va2, vb1, ResultPartitionType.PIPELINED);
    topology.connect(va2, vb2, ResultPartitionType.PIPELINED);
    topology.connect(vb1, vc1, ResultPartitionType.BLOCKING);
    topology.connect(vb2, vc2, ResultPartitionType.BLOCKING);

    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> regions =
            computePipelinedRegionByVertex(topology);

    Set<SchedulingExecutionVertex> ra1 = regions.get(va1.getId());
    Set<SchedulingExecutionVertex> ra2 = regions.get(va2.getId());
    Set<SchedulingExecutionVertex> rb1 = regions.get(vb1.getId());
    Set<SchedulingExecutionVertex> rb2 = regions.get(vb2.getId());
    Set<SchedulingExecutionVertex> rc1 = regions.get(vc1.getId());
    Set<SchedulingExecutionVertex> rc2 = regions.get(vc2.getId());

    assertSameRegion(ra1, ra2, rb1, rb2);
    assertDistinctRegions(ra1, rc1, rc2);
}
Tests the below topology. <pre> (a1) -+-> (b1) -+-> (c1) X (a2) -+-> (b2) -+-> (c2) ^ ^ | | (pipelined) (blocking) </pre>
testTwoComponentsViaBlockingExchange
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
Apache-2.0
@Test
void testTwoComponentsViaBlockingExchange2() {
    // Like testTwoComponentsViaBlockingExchange, but the b->c exchange is
    // all-to-all blocking; c1 and c2 still end up in separate regions.
    TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex va1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex va2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vc1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vc2 = topology.newExecutionVertex();

    topology.connect(va1, vb1, ResultPartitionType.PIPELINED);
    topology.connect(va1, vb2, ResultPartitionType.PIPELINED);
    topology.connect(va2, vb1, ResultPartitionType.PIPELINED);
    topology.connect(va2, vb2, ResultPartitionType.PIPELINED);
    topology.connect(vb1, vc1, ResultPartitionType.BLOCKING);
    topology.connect(vb1, vc2, ResultPartitionType.BLOCKING);
    topology.connect(vb2, vc1, ResultPartitionType.BLOCKING);
    topology.connect(vb2, vc2, ResultPartitionType.BLOCKING);

    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> regions =
            computePipelinedRegionByVertex(topology);

    Set<SchedulingExecutionVertex> ra1 = regions.get(va1.getId());
    Set<SchedulingExecutionVertex> ra2 = regions.get(va2.getId());
    Set<SchedulingExecutionVertex> rb1 = regions.get(vb1.getId());
    Set<SchedulingExecutionVertex> rb2 = regions.get(vb2.getId());
    Set<SchedulingExecutionVertex> rc1 = regions.get(vc1.getId());
    Set<SchedulingExecutionVertex> rc2 = regions.get(vc2.getId());

    assertSameRegion(ra1, ra2, rb1, rb2);
    assertDistinctRegions(ra1, rc1, rc2);
}
Tests the below topology. <pre> (a1) -+-> (b1) -+-> (c1) X X (a2) -+-> (b2) -+-> (c2) ^ ^ | | (pipelined) (blocking) </pre>
testTwoComponentsViaBlockingExchange2
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
Apache-2.0
@Test
void testMultipleComponentsViaCascadeOfJoins() {
    // Pipelined joins feed a blocking join: three separate regions result.
    TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v7 = topology.newExecutionVertex();

    // Pipelined edges keep {v1, v2, v5} and {v3, v4, v6} fused ...
    topology.connect(v1, v5, ResultPartitionType.PIPELINED);
    topology.connect(v2, v5, ResultPartitionType.PIPELINED);
    topology.connect(v3, v6, ResultPartitionType.PIPELINED);
    topology.connect(v4, v6, ResultPartitionType.PIPELINED);
    // ... while blocking edges isolate v7 into its own region.
    topology.connect(v5, v7, ResultPartitionType.BLOCKING);
    topology.connect(v6, v7, ResultPartitionType.BLOCKING);

    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> regions =
            computePipelinedRegionByVertex(topology);

    Set<SchedulingExecutionVertex> r1 = regions.get(v1.getId());
    Set<SchedulingExecutionVertex> r3 = regions.get(v3.getId());
    Set<SchedulingExecutionVertex> r7 = regions.get(v7.getId());

    assertSameRegion(r1, regions.get(v2.getId()), regions.get(v5.getId()));
    assertSameRegion(r3, regions.get(v4.getId()), regions.get(v6.getId()));
    assertDistinctRegions(r1, r3, r7);
}
Cascades of joins with partially blocking, partially pipelined exchanges. <pre> (1)--+ +--(5)-+ (2)--+ | (blocking) | +--(7) | (blocking) (3)--+ | +--(6)-+ (4)--+ </pre> <p>Component 1: 1, 2, 5; component 2: 3,4,6; component 3: 7
testMultipleComponentsViaCascadeOfJoins
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
Apache-2.0
@Test
void testDiamondWithMixedPipelinedAndBlockingExchanges() {
    // Diamond with one blocking edge (v1->v2): the pipelined path around it
    // still fuses all four vertices into a single region.
    TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex();

    topology.connect(v1, v2, ResultPartitionType.BLOCKING);
    topology.connect(v1, v3, ResultPartitionType.PIPELINED);
    topology.connect(v2, v4, ResultPartitionType.PIPELINED);
    topology.connect(v3, v4, ResultPartitionType.PIPELINED);

    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> regions =
            computePipelinedRegionByVertex(topology);

    assertSameRegion(
            regions.get(v1.getId()),
            regions.get(v2.getId()),
            regions.get(v3.getId()),
            regions.get(v4.getId()));
}
Tests the below topology. <pre> (blocking) | v +|-(v2)-+ | | (v1)--+ +--(v4) | | +--(v3)-+ </pre>
testDiamondWithMixedPipelinedAndBlockingExchanges
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/SchedulingPipelinedRegionComputeUtilTest.java
Apache-2.0
/**
 * Creates a {@link TestingExternalResourceDriver}. The supplied configuration is ignored.
 *
 * @param config the external resource configuration (unused by this testing factory)
 * @return a fresh testing driver instance
 */
@Override
public ExternalResourceDriver createExternalResourceDriver(Configuration config) {
    return new TestingExternalResourceDriver();
}
Implementation of {@link ExternalResourceDriverFactory} for testing purpose.
createExternalResourceDriver
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/externalresource/TestingExternalResourceDriverFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/externalresource/TestingExternalResourceDriverFactory.java
Apache-2.0
/**
 * Always fails to create a driver; used to test error handling of driver instantiation.
 *
 * @param config the external resource configuration (unused)
 * @throws Exception always, to simulate a driver creation failure
 */
@Override
public ExternalResourceDriver createExternalResourceDriver(Configuration config)
        throws Exception {
    // Carry a message so failures surface a meaningful cause instead of a bare Exception.
    throw new Exception("Testing failure: this factory intentionally cannot create a driver.");
}
Implementation of {@link ExternalResourceDriverFactory} for testing purpose which fails to create an {@link ExternalResourceDriver}.
createExternalResourceDriver
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/externalresource/TestingFailedExternalResourceDriverFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/externalresource/TestingFailedExternalResourceDriverFactory.java
Apache-2.0
/**
 * Builds a {@link TestingPluginManager} that serves exactly one plugin: a {@link
 * TestFailureEnricherFactory} registered under the {@link FailureEnricherFactory} type.
 *
 * @return the testing plugin manager
 */
private static PluginManager createPluginManager() {
    final Map<Class<?>, Iterator<?>> pluginsByType = new HashMap<>();
    pluginsByType.put(
            FailureEnricherFactory.class,
            IteratorUtils.singletonIterator(new TestFailureEnricherFactory()));
    return new TestingPluginManager(pluginsByType);
}
Testing plugin manager for {@link FailureEnricherFactory} utilizing {@link TestFailureEnricherFactory}. @return the testing PluginManager
createPluginManager
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/failure/FailureEnricherUtilsTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/failure/FailureEnricherUtilsTest.java
Apache-2.0
@Test
public void testRegularHeartbeat() throws InterruptedException {
    final long heartbeatTimeout = 1000L;
    ResourceID ownResourceID = new ResourceID("foobar");
    ResourceID targetResourceID = new ResourceID("barfoo");
    final int outputPayload = 42;

    // Captures the payloads that the listener is asked to report.
    final ArrayBlockingQueue<String> listenerReports = new ArrayBlockingQueue<>(2);
    final TestingHeartbeatListener<String, Integer> heartbeatListener =
            new TestingHeartbeatListenerBuilder<String, Integer>()
                    .setReportPayloadConsumer(
                            (ignored, payload) -> listenerReports.offer(payload))
                    .setRetrievePayloadFunction((ignored) -> outputPayload)
                    .createNewTestingHeartbeatListener();

    HeartbeatManagerImpl<String, Integer> heartbeatManager =
            new HeartbeatManagerImpl<>(
                    heartbeatTimeout,
                    FAILED_RPC_THRESHOLD,
                    ownResourceID,
                    heartbeatListener,
                    new ScheduledExecutorServiceAdapter(EXECUTOR_RESOURCE.getExecutor()),
                    LOG);

    // Captures the payloads delivered to the monitored target.
    final ArrayBlockingQueue<Integer> targetReports = new ArrayBlockingQueue<>(2);
    final TestingHeartbeatTarget<Integer> heartbeatTarget =
            new TestingHeartbeatTargetBuilder<Integer>()
                    .setReceiveHeartbeatFunction(
                            (ignoredA, payload) -> {
                                targetReports.offer(payload);
                                return FutureUtils.completedVoidFuture();
                            })
                    .createTestingHeartbeatTarget();

    heartbeatManager.monitorTarget(targetResourceID, heartbeatTarget);

    // A heartbeat request must forward the incoming payload to the listener
    // and answer the target with the listener's own payload.
    final String inputPayload1 = "foobar";
    heartbeatManager.requestHeartbeat(targetResourceID, inputPayload1);
    assertThat(listenerReports.take()).isEqualTo(inputPayload1);
    assertThat(targetReports.take()).isEqualTo(outputPayload);

    // A received heartbeat must forward its payload to the listener as well.
    final String inputPayload2 = "barfoo";
    heartbeatManager.receiveHeartbeat(targetResourceID, inputPayload2);
    assertThat(listenerReports.take()).isEqualTo(inputPayload2);
}
Tests that regular heartbeat signal triggers the right callback functions in the {@link HeartbeatListener}.
testRegularHeartbeat
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
Apache-2.0
/**
 * Tests that the heartbeat monitors are updated when receiving a new heartbeat signal: the
 * previously scheduled timeout task is cancelled and a new one is scheduled.
 */
@Test
public void testHeartbeatMonitorUpdate() {
    long heartbeatTimeout = 1000L;
    ResourceID ownResourceID = new ResourceID("foobar");
    ResourceID targetResourceID = new ResourceID("barfoo");
    Object expectedObject = new Object();
    HeartbeatListener<Object, Object> heartbeatListener =
            new TestingHeartbeatListenerBuilder<>()
                    .setRetrievePayloadFunction(
                            ignored -> CompletableFuture.completedFuture(expectedObject))
                    .createNewTestingHeartbeatListener();

    // manual executor so the scheduled timeout tasks can be inspected without racing
    final ManuallyTriggeredScheduledExecutor manuallyTriggeredScheduledExecutor =
            new ManuallyTriggeredScheduledExecutor();

    HeartbeatManagerImpl<Object, Object> heartbeatManager =
            new HeartbeatManagerImpl<>(
                    heartbeatTimeout,
                    FAILED_RPC_THRESHOLD,
                    ownResourceID,
                    heartbeatListener,
                    manuallyTriggeredScheduledExecutor,
                    LOG);

    heartbeatManager.monitorTarget(
            targetResourceID, new TestingHeartbeatTargetBuilder<>().createTestingHeartbeatTarget());

    heartbeatManager.receiveHeartbeat(targetResourceID, expectedObject);

    // one timeout task from monitorTarget, a second one from the heartbeat update
    final List<ScheduledFuture<?>> scheduledTasksAfterHeartbeat =
            manuallyTriggeredScheduledExecutor.getAllScheduledTasks();
    assertThat(scheduledTasksAfterHeartbeat).hasSize(2);
    // the first scheduled future should be cancelled by the heartbeat update
    assertThat(scheduledTasksAfterHeartbeat.get(0).isCancelled()).isTrue();
}
Tests that the heartbeat monitors are updated when receiving a new heartbeat signal.
testHeartbeatMonitorUpdate
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
Apache-2.0
/**
 * Tests that a heartbeat timeout is signaled if the heartbeat is not reported in time: after the
 * last received heartbeat, the listener's timeout callback must fire with the target's {@link
 * ResourceID}.
 */
@Test
public void testHeartbeatTimeout() throws Exception {
    int numHeartbeats = 6;
    final int payload = 42;

    ResourceID ownResourceID = new ResourceID("foobar");
    ResourceID targetResourceID = new ResourceID("barfoo");

    // completed with the target's id once the heartbeat times out
    final CompletableFuture<ResourceID> timeoutFuture = new CompletableFuture<>();
    final TestingHeartbeatListener<Integer, Integer> heartbeatListener =
            new TestingHeartbeatListenerBuilder<Integer, Integer>()
                    .setRetrievePayloadFunction(ignored -> payload)
                    .setNotifyHeartbeatTimeoutConsumer(timeoutFuture::complete)
                    .createNewTestingHeartbeatListener();

    HeartbeatManagerImpl<Integer, Integer> heartbeatManager =
            new HeartbeatManagerImpl<>(
                    HEARTBEAT_TIMEOUT,
                    FAILED_RPC_THRESHOLD,
                    ownResourceID,
                    heartbeatListener,
                    new ScheduledExecutorServiceAdapter(EXECUTOR_RESOURCE.getExecutor()),
                    LOG);

    final HeartbeatTarget<Integer> heartbeatTarget =
            new TestingHeartbeatTargetBuilder<Integer>().createTestingHeartbeatTarget();

    heartbeatManager.monitorTarget(targetResourceID, heartbeatTarget);

    // keep the target alive for a while; no timeout may fire during this phase
    for (int i = 0; i < numHeartbeats; i++) {
        heartbeatManager.receiveHeartbeat(targetResourceID, payload);
        Thread.sleep(HEARTBEAT_INTERVAL);
    }

    // after the heartbeats stop, the timeout must eventually be triggered
    FlinkAssertions.assertThatFuture(timeoutFuture).eventuallySucceeds();

    ResourceID timeoutResourceID = timeoutFuture.get(2 * HEARTBEAT_TIMEOUT, TimeUnit.MILLISECONDS);

    // AssertJ convention: actual value first, expected value in isEqualTo
    assertThat(timeoutResourceID).isEqualTo(targetResourceID);
}
Tests that a heartbeat timeout is signaled if the heartbeat is not reported in time.
testHeartbeatTimeout
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
Apache-2.0
/**
 * Tests the heartbeat interplay between the {@link HeartbeatManagerImpl} and the {@link
 * HeartbeatManagerSenderImpl}. The sender should regularly trigger heartbeat requests which are
 * fulfilled by the receiver. Upon stopping the receiver, the sender should notify the heartbeat
 * listener about the heartbeat timeout.
 */
@Test
public void testHeartbeatCluster() throws Exception {
    ResourceID resourceIdTarget = new ResourceID("foobar");
    ResourceID resourceIDSender = new ResourceID("barfoo");
    final int targetPayload = 42;
    final AtomicInteger numReportPayloadCallsTarget = new AtomicInteger(0);
    final TestingHeartbeatListener<String, Integer> heartbeatListenerTarget =
            new TestingHeartbeatListenerBuilder<String, Integer>()
                    .setRetrievePayloadFunction(ignored -> targetPayload)
                    .setReportPayloadConsumer(
                            (ignoredA, ignoredB) -> numReportPayloadCallsTarget.incrementAndGet())
                    .createNewTestingHeartbeatListener();

    final String senderPayload = "1337";
    // completed when the sender detects a heartbeat timeout of the target
    final CompletableFuture<ResourceID> targetHeartbeatTimeoutFuture = new CompletableFuture<>();
    final AtomicInteger numReportPayloadCallsSender = new AtomicInteger(0);
    final TestingHeartbeatListener<Integer, String> heartbeatListenerSender =
            new TestingHeartbeatListenerBuilder<Integer, String>()
                    .setRetrievePayloadFunction(ignored -> senderPayload)
                    .setNotifyHeartbeatTimeoutConsumer(targetHeartbeatTimeoutFuture::complete)
                    .setReportPayloadConsumer(
                            (ignoredA, ignoredB) -> numReportPayloadCallsSender.incrementAndGet())
                    .createNewTestingHeartbeatListener();

    HeartbeatManagerImpl<String, Integer> heartbeatManagerTarget =
            new HeartbeatManagerImpl<>(
                    HEARTBEAT_TIMEOUT,
                    FAILED_RPC_THRESHOLD,
                    resourceIdTarget,
                    heartbeatListenerTarget,
                    new ScheduledExecutorServiceAdapter(EXECUTOR_RESOURCE.getExecutor()),
                    LOG);

    // the sender actively triggers heartbeat requests at HEARTBEAT_INTERVAL
    HeartbeatManagerSenderImpl<Integer, String> heartbeatManagerSender =
            new HeartbeatManagerSenderImpl<>(
                    HEARTBEAT_INTERVAL,
                    HEARTBEAT_TIMEOUT,
                    FAILED_RPC_THRESHOLD,
                    resourceIDSender,
                    heartbeatListenerSender,
                    new ScheduledExecutorServiceAdapter(EXECUTOR_RESOURCE.getExecutor()),
                    LOG);

    // wire the two managers up to monitor each other
    heartbeatManagerTarget.monitorTarget(resourceIDSender, heartbeatManagerSender);
    heartbeatManagerSender.monitorTarget(resourceIdTarget, heartbeatManagerTarget);

    Thread.sleep(2 * HEARTBEAT_TIMEOUT);

    // while both sides are alive there must not be a timeout
    assertThat(targetHeartbeatTimeoutFuture).isNotDone();

    // stopping the target must make the sender detect the timeout
    heartbeatManagerTarget.stop();

    ResourceID timeoutResourceID =
            targetHeartbeatTimeoutFuture.get(2 * HEARTBEAT_TIMEOUT, TimeUnit.MILLISECONDS);

    assertThat(timeoutResourceID).isEqualTo(resourceIdTarget);

    // at least half of the expected heartbeat rounds must have reported payloads on both sides
    int numberHeartbeats = (int) (2 * HEARTBEAT_TIMEOUT / HEARTBEAT_INTERVAL);

    assertThat(numReportPayloadCallsTarget.get()).isGreaterThanOrEqualTo(numberHeartbeats / 2);
    assertThat(numReportPayloadCallsSender.get()).isGreaterThanOrEqualTo(numberHeartbeats / 2);
}
Tests the heartbeat interplay between the {@link HeartbeatManagerImpl} and the {@link HeartbeatManagerSenderImpl}. The sender should regularly trigger heartbeat requests which are fulfilled by the receiver. Upon stopping the receiver, the sender should notify the heartbeat listener about the heartbeat timeout. @throws Exception
testHeartbeatCluster
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
Apache-2.0
/** Tests that after unmonitoring a target, there won't be a timeout triggered for it. */
@Test
public void testTargetUnmonitoring() throws Exception {
    // this might be too aggressive for Travis, let's see...
    long heartbeatTimeout = 50L;
    ResourceID resourceID = new ResourceID("foobar");
    ResourceID targetID = new ResourceID("target");
    final int payload = 42;

    // would be completed by the listener if a timeout were (incorrectly) triggered
    final CompletableFuture<ResourceID> timeoutFuture = new CompletableFuture<>();
    final TestingHeartbeatListener<Integer, Integer> heartbeatListener =
            new TestingHeartbeatListenerBuilder<Integer, Integer>()
                    .setRetrievePayloadFunction(ignored -> payload)
                    .setNotifyHeartbeatTimeoutConsumer(timeoutFuture::complete)
                    .createNewTestingHeartbeatListener();

    HeartbeatManager<Integer, Integer> heartbeatManager =
            new HeartbeatManagerImpl<>(
                    heartbeatTimeout,
                    FAILED_RPC_THRESHOLD,
                    resourceID,
                    heartbeatListener,
                    new ScheduledExecutorServiceAdapter(EXECUTOR_RESOURCE.getExecutor()),
                    LOG);

    final HeartbeatTarget<Integer> heartbeatTarget =
            new TestingHeartbeatTargetBuilder<Integer>().createTestingHeartbeatTarget();
    heartbeatManager.monitorTarget(targetID, heartbeatTarget);
    heartbeatManager.unmonitorTarget(targetID);

    assertThatThrownBy(() -> timeoutFuture.get(2 * heartbeatTimeout, TimeUnit.MILLISECONDS))
            // the timeout should not be completed since we unmonitored the target
            .isInstanceOf(TimeoutException.class);
}
Tests that after unmonitoring a target, there won't be a timeout triggered.
testTargetUnmonitoring
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
Apache-2.0
/** Tests that the last heartbeat from an unregistered target equals -1. */
@Test
public void testLastHeartbeatFromUnregisteredTarget() {
    final long timeoutMillis = 100L;
    final ResourceID ownResourceId = ResourceID.generate();
    final HeartbeatListener<Object, Object> listener =
            new TestingHeartbeatListenerBuilder<>().createNewTestingHeartbeatListener();

    final HeartbeatManager<?, ?> manager =
            new HeartbeatManagerImpl<>(
                    timeoutMillis,
                    FAILED_RPC_THRESHOLD,
                    ownResourceId,
                    listener,
                    new ScheduledExecutorServiceAdapter(EXECUTOR_RESOURCE.getExecutor()),
                    LOG);

    try {
        // querying a target that was never monitored must yield the sentinel value -1
        final long lastHeartbeat = manager.getLastHeartbeatFrom(ResourceID.generate());
        assertThat(lastHeartbeat).isEqualTo(-1L);
    } finally {
        manager.stop();
    }
}
Tests that the last heartbeat from an unregistered target equals -1.
testLastHeartbeatFromUnregisteredTarget
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
Apache-2.0
/** Tests that we can correctly retrieve the last heartbeat timestamp for registered targets. */
@Test
public void testLastHeartbeatFrom() {
    final long heartbeatTimeout = 100L;
    final ResourceID resourceId = ResourceID.generate();
    final ResourceID target = ResourceID.generate();

    HeartbeatManager<Object, Object> heartbeatManager =
            new HeartbeatManagerImpl<>(
                    heartbeatTimeout,
                    FAILED_RPC_THRESHOLD,
                    resourceId,
                    new TestingHeartbeatListenerBuilder<>().createNewTestingHeartbeatListener(),
                    new ScheduledExecutorServiceAdapter(EXECUTOR_RESOURCE.getExecutor()),
                    LOG);

    try {
        heartbeatManager.monitorTarget(
                target, new TestingHeartbeatTargetBuilder<>().createTestingHeartbeatTarget());

        // before any heartbeat was received, the timestamp is 0
        assertThat(heartbeatManager.getLastHeartbeatFrom(target)).isZero();

        // after receiving a heartbeat, the timestamp must be at least the current wall-clock time
        final long currentTime = System.currentTimeMillis();

        heartbeatManager.receiveHeartbeat(target, null);

        assertThat(heartbeatManager.getLastHeartbeatFrom(target))
                .isGreaterThanOrEqualTo(currentTime);
    } finally {
        heartbeatManager.stop();
    }
}
Tests that we can correctly retrieve the last heartbeat for registered targets.
testLastHeartbeatFrom
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
Apache-2.0
/**
 * Tests that the heartbeat target {@link ResourceID} is properly passed to the {@link
 * HeartbeatListener} by the {@link HeartbeatManagerImpl}, i.e. each target receives the payload
 * that the listener associates with that target's id.
 */
@Test
public void testHeartbeatManagerTargetPayload() throws Exception {
    final long heartbeatTimeout = 100L;

    final ResourceID someTargetId = ResourceID.generate();
    final ResourceID specialTargetId = ResourceID.generate();

    // per-target payloads; the listener looks them up by the target's ResourceID
    final Map<ResourceID, Integer> payloads = new HashMap<>(2);
    payloads.put(someTargetId, 0);
    payloads.put(specialTargetId, 1);

    final CompletableFuture<Integer> someHeartbeatPayloadFuture = new CompletableFuture<>();
    final TestingHeartbeatTarget<Integer> someHeartbeatTarget =
            new TestingHeartbeatTargetBuilder<Integer>()
                    .setReceiveHeartbeatFunction(
                            (ignored, payload) -> {
                                someHeartbeatPayloadFuture.complete(payload);
                                return FutureUtils.completedVoidFuture();
                            })
                    .createTestingHeartbeatTarget();

    final CompletableFuture<Integer> specialHeartbeatPayloadFuture = new CompletableFuture<>();
    final TestingHeartbeatTarget<Integer> specialHeartbeatTarget =
            new TestingHeartbeatTargetBuilder<Integer>()
                    .setReceiveHeartbeatFunction(
                            (ignored, payload) -> {
                                specialHeartbeatPayloadFuture.complete(payload);
                                return FutureUtils.completedVoidFuture();
                            })
                    .createTestingHeartbeatTarget();

    final TestingHeartbeatListener<Void, Integer> testingHeartbeatListener =
            new TestingHeartbeatListenerBuilder<Void, Integer>()
                    .setRetrievePayloadFunction(payloads::get)
                    .createNewTestingHeartbeatListener();

    HeartbeatManager<?, Integer> heartbeatManager =
            new HeartbeatManagerImpl<>(
                    heartbeatTimeout,
                    FAILED_RPC_THRESHOLD,
                    ResourceID.generate(),
                    testingHeartbeatListener,
                    new ScheduledExecutorServiceAdapter(EXECUTOR_RESOURCE.getExecutor()),
                    LOG);

    try {
        heartbeatManager.monitorTarget(someTargetId, someHeartbeatTarget);
        heartbeatManager.monitorTarget(specialTargetId, specialHeartbeatTarget);

        // each target must be answered with its own payload
        heartbeatManager.requestHeartbeat(someTargetId, null);
        assertThat(someHeartbeatPayloadFuture.get()).isEqualTo(payloads.get(someTargetId));

        heartbeatManager.requestHeartbeat(specialTargetId, null);
        assertThat(specialHeartbeatPayloadFuture.get())
                .isEqualTo(payloads.get(specialTargetId));
    } finally {
        heartbeatManager.stop();
    }
}
Tests that the heartbeat target {@link ResourceID} is properly passed to the {@link HeartbeatListener} by the {@link HeartbeatManagerImpl}.
testHeartbeatManagerTargetPayload
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
Apache-2.0
/**
 * Creates a {@link TestingHeartbeatMonitor} (instead of the default monitor) so that tests can
 * hook into the monitor's behavior; all constructor arguments are forwarded unchanged.
 */
@Override
public HeartbeatMonitor<O> createHeartbeatMonitor(
        ResourceID resourceID,
        HeartbeatTarget<O> heartbeatTarget,
        ScheduledExecutor mainThreadExecutor,
        HeartbeatListener<?, O> heartbeatListener,
        long heartbeatTimeoutIntervalMs,
        int failedRpcRequestsUntilUnreachable) {
    return new TestingHeartbeatMonitor<>(
            resourceID,
            heartbeatTarget,
            mainThreadExecutor,
            heartbeatListener,
            heartbeatTimeoutIntervalMs,
            failedRpcRequestsUntilUnreachable);
}
Factory instantiates testing monitor instance. @param <O> Type of the outgoing heartbeat payload
createHeartbeatMonitor
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/TestingHeartbeatServices.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/TestingHeartbeatServices.java
Apache-2.0
/**
 * Tests that we first delete all pointers from the HA services before deleting the blobs. See
 * FLINK-22014 for more details.
 */
@Test
void testCloseAndCleanupAllDataDeletesBlobsAfterCleaningUpHAData() throws Exception {
    // records the order in which the close/cleanup operations are executed
    final Queue<CloseOperations> closeOperations = new ArrayDeque<>(3);

    final TestingBlobStoreService testingBlobStoreService =
            new TestingBlobStoreService(closeOperations);

    final TestingHaServices haServices =
            new TestingHaServices(
                    new Configuration(),
                    Executors.directExecutor(),
                    testingBlobStoreService,
                    closeOperations,
                    () -> closeOperations.offer(CloseOperations.HA_CLEANUP),
                    ignored -> {});

    haServices.closeWithOptionalClean(true);

    // HA_CLEANUP must run before the blob store is cleaned up and closed
    assertThat(closeOperations)
            .contains(
                    CloseOperations.HA_CLEANUP,
                    CloseOperations.HA_CLOSE,
                    CloseOperations.BLOB_CLEANUP);
}
Tests that we first delete all pointers from the HA services before deleting the blobs. See FLINK-22014 for more details.
testCloseAndCleanupAllDataDeletesBlobsAfterCleaningUpHAData
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/AbstractHaServicesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/AbstractHaServicesTest.java
Apache-2.0
/**
 * Creates a {@link FileSystemJobResultStore} backed by the test's temporary directory for the
 * {@link JobResultStore} contract tests.
 */
@Override
public JobResultStore createJobResultStore() throws IOException {
    final Path basePath = new Path(temporaryFolder.toURI());
    // deleteOnCommit=false so the contract tests can inspect the written files
    return new FileSystemJobResultStore(
            basePath.getFileSystem(), basePath, false, Executors.directExecutor());
}
Tests for the {@link FileSystemJobResultStore} implementation of the {@link JobResultStore}'s contracts.
createJobResultStore
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/FileSystemJobResultStoreContractTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/FileSystemJobResultStoreContractTest.java
Apache-2.0
/**
 * Generates the expected file for a dirty entry given a job entry.
 *
 * @param entry The job entry to construct the expected dirty path from.
 * @return The expected dirty file.
 */
private File expectedDirtyFile(JobResultEntry entry) {
    final String dirtyFileName =
            entry.getJobId().toString() + FileSystemJobResultStore.DIRTY_FILE_EXTENSION;
    return new File(temporaryFolder.toURI().getPath(), dirtyFileName);
}
Generates the expected path for a dirty entry given a job entry. @param entry The job entry to construct the expected dirty path from. @return The expected dirty file.
expectedDirtyFile
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/FileSystemJobResultStoreFileOperationsTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/FileSystemJobResultStoreFileOperationsTest.java
Apache-2.0
/**
 * Generates the expected file for a clean entry given a job entry.
 *
 * @param entry The job entry to construct the expected clean path from.
 * @return The expected clean file.
 */
private File expectedCleanFile(JobResultEntry entry) {
    final String cleanFileName =
            entry.getJobId().toString() + FileSystemJobResultStore.FILE_EXTENSION;
    return new File(temporaryFolder.toURI().getPath(), cleanFileName);
}
Generates the expected path for a clean entry given a job entry. @param entry The job entry to construct the expected clean path from. @return The expected clean file.
expectedCleanFile
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/FileSystemJobResultStoreFileOperationsTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/FileSystemJobResultStoreFileOperationsTest.java
Apache-2.0
/** Tests that exactly one JobManager is elected as the leader for a given job id. */
@Test
public void testJobManagerLeaderElection() throws Exception {
    JobID jobId1 = new JobID();
    JobID jobId2 = new JobID();

    // two contenders compete for jobId1; a third runs for an unrelated job id
    LeaderContender leaderContender1 = mock(LeaderContender.class);
    LeaderContender leaderContender2 = mock(LeaderContender.class);
    LeaderContender leaderContenderDifferentJobId = mock(LeaderContender.class);

    LeaderElection leaderElection1 = embeddedHaServices.getJobManagerLeaderElection(jobId1);
    leaderElection1.startLeaderElection(leaderContender1);

    LeaderElection leaderElection2 = embeddedHaServices.getJobManagerLeaderElection(jobId1);
    leaderElection2.startLeaderElection(leaderContender2);

    LeaderElection leaderElectionDifferentJobId =
            embeddedHaServices.getJobManagerLeaderElection(jobId2);
    leaderElectionDifferentJobId.startLeaderElection(leaderContenderDifferentJobId);

    ArgumentCaptor<UUID> leaderIdArgumentCaptor1 = ArgumentCaptor.forClass(UUID.class);
    ArgumentCaptor<UUID> leaderIdArgumentCaptor2 = ArgumentCaptor.forClass(UUID.class);
    verify(leaderContender1, atLeast(0)).grantLeadership(leaderIdArgumentCaptor1.capture());
    verify(leaderContender2, atLeast(0)).grantLeadership(leaderIdArgumentCaptor2.capture());

    // XOR: exactly one of the two contenders for jobId1 was granted leadership
    assertTrue(
            leaderIdArgumentCaptor1.getAllValues().isEmpty()
                    ^ leaderIdArgumentCaptor2.getAllValues().isEmpty());

    // the contender for the other job id is elected independently
    verify(leaderContenderDifferentJobId).grantLeadership(any(UUID.class));
}
Tests that exactly one JobManager is elected as the leader for a given job id.
testJobManagerLeaderElection
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/embedded/EmbeddedHaServicesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/embedded/EmbeddedHaServicesTest.java
Apache-2.0
/** Tests that exactly one ResourceManager is elected as the leader. */
@Test
public void testResourceManagerLeaderElection() throws Exception {
    LeaderContender leaderContender1 = mock(LeaderContender.class);
    LeaderContender leaderContender2 = mock(LeaderContender.class);

    LeaderElection leaderElection1 = embeddedHaServices.getResourceManagerLeaderElection();
    leaderElection1.startLeaderElection(leaderContender1);

    LeaderElection leaderElection2 = embeddedHaServices.getResourceManagerLeaderElection();
    leaderElection2.startLeaderElection(leaderContender2);

    ArgumentCaptor<UUID> leaderIdArgumentCaptor1 = ArgumentCaptor.forClass(UUID.class);
    ArgumentCaptor<UUID> leaderIdArgumentCaptor2 = ArgumentCaptor.forClass(UUID.class);
    verify(leaderContender1, atLeast(0)).grantLeadership(leaderIdArgumentCaptor1.capture());
    verify(leaderContender2, atLeast(0)).grantLeadership(leaderIdArgumentCaptor2.capture());

    // XOR: exactly one contender was granted leadership
    assertTrue(
            leaderIdArgumentCaptor1.getAllValues().isEmpty()
                    ^ leaderIdArgumentCaptor2.getAllValues().isEmpty());
}
Tests that exactly one ResourceManager is elected as the leader.
testResourceManagerLeaderElection
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/embedded/EmbeddedHaServicesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/embedded/EmbeddedHaServicesTest.java
Apache-2.0
/** Tests the ResourceManager leader retrieval. */
@Test
public void testResourceManagerLeaderRetrieval() throws Exception {
    final LeaderRetrievalService retrievalService =
            embeddedHaServices.getResourceManagerLeaderRetriever();
    final LeaderElection election = embeddedHaServices.getResourceManagerLeaderElection();

    runLeaderRetrievalTest(election, retrievalService);
}
Tests the ResourceManager leader retrieval.
testResourceManagerLeaderRetrieval
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/embedded/EmbeddedHaServicesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/embedded/EmbeddedHaServicesTest.java
Apache-2.0
/**
 * Tests that concurrent leadership operations (granting and revoking leadership) leave the
 * system in a sane state: a stale leader session cannot reclaim leadership by confirming late.
 */
@Test
public void testConcurrentLeadershipOperations() throws Exception {
    final LeaderElection leaderElection = embeddedHaServices.getDispatcherLeaderElection();
    final TestingLeaderContender leaderContender = new TestingLeaderContender();
    leaderElection.startLeaderElection(leaderContender);

    final UUID oldLeaderSessionId = leaderContender.getLeaderSessionFuture().get();

    assertThat(leaderElection.hasLeadershipAsync(oldLeaderSessionId).get(), is(true));

    // revoking invalidates the old session id
    embeddedHaServices.getDispatcherLeaderService().revokeLeadership().get();
    assertThat(leaderElection.hasLeadershipAsync(oldLeaderSessionId).get(), is(false));

    // granting again yields a fresh session id
    embeddedHaServices.getDispatcherLeaderService().grantLeadership();
    final UUID newLeaderSessionId = leaderContender.getLeaderSessionFuture().get();

    assertThat(leaderElection.hasLeadershipAsync(newLeaderSessionId).get(), is(true));

    // a late confirmation of the old session must not affect the new leadership
    leaderElection.confirmLeadershipAsync(oldLeaderSessionId, ADDRESS).get();
    leaderElection.confirmLeadershipAsync(newLeaderSessionId, ADDRESS).get();

    assertThat(leaderElection.hasLeadershipAsync(newLeaderSessionId).get(), is(true));

    // surface any exception that the contender observed during the test
    leaderContender.tryRethrowException();
}
Tests that concurrent leadership operations (granting and revoking) leadership leave the system in a sane state.
testConcurrentLeadershipOperations
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/embedded/EmbeddedHaServicesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/embedded/EmbeddedHaServicesTest.java
Apache-2.0
/** Creates an in-memory {@link EmbeddedJobResultStore} for the contract tests. */
@Override
public JobResultStore createJobResultStore() throws IOException {
    return new EmbeddedJobResultStore();
}
Tests for the {@link EmbeddedJobResultStore} implementation of the {@link JobResultStore}'s contracts.
createJobResultStore
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/embedded/EmbeddedJobResultStoreContractTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/embedded/EmbeddedJobResultStoreContractTest.java
Apache-2.0
/**
 * Tests that the {@link EmbeddedLeaderService} can handle a concurrent grant leadership call and
 * a shutdown: closing the election before the grant event runs must neither complete the grant
 * nor shut down the service prematurely.
 */
@Test
public void testConcurrentGrantLeadershipAndShutdown() throws Exception {
    // manual executor: the grant event stays queued until we trigger it explicitly
    final ManuallyTriggeredScheduledExecutorService executorService =
            new ManuallyTriggeredScheduledExecutorService();
    final EmbeddedLeaderService embeddedLeaderService =
            new EmbeddedLeaderService(executorService);

    try {
        final TestingLeaderContender contender = new TestingLeaderContender();

        final LeaderElection leaderElection =
                embeddedLeaderService.createLeaderElectionService("component_id");
        leaderElection.startLeaderElection(contender);
        // close before the (still queued) grant event gets processed
        leaderElection.close();

        assertThat(contender.getLeaderSessionFuture())
                .as(
                        "The future shouldn't have completed because the grant event wasn't processed, yet.")
                .isNotDone();

        // the election service should still be running
        assertThat(embeddedLeaderService.isShutdown()).isFalse();
    } finally {
        embeddedLeaderService.shutdown();
        // triggers the grant event processing after shutdown
        executorService.triggerAll();
    }
}
Tests that the {@link EmbeddedLeaderService} can handle a concurrent grant leadership call and a shutdown.
testConcurrentGrantLeadershipAndShutdown
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/embedded/EmbeddedLeaderServiceTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/embedded/EmbeddedLeaderServiceTest.java
Apache-2.0
/**
 * Tests that the {@link EmbeddedLeaderService} can handle a concurrent revoke leadership call
 * and a shutdown: closing the election before the revoke event runs must neither complete the
 * revocation nor shut down the service prematurely.
 */
@Test
public void testConcurrentRevokeLeadershipAndShutdown() throws Exception {
    // manual executor: queued events only run when triggered explicitly
    final ManuallyTriggeredScheduledExecutorService executorService =
            new ManuallyTriggeredScheduledExecutorService();
    final EmbeddedLeaderService embeddedLeaderService =
            new EmbeddedLeaderService(executorService);

    try {
        final TestingLeaderContender contender = new TestingLeaderContender();

        final LeaderElection leaderElection =
                embeddedLeaderService.createLeaderElectionService("component_id");
        leaderElection.startLeaderElection(contender);

        // wait for the leadership
        executorService.trigger();
        contender.getLeaderSessionFuture().get();

        final CompletableFuture<Void> revokeLeadershipFuture =
                embeddedLeaderService.revokeLeadership();
        // close before the (still queued) revoke event gets processed
        leaderElection.close();

        assertThat(revokeLeadershipFuture)
                .as(
                        "The future shouldn't have completed because the revoke event wasn't processed, yet.")
                .isNotDone();

        // the election service should still be running
        assertThat(embeddedLeaderService.isShutdown()).isFalse();
    } finally {
        embeddedLeaderService.shutdown();
        // triggers the revoke event processing after shutdown
        executorService.triggerAll();
    }
}
Tests that the {@link EmbeddedLeaderService} can handle a concurrent revoke leadership call and a shutdown.
testConcurrentRevokeLeadershipAndShutdown
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/embedded/EmbeddedLeaderServiceTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/embedded/EmbeddedLeaderServiceTest.java
Apache-2.0
/**
 * Tests that the standalone leader election services immediately grant leadership with the fixed
 * default leader session id.
 */
@Test
public void testLeaderElection() throws Exception {
    JobID jobId = new JobID();
    LeaderContender jmLeaderContender = mock(LeaderContender.class);
    LeaderContender rmLeaderContender = mock(LeaderContender.class);

    final LeaderElection jmLeaderElection =
            standaloneHaServices.getJobManagerLeaderElection(jobId);
    jmLeaderElection.startLeaderElection(jmLeaderContender);

    final LeaderElection rmLeaderElection =
            standaloneHaServices.getResourceManagerLeaderElection();
    rmLeaderElection.startLeaderElection(rmLeaderContender);

    // in standalone mode leadership is granted immediately with the fixed default leader id
    verify(jmLeaderContender).grantLeadership(eq(HighAvailabilityServices.DEFAULT_LEADER_ID));
    verify(rmLeaderContender).grantLeadership(eq(HighAvailabilityServices.DEFAULT_LEADER_ID));
}
Tests that the standalone leader election services return a fixed address and leader session id.
testLeaderElection
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/standalone/StandaloneHaServicesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/standalone/StandaloneHaServicesTest.java
Apache-2.0
/**
 * Tests that the standalone leader retrieval services return the configured address and the
 * fixed default leader session id.
 */
@Test
public void testJobManagerLeaderRetrieval() throws Exception {
    JobID jobId1 = new JobID();
    JobID jobId2 = new JobID();
    LeaderRetrievalListener jmListener1 = mock(LeaderRetrievalListener.class);
    LeaderRetrievalListener jmListener2 = mock(LeaderRetrievalListener.class);
    LeaderRetrievalListener rmListener = mock(LeaderRetrievalListener.class);

    LeaderRetrievalService jmLeaderRetrievalService1 =
            standaloneHaServices.getJobManagerLeaderRetriever(jobId1);
    LeaderRetrievalService jmLeaderRetrievalService2 =
            standaloneHaServices.getJobManagerLeaderRetriever(jobId2);
    LeaderRetrievalService rmLeaderRetrievalService =
            standaloneHaServices.getResourceManagerLeaderRetriever();

    jmLeaderRetrievalService1.start(jmListener1);
    jmLeaderRetrievalService2.start(jmListener2);
    rmLeaderRetrievalService.start(rmListener);

    // no default JobManager address configured -> "UNKNOWN" is reported
    verify(jmListener1)
            .notifyLeaderAddress(eq("UNKNOWN"), eq(HighAvailabilityServices.DEFAULT_LEADER_ID));
    verify(jmListener2)
            .notifyLeaderAddress(eq("UNKNOWN"), eq(HighAvailabilityServices.DEFAULT_LEADER_ID));
    // the ResourceManager address was configured and must be reported as-is
    verify(rmListener)
            .notifyLeaderAddress(
                    eq(resourceManagerAddress), eq(HighAvailabilityServices.DEFAULT_LEADER_ID));
}
Tests that the standalone leader retrieval services return the specified address and the fixed leader session id.
testJobManagerLeaderRetrieval
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/standalone/StandaloneHaServicesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/standalone/StandaloneHaServicesTest.java
Apache-2.0
/**
 * Tests that the standalone leader retrieval services return the explicitly given JobManager
 * address and the fixed default leader session id.
 */
@Test
public void testJobMasterLeaderRetrieval() throws Exception {
    JobID jobId1 = new JobID();
    JobID jobId2 = new JobID();
    final String jobManagerAddress1 = "foobar";
    final String jobManagerAddress2 = "barfoo";
    LeaderRetrievalListener jmListener1 = mock(LeaderRetrievalListener.class);
    LeaderRetrievalListener jmListener2 = mock(LeaderRetrievalListener.class);

    LeaderRetrievalService jmLeaderRetrievalService1 =
            standaloneHaServices.getJobManagerLeaderRetriever(jobId1, jobManagerAddress1);
    LeaderRetrievalService jmLeaderRetrievalService2 =
            standaloneHaServices.getJobManagerLeaderRetriever(jobId2, jobManagerAddress2);

    jmLeaderRetrievalService1.start(jmListener1);
    jmLeaderRetrievalService2.start(jmListener2);

    // each retriever reports exactly the address it was created with
    verify(jmListener1)
            .notifyLeaderAddress(
                    eq(jobManagerAddress1), eq(HighAvailabilityServices.DEFAULT_LEADER_ID));
    verify(jmListener2)
            .notifyLeaderAddress(
                    eq(jobManagerAddress2), eq(HighAvailabilityServices.DEFAULT_LEADER_ID));
}
Tests that the standalone leader retrieval services return the given address and the fixed leader session id.
testJobMasterLeaderRetrieval
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/standalone/StandaloneHaServicesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/highavailability/nonha/standalone/StandaloneHaServicesTest.java
Apache-2.0
public static void main(String[] args) throws Exception { boolean callerHasHook = Boolean.parseBoolean(args[0]); String tmpDirectory = args[1]; String signalFilePath = args[2]; LOG.info("The FileChannelManagerCleanupRunner process has started"); FileChannelManager manager = new FileChannelManagerImpl(new String[] {tmpDirectory}, DIR_NAME_PREFIX); if (callerHasHook) { // Verifies the case that both FileChannelManager and its upper component // have registered shutdown hooks, like in IOManager. ShutdownHookUtil.addShutdownHook(() -> manager.close(), "Caller", LOG); } LOG.info("The FileChannelManagerCleanupRunner is going to create the new file"); // Signals the main process to execute the kill action. new File(signalFilePath).createNewFile(); LOG.info("The FileChannelManagerCleanupRunner has created the new file"); // Blocks the process to wait to be killed. Thread.sleep(3 * TEST_TIMEOUT.toMillis()); }
The entry point class to test the file channel manager cleanup with shutdown hook.
main
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/FileChannelManagerImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/FileChannelManagerImplTest.java
Apache-2.0
/**
 * Verifies that {@link Task#setupPartitionsAndGates(ResultPartitionWriter[], InputGate[])} sets
 * up (un)bounded buffer pool instances for various types of input and output channels working
 * with the bare minimum of required buffers.
 */
@Test
void testRegisterTaskWithLimitedBuffers() throws Exception {
    // outgoing: 1 buffer per channel + 1 extra buffer per ResultPartition
    // incoming: 2 exclusive buffers per channel + 1 floating buffer per single gate
    final int bufferCount = 18 + 10 * 2;

    testRegisterTaskWithLimitedBuffers(bufferCount);
}
Verifies that {@link Task#setupPartitionsAndGates(ResultPartitionWriter[], InputGate[])}} sets up (un)bounded buffer pool instances for various types of input and output channels working with the bare minimum of required buffers.
testRegisterTaskWithLimitedBuffers
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/NettyShuffleEnvironmentTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/NettyShuffleEnvironmentTest.java
Apache-2.0
/**
 * Accepts exactly one event, which must be the same instance as the expected one.
 *
 * <p>Fails with an IllegalStateException on a second invocation and with an
 * IllegalArgumentException when a different event instance is delivered.
 */
public void onEvent(TaskEvent actual) {
    checkState(!fired, "Should only fire once");
    fired = true;
    // identity comparison on purpose: the dispatcher must deliver the very same instance
    checkArgument(
            actual == expected,
            "Fired on unexpected event: %s (expected: %s)",
            actual,
            expected);
}
Event listener that expects a given {@link TaskEvent} once in its {@link #onEvent(TaskEvent)} call and will fail for any subsequent call. <p>Be sure to check that {@link #fired} is <tt>true</tt> to ensure that this handler has been called once.
onEvent
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/TaskEventDispatcherTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/TaskEventDispatcherTest.java
Apache-2.0
/** Fails immediately: this listener must never receive any event. */
public void onEvent(TaskEvent actual) {
    throw new IllegalStateException("Should never fire");
}
Event listener which ensures that its {@link #onEvent(TaskEvent)} method is never called.
onEvent
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/TaskEventDispatcherTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/TaskEventDispatcherTest.java
Apache-2.0
/**
 * The checkpoint barrier deliberately does not support its own serialization (it is
 * immutable); both write and read must fail with UnsupportedOperationException.
 */
@Test
void testSerialization() {
    // values beyond the int range to make sure full longs are carried
    final long checkpointId = Integer.MAX_VALUE + 123123L;
    final long checkpointTimestamp = Integer.MAX_VALUE + 1228L;

    final CheckpointBarrier checkpointBarrier =
            new CheckpointBarrier(
                    checkpointId,
                    checkpointTimestamp,
                    CheckpointOptions.forCheckpointWithDefaultLocation());

    assertThatThrownBy(() -> checkpointBarrier.write(new DataOutputSerializer(1024)))
            .isInstanceOf(UnsupportedOperationException.class);
    assertThatThrownBy(() -> checkpointBarrier.read(new DataInputDeserializer(new byte[32])))
            .isInstanceOf(UnsupportedOperationException.class);
}
Test serialization of the checkpoint barrier. The checkpoint barrier does not support its own serialization, in order to be immutable.
testSerialization
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/api/CheckpointBarrierTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/api/CheckpointBarrierTest.java
Apache-2.0
/**
 * Broadcasts a single event to all subpartitions and verifies that, after every
 * subpartition view has consumed its copy, the underlying event buffer is recycled.
 */
@TestTemplate
void testBroadcastEventBufferReferenceCounting() throws Exception {
    int bufferSize = 32 * 1024;
    int numSubpartitions = 2;

    ResultPartition partition = createResultPartition(bufferSize, numSubpartitions);
    RecordWriter<?> writer = createRecordWriter(partition);
    writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);

    // get references to buffer consumers (copies from the original event buffer consumer)
    Buffer[] buffers = new Buffer[numSubpartitions];

    // process all collected events (recycles the buffer)
    for (int i = 0; i < numSubpartitions; i++) {
        // exactly one queued buffer per subpartition: the broadcast event
        assertThat(partition.getNumberOfQueuedBuffers(i)).isOne();

        ResultSubpartitionView view =
                partition.createSubpartitionView(
                        new ResultSubpartitionIndexSet(i),
                        new NoOpBufferAvailablityListener());
        buffers[i] = view.getNextBuffer().buffer();
        assertThat(parseBuffer(buffers[i], i).isEvent()).isTrue();
    }

    // once all per-subpartition copies were consumed, every buffer must be recycled
    for (int i = 0; i < numSubpartitions; ++i) {
        assertThat(buffers[i].isRecycled()).isTrue();
    }
}
Tests that event buffers are properly recycled when broadcasting events to multiple subpartitions.
testBroadcastEventBufferReferenceCounting
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/api/writer/RecordWriterTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/api/writer/RecordWriterTest.java
Apache-2.0
/**
 * Broadcasted events must get buffers with independent reader indices per subpartition;
 * delegates to the shared verification helper (true selects the event path).
 */
@TestTemplate
void testBroadcastEventBufferIndependence() throws Exception {
    verifyBroadcastBufferOrEventIndependence(true);
}
Tests that broadcasted events' buffers are independent (in their (reader) indices) once they are put into the queue for Netty when broadcasting events to multiple subpartitions.
testBroadcastEventBufferIndependence
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/api/writer/RecordWriterTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/api/writer/RecordWriterTest.java
Apache-2.0
/**
 * Broadcasted records must get buffers with independent reader indices per subpartition;
 * delegates to the shared verification helper (false selects the record path).
 */
@TestTemplate
void testBroadcastEmitBufferIndependence() throws Exception {
    verifyBroadcastBufferOrEventIndependence(false);
}
Tests that broadcasted records' buffers are independent (in their (reader) indices) once they are put into the queue for Netty when broadcasting events to multiple subpartitions.
testBroadcastEmitBufferIndependence
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/api/writer/RecordWriterTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/api/writer/RecordWriterTest.java
Apache-2.0
/**
 * Returns whether the given stack trace represents a thread blocked in a buffer request,
 * i.e. a {@code get} frame two frames below a {@link LocalBufferPool} frame.
 *
 * @param stackTrace stack trace of the thread to check
 * @return true iff the thread is in a blocking buffer request
 */
public static boolean isInBlockingBufferRequest(StackTraceElement[] stackTrace) {
    // a blocking request produces a deep stack; short traces cannot match
    if (stackTrace.length < 8) {
        return false;
    }

    final String bufferPoolClassName = LocalBufferPool.class.getName();
    for (int frame = 0; frame <= stackTrace.length - 3; frame++) {
        if ("get".equals(stackTrace[frame].getMethodName())
                && bufferPoolClassName.equals(stackTrace[frame + 2].getClassName())) {
            return true;
        }
    }
    return false;
}
Returns whether the stack trace represents a Thread in a blocking buffer request. @param stackTrace Stack trace of the Thread to check @return Flag indicating whether the Thread is in a blocking buffer request or not
isInBlockingBufferRequest
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/BufferBuilderTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/BufferBuilderTestUtils.java
Apache-2.0
/** Creating a pool requiring more buffers than the global pool owns must fail. */
@Test
void testRequireMoreThanPossible1() {
    // one more than the global pool can ever provide
    final int tooManyBuffers = networkBufferPool.getTotalNumberOfMemorySegments() + 1;

    assertThatThrownBy(
                    () -> networkBufferPool.createBufferPool(tooManyBuffers, Integer.MAX_VALUE))
            .isInstanceOf(IOException.class)
            .hasMessageContaining("Insufficient number of network buffers");
}
Tests creating one buffer pool which requires more buffers than available.
testRequireMoreThanPossible1
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/BufferPoolFactoryTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/BufferPoolFactoryTest.java
Apache-2.0
/**
 * Two pools whose combined required buffers exceed the global capacity: the first
 * creation succeeds, the second must fail with an IOException.
 */
@Test
void testRequireMoreThanPossible2() throws IOException {
    // each pool requires just over half of the buffers, so two cannot coexist
    final int requiredPerPool = numBuffers / 2 + 1;

    BufferPool firstPool = null;
    try {
        firstPool = networkBufferPool.createBufferPool(requiredPerPool, numBuffers);

        assertThatThrownBy(
                        () -> networkBufferPool.createBufferPool(requiredPerPool, numBuffers))
                .isInstanceOf(IOException.class)
                .hasMessageContaining("Insufficient number of network buffers");
    } finally {
        if (firstPool != null) {
            firstPool.lazyDestroy();
        }
    }
}
Tests creating two buffer pools which together require more buffers than available.
testRequireMoreThanPossible2
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/BufferPoolFactoryTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/BufferPoolFactoryTest.java
Apache-2.0
/**
 * Requesting a buffer from a destroyed {@link LocalBufferPool} must fail with a
 * {@link CancelTaskException}.
 */
@Test
void testRequestAfterDestroy() {
    NetworkBufferPool networkBufferPool = new NetworkBufferPool(1, 4096);
    try {
        LocalBufferPool localBufferPool = new LocalBufferPool(networkBufferPool, 1);
        localBufferPool.lazyDestroy();

        assertThatThrownBy(localBufferPool::requestBuffer)
                .withFailMessage("Call should have failed with an CancelTaskException")
                .isInstanceOf(CancelTaskException.class);
    } finally {
        // release the global pool's memory even if the assertion fails
        // (was missing before; sibling tests destroy the pool in a finally block)
        networkBufferPool.destroy();
    }
}
Tests for the destruction of a {@link LocalBufferPool}.
testRequestAfterDestroy
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/LocalBufferPoolDestroyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/LocalBufferPoolDestroyTest.java
Apache-2.0
/**
 * A thread blocked in an unsatisfiable buffer request must be released with a
 * CancelTaskException when the pool is destroyed. The test polls the request thread's
 * stack trace until it is provably inside the blocking call, then destroys the pool.
 */
@Test
void testDestroyWhileBlockingRequest() throws Exception {
    AtomicReference<Exception> asyncException = new AtomicReference<>();

    NetworkBufferPool networkBufferPool = null;
    LocalBufferPool localBufferPool = null;

    try {
        networkBufferPool = new NetworkBufferPool(1, 4096);
        localBufferPool = new LocalBufferPool(networkBufferPool, 1);

        // Drain buffer pool (the second request returns null: no buffer left)
        assertThat(localBufferPool.requestBuffer()).isNotNull();
        assertThat(localBufferPool.requestBuffer()).isNull();

        // Start request Thread
        Thread thread = new Thread(new BufferRequestTask(localBufferPool, asyncException));
        thread.start();

        // Wait for request: poll up to 50 times (500 ms apart) for the thread to be
        // observably blocked inside the buffer request
        boolean success = false;

        for (int i = 0; i < 50; i++) {
            StackTraceElement[] stackTrace = thread.getStackTrace();
            success = isInBlockingBufferRequest(stackTrace);

            if (success) {
                break;
            } else {
                // Retry
                Thread.sleep(500);
            }
        }

        // Verify that Thread was in blocking request
        assertThat(success)
                .withFailMessage("Did not trigger blocking buffer request.")
                .isTrue();

        // Destroy the buffer pool; this must wake the blocked thread
        localBufferPool.lazyDestroy();

        // Wait for Thread to finish
        thread.join();

        // Verify expected Exception
        assertThat(asyncException.get())
                .withFailMessage("Did not throw expected Exception")
                .isInstanceOf(CancelTaskException.class);
    } finally {
        if (localBufferPool != null) {
            localBufferPool.lazyDestroy();
        }

        if (networkBufferPool != null) {
            networkBufferPool.destroyAllBufferPools();
            networkBufferPool.destroy();
        }
    }
}
Tests that a blocking request fails properly if the buffer pool is destroyed. <p>Starts a Thread, which triggers an unsatisfiable blocking buffer request. After making sure that the Thread is actually waiting in the blocking call, the buffer pool is destroyed and we check whether the request Thread threw the expected Exception.
testDestroyWhileBlockingRequest
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/LocalBufferPoolDestroyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/LocalBufferPoolDestroyTest.java
Apache-2.0
/**
 * Triggers a blocking buffer request on the drained pool; any exception (expected:
 * the cancellation raised on pool destruction) is published via {@code asyncException}.
 */
@Override
public void run() {
    try {
        // precondition: the pool must already be drained, otherwise the request
        // below would not block
        assertThat(bufferPool.requestBuffer())
                .withFailMessage("Test assumption violated: expected no available buffer")
                .isNull();

        bufferPool.requestBufferBuilderBlocking();
    } catch (Exception t) {
        asyncException.set(t);
    }
}
Task triggering a blocking buffer request (the test assumes that no buffer is available).
run
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/LocalBufferPoolDestroyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/LocalBufferPoolDestroyTest.java
Apache-2.0
@Test void testRequestMemorySegmentsLessThanTotalBuffers() throws IOException { final int numBuffers = 10; NetworkBufferPool globalPool = new NetworkBufferPool(numBuffers, 128); List<MemorySegment> memorySegments = Collections.emptyList(); try { memorySegments = globalPool.requestUnpooledMemorySegments(numBuffers / 2); assertThat(memorySegments).hasSize(numBuffers / 2); globalPool.recycleUnpooledMemorySegments(memorySegments); memorySegments.clear(); assertThat(globalPool.getNumberOfAvailableMemorySegments()).isEqualTo(numBuffers); } finally { globalPool.recycleUnpooledMemorySegments(memorySegments); // just in case globalPool.destroy(); } }
Tests {@link NetworkBufferPool#requestUnpooledMemorySegments(int)} with the {@link NetworkBufferPool} currently containing the number of required free segments.
testRequestMemorySegmentsLessThanTotalBuffers
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferPoolTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferPoolTest.java
Apache-2.0
/**
 * Requesting more unpooled segments than the pool's total capacity must fail with an
 * IOException, and the failed request must not leak any segments.
 */
@Test
void testRequestMemorySegmentsMoreThanTotalBuffers() {
    final int numBuffers = 10;

    NetworkBufferPool globalPool = new NetworkBufferPool(numBuffers, 128);
    try {
        assertThatThrownBy(() -> globalPool.requestUnpooledMemorySegments(numBuffers + 1))
                .isInstanceOf(IOException.class);
        // all segments must still be available after the failed request
        assertThat(globalPool.getNumberOfAvailableMemorySegments()).isEqualTo(numBuffers);
    } finally {
        globalPool.destroy();
    }
}
Tests {@link NetworkBufferPool#requestUnpooledMemorySegments(int)} with the number of required buffers exceeding the capacity of {@link NetworkBufferPool}.
testRequestMemorySegmentsMoreThanTotalBuffers
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferPoolTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferPoolTest.java
Apache-2.0
@Test void testRequestMemorySegmentsWithInvalidArgument() { NetworkBufferPool globalPool = new NetworkBufferPool(10, 128); // the number of requested buffers should be non-negative assertThatThrownBy(() -> globalPool.requestUnpooledMemorySegments(-1)) .isInstanceOf(IllegalArgumentException.class); globalPool.destroy(); }
Tests {@link NetworkBufferPool#requestUnpooledMemorySegments(int)} with the invalid argument to cause exception.
testRequestMemorySegmentsWithInvalidArgument
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferPoolTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferPoolTest.java
Apache-2.0
/**
 * requestUnpooledMemorySegments must block and eventually succeed when the required free
 * segments are currently occupied by a buffer pool and only become available over time
 * (a background thread recycles the taken buffers after a short delay).
 */
@Test
void testRequestMemorySegmentsWithBuffersTaken() throws IOException, InterruptedException {
    final int numBuffers = 10;

    NetworkBufferPool networkBufferPool = new NetworkBufferPool(numBuffers, 128);
    final List<Buffer> buffers = new ArrayList<>(numBuffers);
    List<MemorySegment> memorySegments = Collections.emptyList();
    Thread bufferRecycler = null;
    BufferPool lbp1 = null;
    try {
        lbp1 = networkBufferPool.createBufferPool(numBuffers / 2, numBuffers);

        // take all buffers (more than the minimum required)
        for (int i = 0; i < numBuffers; ++i) {
            Buffer buffer = lbp1.requestBuffer();
            buffers.add(buffer);
            assertThat(buffer).isNotNull();
        }

        // requestMemorySegments() below will wait for buffers
        // this will make sure that enough buffers are freed eventually for it to continue
        final OneShotLatch isRunning = new OneShotLatch();
        bufferRecycler =
                new Thread(
                        () -> {
                            try {
                                isRunning.trigger();
                                // small delay so the main thread reaches the blocking
                                // request before buffers come back
                                Thread.sleep(100);
                            } catch (InterruptedException ignored) {
                            }

                            for (Buffer buffer : buffers) {
                                buffer.recycleBuffer();
                            }
                        });
        bufferRecycler.start();

        // take more buffers than are freely available at the moment via
        // requestMemorySegments()
        isRunning.await();
        memorySegments = networkBufferPool.requestUnpooledMemorySegments(numBuffers / 2);
        assertThat(memorySegments).doesNotContainNull();
    } finally {
        if (bufferRecycler != null) {
            bufferRecycler.join();
        }
        if (lbp1 != null) {
            lbp1.lazyDestroy();
        }
        networkBufferPool.recycleUnpooledMemorySegments(memorySegments);
        networkBufferPool.destroy();
    }
}
Tests {@link NetworkBufferPool#requestUnpooledMemorySegments(int)} with the {@link NetworkBufferPool} currently not containing the number of required free segments (currently occupied by a buffer pool).
testRequestMemorySegmentsWithBuffersTaken
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferPoolTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferPoolTest.java
Apache-2.0
@Test void testIsAvailableOrNotAfterRequestAndRecycleSingleSegment() { final int numBuffers = 2; final NetworkBufferPool globalPool = new NetworkBufferPool(numBuffers, 128); try { // the global pool should be in available state initially assertThat(globalPool.getAvailableFuture()).isDone(); // request the first segment final MemorySegment segment1 = checkNotNull(globalPool.requestPooledMemorySegment()); assertThat(globalPool.getAvailableFuture()).isDone(); // request the second segment final MemorySegment segment2 = checkNotNull(globalPool.requestPooledMemorySegment()); assertThat(globalPool.getAvailableFuture()).isNotDone(); final CompletableFuture<?> availableFuture = globalPool.getAvailableFuture(); // recycle the first segment globalPool.recyclePooledMemorySegment(segment1); assertThat(availableFuture).isDone(); assertThat(globalPool.getAvailableFuture()).isDone(); // recycle the second segment globalPool.recyclePooledMemorySegment(segment2); assertThat(globalPool.getAvailableFuture()).isDone(); } finally { globalPool.destroy(); } }
Tests {@link NetworkBufferPool#isAvailable()}, verifying that the buffer availability is correctly maintained after memory segments are requested by {@link NetworkBufferPool#requestPooledMemorySegment()} and recycled by {@link NetworkBufferPool#recyclePooledMemorySegment(MemorySegment)}.
testIsAvailableOrNotAfterRequestAndRecycleSingleSegment
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferPoolTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferPoolTest.java
Apache-2.0
/**
 * Availability of the global pool around batched unpooled request/recycle: the pool of
 * 2*N segments stays available after one batch of N, turns unavailable once drained,
 * and becomes available again when a batch is recycled.
 */
@Test
void testIsAvailableOrNotAfterRequestAndRecycleMultiSegments() throws Exception {
    final int numberOfSegmentsToRequest = 5;
    final int numBuffers = 2 * numberOfSegmentsToRequest;

    final NetworkBufferPool globalPool = new NetworkBufferPool(numBuffers, 128);

    try {
        // the global pool should be in available state initially
        assertThat(globalPool.getAvailableFuture()).isDone();

        // request 5 segments
        List<MemorySegment> segments1 =
                globalPool.requestUnpooledMemorySegments(numberOfSegmentsToRequest);
        assertThat(globalPool.getAvailableFuture()).isDone();
        assertThat(segments1).hasSize(numberOfSegmentsToRequest);

        // request another 5 segments (pool now drained -> unavailable)
        List<MemorySegment> segments2 =
                globalPool.requestUnpooledMemorySegments(numberOfSegmentsToRequest);
        assertThat(globalPool.getAvailableFuture()).isNotDone();
        assertThat(segments2).hasSize(numberOfSegmentsToRequest);

        // recycle 5 segments (the previously pending future must complete)
        CompletableFuture<?> availableFuture = globalPool.getAvailableFuture();
        globalPool.recycleUnpooledMemorySegments(segments1);
        assertThat(availableFuture).isDone();

        // request another 5 segments (drained again -> unavailable)
        final List<MemorySegment> segments3 =
                globalPool.requestUnpooledMemorySegments(numberOfSegmentsToRequest);
        assertThat(globalPool.getAvailableFuture()).isNotDone();
        assertThat(segments3).hasSize(numberOfSegmentsToRequest);

        // recycle another 5 segments
        globalPool.recycleUnpooledMemorySegments(segments2);
        assertThat(globalPool.getAvailableFuture()).isDone();

        // recycle the last 5 segments
        globalPool.recycleUnpooledMemorySegments(segments3);
        assertThat(globalPool.getAvailableFuture()).isDone();
    } finally {
        globalPool.destroy();
    }
}
Tests {@link NetworkBufferPool#isAvailable()}, verifying that the buffer availability is correctly maintained after memory segments are requested by {@link NetworkBufferPool#requestUnpooledMemorySegments(int)} and recycled by {@link NetworkBufferPool#recycleUnpooledMemorySegments(Collection)}.
testIsAvailableOrNotAfterRequestAndRecycleMultiSegments
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferPoolTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferPoolTest.java
Apache-2.0
/**
 * Blocking buffer requests issued concurrently from multiple local buffer pools must be
 * fulfilled once segments are recycled back to the global network buffer pool. The test
 * first exhausts the global pool, lets worker threads block on requests, then recycles
 * the taken segments and verifies all blocked requests complete.
 */
@Test
void testBlockingRequestFromMultiLocalBufferPool() throws IOException, InterruptedException {
    final int localPoolRequiredSize = 5;
    final int localPoolMaxSize = 10;
    final int numLocalBufferPool = 2;
    final int numberOfSegmentsToRequest = 10;
    final int numBuffers = numLocalBufferPool * localPoolMaxSize;

    final ExecutorService executorService = Executors.newFixedThreadPool(numLocalBufferPool);
    final NetworkBufferPool globalPool = new NetworkBufferPool(numBuffers, 128);
    final List<BufferPool> localBufferPools = new ArrayList<>(numLocalBufferPool);

    try {
        // create local buffer pools
        for (int i = 0; i < numLocalBufferPool; ++i) {
            final BufferPool localPool =
                    globalPool.createBufferPool(localPoolRequiredSize, localPoolMaxSize);
            localBufferPools.add(localPool);
            assertThat(localPool.getAvailableFuture()).isDone();
        }

        // request some segments from the global pool in two different ways
        // (pooled single requests and a batched unpooled request), leaving the pool
        // with exactly one free segment so it still reports available
        final List<MemorySegment> segments = new ArrayList<>(numberOfSegmentsToRequest - 1);
        for (int i = 0; i < numberOfSegmentsToRequest - 1; ++i) {
            segments.add(globalPool.requestPooledMemorySegment());
        }
        final List<MemorySegment> exclusiveSegments =
                globalPool.requestUnpooledMemorySegments(
                        globalPool.getNumberOfAvailableMemorySegments() - 1);
        assertThat(globalPool.getAvailableFuture()).isDone();
        for (final BufferPool localPool : localBufferPools) {
            assertThat(localPool.getAvailableFuture()).isDone();
        }

        // blocking request buffers form local buffer pools: each worker thread tries
        // to obtain its pool's maximum number of buffers, which cannot be satisfied yet
        final CountDownLatch latch = new CountDownLatch(numLocalBufferPool);
        final BlockingQueue<BufferBuilder> segmentsRequested =
                new ArrayBlockingQueue<>(numBuffers);
        final AtomicReference<Throwable> cause = new AtomicReference<>();
        for (final BufferPool localPool : localBufferPools) {
            executorService.submit(
                    () -> {
                        try {
                            for (int num = localPoolMaxSize; num > 0; --num) {
                                segmentsRequested.add(localPool.requestBufferBuilderBlocking());
                            }
                        } catch (Exception e) {
                            cause.set(e);
                        } finally {
                            latch.countDown();
                        }
                    });
        }

        // wait until all available buffers are requested
        while (segmentsRequested.size() + segments.size() + exclusiveSegments.size()
                < numBuffers) {
            Thread.sleep(10);
            assertThat(cause.get()).isNull();
        }

        // everything is taken now: global and local pools must all report unavailable
        final CompletableFuture<?> globalPoolAvailableFuture = globalPool.getAvailableFuture();
        assertThat(globalPoolAvailableFuture).isNotDone();

        final List<CompletableFuture<?>> localPoolAvailableFutures =
                new ArrayList<>(numLocalBufferPool);
        for (BufferPool localPool : localBufferPools) {
            CompletableFuture<?> localPoolAvailableFuture = localPool.getAvailableFuture();
            localPoolAvailableFutures.add(localPoolAvailableFuture);
            assertThat(localPoolAvailableFuture).isNotDone();
        }

        // recycle the previously requested segments; this is what unblocks the workers
        for (MemorySegment segment : segments) {
            globalPool.recyclePooledMemorySegment(segment);
        }
        globalPool.recycleUnpooledMemorySegments(exclusiveSegments);

        assertThat(globalPoolAvailableFuture).isDone();
        for (CompletableFuture<?> localPoolAvailableFuture : localPoolAvailableFutures) {
            assertThat(localPoolAvailableFuture).isDone();
        }

        // wait until all blocking buffer requests finish
        latch.await();

        assertThat(cause.get()).isNull();
        assertThat(globalPool.getNumberOfAvailableMemorySegments()).isZero();
        assertThat(globalPool.getAvailableFuture()).isNotDone();
        for (BufferPool localPool : localBufferPools) {
            assertThat(localPool.getAvailableFuture()).isNotDone();
            assertThat(localPool.bestEffortGetNumOfUsedBuffers()).isEqualTo(localPoolMaxSize);
        }

        // recycle all the requested buffers
        for (BufferBuilder bufferBuilder : segmentsRequested) {
            bufferBuilder.close();
        }

    } finally {
        for (BufferPool bufferPool : localBufferPools) {
            bufferPool.lazyDestroy();
        }
        executorService.shutdown();
        globalPool.destroy();
    }
}
Tests that blocking request of multi local buffer pools can be fulfilled by recycled segments to the global network buffer pool.
testBlockingRequestFromMultiLocalBufferPool
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferPoolTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferPoolTest.java
Apache-2.0
/**
 * Creates a new test buffer, delegating to the four-argument overload with
 * {@link FreeingBufferRecycler#INSTANCE} as the recycler.
 *
 * @param length buffer capacity
 * @param maxCapacity maximum capacity of the underlying memory segment
 * @param isBuffer whether the buffer represents data (true) or an event (false)
 * @return the buffer
 */
private static NetworkBuffer newBuffer(int length, int maxCapacity, boolean isBuffer) {
    return newBuffer(length, maxCapacity, isBuffer, FreeingBufferRecycler.INSTANCE);
}
Creates a new buffer for testing. @param length buffer capacity @param maxCapacity buffer maximum capacity (will be used for the underlying {@link MemorySegment}) @param isBuffer whether the buffer should represent data (<tt>true</tt>) or an event (<tt>false</tt>) @return the buffer
newBuffer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/NetworkBufferTest.java
Apache-2.0
/**
 * ErrorResponse messages must be wrapped in RemoteTransportException and delivered only
 * to the addressed channel (channel-scoped error) or to all channels (fatal error),
 * without the exception propagating to the end of the Netty pipeline.
 */
@Test
void testWrappingOfRemoteErrorMessage() throws Exception {
    EmbeddedChannel ch = createEmbeddedChannel();

    NetworkClientHandler handler = getClientHandler(ch);

    // Create input channels
    RemoteInputChannel[] rich =
            new RemoteInputChannel[] {createRemoteInputChannel(), createRemoteInputChannel()};

    for (RemoteInputChannel r : rich) {
        when(r.getInputChannelId()).thenReturn(new InputChannelID());
        handler.addInputChannel(r);
    }

    // Error msg for channel[0] only
    ch.pipeline()
            .fireChannelRead(
                    new NettyMessage.ErrorResponse(
                            new RuntimeException("Expected test exception"),
                            rich[0].getInputChannelId()));

    // Exception should not reach end of pipeline...
    assertThatNoException()
            .describedAs(
                    "The exception reached the end of the pipeline and "
                            + "was not handled correctly by the last handler.")
            .isThrownBy(ch::checkException);

    // ...only the addressed channel is notified
    verify(rich[0], times(1)).onError(isA(RemoteTransportException.class));
    verify(rich[1], never()).onError(any(Throwable.class));

    // Fatal error for all channels (no channel id attached)
    ch.pipeline()
            .fireChannelRead(
                    new NettyMessage.ErrorResponse(
                            new RuntimeException("Expected test exception")));

    // Exception should not reach end of pipeline...
    assertThatNoException()
            .describedAs(
                    "The exception reached the end of the pipeline and "
                            + "was not handled correctly by the last handler.")
            .isThrownBy(ch::checkException);

    // ...now every registered channel received a RemoteTransportException
    verify(rich[0], times(2)).onError(isA(RemoteTransportException.class));
    verify(rich[1], times(1)).onError(isA(RemoteTransportException.class));
}
Verifies that {@link NettyMessage.ErrorResponse} messages are correctly wrapped in {@link RemoteTransportException} instances.
testWrappingOfRemoteErrorMessage
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/ClientTransportErrorHandlingTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/ClientTransportErrorHandlingTest.java
Apache-2.0
/**
 * Exceptions fired through the pipeline must be handled by the client handler (not
 * reach the end of the pipeline) and must notify every registered input channel with a
 * LocalTransportException.
 */
@Test
void testExceptionCaught() throws Exception {
    EmbeddedChannel ch = createEmbeddedChannel();

    NetworkClientHandler handler = getClientHandler(ch);

    // Create input channels
    RemoteInputChannel[] rich =
            new RemoteInputChannel[] {createRemoteInputChannel(), createRemoteInputChannel()};

    for (RemoteInputChannel r : rich) {
        when(r.getInputChannelId()).thenReturn(new InputChannelID());
        handler.addInputChannel(r);
    }

    ch.pipeline().fireExceptionCaught(new Exception());

    // Exception should not reach end of pipeline...
    assertThatNoException()
            .describedAs(
                    "The exception reached the end of the pipeline and "
                            + "was not handled correctly by the last handler.")
            .isThrownBy(ch::checkException);

    // ...but all the registered channels should be notified.
    for (RemoteInputChannel r : rich) {
        verify(r).onError(isA(LocalTransportException.class));
    }
}
Verifies that fired Exceptions are handled correctly by the pipeline.
testExceptionCaught
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/ClientTransportErrorHandlingTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/ClientTransportErrorHandlingTest.java
Apache-2.0
/**
 * "Connection reset by peer" IOExceptions are special-cased: the channel must be
 * notified with a RemoteTransportException whose own message does NOT contain the raw
 * phrase but whose cause is the original IOException. Assertions run inside the Mockito
 * answer, so failures are captured in {@code error} and re-checked at the end.
 */
@Test
void testConnectionResetByPeer() throws Throwable {
    EmbeddedChannel ch = createEmbeddedChannel();

    NetworkClientHandler handler = getClientHandler(ch);

    RemoteInputChannel rich = addInputChannel(handler);

    final Throwable[] error = new Throwable[1];

    // Verify the Exception delivered to onError from inside the mock callback
    doAnswer(
                    (Answer<Void>)
                            invocation -> {
                                Throwable cause = (Throwable) invocation.getArguments()[0];

                                try {
                                    assertThat(cause)
                                            .isInstanceOf(RemoteTransportException.class);
                                    assertThat(cause)
                                            .hasMessageNotContaining(
                                                    "Connection reset by peer");

                                    assertThat(cause.getCause())
                                            .isInstanceOf(IOException.class);
                                    assertThat(cause.getCause())
                                            .hasMessage("Connection reset by peer");
                                } catch (Throwable t) {
                                    // remember the assertion failure; it is checked below
                                    error[0] = t;
                                }

                                return null;
                            })
            .when(rich)
            .onError(any(Throwable.class));

    ch.pipeline().fireExceptionCaught(new IOException("Connection reset by peer"));

    assertThat(error[0]).isNull();
}
Verifies that "Connection reset by peer" Exceptions are special-cased and are reported as an instance of {@link RemoteTransportException}.
testConnectionResetByPeer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/ClientTransportErrorHandlingTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/ClientTransportErrorHandlingTest.java
Apache-2.0
@ParameterizedTest(name = "{index} => isFullyFilled={0}, numOfPartialBuffers={1}") @MethodSource("bufferDescriptors") void testReceiveEmptyBuffer(boolean isFullyFilled, int numOfPartialBuffers) throws Exception { // Minimal mock of a remote input channel final BufferProvider bufferProvider = mock(BufferProvider.class); when(bufferProvider.requestBuffer()).thenReturn(TestBufferFactory.createBuffer(0)); final RemoteInputChannel inputChannel = mock(RemoteInputChannel.class); when(inputChannel.getInputChannelId()).thenReturn(new InputChannelID()); when(inputChannel.getBufferProvider()).thenReturn(bufferProvider); final CreditBasedPartitionRequestClientHandler client = new CreditBasedPartitionRequestClientHandler(); client.addInputChannel(inputChannel); final int backlog = 2; final BufferResponse receivedBuffer = createBufferResponse( createBuffer(isFullyFilled, numOfPartialBuffers, 0), 0, inputChannel.getInputChannelId(), backlog, new NetworkBufferAllocator(client)); // Read the empty buffer client.channelRead(mock(ChannelHandlerContext.class), receivedBuffer); // This should not throw an exception verify(inputChannel, never()).onError(any(Throwable.class)); verify(inputChannel, times(1)).onEmptyBuffer(0, backlog); }
Tests a fix for FLINK-1761. <p>FLINK-1761 discovered an IndexOutOfBoundsException, when receiving buffers of size 0.
testReceiveEmptyBuffer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandlerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandlerTest.java
Apache-2.0
@ParameterizedTest(name = "{index} => isFullyFilled={0}, numOfPartialBuffers={1}") @MethodSource("bufferDescriptors") void testReceiveBuffer(boolean isFullyFilled, int numOfPartialBuffers) throws Exception { final NetworkBufferPool networkBufferPool = new NetworkBufferPool(10, 32 * numOfPartialBuffers); final SingleInputGate inputGate = createSingleInputGate(1, networkBufferPool); final RemoteInputChannel inputChannel = InputChannelBuilder.newBuilder().buildRemoteChannel(inputGate); try { inputGate.setInputChannels(inputChannel); final BufferPool bufferPool = networkBufferPool.createBufferPool(8, 8); inputGate.setBufferPool(bufferPool); inputGate.setupChannels(); final CreditBasedPartitionRequestClientHandler handler = new CreditBasedPartitionRequestClientHandler(); handler.addInputChannel(inputChannel); final int backlog = 2; final BufferResponse bufferResponse = createBufferResponse( createBuffer(isFullyFilled, numOfPartialBuffers, 32), 0, inputChannel.getInputChannelId(), backlog, new NetworkBufferAllocator(handler)); handler.channelRead(mock(ChannelHandlerContext.class), bufferResponse); assertThat(inputChannel.getNumberOfQueuedBuffers()).isEqualTo(numOfPartialBuffers); assertThat(inputChannel.getSenderBacklog()).isEqualTo(2); } finally { releaseResource(inputGate, networkBufferPool); } }
Verifies that {@link RemoteInputChannel#onBuffer(Buffer, int, int, int)} is called when a {@link BufferResponse} is received.
testReceiveBuffer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandlerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandlerTest.java
Apache-2.0
/**
 * Checks that a {@link BufferResponse} carrying a compressed {@link Buffer} is handled
 * correctly: the response keeps its compressed flag and the channel hands out a buffer that
 * is still marked as compressed.
 */
@ParameterizedTest(
        name = "{index} => isFullyFilled={0}, numOfPartialBuffers={1}, compressionCodec={2}")
@MethodSource("bufferDescriptorsWithCompression")
void testReceiveCompressedBuffer(
        final boolean isFullyFilled,
        final int numOfPartialBuffers,
        final String compressionCodec)
        throws Exception {
    final int bufferSize = 1024;
    // Resolve the codec once and reuse it for both compressor and decompressor.
    final CompressionCodec codec = CompressionCodec.valueOf(compressionCodec);
    final BufferCompressor compressor = new BufferCompressor(bufferSize, codec);
    final BufferDecompressor decompressor = new BufferDecompressor(bufferSize, codec);

    final NetworkBufferPool globalPool = new NetworkBufferPool(10, bufferSize);
    final SingleInputGate gate =
            new SingleInputGateBuilder()
                    .setBufferDecompressor(decompressor)
                    .setSegmentProvider(globalPool)
                    .build();
    final RemoteInputChannel inputChannel = createRemoteInputChannel(gate, null);
    gate.setInputChannels(inputChannel);
    try {
        final BufferPool localPool = globalPool.createBufferPool(8, 8);
        gate.setBufferPool(localPool);
        gate.setupChannels();

        final CreditBasedPartitionRequestClientHandler clientHandler =
                new CreditBasedPartitionRequestClientHandler();
        clientHandler.addInputChannel(inputChannel);

        // Compress a test buffer and wrap it into a BufferResponse for this channel.
        final BufferResponse response =
                createBufferResponse(
                        compressBuffer(
                                compressor,
                                createBuffer(isFullyFilled, numOfPartialBuffers, bufferSize)),
                        0,
                        inputChannel.getInputChannelId(),
                        2,
                        new NetworkBufferAllocator(clientHandler));
        assertThat(response.isCompressed).isTrue();

        clientHandler.channelRead(null, response);

        // The channel must deliver the buffer still flagged as compressed.
        final Buffer received = inputChannel.getNextReceivedBuffer();
        assertThat(received).isNotNull();
        assertThat(received.isCompressed()).isTrue();
        received.recycleBuffer();
    } finally {
        releaseResource(gate, globalPool);
    }
}
Verifies that {@link BufferResponse} of compressed {@link Buffer} can be handled correctly.
testReceiveCompressedBuffer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandlerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandlerTest.java
Apache-2.0
/**
 * Checks that a received {@link NettyMessage.BacklogAnnouncement} updates the channel's
 * sender backlog and triggers requesting floating buffers, bounded by the pool size.
 */
@Test
void testReceiveBacklogAnnouncement() throws Exception {
    final int bufferSize = 1024;
    final int numBuffers = 10;
    final NetworkBufferPool globalPool = new NetworkBufferPool(numBuffers, bufferSize);
    final SingleInputGate gate =
            new SingleInputGateBuilder().setSegmentProvider(globalPool).build();
    final RemoteInputChannel inputChannel = createRemoteInputChannel(gate, null);
    gate.setInputChannels(inputChannel);
    try {
        final BufferPool localPool = globalPool.createBufferPool(8, 8);
        gate.setBufferPool(localPool);
        gate.setupChannels();

        final CreditBasedPartitionRequestClientHandler clientHandler =
                new CreditBasedPartitionRequestClientHandler();
        clientHandler.addInputChannel(inputChannel);

        // Before any announcement only the 2 exclusive buffers are available, no floating ones.
        assertThat(inputChannel.getNumberOfAvailableBuffers()).isEqualTo(2);
        assertThat(inputChannel.unsynchronizedGetFloatingBuffersAvailable()).isZero();

        // First announcement: backlog 5 -> 5 floating buffers on top of the 2 exclusive ones.
        int backlog = 5;
        NettyMessage.BacklogAnnouncement announcement =
                new NettyMessage.BacklogAnnouncement(backlog, inputChannel.getInputChannelId());
        clientHandler.channelRead(null, announcement);
        assertThat(inputChannel.getNumberOfAvailableBuffers()).isEqualTo(7);
        assertThat(inputChannel.getNumberOfRequiredBuffers()).isEqualTo(7);
        assertThat(inputChannel.getSenderBacklog()).isEqualTo(backlog);
        assertThat(inputChannel.unsynchronizedGetFloatingBuffersAvailable()).isEqualTo(5);

        // Second announcement: backlog 12 requires 14 buffers, but the local pool only
        // provides 8 floating ones, so availability is capped at 8 + 2 = 10.
        backlog = 12;
        announcement =
                new NettyMessage.BacklogAnnouncement(backlog, inputChannel.getInputChannelId());
        clientHandler.channelRead(null, announcement);
        assertThat(inputChannel.getNumberOfAvailableBuffers()).isEqualTo(10);
        assertThat(inputChannel.getNumberOfRequiredBuffers()).isEqualTo(14);
        assertThat(inputChannel.getSenderBacklog()).isEqualTo(backlog);
        assertThat(inputChannel.unsynchronizedGetFloatingBuffersAvailable()).isEqualTo(8);
    } finally {
        releaseResource(gate, globalPool);
    }
}
Verifies that {@link NettyMessage.BacklogAnnouncement} can be handled correctly.
testReceiveBacklogAnnouncement
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandlerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandlerTest.java
Apache-2.0
/**
 * Verifies that {@link RemoteInputChannel#onError(Throwable)} is called when a {@link
 * BufferResponse} arrives for a channel that has no buffer available to receive it.
 */
@ParameterizedTest(name = "{index} => isFullyFilled={0}, numOfPartialBuffers={1}")
@MethodSource("bufferDescriptors")
void testThrowExceptionForNoAvailableBuffer(boolean isFullyFilled, int numOfPartialBuffers)
        throws Exception {
    final SingleInputGate inputGate = createSingleInputGate(1);
    // Spy on the channel so the error callback can be verified without altering behaviour.
    final RemoteInputChannel inputChannel =
            spy(InputChannelBuilder.newBuilder().buildRemoteChannel(inputGate));

    final CreditBasedPartitionRequestClientHandler handler =
            new CreditBasedPartitionRequestClientHandler();
    handler.addInputChannel(inputChannel);

    assertThat(inputChannel.getNumberOfAvailableBuffers())
            .as("There should be no buffers available in the channel.")
            .isZero();

    // With no buffers available the allocator cannot attach a buffer to the response.
    final BufferResponse bufferResponse =
            createBufferResponse(
                    createBuffer(
                            isFullyFilled, numOfPartialBuffers, TestBufferFactory.BUFFER_SIZE),
                    0,
                    inputChannel.getInputChannelId(),
                    2,
                    new NetworkBufferAllocator(handler));
    assertThat(bufferResponse.getBuffer()).isNull();

    handler.channelRead(mock(ChannelHandlerContext.class), bufferResponse);

    // verify(mock) defaults to times(1); the explicit times(1) was redundant.
    verify(inputChannel).onError(any(IllegalStateException.class));
}
Verifies that {@link RemoteInputChannel#onError(Throwable)} is called when a {@link BufferResponse} is received but there is no buffer available in the input channel.
testThrowExceptionForNoAvailableBuffer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandlerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandlerTest.java
Apache-2.0
/**
 * Verifies that {@link RemoteInputChannel}s are enqueued for credit announcement and that
 * unannounced credits are flushed as {@link AddCredit} messages only while the outbound
 * Netty channel is writable; blocked credits go out on the writability-changed event.
 */
@ParameterizedTest(name = "{index} => isFullyFilled={0}, numOfPartialBuffers={1}")
@MethodSource("bufferDescriptors")
void testNotifyCreditAvailable(boolean isFullyFilled, int numOfPartialBuffers)
        throws Exception {
    final CreditBasedPartitionRequestClientHandler handler =
            new CreditBasedPartitionRequestClientHandler();
    final NetworkBufferAllocator allocator = new NetworkBufferAllocator(handler);
    // EmbeddedChannel lets the test read outbound Netty messages synchronously.
    final EmbeddedChannel channel = new EmbeddedChannel(handler);
    final PartitionRequestClient client =
            new NettyPartitionRequestClient(
                    channel,
                    handler,
                    mock(ConnectionID.class),
                    mock(PartitionRequestClientFactory.class));

    final NetworkBufferPool networkBufferPool =
            new NetworkBufferPool(10, 32 * numOfPartialBuffers);
    final SingleInputGate inputGate = createSingleInputGate(2, networkBufferPool);
    final RemoteInputChannel[] inputChannels = new RemoteInputChannel[2];
    inputChannels[0] = createRemoteInputChannel(inputGate, client);
    inputChannels[1] = createRemoteInputChannel(inputGate, client);
    try {
        inputGate.setInputChannels(inputChannels);
        final BufferPool bufferPool = networkBufferPool.createBufferPool(6, 6);
        inputGate.setBufferPool(bufferPool);
        inputGate.setupChannels();
        inputChannels[0].requestSubpartitions();
        inputChannels[1].requestSubpartitions();

        // The two input channels should send partition requests, each announcing an
        // initial credit of 2.
        assertThat(channel.isWritable()).isTrue();
        Object readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound).isInstanceOf(PartitionRequest.class);
        assertThat(inputChannels[0].getInputChannelId())
                .isEqualTo(((PartitionRequest) readFromOutbound).receiverId);
        assertThat(((PartitionRequest) readFromOutbound).credit).isEqualTo(2);

        readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound).isInstanceOf(PartitionRequest.class);
        assertThat(inputChannels[1].getInputChannelId())
                .isEqualTo(((PartitionRequest) readFromOutbound).receiverId);
        assertThat(((PartitionRequest) readFromOutbound).credit).isEqualTo(2);

        // The buffer response will take one available buffer from input channel, and it will
        // trigger requesting (backlog + numExclusiveBuffers - numAvailableBuffers) floating
        // buffers.
        final BufferResponse bufferResponse1 =
                createBufferResponse(
                        createBuffer(isFullyFilled, numOfPartialBuffers, 32),
                        0,
                        inputChannels[0].getInputChannelId(),
                        1,
                        allocator);
        final BufferResponse bufferResponse2 =
                createBufferResponse(
                        createBuffer(isFullyFilled, numOfPartialBuffers, 32),
                        0,
                        inputChannels[1].getInputChannelId(),
                        1,
                        allocator);
        handler.channelRead(mock(ChannelHandlerContext.class), bufferResponse1);
        handler.channelRead(mock(ChannelHandlerContext.class), bufferResponse2);
        assertThat(inputChannels[0].getUnannouncedCredit()).isEqualTo(2);
        assertThat(inputChannels[1].getUnannouncedCredit()).isEqualTo(2);

        channel.runPendingTasks();

        // The two input channels should notify credits availability via the writable channel.
        readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound).isInstanceOf(AddCredit.class);
        assertThat(inputChannels[0].getInputChannelId())
                .isEqualTo(((AddCredit) readFromOutbound).receiverId);
        assertThat(((AddCredit) readFromOutbound).credit).isEqualTo(2);

        readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound).isInstanceOf(AddCredit.class);
        assertThat(inputChannels[1].getInputChannelId())
                .isEqualTo(((AddCredit) readFromOutbound).receiverId);
        assertThat(((AddCredit) readFromOutbound).credit).isEqualTo(2);
        assertThat((Object) channel.readOutbound()).isNull();

        // Saturate the outbound channel so it becomes un-writable.
        ByteBuf channelBlockingBuffer = blockChannel(channel);

        // Trigger notify credits availability via buffer response on the condition of an
        // un-writable channel.
        final BufferResponse bufferResponse3 =
                createBufferResponse(
                        createBuffer(isFullyFilled, numOfPartialBuffers, 32),
                        numOfPartialBuffers,
                        inputChannels[0].getInputChannelId(),
                        1,
                        allocator);
        handler.channelRead(mock(ChannelHandlerContext.class), bufferResponse3);
        assertThat(inputChannels[0].getUnannouncedCredit()).isOne();
        assertThat(inputChannels[1].getUnannouncedCredit()).isZero();

        channel.runPendingTasks();

        // The input channel will not notify credits via un-writable channel.
        assertThat(channel.isWritable()).isFalse();
        assertThat((Object) channel.readOutbound()).isNull();

        // Flush the buffer to make the channel writable again.
        channel.flush();
        assertThat(channelBlockingBuffer).isSameAs(channel.readOutbound());

        // The input channel should notify credits via channel's writability changed event.
        assertThat(channel.isWritable()).isTrue();
        readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound).isInstanceOf(AddCredit.class);
        assertThat(((AddCredit) readFromOutbound).credit).isOne();
        assertThat(inputChannels[0].getUnannouncedCredit()).isZero();
        assertThat(inputChannels[1].getUnannouncedCredit()).isZero();

        // no more messages
        assertThat((Object) channel.readOutbound()).isNull();
    } finally {
        releaseResource(inputGate, networkBufferPool);
        channel.close();
    }
}
Verifies that {@link RemoteInputChannel} is enqueued in the pipeline for notifying credits, and verifies the behaviour of credit notification by triggering channel's writability changed.
testNotifyCreditAvailable
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandlerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandlerTest.java
Apache-2.0
/**
 * Recycles the incoming buffer and then fails with an {@link IOException} carrying the
 * message this test channel was configured with.
 */
@Override
public void onBuffer(Buffer buffer, int sequenceNumber, int backlog, int subpartitionId)
        throws IOException {
    final IOException failure = new IOException(expectedMessage);
    // Release the buffer before failing so the test does not leak it.
    buffer.recycleBuffer();
    throw failure;
}
A test remote input channel that throws the expected exception when {@link RemoteInputChannel#onBuffer(Buffer, int, int, int)} is called.
onBuffer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandlerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/CreditBasedPartitionRequestClientHandlerTest.java
Apache-2.0
/** Supplies the SSL provider names used to parameterize each test instance. */
@Parameters(name = "SSL provider = {0}") public static List<String> parameters() { return SSLUtilsTest.AVAILABLE_SSL_PROVIDERS; }
Tests for the SSL connection between Netty Server and Client used for the data plane.
parameters
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyClientServerSslTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyClientServerSslTest.java
Apache-2.0
/** Verifies that a connection succeeds with the default (valid) SSL configuration. */
@TestTemplate
void testValidSslConnection() throws Exception {
    // Delegate to the parameterized variant with a freshly created valid SSL config.
    final Configuration sslConfig = createSslConfig();
    testValidSslConnection(sslConfig);
}
Verifies that a valid SSL configuration results in a successful connection.
testValidSslConnection
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyClientServerSslTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyClientServerSslTest.java
Apache-2.0
@TestTemplate public void testInvalidSslConfiguration() throws Exception { NettyProtocol protocol = new NettyTestUtil.NoOpProtocol(); Configuration config = createSslConfig(); // Modify the keystore password to an incorrect one config.set(SecurityOptions.SSL_INTERNAL_KEYSTORE_PASSWORD, "invalidpassword"); try (NetUtils.Port port = NetUtils.getAvailablePort()) { NettyConfig nettyConfig = createNettyConfig(config, port); assertThatThrownBy(() -> NettyTestUtil.initServerAndClient(protocol, nettyConfig)) .withFailMessage("Created server and client from invalid configuration") .isInstanceOf(IOException.class); } }
Verifies that connection setup fails with an invalid SSL configuration.
testInvalidSslConfiguration
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyClientServerSslTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyClientServerSslTest.java
Apache-2.0
/**
 * Initializes the channel via the wrapped initializer, then captures the pipeline's "ssl"
 * {@link SslHandler} for inspection by the test and signals readiness through the latch.
 */
@Override
public void initChannel(SocketChannel channel) throws Exception {
    super.initChannel(channel);

    final SslHandler handler = (SslHandler) channel.pipeline().get("ssl");
    assertThat(handler).isNotNull();

    // Expose the handler to the test and signal that initialization completed.
    serverHandler[0] = handler;
    latch.trigger();
}
Wrapper around {@link NettyServer.ServerChannelInitializer} making the server's SSL handler available for the tests.
initChannel
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyClientServerSslTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyClientServerSslTest.java
Apache-2.0
/** Verifies that the client-side decoder works for unreleased input channels. */
@ParameterizedTest(name = "{index} => isFullyFilled={0}, numOfPartialBuffers={1}") @MethodSource("bufferDescriptors") void testClientMessageDecode(boolean isFullyFilled, int numOfPartialBuffers) throws Exception { setup(numOfPartialBuffers); /* all three flags disabled: plain decoding with no empty buffers and no released/removed channels — flag semantics presumed from the sibling tests, confirm against testNettyMessageClientDecoding's signature */ testNettyMessageClientDecoding(isFullyFilled, numOfPartialBuffers, false, false, false); }
Verifies that the client side decoder works well for unreleased input channels.
testClientMessageDecode
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyMessageClientDecoderDelegateTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyMessageClientDecoderDelegateTest.java
Apache-2.0
/**
 * Verifies that the client-side decoder works for empty buffers, which should not consume
 * data buffers of the input channels.
 */
@ParameterizedTest(name = "{index} => isFullyFilled={0}, numOfPartialBuffers={1}") @MethodSource("bufferDescriptors") void testClientMessageDecodeWithEmptyBuffers(boolean isFullyFilled, int numOfPartialBuffers) throws Exception { setup(numOfPartialBuffers); /* third flag enabled: include empty buffers in the decoded stream — flag semantics presumed from the method name, confirm against testNettyMessageClientDecoding's signature */ testNettyMessageClientDecoding(isFullyFilled, numOfPartialBuffers, true, false, false); }
Verifies that the client side decoder works well for empty buffers. Empty buffers should not consume data buffers of the input channels.
testClientMessageDecodeWithEmptyBuffers
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyMessageClientDecoderDelegateTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/NettyMessageClientDecoderDelegateTest.java
Apache-2.0