code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/**
 * Builds a {@link MockKeyedStateBackend}, first replaying any provided restore state
 * handles into the in-memory state map so the backend starts from the restored snapshot.
 *
 * @return a fully initialized mock keyed state backend
 */
@Override
public MockKeyedStateBackend<K> build() {
    final Map<String, Map<K, Map<Object, Object>>> restoredStates = new HashMap<>();
    final Map<String, StateSnapshotTransformer<Object>> snapshotFilters = new HashMap<>();

    // Fill restoredStates from the restore handles before the backend is constructed.
    new MockRestoreOperation<K>(restoreStateHandles, restoredStates).restore();

    return new MockKeyedStateBackend<>(
            kvStateRegistry,
            keySerializerProvider.currentSchemaSerializer(),
            userCodeClassLoader,
            executionConfig,
            ttlTimeProvider,
            latencyTrackingStateConfig,
            sizeTrackingStateConfig,
            restoredStates,
            snapshotFilters,
            cancelStreamRegistry,
            new InternalKeyContextImpl<>(keyGroupRange, numberOfKeyGroups),
            snapshotSupplier);
}
Builder class for {@link MockKeyedStateBackend}. @param <K> The data type that the key serializer serializes.
build
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/ttl/mock/MockKeyedStateBackendBuilder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/ttl/mock/MockKeyedStateBackendBuilder.java
Apache-2.0
/**
 * Replays every restore handle into {@code stateValues}: each handle's snapshot map is
 * deep-copied (with no snapshot filters applied) and merged into the target map.
 *
 * @return always {@code null}
 */
@Override
public Void restore() {
    state.forEach(
            handle -> {
                MockKeyedStateBackend.MockKeyedStateHandle<K> mockHandle =
                        (MockKeyedStateBackend.MockKeyedStateHandle<K>) handle;
                stateValues.putAll(copy(mockHandle.snapshotStates, Collections.emptyMap()));
            });
    return null;
}
Implementation of mock restore operation. @param <K> The data type that the serializer serializes.
restore
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/ttl/mock/MockRestoreOperation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/ttl/mock/MockRestoreOperation.java
Apache-2.0
/**
 * Tests that the JobLeaderService does not try to reconnect to a JobMaster after that
 * JobMaster has lost leadership. See FLINK-16836.
 */
@Test
void doesNotReconnectAfterTargetLostLeadership() throws Exception {
    final JobID jobId = new JobID();

    final SettableLeaderRetrievalService leaderRetrievalService =
            new SettableLeaderRetrievalService();
    final TestingHighAvailabilityServices haServices =
            new TestingHighAvailabilityServicesBuilder()
                    .setJobMasterLeaderRetrieverFunction(ignored -> leaderRetrievalService)
                    .build();
    final TestingJobMasterGateway jobMasterGateway = registerJobMaster();

    // Trips once the listener observes the job manager gaining leadership.
    final OneShotLatch jobManagerGainedLeadership = new OneShotLatch();
    final TestingJobLeaderListener testingJobLeaderListener =
            new TestingJobLeaderListener(ignored -> jobManagerGainedLeadership.trigger());

    final JobLeaderService jobLeaderService =
            createAndStartJobLeaderService(haServices, testingJobLeaderListener);

    try {
        jobLeaderService.addJob(jobId, jobMasterGateway.getAddress());
        leaderRetrievalService.notifyListener(jobMasterGateway.getAddress(), UUID.randomUUID());
        jobManagerGainedLeadership.await();

        // revoke the leadership
        leaderRetrievalService.notifyListener(null, null);
        testingJobLeaderListener.waitUntilJobManagerLostLeadership();

        // Must be a no-op: the target lost leadership, so no reconnection may happen.
        jobLeaderService.reconnect(jobId);
    } finally {
        jobLeaderService.stop();
    }
}
Tests that the JobLeaderService won't try to reconnect to JobMaster after it has lost the leadership. See FLINK-16836.
doesNotReconnectAfterTargetLostLeadership
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/DefaultJobLeaderServiceTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/DefaultJobLeaderServiceTest.java
Apache-2.0
/**
 * No-op implementation that never reports a producer state.
 *
 * <p>NOTE(review): this returns {@code null} rather than a (never-completing) future;
 * callers in these tests are presumably expected to never consume the result — confirm.
 */
@Override
public CompletableFuture<ExecutionState> requestPartitionProducerState(
        JobID jobId,
        IntermediateDataSetID intermediateDataSetId,
        ResultPartitionID resultPartitionId) {
    return null;
}
A No-Op implementation of the {@link PartitionProducerStateChecker}.
requestPartitionProducerState
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/NoOpPartitionProducerStateChecker.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/NoOpPartitionProducerStateChecker.java
Apache-2.0
@Test void testJobReExecutionAfterTaskExecutorTermination() throws Exception { final JobGraph jobGraph = createJobGraph(PARALLELISM); final MiniCluster miniCluster = MINI_CLUSTER_EXTENSION.getMiniCluster(); final CompletableFuture<JobResult> jobResultFuture = submitJobAndWaitUntilRunning(jobGraph, miniCluster); // kill one TaskExecutor which should fail the job execution miniCluster.terminateTaskManager(0); final JobResult jobResult = jobResultFuture.get(); assertThat(jobResult.isSuccess()).isFalse(); miniCluster.startTaskManager(); final JobGraph newJobGraph = createJobGraph(PARALLELISM); BlockingOperator.unblock(); miniCluster.submitJob(newJobGraph).get(); miniCluster.requestJobResult(newJobGraph.getJobID()).get(); }
Tests that a job can be re-executed after the job has failed due to a TaskExecutor termination.
testJobReExecutionAfterTaskExecutorTermination
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorITCase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorITCase.java
Apache-2.0
/**
 * Sends a single coordination request (for a fresh, unknown {@link OperatorID}) to the
 * coordinator event gateway and then blocks until the task is cancelled.
 */
@Override
protected void doInvoke() throws Exception {
    getEnvironment()
            .getOperatorCoordinatorEventGateway()
            .sendRequestToCoordinator(
                    new OperatorID(),
                    new SerializedValue<>(
                            new TestingCoordinationRequestHandler.Request<>(0L)));
    waitUntilCancelled();
}
Test invokable that fails when receiving a coordination request.
doInvoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorOperatorEventHandlingTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorOperatorEventHandlingTest.java
Apache-2.0
/** Clears the invokable's static queue so no state leaks between test runs. */
@BeforeEach
void setup() {
    UserClassLoaderExtractingInvokable.clearQueue();
}
Tests for the {@link TaskExecutor TaskExecutor's} slot lifetime and its dependencies.
setup
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorSlotLifetimeTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorSlotLifetimeTest.java
Apache-2.0
/** Captures the current {@link TestInfo} so tests can reference their own metadata. */
@BeforeEach
void setUp(TestInfo testInfo) {
    this.testInfo = testInfo;
}
Tests for submission logic of the {@link TaskExecutor}.
setUp
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorSubmissionTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorSubmissionTest.java
Apache-2.0
/**
 * Deploys a sender and a receiver task. The job master answers the sender's RUNNING state
 * update with an {@link ExecutionGraphException}, which must fail the sender; the receiver
 * is then cancelled explicitly and must reach CANCELED.
 */
@Test
void testCancellingDependentAndStateUpdateFails() throws Exception {
    ResourceID producerLocation = ResourceID.generate();
    NettyShuffleDescriptor sdd =
            createRemoteWithIdAndLocation(
                    new IntermediateResultPartitionID(), producerLocation);

    TaskDeploymentDescriptor tdd1 = createSender(sdd);
    TaskDeploymentDescriptor tdd2 = createReceiver(sdd);
    ExecutionAttemptID eid1 = tdd1.getExecutionAttemptId();
    ExecutionAttemptID eid2 = tdd2.getExecutionAttemptId();

    // Completed by the task-manager action listeners registered below.
    final CompletableFuture<Void> task1RunningFuture = new CompletableFuture<>();
    final CompletableFuture<Void> task2RunningFuture = new CompletableFuture<>();
    final CompletableFuture<Void> task1FailedFuture = new CompletableFuture<>();
    final CompletableFuture<Void> task2CanceledFuture = new CompletableFuture<>();

    final JobMasterId jobMasterId = JobMasterId.generate();
    // Job master gateway that rejects the sender's RUNNING state update with an exception
    // and acknowledges everything else.
    TestingJobMasterGateway testingJobMasterGateway =
            new TestingJobMasterGatewayBuilder()
                    .setFencingTokenSupplier(() -> jobMasterId)
                    .setUpdateTaskExecutionStateFunction(
                            taskExecutionState -> {
                                if (taskExecutionState != null
                                        && taskExecutionState.getID().equals(eid1)
                                        && taskExecutionState.getExecutionState()
                                                == ExecutionState.RUNNING) {
                                    return FutureUtils.completedExceptionally(
                                            new ExecutionGraphException(
                                                    "The execution attempt "
                                                            + eid2
                                                            + " was not found."));
                                } else {
                                    return CompletableFuture.completedFuture(Acknowledge.get());
                                }
                            })
                    .build();

    try (TaskSubmissionTestEnvironment env =
            new TaskSubmissionTestEnvironment.Builder(jobId)
                    .setResourceID(producerLocation)
                    .setSlotSize(2)
                    .addTaskManagerActionListener(
                            eid1, ExecutionState.RUNNING, task1RunningFuture)
                    .addTaskManagerActionListener(
                            eid2, ExecutionState.RUNNING, task2RunningFuture)
                    .addTaskManagerActionListener(
                            eid1, ExecutionState.FAILED, task1FailedFuture)
                    .addTaskManagerActionListener(
                            eid2, ExecutionState.CANCELED, task2CanceledFuture)
                    .setJobMasterId(jobMasterId)
                    .setJobMasterGateway(testingJobMasterGateway)
                    .useRealNonMockShuffleEnvironment()
                    .build(EXECUTOR_EXTENSION.getExecutor())) {
        TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
        TaskSlotTable<Task> taskSlotTable = env.getTaskSlotTable();

        // Submit the sender and wait until it is running.
        taskSlotTable.allocateSlot(0, jobId, tdd1.getAllocationId(), Duration.ofSeconds(60));
        tmGateway.submitTask(tdd1, jobMasterId, timeout).get();
        task1RunningFuture.get();

        // Submit the receiver and wait until it is running.
        taskSlotTable.allocateSlot(1, jobId, tdd2.getAllocationId(), Duration.ofSeconds(60));
        tmGateway.submitTask(tdd2, jobMasterId, timeout).get();
        task2RunningFuture.get();

        // The failed state update must fail the sender task.
        task1FailedFuture.get();
        assertThat(taskSlotTable.getTask(eid1).getExecutionState())
                .isEqualTo(ExecutionState.FAILED);

        // Cancel the receiver explicitly and verify it reaches CANCELED.
        tmGateway.cancelTask(eid2, timeout);
        task2CanceledFuture.get();
        assertThat(taskSlotTable.getTask(eid2).getExecutionState())
                .isEqualTo(ExecutionState.CANCELED);
    }
}
This test creates two tasks. The sender sends data but fails to send the state update back to the job manager; the second task blocks until it is canceled.
testCancellingDependentAndStateUpdateFails
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorSubmissionTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorSubmissionTest.java
Apache-2.0
/**
 * Tests that repeated remote {@link PartitionNotFoundException}s ultimately fail the
 * receiver: the receiver requests a partition that was never produced (via a "remote"
 * descriptor pointing at the same TM), and after the configured request backoffs are
 * exhausted the task must fail with {@link PartitionNotFoundException}.
 */
@Test
void testRemotePartitionNotFound() throws Exception {
    try (NetUtils.Port port = NetUtils.getAvailablePort()) {
        final int dataPort = port.getPort();
        Configuration config = new Configuration();
        config.set(NettyShuffleEnvironmentOptions.DATA_PORT, dataPort);
        // Short backoffs so the partition request gives up quickly.
        config.set(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_INITIAL, 100);
        config.set(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_MAX, 200);

        // Remote location (on the same TM though) for the partition
        NettyShuffleDescriptor sdd =
                NettyShuffleDescriptorBuilder.newBuilder().setDataPort(dataPort).buildRemote();

        TaskDeploymentDescriptor tdd = createReceiver(sdd);
        ExecutionAttemptID eid = tdd.getExecutionAttemptId();

        final CompletableFuture<Void> taskRunningFuture = new CompletableFuture<>();
        final CompletableFuture<Void> taskFailedFuture = new CompletableFuture<>();

        try (TaskSubmissionTestEnvironment env =
                new TaskSubmissionTestEnvironment.Builder(jobId)
                        .setSlotSize(2)
                        .addTaskManagerActionListener(
                                eid, ExecutionState.RUNNING, taskRunningFuture)
                        .addTaskManagerActionListener(
                                eid, ExecutionState.FAILED, taskFailedFuture)
                        .setConfiguration(config)
                        .setLocalCommunication(false)
                        .useRealNonMockShuffleEnvironment()
                        .build(EXECUTOR_EXTENSION.getExecutor())) {
            TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
            TaskSlotTable<Task> taskSlotTable = env.getTaskSlotTable();

            taskSlotTable.allocateSlot(0, jobId, tdd.getAllocationId(), Duration.ofSeconds(60));
            tmGateway.submitTask(tdd, env.getJobMasterId(), timeout).get();
            taskRunningFuture.get();

            // The task must eventually fail because the requested partition does not exist.
            taskFailedFuture.get();
            assertThat(taskSlotTable.getTask(eid).getFailureCause())
                    .isInstanceOf(PartitionNotFoundException.class);
        }
    }
}
Tests that repeated remote {@link PartitionNotFoundException}s ultimately fail the receiver.
testRemotePartitionNotFound
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorSubmissionTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorSubmissionTest.java
Apache-2.0
/**
 * Tests that the TaskManager fails a running task if updating the task's input partitions
 * fails: the shuffle environment is stubbed to throw an {@link IOException} from
 * {@code updatePartitionInfo}, and the task must transition to FAILED with that cause.
 */
@Test
void testUpdateTaskInputPartitionsFailure() throws Exception {
    final ExecutionAttemptID eid = createExecutionAttemptId();

    final TaskDeploymentDescriptor tdd =
            createTestTaskDeploymentDescriptor("test task", eid, BlockingNoOpInvokable.class);

    final CompletableFuture<Void> taskRunningFuture = new CompletableFuture<>();
    final CompletableFuture<Void> taskFailedFuture = new CompletableFuture<>();

    // RETURNS_MOCKS keeps unstubbed calls from returning null during task setup.
    final ShuffleEnvironment<?, ?> shuffleEnvironment =
            mock(ShuffleEnvironment.class, Mockito.RETURNS_MOCKS);

    try (TaskSubmissionTestEnvironment env =
            new TaskSubmissionTestEnvironment.Builder(jobId)
                    .setShuffleEnvironment(shuffleEnvironment)
                    .setSlotSize(1)
                    .addTaskManagerActionListener(
                            eid, ExecutionState.RUNNING, taskRunningFuture)
                    .addTaskManagerActionListener(eid, ExecutionState.FAILED, taskFailedFuture)
                    .build(EXECUTOR_EXTENSION.getExecutor())) {
        TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
        TaskSlotTable<Task> taskSlotTable = env.getTaskSlotTable();

        taskSlotTable.allocateSlot(0, jobId, tdd.getAllocationId(), Duration.ofSeconds(60));
        tmGateway.submitTask(tdd, env.getJobMasterId(), timeout).get();
        taskRunningFuture.get();

        final ResourceID producerLocation = env.getTaskExecutor().getResourceID();
        NettyShuffleDescriptor shuffleDescriptor =
                createRemoteWithIdAndLocation(
                        new IntermediateResultPartitionID(), producerLocation);
        final PartitionInfo partitionUpdate =
                new PartitionInfo(new IntermediateDataSetID(), shuffleDescriptor);
        // Make exactly this partition update fail.
        doThrow(new IOException())
                .when(shuffleEnvironment)
                .updatePartitionInfo(eid, partitionUpdate);

        final CompletableFuture<Acknowledge> updateFuture =
                tmGateway.updatePartitions(
                        eid, Collections.singletonList(partitionUpdate), timeout);

        updateFuture.get();
        // The failed update must fail the task itself.
        taskFailedFuture.get();
        Task task = taskSlotTable.getTask(tdd.getExecutionAttemptId());
        assertThat(task.getExecutionState()).isEqualTo(ExecutionState.FAILED);
        assertThat(task.getFailureCause()).isInstanceOf(IOException.class);
    }
}
Tests that the TaskManager fails the task if the partition update fails.
testUpdateTaskInputPartitionsFailure
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorSubmissionTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorSubmissionTest.java
Apache-2.0
/**
 * Tests that a TaskManager detects a job leader for which it has reserved slots. Upon
 * detecting the job leader, it will offer all reserved slots to the JobManager.
 */
@Test
void testJobLeaderDetection() throws Exception {
    final TaskSlotTable<Task> taskSlotTable =
            TaskSlotUtils.createTaskSlotTable(1, EXECUTOR_EXTENSION.getExecutor());
    final JobLeaderService jobLeaderService =
            new DefaultJobLeaderService(
                    unresolvedTaskManagerLocation,
                    RetryingRegistrationConfiguration.defaultConfiguration());

    final TestingResourceManagerGateway resourceManagerGateway =
            new TestingResourceManagerGateway();
    // Completed once the TM has sent its initial slot report to the RM.
    CompletableFuture<Void> initialSlotReportFuture = new CompletableFuture<>();
    resourceManagerGateway.setSendSlotReportFunction(
            resourceIDInstanceIDSlotReportTuple3 -> {
                initialSlotReportFuture.complete(null);
                return CompletableFuture.completedFuture(Acknowledge.get());
            });

    // Captures the slots the TM offers once it detects the job leader.
    final CompletableFuture<Collection<SlotOffer>> offeredSlotsFuture =
            new CompletableFuture<>();
    final TestingJobMasterGateway jobMasterGateway =
            new TestingJobMasterGatewayBuilder()
                    .setOfferSlotsFunction(
                            (resourceID, slotOffers) -> {
                                offeredSlotsFuture.complete(new ArrayList<>(slotOffers));
                                return CompletableFuture.completedFuture(slotOffers);
                            })
                    .build();

    rpc.registerGateway(resourceManagerGateway.getAddress(), resourceManagerGateway);
    rpc.registerGateway(jobMasterGateway.getAddress(), jobMasterGateway);

    final AllocationID allocationId = new AllocationID();

    final TaskExecutorLocalStateStoresManager localStateStoresManager =
            createTaskExecutorLocalStateStoresManager();

    final TaskManagerServices taskManagerServices =
            new TaskManagerServicesBuilder()
                    .setUnresolvedTaskManagerLocation(unresolvedTaskManagerLocation)
                    .setTaskSlotTable(taskSlotTable)
                    .setJobLeaderService(jobLeaderService)
                    .setTaskStateManager(localStateStoresManager)
                    .build();

    TaskExecutor taskManager = createTaskExecutor(taskManagerServices);

    try {
        taskManager.start();

        final TaskExecutorGateway tmGateway =
                taskManager.getSelfGateway(TaskExecutorGateway.class);

        // tell the task manager about the rm leader
        resourceManagerLeaderRetriever.notifyListener(
                resourceManagerGateway.getAddress(),
                resourceManagerGateway.getFencingToken().toUUID());

        initialSlotReportFuture.get();

        // Reserve a slot for the job before the job leader is known.
        requestSlot(
                tmGateway,
                jobId,
                allocationId,
                buildSlotID(0),
                ResourceProfile.ZERO,
                jobMasterGateway.getAddress(),
                resourceManagerGateway.getFencingToken());

        // now inform the task manager about the new job leader
        jobManagerLeaderRetriever.notifyListener(
                jobMasterGateway.getAddress(), jobMasterGateway.getFencingToken().toUUID());

        // The reserved slot must be offered to the newly detected job leader.
        final Collection<SlotOffer> offeredSlots = offeredSlotsFuture.get();
        final Collection<AllocationID> allocationIds =
                offeredSlots.stream()
                        .map(SlotOffer::getAllocationId)
                        .collect(Collectors.toList());
        assertThat(allocationIds).containsExactlyInAnyOrder(allocationId);
    } finally {
        RpcUtils.terminateRpcEndpoint(taskManager);
    }
}
Tests that a TaskManager detects a job leader for which it has reserved slots. Upon detecting the job leader, it will offer all reserved slots to the JobManager.
testJobLeaderDetection
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
Apache-2.0
/**
 * Tests that accepted slots go into state assigned and the others are returned to the
 * resource manager: of two offered slots, the JM accepts only the first; the second must be
 * reported free to the RM, must reject task submissions, and must be requestable again.
 */
@Test
void testSlotAcceptance() throws Exception {
    final InstanceID registrationId = new InstanceID();
    final OneShotLatch taskExecutorIsRegistered = new OneShotLatch();
    // Completed when the RM is notified that a slot became available again.
    final CompletableFuture<Tuple3<InstanceID, SlotID, AllocationID>> availableSlotFuture =
            new CompletableFuture<>();
    final TestingResourceManagerGateway resourceManagerGateway =
            createRmWithTmRegisterAndNotifySlotHooks(
                    registrationId, taskExecutorIsRegistered, availableSlotFuture);

    final AllocationID allocationId1 = new AllocationID();
    final AllocationID allocationId2 = new AllocationID();

    // Only the slot for allocationId1 will be accepted by the job master.
    final SlotOffer offer1 = new SlotOffer(allocationId1, 0, ResourceProfile.ANY);

    final OneShotLatch offerSlotsLatch = new OneShotLatch();
    final OneShotLatch taskInTerminalState = new OneShotLatch();
    final CompletableFuture<Collection<SlotOffer>> offerResultFuture =
            new CompletableFuture<>();
    final TestingJobMasterGateway jobMasterGateway =
            createJobMasterWithSlotOfferAndTaskTerminationHooks(
                    offerSlotsLatch, taskInTerminalState, offerResultFuture);

    rpc.registerGateway(resourceManagerGateway.getAddress(), resourceManagerGateway);
    rpc.registerGateway(jobMasterGateway.getAddress(), jobMasterGateway);

    final TaskSlotTable<Task> taskSlotTable =
            TaskSlotUtils.createTaskSlotTable(2, EXECUTOR_EXTENSION.getExecutor());
    final TaskManagerServices taskManagerServices =
            createTaskManagerServicesWithTaskSlotTable(taskSlotTable);
    final TestingTaskExecutor taskManager = createTestingTaskExecutor(taskManagerServices);

    try {
        taskManager.start();
        taskManager.waitUntilStarted();

        final TaskExecutorGateway tmGateway =
                taskManager.getSelfGateway(TaskExecutorGateway.class);

        // wait until registered at the RM
        taskExecutorIsRegistered.await();

        // request 2 slots for the given allocation ids
        AllocationID[] allocationIds = new AllocationID[] {allocationId1, allocationId2};
        for (int i = 0; i < allocationIds.length; i++) {
            requestSlot(
                    tmGateway,
                    jobId,
                    allocationIds[i],
                    buildSlotID(i),
                    ResourceProfile.UNKNOWN,
                    jobMasterGateway.getAddress(),
                    resourceManagerGateway.getFencingToken());
        }

        // notify job leader to start slot offering
        jobManagerLeaderRetriever.notifyListener(
                jobMasterGateway.getAddress(), jobMasterGateway.getFencingToken().toUUID());

        // wait until slots have been offered
        offerSlotsLatch.await();
        // Accept only offer1 (allocationId1); allocationId2 is implicitly rejected.
        offerResultFuture.complete(Collections.singletonList(offer1));

        // The rejected slot must be reported back to the RM as available.
        final Tuple3<InstanceID, SlotID, AllocationID> instanceIDSlotIDAllocationIDTuple3 =
                availableSlotFuture.get();

        final Tuple3<InstanceID, SlotID, AllocationID> expectedResult =
                Tuple3.of(registrationId, buildSlotID(1), allocationId2);

        assertThat(instanceIDSlotIDAllocationIDTuple3).isEqualTo(expectedResult);
        // the slot 1 can be activate for task submission
        submit(allocationId1, jobMasterGateway, tmGateway, NoOpInvokable.class);
        // wait for the task completion
        taskInTerminalState.await();
        // the slot 2 can NOT be activate for task submission
        assertThatThrownBy(
                        () ->
                                submit(
                                        allocationId2,
                                        jobMasterGateway,
                                        tmGateway,
                                        NoOpInvokable.class))
                .withFailMessage(
                        "It should not be possible to submit task to acquired by JM slot with index 1 (allocationId2)")
                .isInstanceOf(CompletionException.class)
                .hasCauseInstanceOf(TaskSubmissionException.class);
        // the slot 2 is free to request
        requestSlot(
                tmGateway,
                jobId,
                allocationId2,
                buildSlotID(1),
                ResourceProfile.UNKNOWN,
                jobMasterGateway.getAddress(),
                resourceManagerGateway.getFencingToken());
    } finally {
        RpcUtils.terminateRpcEndpoint(taskManager);
    }
}
Tests that accepted slots go into state assigned and the others are returned to the resource manager.
testSlotAcceptance
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
Apache-2.0
/**
 * Tests that the task executor does not release a slot that was rejected by the job
 * master while another slot offer is still in progress (rejection arrives first).
 */
@Test
void testRejectedSlotNotFreedIfAnotherOfferIsPending() throws Exception {
    testSlotOfferResponseWithPendingSlotOffer(ResponseOrder.REJECT_THEN_ACCEPT);
}
Tests that the task executor does not release a slot that was rejected by the job master, if another slot offer is currently in progress.
testRejectedSlotNotFreedIfAnotherOfferIsPending
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
Apache-2.0
/**
 * Tests that the task executor does not activate a slot that was accepted by the job
 * master while another slot offer is still in progress (acceptance arrives first).
 */
@Test
void testAcceptedSlotNotActivatedIfAnotherOfferIsPending() throws Exception {
    testSlotOfferResponseWithPendingSlotOffer(ResponseOrder.ACCEPT_THEN_REJECT);
}
Tests that the task executor does not activate a slot that was accepted by the job master, if another slot offer is currently in progress.
testAcceptedSlotNotActivatedIfAnotherOfferIsPending
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
Apache-2.0
/**
 * Tests that freeing an inactive slot is a legal operation that does not throw an
 * exception: a slot whose offer was never answered by the JM is freed, and the RM must be
 * notified of its availability.
 */
@Test
void testFreeingInactiveSlotDoesNotFail() throws Exception {
    final OneShotLatch taskExecutorIsRegistered = new OneShotLatch();
    // Completed when the RM is notified that a slot became available.
    final CompletableFuture<Tuple3<InstanceID, SlotID, AllocationID>> availableSlotFuture =
            new CompletableFuture<>();
    final TestingResourceManagerGateway resourceManagerGateway =
            createRmWithTmRegisterAndNotifySlotHooks(
                    new InstanceID(), taskExecutorIsRegistered, availableSlotFuture);

    rpc.registerGateway(resourceManagerGateway.getAddress(), resourceManagerGateway);

    final MultiShotLatch offerSlotsLatch = new MultiShotLatch();
    // The offer-slots future is never completed, so the offered slot stays inactive.
    final TestingJobMasterGateway jobMasterGateway =
            new TestingJobMasterGatewayBuilder()
                    .setOfferSlotsFunction(
                            (resourceID, slotOffers) -> {
                                offerSlotsLatch.trigger();
                                return new CompletableFuture<>();
                            })
                    .build();

    rpc.registerGateway(jobMasterGateway.getAddress(), jobMasterGateway);

    final TaskSlotTable<Task> taskSlotTable =
            TaskSlotUtils.createTaskSlotTable(1, EXECUTOR_EXTENSION.getExecutor());

    final TaskExecutorLocalStateStoresManager localStateStoresManager =
            createTaskExecutorLocalStateStoresManager();

    final TaskManagerServices taskManagerServices =
            new TaskManagerServicesBuilder()
                    .setUnresolvedTaskManagerLocation(unresolvedTaskManagerLocation)
                    .setTaskSlotTable(taskSlotTable)
                    .setTaskStateManager(localStateStoresManager)
                    .build();

    final TestingTaskExecutor taskExecutor = createTestingTaskExecutor(taskManagerServices);

    // Wrapper that runs slot-table accesses in the TE's main thread.
    final ThreadSafeTaskSlotTable<Task> threadSafeTaskSlotTable =
            new ThreadSafeTaskSlotTable<>(
                    taskSlotTable, taskExecutor.getMainThreadExecutableForTesting());

    try {
        taskExecutor.start();
        taskExecutor.waitUntilStarted();

        final TaskExecutorGateway tmGateway =
                taskExecutor.getSelfGateway(TaskExecutorGateway.class);

        taskExecutorIsRegistered.await();

        jobManagerLeaderRetriever.notifyListener(
                jobMasterGateway.getAddress(), jobMasterGateway.getFencingToken().toUUID());

        final AllocationID allocationId = new AllocationID();

        requestSlot(
                tmGateway,
                jobId,
                allocationId,
                buildSlotID(0),
                ResourceProfile.UNKNOWN,
                jobMasterGateway.getAddress(),
                resourceManagerGateway.getFencingToken());

        offerSlotsLatch.await();

        // Freeing the still-inactive slot must succeed.
        tmGateway.freeSlot(allocationId, new RuntimeException("test exception"), timeout).get();

        assertThat(availableSlotFuture.get().f2).isEqualTo(allocationId);
        assertThat(threadSafeTaskSlotTable.getAllocationIdsPerJob(jobId)).isEmpty();
    } finally {
        RpcUtils.terminateRpcEndpoint(taskExecutor);
    }
}
Tests that freeing an inactive slot is a legal operation that does not throw an exception.
testFreeingInactiveSlotDoesNotFail
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
Apache-2.0
/**
 * Tests that the heartbeat is stopped once the TaskExecutor detects that the RM is no
 * longer leader: after revoking RM leadership, the RM must show up as an unmonitored
 * heartbeat target well before the (much larger) heartbeat timeout could fire.
 *
 * <p>See FLINK-8462.
 */
@Test
void testRMHeartbeatStopWhenLeadershipRevoked() throws Exception {
    final long heartbeatInterval = 1L;
    // Deliberately much larger than pollTimeout so a timeout cannot explain the unmonitor.
    final long heartbeatTimeout = 10000L;
    final long pollTimeout = 1000L;
    // Records every monitored/unmonitored heartbeat target for inspection.
    final RecordingHeartbeatServices heartbeatServices =
            new RecordingHeartbeatServices(heartbeatInterval, heartbeatTimeout);
    final ResourceID rmResourceID = ResourceID.generate();

    final TaskSlotTable<Task> taskSlotTable =
            TaskSlotUtils.createTaskSlotTable(1, EXECUTOR_EXTENSION.getExecutor());

    final String rmAddress = "rm";
    final TestingResourceManagerGateway rmGateway =
            new TestingResourceManagerGateway(
                    ResourceManagerId.generate(), rmResourceID, rmAddress, rmAddress);

    rpc.registerGateway(rmAddress, rmGateway);

    final TaskExecutorLocalStateStoresManager localStateStoresManager =
            createTaskExecutorLocalStateStoresManager();

    final TaskManagerServices taskManagerServices =
            new TaskManagerServicesBuilder()
                    .setUnresolvedTaskManagerLocation(unresolvedTaskManagerLocation)
                    .setTaskSlotTable(taskSlotTable)
                    .setTaskStateManager(localStateStoresManager)
                    .build();

    final TaskExecutor taskExecutor =
            createTaskExecutor(taskManagerServices, heartbeatServices);

    try {
        taskExecutor.start();

        final BlockingQueue<ResourceID> unmonitoredTargets =
                heartbeatServices.getUnmonitoredTargets();
        final BlockingQueue<ResourceID> monitoredTargets =
                heartbeatServices.getMonitoredTargets();

        resourceManagerLeaderRetriever.notifyListener(
                rmAddress, rmGateway.getFencingToken().toUUID());

        // wait for TM registration by checking the registered heartbeat targets
        assertThat(monitoredTargets.poll(pollTimeout, TimeUnit.MILLISECONDS))
                .isEqualTo(rmResourceID);

        // let RM lose leadership
        resourceManagerLeaderRetriever.notifyListener(null, null);

        // the timeout should not have triggered since it is much higher
        assertThat(unmonitoredTargets.poll(pollTimeout, TimeUnit.MILLISECONDS))
                .isEqualTo(rmResourceID);
    } finally {
        RpcUtils.terminateRpcEndpoint(taskExecutor);
    }
}
Tests that the heartbeat is stopped once the TaskExecutor detects that the RM is no longer leader. <p>See FLINK-8462
testRMHeartbeatStopWhenLeadershipRevoked
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
Apache-2.0
/**
 * Tests that slot requests are ignored while the TaskExecutor is not registered at a
 * ResourceManager: the RM never completes the registration, so a slot request must fail
 * with a {@link TaskManagerException}.
 */
@Test
void testIgnoringSlotRequestsIfNotRegistered() throws Exception {
    final TaskExecutor taskExecutor = createTaskExecutor(1);
    taskExecutor.start();

    try {
        final TestingResourceManagerGateway testingResourceManagerGateway =
                new TestingResourceManagerGateway();

        // Never completed: registration stays pending for the whole test.
        final CompletableFuture<RegistrationResponse> registrationFuture =
                new CompletableFuture<>();
        final CompletableFuture<ResourceID> taskExecutorResourceIdFuture =
                new CompletableFuture<>();

        testingResourceManagerGateway.setRegisterTaskExecutorFunction(
                taskExecutorRegistration -> {
                    taskExecutorResourceIdFuture.complete(
                            taskExecutorRegistration.getResourceId());
                    return registrationFuture;
                });

        rpc.registerGateway(
                testingResourceManagerGateway.getAddress(), testingResourceManagerGateway);
        resourceManagerLeaderRetriever.notifyListener(
                testingResourceManagerGateway.getAddress(),
                testingResourceManagerGateway.getFencingToken().toUUID());

        final TaskExecutorGateway taskExecutorGateway =
                taskExecutor.getSelfGateway(TaskExecutorGateway.class);

        final ResourceID resourceId = taskExecutorResourceIdFuture.get();

        final CompletableFuture<Acknowledge> slotRequestResponse =
                taskExecutorGateway.requestSlot(
                        new SlotID(resourceId, 0),
                        jobId,
                        new AllocationID(),
                        ResourceProfile.ZERO,
                        "foobar",
                        testingResourceManagerGateway.getFencingToken(),
                        timeout);

        assertThatFuture(slotRequestResponse)
                .withFailMessage(
                        "We should not be able to request slots before the TaskExecutor is registered at the ResourceManager.")
                .eventuallyFailsWith(ExecutionException.class)
                .withCauseInstanceOf(TaskManagerException.class);
    } finally {
        RpcUtils.terminateRpcEndpoint(taskExecutor);
    }
}
Tests that we ignore slot requests if the TaskExecutor is not registered at a ResourceManager.
testIgnoringSlotRequestsIfNotRegistered
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
Apache-2.0
/**
 * Tests that the TaskExecutor tries to reconnect to a ResourceManager from which it was
 * explicitly disconnected: after {@code disconnectResourceManager} a second registration
 * attempt with the same resource ID must be observed.
 */
@Test
void testReconnectionAttemptIfExplicitlyDisconnected() throws Exception {
    final TaskSlotTable<Task> taskSlotTable =
            TaskSlotUtils.createTaskSlotTable(1, EXECUTOR_EXTENSION.getExecutor());
    final UnresolvedTaskManagerLocation unresolvedTaskManagerLocation =
            new LocalUnresolvedTaskManagerLocation();
    final TaskExecutor taskExecutor =
            createTaskExecutor(
                    new TaskManagerServicesBuilder()
                            .setTaskSlotTable(taskSlotTable)
                            .setUnresolvedTaskManagerLocation(unresolvedTaskManagerLocation)
                            .build());

    taskExecutor.start();

    try {
        final TestingResourceManagerGateway testingResourceManagerGateway =
                new TestingResourceManagerGateway();
        final ClusterInformation clusterInformation = new ClusterInformation("foobar", 1234);
        final CompletableFuture<RegistrationResponse> registrationResponseFuture =
                CompletableFuture.completedFuture(
                        new TaskExecutorRegistrationSuccess(
                                new InstanceID(),
                                ResourceID.generate(),
                                clusterInformation,
                                null));
        // Queue that records every registration attempt's resource ID.
        final BlockingQueue<ResourceID> registrationQueue = new ArrayBlockingQueue<>(1);

        testingResourceManagerGateway.setRegisterTaskExecutorFunction(
                taskExecutorRegistration -> {
                    registrationQueue.offer(taskExecutorRegistration.getResourceId());
                    return registrationResponseFuture;
                });
        rpc.registerGateway(
                testingResourceManagerGateway.getAddress(), testingResourceManagerGateway);

        resourceManagerLeaderRetriever.notifyListener(
                testingResourceManagerGateway.getAddress(),
                testingResourceManagerGateway.getFencingToken().toUUID());

        final ResourceID firstRegistrationAttempt = registrationQueue.take();

        assertThat(firstRegistrationAttempt)
                .isEqualTo(unresolvedTaskManagerLocation.getResourceID());

        final TaskExecutorGateway taskExecutorGateway =
                taskExecutor.getSelfGateway(TaskExecutorGateway.class);

        // No further registrations may happen before the explicit disconnect.
        assertThat(registrationQueue).isEmpty();

        taskExecutorGateway.disconnectResourceManager(new FlinkException("Test exception"));

        // The disconnect must trigger a fresh registration attempt.
        final ResourceID secondRegistrationAttempt = registrationQueue.take();

        assertThat(secondRegistrationAttempt)
                .isEqualTo(unresolvedTaskManagerLocation.getResourceID());
    } finally {
        RpcUtils.terminateRpcEndpoint(taskExecutor);
    }
}
Tests that the TaskExecutor tries to reconnect to a ResourceManager from which it was explicitly disconnected.
testReconnectionAttemptIfExplicitlyDisconnected
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
Apache-2.0
@Test void testInitialSlotReportFailure() throws Exception { final TaskSlotTable<Task> taskSlotTable = TaskSlotUtils.createTaskSlotTable(1, EXECUTOR_EXTENSION.getExecutor()); final UnresolvedTaskManagerLocation unresolvedTaskManagerLocation = new LocalUnresolvedTaskManagerLocation(); final TaskManagerServices taskManagerServices = new TaskManagerServicesBuilder() .setTaskSlotTable(taskSlotTable) .setUnresolvedTaskManagerLocation(unresolvedTaskManagerLocation) .build(); final TaskExecutor taskExecutor = createTaskExecutor(taskManagerServices); taskExecutor.start(); try { final TestingResourceManagerGateway testingResourceManagerGateway = new TestingResourceManagerGateway(); final BlockingQueue<CompletableFuture<Acknowledge>> responseQueue = new ArrayBlockingQueue<>(2); testingResourceManagerGateway.setSendSlotReportFunction( resourceIDInstanceIDSlotReportTuple3 -> { try { return responseQueue.take(); } catch (InterruptedException e) { return FutureUtils.completedExceptionally(e); } }); final CompletableFuture<RegistrationResponse> registrationResponse = CompletableFuture.completedFuture( new TaskExecutorRegistrationSuccess( new InstanceID(), testingResourceManagerGateway.getOwnResourceId(), new ClusterInformation("foobar", 1234), null)); final CountDownLatch numberRegistrations = new CountDownLatch(2); testingResourceManagerGateway.setRegisterTaskExecutorFunction( taskExecutorRegistration -> { numberRegistrations.countDown(); return registrationResponse; }); responseQueue.offer( FutureUtils.completedExceptionally(new FlinkException("Test exception"))); responseQueue.offer(CompletableFuture.completedFuture(Acknowledge.get())); rpc.registerGateway( testingResourceManagerGateway.getAddress(), testingResourceManagerGateway); resourceManagerLeaderRetriever.notifyListener( testingResourceManagerGateway.getAddress(), testingResourceManagerGateway.getFencingToken().toUUID()); // wait for the second registration attempt numberRegistrations.await(); } finally { 
RpcUtils.terminateRpcEndpoint(taskExecutor); } }
Tests that the {@link TaskExecutor} tries to reconnect if the initial slot report fails.
testInitialSlotReportFailure
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
Apache-2.0
@Test void testOfferSlotToJobMasterAfterTimeout() throws Exception { final TaskSlotTable<Task> taskSlotTable = TaskSlotUtils.createTaskSlotTable(2, EXECUTOR_EXTENSION.getExecutor()); final TaskManagerServices taskManagerServices = new TaskManagerServicesBuilder().setTaskSlotTable(taskSlotTable).build(); final TaskExecutor taskExecutor = createTaskExecutor(taskManagerServices); final AllocationID allocationId = new AllocationID(); final CompletableFuture<ResourceID> initialSlotReportFuture = new CompletableFuture<>(); final TestingResourceManagerGateway testingResourceManagerGateway = new TestingResourceManagerGateway(); testingResourceManagerGateway.setSendSlotReportFunction( resourceIDInstanceIDSlotReportTuple3 -> { initialSlotReportFuture.complete(null); return CompletableFuture.completedFuture(Acknowledge.get()); }); rpc.registerGateway( testingResourceManagerGateway.getAddress(), testingResourceManagerGateway); resourceManagerLeaderRetriever.notifyListener( testingResourceManagerGateway.getAddress(), testingResourceManagerGateway.getFencingToken().toUUID()); final CountDownLatch slotOfferings = new CountDownLatch(3); final CompletableFuture<AllocationID> offeredSlotFuture = new CompletableFuture<>(); final TestingJobMasterGateway jobMasterGateway = new TestingJobMasterGatewayBuilder() .setOfferSlotsFunction( (resourceID, slotOffers) -> { assertThat(slotOffers).hasSize(1); slotOfferings.countDown(); if (slotOfferings.getCount() == 0) { offeredSlotFuture.complete( slotOffers.iterator().next().getAllocationId()); return CompletableFuture.completedFuture(slotOffers); } else { return FutureUtils.completedExceptionally( new TimeoutException()); } }) .build(); final String jobManagerAddress = jobMasterGateway.getAddress(); rpc.registerGateway(jobManagerAddress, jobMasterGateway); jobManagerLeaderRetriever.notifyListener( jobManagerAddress, jobMasterGateway.getFencingToken().toUUID()); try { taskExecutor.start(); final TaskExecutorGateway taskExecutorGateway = 
taskExecutor.getSelfGateway(TaskExecutorGateway.class); // wait for the connection to the ResourceManager initialSlotReportFuture.get(); requestSlot( taskExecutorGateway, jobId, allocationId, new SlotID(taskExecutor.getResourceID(), 0), ResourceProfile.ZERO, jobManagerAddress, testingResourceManagerGateway.getFencingToken()); slotOfferings.await(); assertThatFuture(offeredSlotFuture).eventuallySucceeds().isEqualTo(allocationId); assertThat(taskSlotTable.isSlotFree(1)).isTrue(); } finally { RpcUtils.terminateRpcEndpoint(taskExecutor); } }
Tests that slot offers to the job master time out and are retried.
testOfferSlotToJobMasterAfterTimeout
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
Apache-2.0
@Test void testSyncSlotsWithJobMasterByHeartbeat() throws Exception { final CountDownLatch activeSlots = new CountDownLatch(2); final TaskSlotTable<Task> taskSlotTable = new ActivateSlotNotifyingTaskSlotTable(2, activeSlots); final TaskManagerServices taskManagerServices = new TaskManagerServicesBuilder().setTaskSlotTable(taskSlotTable).build(); final TaskExecutor taskExecutor = createTaskExecutor(taskManagerServices); final TestingResourceManagerGateway testingResourceManagerGateway = new TestingResourceManagerGateway(); final BlockingQueue<AllocationID> allocationsNotifiedFree = new ArrayBlockingQueue<>(2); OneShotLatch initialSlotReporting = new OneShotLatch(); testingResourceManagerGateway.setSendSlotReportFunction( resourceIDInstanceIDSlotReportTuple3 -> { initialSlotReporting.trigger(); return CompletableFuture.completedFuture(Acknowledge.get()); }); testingResourceManagerGateway.setNotifySlotAvailableConsumer( instanceIDSlotIDAllocationIDTuple3 -> allocationsNotifiedFree.offer(instanceIDSlotIDAllocationIDTuple3.f2)); rpc.registerGateway( testingResourceManagerGateway.getAddress(), testingResourceManagerGateway); resourceManagerLeaderRetriever.notifyListener( testingResourceManagerGateway.getAddress(), testingResourceManagerGateway.getFencingToken().toUUID()); final BlockingQueue<AllocationID> failedSlotFutures = new ArrayBlockingQueue<>(2); final ResourceID jobManagerResourceId = ResourceID.generate(); final TestingJobMasterGateway jobMasterGateway = new TestingJobMasterGatewayBuilder() .setFailSlotConsumer( (resourceID, allocationID, throwable) -> failedSlotFutures.offer(allocationID)) .setOfferSlotsFunction( (resourceID, slotOffers) -> CompletableFuture.completedFuture( new ArrayList<>(slotOffers))) .setRegisterTaskManagerFunction( (ignoredJobId, ignoredTaskManagerRegistrationInformation) -> CompletableFuture.completedFuture( new JMTMRegistrationSuccess(jobManagerResourceId))) .build(); final String jobManagerAddress = jobMasterGateway.getAddress(); 
rpc.registerGateway(jobManagerAddress, jobMasterGateway); jobManagerLeaderRetriever.notifyListener( jobManagerAddress, jobMasterGateway.getFencingToken().toUUID()); taskExecutor.start(); try { final TaskExecutorGateway taskExecutorGateway = taskExecutor.getSelfGateway(TaskExecutorGateway.class); initialSlotReporting.await(); final AllocationID allocationIdInBoth = new AllocationID(); final AllocationID allocationIdOnlyInJM = new AllocationID(); final AllocationID allocationIdOnlyInTM = new AllocationID(); taskExecutorGateway.requestSlot( new SlotID(taskExecutor.getResourceID(), 0), jobId, allocationIdInBoth, ResourceProfile.ZERO, "foobar", testingResourceManagerGateway.getFencingToken(), timeout); taskExecutorGateway.requestSlot( new SlotID(taskExecutor.getResourceID(), 1), jobId, allocationIdOnlyInTM, ResourceProfile.ZERO, "foobar", testingResourceManagerGateway.getFencingToken(), timeout); activeSlots.await(); List<AllocatedSlotInfo> allocatedSlotInfos = Arrays.asList( new AllocatedSlotInfo(0, allocationIdInBoth), new AllocatedSlotInfo(1, allocationIdOnlyInJM)); AllocatedSlotReport allocatedSlotReport = new AllocatedSlotReport(jobId, allocatedSlotInfos); taskExecutorGateway.heartbeatFromJobManager(jobManagerResourceId, allocatedSlotReport); assertThat(failedSlotFutures.take()).isEqualTo(allocationIdOnlyInJM); assertThat(allocationsNotifiedFree.take()).isEqualTo(allocationIdOnlyInTM); assertThat(failedSlotFutures.poll(5L, TimeUnit.MILLISECONDS)).isNull(); assertThat(allocationsNotifiedFree.poll(5L, TimeUnit.MILLISECONDS)).isNull(); } finally { RpcUtils.terminateRpcEndpoint(taskExecutor); } }
Tests that the TaskExecutor syncs its slots view with the JobMaster's view via the AllocatedSlotReport reported by the heartbeat (See FLINK-11059).
testSyncSlotsWithJobMasterByHeartbeat
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskExecutorTest.java
Apache-2.0
@Tag("org.apache.flink.testutils.junit.FailsInGHAContainerWithRootUser") @Test void testIODirectoryNotWritable() throws Exception { File nonWritable = TempDirUtils.newFolder(tempFolder); Assumptions.assumeTrue( nonWritable.setWritable(false, false), "Cannot create non-writable temporary file directory. Skipping test."); try { Configuration cfg = createFlinkConfiguration(); cfg.set(CoreOptions.TMP_DIRS, nonWritable.getAbsolutePath()); assertThatThrownBy( () -> startTaskManager( cfg, rpcService, highAvailabilityServices, WORKING_DIRECTORY_EXTENSION_WRAPPER .getCustomExtension() .createNewWorkingDirectory()), "Should fail synchronously with an IOException") .isInstanceOf(IOException.class); } finally { // noinspection ResultOfMethodCallIgnored nonWritable.setWritable(true, false); try { FileUtils.deleteDirectory(nonWritable); } catch (IOException e) { // best effort } } }
Tests that the TaskManagerRunner startup fails synchronously when the I/O directories are not writable.
testIODirectoryNotWritable
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerRunnerStartupTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerRunnerStartupTest.java
Apache-2.0
@Test void testMemoryConfigWrong() { Configuration cfg = createFlinkConfiguration(); // something invalid cfg.set(TaskManagerOptions.NETWORK_MEMORY_MIN, MemorySize.parse("100m")); cfg.set(TaskManagerOptions.NETWORK_MEMORY_MAX, MemorySize.parse("10m")); assertThatThrownBy( () -> startTaskManager( cfg, rpcService, highAvailabilityServices, WORKING_DIRECTORY_EXTENSION_WRAPPER .getCustomExtension() .createNewWorkingDirectory())) .isInstanceOf(IllegalConfigurationException.class); }
Tests that the TaskManagerRunner startup fails synchronously when the memory configuration is wrong.
testMemoryConfigWrong
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerRunnerStartupTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerRunnerStartupTest.java
Apache-2.0
@Test void testStartupWhenNetworkStackFailsToInitialize() throws Exception { final ServerSocket blocker = new ServerSocket(0, 50, InetAddress.getByName(LOCAL_HOST)); try { final Configuration cfg = createFlinkConfiguration(); cfg.set(NettyShuffleEnvironmentOptions.DATA_PORT, blocker.getLocalPort()); cfg.set(TaskManagerOptions.BIND_HOST, LOCAL_HOST); assertThatThrownBy( () -> startTaskManager( cfg, rpcService, highAvailabilityServices, WORKING_DIRECTORY_EXTENSION_WRAPPER .getCustomExtension() .createNewWorkingDirectory()), "Should throw IOException when the network stack cannot be initialized.") .isInstanceOf(IOException.class); } finally { IOUtils.closeQuietly(blocker); } }
Tests that the TaskManagerRunner startup fails if the network stack cannot be initialized.
testStartupWhenNetworkStackFailsToInitialize
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerRunnerStartupTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerRunnerStartupTest.java
Apache-2.0
@Test void testSampleTaskThreadInfo() throws Exception { Set<IdleTestTask> tasks = new HashSet<>(); executeWithTerminationGuarantee( () -> { tasks.add(new IdleTestTask()); tasks.add(new IdleTestTask()); Thread.sleep(2000); Map<Long, ExecutionAttemptID> threads = collectExecutionAttempts(tasks); final Map<ExecutionAttemptID, Collection<ThreadInfoSample>> threadInfoSamples = threadInfoSampleService .requestThreadInfoSamples(threads, requestParams) .get(); int count = 0; for (Collection<ThreadInfoSample> samples : threadInfoSamples.values()) { for (ThreadInfoSample sample : samples) { count++; StackTraceElement[] traces = sample.getStackTrace(); assertThat(traces).hasSizeLessThanOrEqualTo(MAX_STACK_TRACK_DEPTH); } } assertThat(count).isEqualTo(NUMBER_OF_SAMPLES * 2); }, tasks); }
Tests successful thread info samples request.
testSampleTaskThreadInfo
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/ThreadInfoSampleServiceTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/ThreadInfoSampleServiceTest.java
Apache-2.0
@Test void testTruncateStackTraceIfLimitIsSpecified() throws Exception { Set<IdleTestTask> tasks = new HashSet<>(); executeWithTerminationGuarantee( () -> { tasks.add(new IdleTestTask()); Map<Long, ExecutionAttemptID> threads = collectExecutionAttempts(tasks); final Map<ExecutionAttemptID, Collection<ThreadInfoSample>> threadInfoSamples1 = threadInfoSampleService .requestThreadInfoSamples(threads, requestParams) .get(); final Map<ExecutionAttemptID, Collection<ThreadInfoSample>> threadInfoSamples2 = threadInfoSampleService .requestThreadInfoSamples( threads, new ThreadInfoSamplesRequest( 1, NUMBER_OF_SAMPLES, DELAY_BETWEEN_SAMPLES, MAX_STACK_TRACK_DEPTH - 6)) .get(); for (Collection<ThreadInfoSample> samples : threadInfoSamples1.values()) { for (ThreadInfoSample sample : samples) { assertThat(sample.getStackTrace()) .hasSizeLessThanOrEqualTo(MAX_STACK_TRACK_DEPTH); } } for (Collection<ThreadInfoSample> samples : threadInfoSamples2.values()) { for (ThreadInfoSample sample : samples) { assertThat(sample.getStackTrace()).hasSize(MAX_STACK_TRACK_DEPTH - 6); } } }, tasks); }
Tests that stack traces are truncated when exceeding the configured depth.
testTruncateStackTraceIfLimitIsSpecified
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/ThreadInfoSampleServiceTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/ThreadInfoSampleServiceTest.java
Apache-2.0
@Test void testThrowExceptionIfNumSamplesIsNegative() { Set<IdleTestTask> tasks = new HashSet<>(); assertThatThrownBy( () -> executeWithTerminationGuarantee( () -> { tasks.add(new IdleTestTask()); Map<Long, ExecutionAttemptID> threads = collectExecutionAttempts(tasks); threadInfoSampleService.requestThreadInfoSamples( threads, new ThreadInfoSamplesRequest( 1, -1, DELAY_BETWEEN_SAMPLES, MAX_STACK_TRACK_DEPTH)); }, tasks)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("numSamples must be positive"); }
Tests that a negative numSamples parameter is handled.
testThrowExceptionIfNumSamplesIsNegative
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/ThreadInfoSampleServiceTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/ThreadInfoSampleServiceTest.java
Apache-2.0
@Test void testTryMarkSlotActive() throws Exception { runInMainThread( 3, taskSlotTable -> { final JobID jobId1 = new JobID(); final AllocationID allocationId1 = new AllocationID(); taskSlotTable.allocateSlot(0, jobId1, allocationId1, SLOT_TIMEOUT); final AllocationID allocationId2 = new AllocationID(); taskSlotTable.allocateSlot(1, jobId1, allocationId2, SLOT_TIMEOUT); final AllocationID allocationId3 = new AllocationID(); final JobID jobId2 = new JobID(); taskSlotTable.allocateSlot(2, jobId2, allocationId3, SLOT_TIMEOUT); taskSlotTable.markSlotActive(allocationId1); assertThat(taskSlotTable.isAllocated(0, jobId1, allocationId1)).isTrue(); assertThat(taskSlotTable.isAllocated(1, jobId1, allocationId2)).isTrue(); assertThat(taskSlotTable.isAllocated(2, jobId2, allocationId3)).isTrue(); assertThat(taskSlotTable.getActiveTaskSlotAllocationIdsPerJob(jobId1)) .isEqualTo(Sets.newHashSet(allocationId1)); assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId1)).isTrue(); assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId2)).isTrue(); assertThat(taskSlotTable.tryMarkSlotActive(jobId1, allocationId3)).isFalse(); assertThat(taskSlotTable.getActiveTaskSlotAllocationIdsPerJob(jobId1)) .isEqualTo(new HashSet<>(Arrays.asList(allocationId2, allocationId1))); }); }
Tests that one can mark allocated slots as active.
testTryMarkSlotActive
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTableImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTableImplTest.java
Apache-2.0
@Test void testInconsistentStaticSlotAllocation() throws Exception { runInMainThread( 2, taskSlotTable -> { final JobID jobId = new JobID(); final AllocationID allocationId1 = new AllocationID(); final AllocationID allocationId2 = new AllocationID(); assertThatNoException() .isThrownBy( () -> taskSlotTable.allocateSlot( 0, jobId, allocationId1, SLOT_TIMEOUT)); assertThatThrownBy( () -> taskSlotTable.allocateSlot( 1, jobId, allocationId1, SLOT_TIMEOUT)) .isInstanceOf(SlotAllocationException.class); assertThatThrownBy( () -> taskSlotTable.allocateSlot( 0, jobId, allocationId2, SLOT_TIMEOUT)) .isInstanceOf(SlotAllocationException.class); assertThat(taskSlotTable.isAllocated(0, jobId, allocationId1)).isTrue(); assertThat(taskSlotTable.isSlotFree(1)).isTrue(); Iterator<TaskSlot<TaskSlotPayload>> allocatedSlots = taskSlotTable.getAllocatedSlots(jobId); assertThat(allocatedSlots.next().getIndex()).isZero(); assertThat(allocatedSlots.hasNext()).isFalse(); }); }
Tests that inconsistent static slot allocation with the same AllocationID to a different slot is rejected.
testInconsistentStaticSlotAllocation
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTableImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTableImplTest.java
Apache-2.0
@Test public void testCancelAsyncProducerAndConsumer(@InjectMiniCluster MiniCluster flink) throws Exception { Deadline deadline = Deadline.now().plus(Duration.ofMinutes(2)); // Job with async producer and consumer JobVertex producer = new JobVertex("AsyncProducer"); producer.setParallelism(1); producer.setInvokableClass(AsyncProducer.class); JobVertex consumer = new JobVertex("AsyncConsumer"); consumer.setParallelism(1); consumer.setInvokableClass(AsyncConsumer.class); connectNewDataSetAsInput( consumer, producer, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED); SlotSharingGroup slot = new SlotSharingGroup(); producer.setSlotSharingGroup(slot); consumer.setSlotSharingGroup(slot); JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(producer, consumer); // Submit job and wait until running flink.runDetached(jobGraph); FutureUtils.retrySuccessfulWithDelay( () -> flink.getJobStatus(jobGraph.getJobID()), Duration.ofMillis(10), deadline, status -> status == JobStatus.RUNNING, new ScheduledExecutorServiceAdapter(EXECUTOR_RESOURCE.getExecutor())) .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS); boolean producerBlocked = false; for (int i = 0; i < 50; i++) { Thread thread = ASYNC_PRODUCER_THREAD; if (thread != null && thread.isAlive()) { StackTraceElement[] stackTrace = thread.getStackTrace(); producerBlocked = isInBlockingBufferRequest(stackTrace); } if (producerBlocked) { break; } else { // Retry Thread.sleep(500L); } } // Verify that async producer is in blocking request assertTrue( "Producer thread is not blocked: " + Arrays.toString(ASYNC_PRODUCER_THREAD.getStackTrace()), producerBlocked); boolean consumerWaiting = false; for (int i = 0; i < 50; i++) { Thread thread = ASYNC_CONSUMER_THREAD; if (thread != null && thread.isAlive()) { consumerWaiting = thread.getState() == Thread.State.WAITING; } if (consumerWaiting) { break; } else { // Retry Thread.sleep(500L); } } // Verify that async consumer is in blocking request assertTrue("Consumer 
thread is not blocked.", consumerWaiting); flink.cancelJob(jobGraph.getJobID()) .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS); // wait until the job is canceled FutureUtils.retrySuccessfulWithDelay( () -> flink.getJobStatus(jobGraph.getJobID()), Duration.ofMillis(10), deadline, status -> status == JobStatus.CANCELED, new ScheduledExecutorServiceAdapter(EXECUTOR_RESOURCE.getExecutor())) .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS); // Verify the expected Exceptions assertNotNull(ASYNC_PRODUCER_EXCEPTION); assertEquals(CancelTaskException.class, ASYNC_PRODUCER_EXCEPTION.getClass()); assertNotNull(ASYNC_CONSUMER_EXCEPTION); assertEquals(IllegalStateException.class, ASYNC_CONSUMER_EXCEPTION.getClass()); }
Tests that a task waiting on an async producer/consumer that is stuck in a blocking buffer request can be properly cancelled. <p>This is currently required for the Flink Kafka sources, which spawn a separate Thread consuming from Kafka and producing the intermediate streams in the spawned Thread instead of the main task Thread.
testCancelAsyncProducerAndConsumer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskCancelAsyncProducerConsumerITCase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskCancelAsyncProducerConsumerITCase.java
Apache-2.0
@Override public void invoke() throws Exception { Thread consumer = new ConsumerThread(getEnvironment().getInputGate(0)); // Publish the async consumer for the main test Thread ASYNC_CONSUMER_THREAD = consumer; consumer.start(); // Wait for the consumer Thread to finish. This is executed in the // main Task thread and will be interrupted on cancellation. while (consumer.isAlive()) { try { consumer.join(); } catch (InterruptedException ignored) { } } }
Invokable consuming buffers in a separate Thread (not the main Task thread).
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskCancelAsyncProducerConsumerITCase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskCancelAsyncProducerConsumerITCase.java
Apache-2.0
@Test public void testTriggerPartitionStateUpdate() throws Exception { final IntermediateDataSetID resultId = new IntermediateDataSetID(); final ResultPartitionID partitionId = new ResultPartitionID(); final PartitionProducerStateChecker partitionChecker = mock(PartitionProducerStateChecker.class); AtomicInteger callCount = new AtomicInteger(0); RemoteChannelStateChecker remoteChannelStateChecker = new RemoteChannelStateChecker(partitionId, "test task"); // Test all branches of trigger partition state check { // Reset latches setup(); // PartitionProducerDisposedException final Task task = createTaskBuilder() .setInvokable(InvokableBlockingInInvoke.class) .setPartitionProducerStateChecker(partitionChecker) .build(Executors.directExecutor()); TestTaskBuilder.setTaskState(task, ExecutionState.RUNNING); final CompletableFuture<ExecutionState> promise = new CompletableFuture<>(); when(partitionChecker.requestPartitionProducerState( eq(task.getJobID()), eq(resultId), eq(partitionId))) .thenReturn(promise); task.requestPartitionProducerState( resultId, partitionId, checkResult -> assertThat( remoteChannelStateChecker.isProducerReadyOrAbortConsumption( checkResult), is(false))); promise.completeExceptionally(new PartitionProducerDisposedException(partitionId)); assertEquals(ExecutionState.CANCELING, task.getExecutionState()); } { // Reset latches setup(); // Any other exception final Task task = createTaskBuilder() .setInvokable(InvokableBlockingInInvoke.class) .setPartitionProducerStateChecker(partitionChecker) .build(Executors.directExecutor()); TestTaskBuilder.setTaskState(task, ExecutionState.RUNNING); final CompletableFuture<ExecutionState> promise = new CompletableFuture<>(); when(partitionChecker.requestPartitionProducerState( eq(task.getJobID()), eq(resultId), eq(partitionId))) .thenReturn(promise); task.requestPartitionProducerState( resultId, partitionId, checkResult -> assertThat( remoteChannelStateChecker.isProducerReadyOrAbortConsumption( checkResult), 
is(false))); promise.completeExceptionally(new RuntimeException("Any other exception")); assertEquals(ExecutionState.FAILED, task.getExecutionState()); } { callCount.set(0); // Reset latches setup(); // TimeoutException handled special => retry // Any other exception final Task task = createTaskBuilder() .setInvokable(InvokableBlockingInInvoke.class) .setPartitionProducerStateChecker(partitionChecker) .build(Executors.directExecutor()); try { task.startTaskThread(); awaitInvokableLatch(task); CompletableFuture<ExecutionState> promise = new CompletableFuture<>(); when(partitionChecker.requestPartitionProducerState( eq(task.getJobID()), eq(resultId), eq(partitionId))) .thenReturn(promise); task.requestPartitionProducerState( resultId, partitionId, checkResult -> { if (remoteChannelStateChecker.isProducerReadyOrAbortConsumption( checkResult)) { callCount.incrementAndGet(); } }); promise.completeExceptionally(new TimeoutException()); assertEquals(ExecutionState.RUNNING, task.getExecutionState()); assertEquals(1, callCount.get()); } finally { task.getExecutingThread().interrupt(); task.getExecutingThread().join(); } } { callCount.set(0); // Reset latches setup(); // Success final Task task = createTaskBuilder() .setInvokable(InvokableBlockingInInvoke.class) .setPartitionProducerStateChecker(partitionChecker) .build(Executors.directExecutor()); try { task.startTaskThread(); awaitInvokableLatch(task); CompletableFuture<ExecutionState> promise = new CompletableFuture<>(); when(partitionChecker.requestPartitionProducerState( eq(task.getJobID()), eq(resultId), eq(partitionId))) .thenReturn(promise); task.requestPartitionProducerState( resultId, partitionId, checkResult -> { if (remoteChannelStateChecker.isProducerReadyOrAbortConsumption( checkResult)) { callCount.incrementAndGet(); } }); promise.complete(ExecutionState.RUNNING); assertEquals(ExecutionState.RUNNING, task.getExecutionState()); assertEquals(1, callCount.get()); } finally { task.getExecutingThread().interrupt(); 
task.getExecutingThread().join(); } } }
Tests the trigger partition state update future completions.
testTriggerPartitionStateUpdate
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskTest.java
Apache-2.0
@Test public void testInterruptibleSharedLockInInvokeAndCancel() throws Exception { final TaskManagerActions taskManagerActions = new ProhibitFatalErrorTaskManagerActions(); final Configuration config = new Configuration(); config.set(TaskManagerOptions.TASK_CANCELLATION_INTERVAL, Duration.ofMillis(5L)); config.set(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT, Duration.ofMillis(1000L)); final Task task = createTaskBuilder() .setInvokable(InvokableInterruptibleSharedLockInInvokeAndCancel.class) .setTaskManagerConfig(config) .setTaskManagerActions(taskManagerActions) .build(Executors.directExecutor()); task.startTaskThread(); awaitInvokableLatch(task); task.cancelExecution(); task.getExecutingThread().join(); }
The 'invoke' method holds a lock (trigger awaitLatch after acquisition) and cancel cannot complete because it also tries to acquire the same lock. This is resolved by the watch dog, no fatal error.
testInterruptibleSharedLockInInvokeAndCancel
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskTest.java
Apache-2.0
@Test public void testFatalErrorAfterUnInterruptibleInvoke() throws Exception { final CompletableFuture<Throwable> fatalErrorFuture = new CompletableFuture<>(); final TestingTaskManagerActions taskManagerActions = TestingTaskManagerActions.newBuilder() .setNotifyFatalErrorConsumer( (s, throwable) -> fatalErrorFuture.complete(throwable)) .build(); final Configuration config = new Configuration(); config.set(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT, Duration.ofMillis(10L)); final Task task = createTaskBuilder() .setInvokable(InvokableUnInterruptibleBlockingInvoke.class) .setTaskManagerConfig(config) .setTaskManagerActions(taskManagerActions) .build(Executors.directExecutor()); try { task.startTaskThread(); awaitInvokableLatch(task); task.cancelExecution(); // wait for the notification of notifyFatalError final Throwable fatalError = fatalErrorFuture.join(); assertThat(fatalError, is(notNullValue())); } finally { // Interrupt again to clean up Thread triggerInvokableLatch(task); task.getExecutingThread().interrupt(); task.getExecutingThread().join(); } }
The 'invoke' method blocks infinitely, but cancel() does not block. Only resolved by a fatal error.
testFatalErrorAfterUnInterruptibleInvoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskTest.java
Apache-2.0
@Test public void testFatalErrorOnCanceling() throws Exception { final CompletableFuture<Throwable> fatalErrorFuture = new CompletableFuture<>(); final TestingTaskManagerActions taskManagerActions = TestingTaskManagerActions.newBuilder() .setNotifyFatalErrorConsumer( (s, throwable) -> fatalErrorFuture.complete(throwable)) .build(); final Configuration config = new Configuration(); config.set(TaskManagerOptions.TASK_CANCELLATION_INTERVAL, Duration.ofMillis(5L)); config.set(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT, Duration.ofMillis(50L)); // We need to remember the original object since all changes in `startTaskThread` applies // to it rather than to spy object. Task task = createTaskBuilder() .setInvokable(InvokableBlockingWithTrigger.class) .setTaskManagerConfig(config) .setTaskManagerActions(taskManagerActions) .build(Executors.directExecutor()); final Task spyTask = spy(task); final Class<OutOfMemoryError> fatalErrorType = OutOfMemoryError.class; doThrow(fatalErrorType) .when(spyTask) .cancelOrFailAndCancelInvokableInternal(eq(ExecutionState.CANCELING), eq(null)); try { spyTask.startTaskThread(); awaitInvokableLatch(task); spyTask.cancelExecution(); // wait for the notification of notifyFatalError final Throwable fatalError = fatalErrorFuture.join(); assertThat(fatalError, instanceOf(fatalErrorType)); } finally { triggerInvokableLatch(task); } }
Tests that a fatal error gotten from canceling task is notified.
testFatalErrorOnCanceling
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskTest.java
Apache-2.0
@Override public void notifyFatalError(String message, Throwable cause) { throw new RuntimeException("Unexpected FatalError notification"); }
Customized TaskManagerActions that ensures no call of notifyFatalError.
notifyFatalError
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskTest.java
Apache-2.0
@Override public void invoke() { awaitTriggerLatch(); throw new RuntimeException("test"); }
{@link AbstractInvokable} which throws {@link RuntimeException} on invoke.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskTest.java
Apache-2.0
@Override public void invoke() { awaitTriggerLatch(); throw new CancelTaskException(); }
{@link AbstractInvokable} which throws {@link CancelTaskException} on invoke.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TaskTest.java
Apache-2.0
@Override public void invoke() throws Exception { final Object o = new Object(); //noinspection SynchronizationOnLocalVariableOrMethodParameter synchronized (o) { //noinspection InfiniteLoopStatement while (true) { o.wait(); } } }
A task that does nothing but blocks indefinitely, until the executing thread is interrupted.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testtasks/BlockingNoOpInvokable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testtasks/BlockingNoOpInvokable.java
Apache-2.0
@Override public void invoke() {}
A simple task that does nothing and finishes immediately.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testtasks/NoOpInvokable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testtasks/NoOpInvokable.java
Apache-2.0
@Override public void invoke() throws Exception { Thread.sleep(waitingTime); }
A simple task that does nothing and finishes after a short delay of 100 milliseconds.
invoke
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testtasks/WaitingNoOpInvokable.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testtasks/WaitingNoOpInvokable.java
Apache-2.0
public static String getCurrentClasspath() { RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean(); return bean.getClassPath(); }
Gets the classpath with which the current JVM was started. @return The classpath with which the current JVM was started.
getCurrentClasspath
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/CommonTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/CommonTestUtils.java
Apache-2.0
public static File createTemporaryLog4JProperties() throws IOException { File log4jProps = File.createTempFile(FileUtils.getRandomFilename(""), "-log4j.properties"); log4jProps.deleteOnExit(); CommonTestUtils.printLog4jDebugConfig(log4jProps); return log4jProps; }
Create a temporary log4j configuration for the test.
createTemporaryLog4JProperties
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/CommonTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/CommonTestUtils.java
Apache-2.0
public static void waitForCheckpoint(JobID jobID, MiniCluster miniCluster, int numCheckpoints) throws Exception { waitForCheckpoints( jobID, miniCluster, checkpointStatsSnapshot -> checkpointStatsSnapshot != null && checkpointStatsSnapshot .getCounts() .getNumberOfCompletedCheckpoints() >= numCheckpoints); }
Wait for (at least) the given number of successful checkpoints.
waitForCheckpoint
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/CommonTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/CommonTestUtils.java
Apache-2.0
public static Optional<String> getLatestCompletedCheckpointPath( JobID jobID, MiniCluster cluster) throws InterruptedException, ExecutionException, FlinkJobNotFoundException { return Optional.ofNullable( cluster.getExecutionGraph(jobID).get().getCheckpointStatsSnapshot()) .flatMap( stats -> Optional.ofNullable( stats.getHistory().getLatestCompletedCheckpoint())) .map(CompletedCheckpointStats::getExternalPath); }
@return the path as {@link java.net.URI} to the latest checkpoint. @throws FlinkJobNotFoundException if job not found
getLatestCompletedCheckpointPath
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/CommonTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/CommonTestUtils.java
Apache-2.0
public static void main(String[] args) { try { ParameterTool params = ParameterTool.fromArgs(args); Configuration config = params.getConfiguration(); LOG.info("Configuration: {}.", config); config.set(JobManagerOptions.PORT, 0); config.set(RestOptions.BIND_PORT, "0"); final StandaloneSessionClusterEntrypoint clusterEntrypoint = new StandaloneSessionClusterEntrypoint(config); ClusterEntrypoint.runClusterEntrypoint(clusterEntrypoint); } catch (Throwable t) { LOG.error("Failed to start Dispatcher process", t); System.exit(1); } }
Entrypoint of the DispatcherProcessEntryPoint. <p>Other arguments are parsed to a {@link Configuration} and passed to the Dispatcher, for instance: <code>--high-availability ZOOKEEPER --high-availability.zookeeper.quorum "xyz:123:456"</code>.
main
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/DispatcherProcess.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/DispatcherProcess.java
Apache-2.0
@Override public MetricReporter createMetricReporter(Properties properties) { String id = properties.getProperty(ID); checkState( id != null, "Reporter id not found. Did you use InMemoryReporter#addConfiguration?"); return REPORTERS.get(UUID.fromString(id)); }
The factory for the {@link InMemoryReporter}.
createMetricReporter
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/InMemoryReporter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/InMemoryReporter.java
Apache-2.0
@Test void testRandomizationWithSeed() { final Double[] alternatives = IntStream.range(0, 1000).boxed().map(Double::valueOf).toArray(Double[]::new); final Set<Double> uniqueValues = new HashSet<>(1); for (int i = 0; i < 100; i++) { final PseudoRandomValueSelector selector = PseudoRandomValueSelector.create("test" + i); uniqueValues.add(selectValue(selector, CPU_CORES, alternatives)); } assertThat(uniqueValues).hasSizeGreaterThan(1); }
Tests that the selector will return different values for different seeds.
testRandomizationWithSeed
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/PseudoRandomValueSelectorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/PseudoRandomValueSelectorTest.java
Apache-2.0
@Test void testStableRandomization() { final Double[] doubles = IntStream.range(0, 1000).boxed().map(Double::valueOf).toArray(Double[]::new); final MemorySize[] memorySizes = IntStream.range(0, 1000) .mapToObj(MemorySize::ofMebiBytes) .toArray(MemorySize[]::new); final String[] strings = IntStream.range(0, 1000).mapToObj(i -> "string" + i).toArray(String[]::new); final Set<Tuple3<Double, MemorySize, String>> uniqueValues = new HashSet<>(1); for (int i = 0; i < 100; i++) { final PseudoRandomValueSelector selector = PseudoRandomValueSelector.create("test"); uniqueValues.add( new Tuple3<>( selectValue(selector, CPU_CORES, doubles), selectValue(selector, TOTAL_PROCESS_MEMORY, memorySizes), selectValue(selector, SAVEPOINT_DIRECTORY, strings))); } assertThat(uniqueValues).hasSize(1); }
Tests that the selector produces the same value for the same seed.
testStableRandomization
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/PseudoRandomValueSelectorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/PseudoRandomValueSelectorTest.java
Apache-2.0
@Test void readCommitId() { assumeNotNull(ZooKeeperTestUtils.runsOnCIInfrastructure()); // this information is only valid after executing process-resources on flink-runtime final String envCommitId = EnvironmentInformation.getGitCommitId(); assumeFalse(envCommitId.equals(EnvironmentInformation.UNKNOWN_COMMIT_ID)); // test if git is available try { new ProcessBuilder("git", "version").start(); } catch (IOException e) { assumeNoException(e); } final Optional<String> gitCommitId = PseudoRandomValueSelector.getGitCommitId(); assertThat(gitCommitId).isPresent().contains(envCommitId); }
Tests that reading through git command yields the same as {@link EnvironmentInformation}. <p>This test assumes that both sources of information are available (CI).
readCommitId
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/PseudoRandomValueSelectorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/PseudoRandomValueSelectorTest.java
Apache-2.0
public CompletableFuture<Integer> getSystemExitFuture() { return systemExitFuture; }
Returns a {@link CompletableFuture} that is completed with the exit code when {@link System#exit(int)} is called.
getSystemExitFuture
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/SystemExitTrackingSecurityManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/SystemExitTrackingSecurityManager.java
Apache-2.0
@Override public void run() { try { destroy(); } catch (Throwable t) { LOG.error("Error during process cleanup shutdown hook.", t); } }
flag to mark the process as already destroyed
run
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/TestJvmProcess.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/TestJvmProcess.java
Apache-2.0
public void setJVMMemory(int jvmMemoryInMb) { checkArgument(jvmMemoryInMb >= 80, "Process JVM Requires at least 80 MBs of memory."); checkState(process == null, "Cannot set memory after process was started"); this.jvmMemoryInMb = jvmMemoryInMb; }
Sets the memory for the process (<code>-Xms</code> and <code>-Xmx</code> flags) (>= 80). @param jvmMemoryInMb Amount of memory in Megabytes for the JVM (>= 80).
setJVMMemory
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/TestJvmProcess.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/TestJvmProcess.java
Apache-2.0
public static Configuration createZooKeeperHAConfig( String zooKeeperQuorum, String fsStateHandlePath) { return configureZooKeeperHA(new Configuration(), zooKeeperQuorum, fsStateHandlePath); }
Creates a configuration to operate in {@link HighAvailabilityMode#ZOOKEEPER}. @param zooKeeperQuorum ZooKeeper quorum to connect to @param fsStateHandlePath Base path for file system state backend (for checkpoints and recovery) @return A new configuration to operate in {@link HighAvailabilityMode#ZOOKEEPER}.
createZooKeeperHAConfig
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/ZooKeeperTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/ZooKeeperTestUtils.java
Apache-2.0
public static Configuration configureZooKeeperHA( Configuration config, String zooKeeperQuorum, String fsStateHandlePath) { checkNotNull(config, "Configuration"); checkNotNull(zooKeeperQuorum, "ZooKeeper quorum"); checkNotNull(fsStateHandlePath, "File state handle backend path"); // ZooKeeper recovery mode config.set(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER"); config.set(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, zooKeeperQuorum); int connTimeout = 5000; if (runsOnCIInfrastructure()) { // The regular timeout is to aggressive for Travis and connections are often lost. LOG.info( "Detected CI environment: Configuring connection and session timeout of 30 seconds"); connTimeout = 30000; } config.set( HighAvailabilityOptions.ZOOKEEPER_CONNECTION_TIMEOUT, Duration.ofMillis(connTimeout)); config.set( HighAvailabilityOptions.ZOOKEEPER_SESSION_TIMEOUT, Duration.ofMillis(connTimeout)); // File system state backend config.set(StateBackendOptions.STATE_BACKEND, "hashmap"); config.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, fsStateHandlePath + "/checkpoints"); config.set(HighAvailabilityOptions.HA_STORAGE_PATH, fsStateHandlePath + "/recovery"); config.set(RpcOptions.ASK_TIMEOUT_DURATION, Duration.ofSeconds(100)); return config; }
Sets all necessary configuration keys to operate in {@link HighAvailabilityMode#ZOOKEEPER}. @param config Configuration to use @param zooKeeperQuorum ZooKeeper quorum to connect to @param fsStateHandlePath Base path for file system state backend (for checkpoints and recovery) @return The modified configuration to operate in {@link HighAvailabilityMode#ZOOKEEPER}.
configureZooKeeperHA
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/ZooKeeperTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/ZooKeeperTestUtils.java
Apache-2.0
public static boolean runsOnCIInfrastructure() { return System.getenv().containsKey("CI") || System.getenv().containsKey("TF_BUILD"); }
@return true, if a CI environment is detected.
runsOnCIInfrastructure
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/ZooKeeperTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/ZooKeeperTestUtils.java
Apache-2.0
public static RecordSerializer get() { return INSTANCE; }
Implementation of the (de)serialization and copying logic for the {@link Record}.
get
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/recordutils/RecordSerializer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/recordutils/RecordSerializer.java
Apache-2.0
@Test void testSizeWithMaxSize0() { final BoundedFIFOQueue<Integer> testInstance = new BoundedFIFOQueue<>(0); assertThat(testInstance).isEmpty(); testInstance.add(1); assertThat(testInstance).isEmpty(); }
Tests that {@link BoundedFIFOQueue#size()} returns the number of elements currently stored in the queue with a {@code maxSize} of 0.
testSizeWithMaxSize0
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/util/BoundedFIFOQueueTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/util/BoundedFIFOQueueTest.java
Apache-2.0
@Test void testSizeWithMaxSize2() { final BoundedFIFOQueue<Integer> testInstance = new BoundedFIFOQueue<>(2); assertThat(testInstance).isEmpty(); testInstance.add(5); assertThat(testInstance).hasSize(1); testInstance.add(6); assertThat(testInstance).hasSize(2); // adding a 3rd element won't increase the size anymore testInstance.add(7); assertThat(testInstance).hasSize(2); }
Tests that {@link BoundedFIFOQueue#size()} returns the number of elements currently stored in the queue with a {@code maxSize} of 2.
testSizeWithMaxSize2
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/util/BoundedFIFOQueueTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/util/BoundedFIFOQueueTest.java
Apache-2.0
@Override public void onFatalError(Throwable exception) { FatalExitExceptionHandler.INSTANCE.uncaughtException(Thread.currentThread(), exception); }
Fatal error handler that exits the JVM.
onFatalError
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/util/ExitJVMFatalErrorHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/util/ExitJVMFatalErrorHandler.java
Apache-2.0
public static void main(String[] args) throws Exception { Configuration configuration = new Configuration(); // configure FlinkSecurityManager to intercept calls to System.exit(). configuration.set( ClusterOptions.INTERCEPT_USER_SYSTEM_EXIT, ClusterOptions.UserSystemExitMode.THROW); FlinkSecurityManager.setFromConfiguration(configuration); FlinkSecurityManager.forceProcessExit(222); System.exit(0); }
An entry point that attempts to force exit the process.
main
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/util/FlinkSecurityManagerITCase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/util/FlinkSecurityManagerITCase.java
Apache-2.0
public static void main(String[] args) throws Exception { Configuration configuration = new Configuration(); // configure FlinkSecurityManager to intercept calls to System.exit(). configuration.set( ClusterOptions.INTERCEPT_USER_SYSTEM_EXIT, ClusterOptions.UserSystemExitMode.THROW); FlinkSecurityManager.setFromConfiguration(configuration); FlinkSecurityManager.monitorUserSystemExitForCurrentThread(); // expect this call to be ignored try { System.exit(123); } catch (Throwable t) { System.err.println( "Caught exception during system exit with message: " + t.getMessage()); } }
An entry point that attempts to exit the process but expects that call to be ignored.
main
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/util/FlinkSecurityManagerITCase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/util/FlinkSecurityManagerITCase.java
Apache-2.0
public static List<Throwable> getExceptionCauses(Throwable e, List<Throwable> causes) { if (e.getCause() == null) { return causes; } else { causes.add(e.getCause()); getExceptionCauses(e.getCause(), causes); } return causes; }
A utility method to analyze the exceptions and collect the clauses @param e the root exception (Throwable) object @param causes the list of exceptions that caused the root exceptions @return a list of Throwable
getExceptionCauses
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/util/StartupUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/util/StartupUtils.java
Apache-2.0
public TaskEvent getLastReceivedEvent() { return this.receivedEvent; }
Returns the event which was last received by this event listener. If no event has been received so far the return value is <code>null</code>. @return the event which was last received, possibly <code>null</code>
getLastReceivedEvent
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/util/event/TaskEventHandlerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/util/event/TaskEventHandlerTest.java
Apache-2.0
@Test void testEventNotificationManager() { final TaskEventHandler evm = new TaskEventHandler(); final TestEventListener listener = new TestEventListener(); evm.subscribe(listener, StringTaskEvent.class); final StringTaskEvent stringTaskEvent1 = new StringTaskEvent("Test 1"); evm.publish(stringTaskEvent1); evm.publish(new IntegerTaskEvent(5)); assertThat(listener.getLastReceivedEvent()).isNotNull(); StringTaskEvent receivedStringEvent = (StringTaskEvent) listener.getLastReceivedEvent(); assertThat(stringTaskEvent1).isEqualTo(receivedStringEvent); }
Tests the publish/subscribe mechanisms implemented in the {@link TaskEventHandler}.
testEventNotificationManager
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/util/event/TaskEventHandlerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/util/event/TaskEventHandlerTest.java
Apache-2.0
@SuppressWarnings("Convert2MethodRef") public static void main(String[] args) throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); DataStreamSource<String> input = env.fromData("Please filter", "the words", "but not this"); DataStream<String> output = input.filter((v) -> WordFilter.filter(v)); output.print(); env.execute(); }
Filter with lambda that is directly passed to {@link DataStream#filter(FilterFunction)}.
main
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/util/jartestprogram/FilterWithLambda.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/util/jartestprogram/FilterWithLambda.java
Apache-2.0
public static boolean filter(String value) { return !value.contains("not"); }
Static filter method for lambda tests.
filter
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/util/jartestprogram/WordFilter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/util/jartestprogram/WordFilter.java
Apache-2.0
private boolean isNoPermissionOrAllocateSymbol(ProfilingInfo profilingInfo) { boolean isNoPermission = profilingInfo.getStatus() == ProfilingInfo.ProfilingStatus.FAILED && !StringUtils.isNullOrWhitespaceOnly(profilingInfo.getMessage()) && profilingInfo.getMessage().contains(NO_ACCESS_TO_PERF_EVENTS); boolean isNoAllocateSymbol = profilingInfo.getStatus() == ProfilingInfo.ProfilingStatus.FAILED && !StringUtils.isNullOrWhitespaceOnly(profilingInfo.getMessage()) && profilingInfo.getMessage().contains(NO_ALLOC_SYMBOL_FOUND); return isNoPermission || isNoAllocateSymbol; }
Check profiling instance failed caused by no permission to perf_events or missing of JDK debug symbols. @return true if no permission to access perf_events or no AllocTracer symbols found.
isNoPermissionOrAllocateSymbol
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/util/profiler/ProfilingServiceTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/util/profiler/ProfilingServiceTest.java
Apache-2.0
@Test void testRpcGatewayRetrieval() throws Exception { final String expectedValue = "foobar"; final String expectedValue2 = "barfoo"; final UUID leaderSessionId = UUID.randomUUID(); RpcGatewayRetriever<UUID, DummyGateway> gatewayRetriever = new RpcGatewayRetriever<>( rpcService, DummyGateway.class, Function.identity(), TestingRetryStrategies.NO_RETRY_STRATEGY); SettableLeaderRetrievalService settableLeaderRetrievalService = new SettableLeaderRetrievalService(); DummyRpcEndpoint dummyRpcEndpoint = new DummyRpcEndpoint(rpcService, "dummyRpcEndpoint1", expectedValue); DummyRpcEndpoint dummyRpcEndpoint2 = new DummyRpcEndpoint(rpcService, "dummyRpcEndpoint2", expectedValue2); rpcService.registerGateway( dummyRpcEndpoint.getAddress(), dummyRpcEndpoint.getSelfGateway(DummyGateway.class)); rpcService.registerGateway( dummyRpcEndpoint2.getAddress(), dummyRpcEndpoint2.getSelfGateway(DummyGateway.class)); try { dummyRpcEndpoint.start(); dummyRpcEndpoint2.start(); settableLeaderRetrievalService.start(gatewayRetriever); final CompletableFuture<DummyGateway> gatewayFuture = gatewayRetriever.getFuture(); assertThat(gatewayFuture).isNotDone(); settableLeaderRetrievalService.notifyListener( dummyRpcEndpoint.getAddress(), leaderSessionId); final DummyGateway dummyGateway = gatewayFuture.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); assertThat(dummyGateway.getAddress()).isEqualTo(dummyRpcEndpoint.getAddress()); FlinkAssertions.assertThatFuture(dummyGateway.foobar(TIMEOUT)) .eventuallySucceeds() .isEqualTo(expectedValue); // elect a new leader settableLeaderRetrievalService.notifyListener( dummyRpcEndpoint2.getAddress(), leaderSessionId); final CompletableFuture<DummyGateway> gatewayFuture2 = gatewayRetriever.getFuture(); final DummyGateway dummyGateway2 = gatewayFuture2.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); assertThat(dummyGateway2.getAddress()).isEqualTo(dummyRpcEndpoint2.getAddress()); FlinkAssertions.assertThatFuture(dummyGateway2.foobar(TIMEOUT)) 
.eventuallySucceeds() .isEqualTo(expectedValue2); } finally { RpcUtils.terminateRpcEndpoint(dummyRpcEndpoint, dummyRpcEndpoint2); } }
Tests that the RpcGatewayRetriever can retrieve the specified gateway type from a leader retrieval service.
testRpcGatewayRetrieval
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/retriever/impl/RpcGatewayRetrieverTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/retriever/impl/RpcGatewayRetrieverTest.java
Apache-2.0
@Override public CompletableFuture<MetricQueryServiceGateway> retrieveService(String queryServicePath) { return FutureUtils.completedExceptionally( new FlinkException( "Cannot retrieve metric query service for " + queryServicePath + '.')); }
{@link MetricQueryServiceRetriever} implementation which always fails the retrieval of the metric query service.
retrieveService
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/retriever/impl/VoidMetricQueryServiceRetriever.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/retriever/impl/VoidMetricQueryServiceRetriever.java
Apache-2.0
@Test void testThreadInfoRequestWithException() throws Exception { Map<ImmutableSet<ExecutionAttemptID>, CompletableFuture<TaskExecutorThreadInfoGateway>> executionWithGateways = createMockSubtaskWithGateways( CompletionType.SUCCESSFULLY, CompletionType.EXCEPTIONALLY); CompletableFuture<VertexThreadInfoStats> requestFuture = coordinator.triggerThreadInfoRequest( executionWithGateways, DEFAULT_NUMBER_OF_SAMPLES, DEFAULT_DELAY_BETWEEN_SAMPLES, DEFAULT_MAX_STACK_TRACE_DEPTH); assertThatThrownBy(requestFuture::get, "The request must be failed.") .isInstanceOf(ExecutionException.class) .hasCauseInstanceOf(RuntimeException.class); }
Tests that failed thread info request to one of the tasks fails the future.
testThreadInfoRequestWithException
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/ThreadInfoRequestCoordinatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/ThreadInfoRequestCoordinatorTest.java
Apache-2.0
@Test void testThreadInfoRequestTimeout() throws Exception { Map<ImmutableSet<ExecutionAttemptID>, CompletableFuture<TaskExecutorThreadInfoGateway>> executionWithGateways = createMockSubtaskWithGateways( CompletionType.SUCCESSFULLY, CompletionType.TIMEOUT); CompletableFuture<VertexThreadInfoStats> requestFuture = coordinator.triggerThreadInfoRequest( executionWithGateways, DEFAULT_NUMBER_OF_SAMPLES, DEFAULT_DELAY_BETWEEN_SAMPLES, DEFAULT_MAX_STACK_TRACE_DEPTH); try { assertThatThrownBy(requestFuture::get, "The request must be failed.") .satisfies(anyCauseMatches(REQUEST_TIMEOUT_MESSAGE)); } finally { coordinator.shutDown(); } }
Tests that thread info stats request times out if not finished in time.
testThreadInfoRequestTimeout
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/ThreadInfoRequestCoordinatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/ThreadInfoRequestCoordinatorTest.java
Apache-2.0
@TestTemplate void testCachedStatsNotUpdatedWithinRefreshInterval() throws Exception { final VertexThreadInfoStats unusedThreadInfoStats = createThreadInfoStats(1, TIME_GAP); // Test for trigger request at job vertex level final VertexThreadInfoTracker tracker = createThreadInfoTracker( STATS_REFRESH_INTERVAL, threadInfoStatsDefaultSample, unusedThreadInfoStats); // stores threadInfoStatsDefaultSample in job vertex cache and execution vertex cache doInitialJobVertexRequestAndVerifyResult(tracker); Optional<VertexThreadInfoStats> result = tracker.getJobVertexStats(JOB_ID, executionJobVertex); // cached result is returned instead of unusedThreadInfoStats assertExpectedEqualsReceived(threadInfoStatsDefaultSample, result); for (int subtaskIndex = 0; subtaskIndex < PARALLELISM; subtaskIndex++) { assertExpectedEqualsReceived( generateThreadInfoStatsForExecutionVertex( threadInfoStatsDefaultSample, attemptIDS[subtaskIndex]), tracker.getExecutionVertexStats(JOB_ID, executionJobVertex, subtaskIndex)); } // Test for trigger request at execution vertex level for (int subtaskIndex = 0; subtaskIndex < PARALLELISM; subtaskIndex++) { final VertexThreadInfoTracker tracker1 = createThreadInfoTracker( STATS_REFRESH_INTERVAL, threadInfoStatsDefaultSample, unusedThreadInfoStats); // stores threadInfoStatsDefaultSample in cache doInitialExecutionVertexRequestAndVerifyResult(tracker1, subtaskIndex); // cached result is returned instead of unusedThreadInfoStats assertExpectedEqualsReceived( generateThreadInfoStatsForExecutionVertex( threadInfoStatsDefaultSample, attemptIDS[0]), tracker1.getExecutionVertexStats(JOB_ID, executionJobVertex, subtaskIndex)); } }
Tests that cached result is reused within refresh interval.
testCachedStatsNotUpdatedWithinRefreshInterval
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerTest.java
Apache-2.0
@TestTemplate void testJobVertexCachedStatsUpdatedAfterRefreshInterval() throws Exception { final Duration shortRefreshInterval = Duration.ofMillis(1000); // first entry is in the past, so refresh is triggered immediately upon fetching it final VertexThreadInfoStats initialThreadInfoStats = createThreadInfoStats( Instant.now().minus(10, ChronoUnit.SECONDS), REQUEST_ID, Duration.ofMillis(5)); final VertexThreadInfoStats threadInfoStatsAfterRefresh = createThreadInfoStats(1, TIME_GAP); // register a CountDownLatch with the cache so we can await refresh of the entry CountDownLatch cacheRefreshed = new CountDownLatch(1); Cache<VertexThreadInfoTracker.JobVertexKey, VertexThreadInfoStats> jobVertexStatsCache = createCache(CLEAN_UP_INTERVAL, new LatchRemovalListener<>(cacheRefreshed)); final VertexThreadInfoTracker tracker = createThreadInfoTracker( CLEAN_UP_INTERVAL, shortRefreshInterval, jobVertexStatsCache, null, initialThreadInfoStats, threadInfoStatsAfterRefresh); // no stats yet, but the request triggers async collection of stats assertThat(tracker.getJobVertexStats(JOB_ID, executionJobVertex)).isNotPresent(); // block until the async call completes and the first result is available tracker.getResultAvailableFuture().get(); // retrieve the entry, triggering the refresh as side effect assertExpectedEqualsReceived( initialThreadInfoStats, tracker.getJobVertexStats(JOB_ID, executionJobVertex)); // wait until the entry is refreshed cacheRefreshed.await(); // verify that we get the second result on the next request Optional<VertexThreadInfoStats> result = tracker.getJobVertexStats(JOB_ID, executionJobVertex); assertExpectedEqualsReceived(threadInfoStatsAfterRefresh, result); }
Tests that cached job vertex result is NOT reused after refresh interval.
testJobVertexCachedStatsUpdatedAfterRefreshInterval
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerTest.java
Apache-2.0
/**
 * Verifies, for every subtask, that a cached execution-vertex stats entry is refreshed once the
 * refresh interval has passed: the first (stale) sample is served once, its retrieval triggers
 * a re-sample, and the next request observes the refreshed stats.
 */
@TestTemplate
void testExecutionVertexCachedStatsUpdatedAfterRefreshInterval() throws Exception {
    final Duration shortRefreshInterval = Duration.ofMillis(1000);

    // first entry is in the past, so refresh is triggered immediately upon fetching it
    final VertexThreadInfoStats initialThreadInfoStats =
            createThreadInfoStats(
                    Instant.now().minus(10, ChronoUnit.SECONDS),
                    REQUEST_ID,
                    Duration.ofMillis(5));
    final VertexThreadInfoStats threadInfoStatsAfterRefresh = createThreadInfoStats(1, TIME_GAP);

    for (int subtaskIndex = 0; subtaskIndex < PARALLELISM; subtaskIndex++) {
        // register a CountDownLatch with the cache so we can await refresh of the entry
        CountDownLatch cacheRefreshed = new CountDownLatch(1);
        Cache<VertexThreadInfoTracker.ExecutionVertexKey, VertexThreadInfoStats>
                executionVertexStatsCache =
                        createCache(
                                CLEAN_UP_INTERVAL, new LatchRemovalListener<>(cacheRefreshed));
        final VertexThreadInfoTracker tracker =
                createThreadInfoTracker(
                        CLEAN_UP_INTERVAL,
                        shortRefreshInterval,
                        null,
                        executionVertexStatsCache,
                        initialThreadInfoStats,
                        threadInfoStatsAfterRefresh);

        // no stats yet, but the request triggers async collection of stats
        assertThat(tracker.getExecutionVertexStats(JOB_ID, executionJobVertex, subtaskIndex))
                .isNotPresent();
        // block until the async call completes and the first result is available
        tracker.getResultAvailableFuture().get();

        // retrieve the entry, triggering the refresh as side effect
        assertExpectedEqualsReceived(
                generateThreadInfoStatsForExecutionVertex(
                        initialThreadInfoStats, attemptIDS[subtaskIndex]),
                tracker.getExecutionVertexStats(JOB_ID, executionJobVertex, subtaskIndex));

        // wait until the entry is refreshed
        cacheRefreshed.await();

        // verify that we get the second result on the next request
        Optional<VertexThreadInfoStats> result =
                tracker.getExecutionVertexStats(JOB_ID, executionJobVertex, subtaskIndex);
        assertExpectedEqualsReceived(
                generateThreadInfoStatsForExecutionVertex(
                        threadInfoStatsAfterRefresh, attemptIDS[subtaskIndex]),
                result);
    }
}
Tests that cached execution vertex result is NOT reused after refresh interval.
testExecutionVertexCachedStatsUpdatedAfterRefreshInterval
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerTest.java
Apache-2.0
/**
 * Verifies that execution-vertex level requests are ignored (and trigger no additional
 * sampling) while the request for the enclosing job vertex is still pending, and that they are
 * served from the cached job-vertex result once it completes.
 */
@TestTemplate
void testExecutionVertexShouldBeIgnoredWhenJobVertexIsPending() throws Exception {
    CompletableFuture<VertexThreadInfoStats> statsFuture = new CompletableFuture<>();
    TestingBlockingAndCountableCoordinator coordinator =
            new TestingBlockingAndCountableCoordinator(statsFuture);
    final VertexThreadInfoTracker tracker =
            createThreadInfoTracker(
                    CLEAN_UP_INTERVAL, STATS_REFRESH_INTERVAL, null, null, coordinator);

    // Request the job vertex stats and keep pending
    assertThat(tracker.getJobVertexStats(JOB_ID, executionJobVertex)).isNotPresent();
    assertThat(coordinator.getTriggerCounter()).isOne();

    for (int subtaskIndex = 0; subtaskIndex < PARALLELISM; subtaskIndex++) {
        // These execution vertex requests shouldn't return any result, and should be ignored
        // directly because the corresponding job vertex request is still pending.
        assertThat(tracker.getExecutionVertexStats(JOB_ID, executionJobVertex, subtaskIndex))
                .isNotPresent();
        assertThat(tracker.getResultAvailableFuture()).isNotCompleted();
        assertThat(coordinator.getTriggerCounter()).isOne();
    }

    // Complete the job vertex request
    statsFuture.complete(threadInfoStatsDefaultSample);
    tracker.getResultAvailableFuture().get();

    for (int subtaskIndex = 0; subtaskIndex < PARALLELISM; subtaskIndex++) {
        assertThat(tracker.getExecutionVertexStats(JOB_ID, executionJobVertex, subtaskIndex))
                .isPresent();
        // These execution vertex requests should still be ignored because the cached result
        // is reused within the refresh interval.
        assertThat(coordinator.getTriggerCounter()).isOne();
    }
}
Tests that the execution vertex request should be ignored when the corresponding job vertex request is pending.
testExecutionVertexShouldBeIgnoredWhenJobVertexIsPending
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerTest.java
Apache-2.0
/**
 * Verifies that cached thread-info results are evicted once the cleanup interval has elapsed,
 * both for job-vertex level and execution-vertex level requests: after eviction, a request no
 * longer finds a cached entry.
 */
@TestTemplate
void testCachedStatsCleanedAfterCleanupInterval() throws Exception {
    final Duration shortCleanUpInterval = Duration.ofMillis(1);

    // register a CountDownLatch with the cache so we can await expiry of the entry
    CountDownLatch cacheExpired = new CountDownLatch(1);

    // Test for trigger request at job vertex level
    Cache<VertexThreadInfoTracker.JobVertexKey, VertexThreadInfoStats> jobVertexStatsCache =
            createCache(shortCleanUpInterval, new LatchRemovalListener<>(cacheExpired));
    final VertexThreadInfoTracker tracker =
            createThreadInfoTracker(
                    shortCleanUpInterval,
                    STATS_REFRESH_INTERVAL,
                    jobVertexStatsCache,
                    null,
                    threadInfoStatsDefaultSample);

    // no stats yet, but the request triggers async collection of stats
    assertThat(tracker.getJobVertexStats(JOB_ID, executionJobVertex)).isNotPresent();

    // wait until one eviction was registered
    cacheExpired.await();

    assertThat(tracker.getJobVertexStats(JOB_ID, executionJobVertex)).isNotPresent();

    // Test for trigger request at execution vertex level
    for (int subtaskIndex = 0; subtaskIndex < PARALLELISM; subtaskIndex++) {
        // register a CountDownLatch with the cache so we can await expiry of the entry
        CountDownLatch executionCacheExpired = new CountDownLatch(1);
        Cache<VertexThreadInfoTracker.ExecutionVertexKey, VertexThreadInfoStats>
                executionVertexStatsCache =
                        createCache(
                                shortCleanUpInterval,
                                new LatchRemovalListener<>(executionCacheExpired));
        final VertexThreadInfoTracker executionTracker =
                createThreadInfoTracker(
                        shortCleanUpInterval,
                        STATS_REFRESH_INTERVAL,
                        null,
                        executionVertexStatsCache,
                        threadInfoStatsDefaultSample);

        // no stats yet, but the request triggers async collection of stats
        assertThat(
                        executionTracker.getExecutionVertexStats(
                                JOB_ID, executionJobVertex, subtaskIndex))
                .isNotPresent();

        // wait until one eviction was registered
        executionCacheExpired.await();

        assertThat(
                        executionTracker.getExecutionVertexStats(
                                JOB_ID, executionJobVertex, subtaskIndex))
                .isNotPresent();
    }
}
Tests that cached results are removed within the cleanup interval.
testCachedStatsCleanedAfterCleanupInterval
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerTest.java
Apache-2.0
@TestTemplate void testCachedStatsNotCleanedWithinCleanupInterval() throws Exception { final VertexThreadInfoTracker tracker = createThreadInfoTracker(); doInitialJobVertexRequestAndVerifyResult(tracker); tracker.cleanUpStatsCache(); // the thread info stats with the same requestId should still be there assertExpectedEqualsReceived( threadInfoStatsDefaultSample, tracker.getJobVertexStats(JOB_ID, executionJobVertex)); for (int subtaskIndex = 0; subtaskIndex < PARALLELISM; subtaskIndex++) { assertExpectedEqualsReceived( generateThreadInfoStatsForExecutionVertex( threadInfoStatsDefaultSample, attemptIDS[subtaskIndex]), tracker.getExecutionVertexStats(JOB_ID, executionJobVertex, subtaskIndex)); } }
Tests that cached results are NOT removed within the cleanup interval.
testCachedStatsNotCleanedWithinCleanupInterval
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/threadinfo/VertexThreadInfoTrackerTest.java
Apache-2.0
@Test void testGetAllSortedByName() throws Exception { // Setup final TestingLongStateHandleHelper stateHandleProvider = new TestingLongStateHandleHelper(); ZooKeeperStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> store = new ZooKeeperStateHandleStore<>(getZooKeeperClient(), stateHandleProvider); // Config final String basePath = "/testGetAllSortedByName"; final Long[] expected = new Long[] {311222268470898L, 132812888L, 27255442L, 11122233124L}; // Test for (long val : expected) { final String pathInZooKeeper = String.format("%s%016d", basePath, val); store.addAndLock( pathInZooKeeper, new TestingLongStateHandleHelper.LongStateHandle(val)); } List<Tuple2<RetrievableStateHandle<TestingLongStateHandleHelper.LongStateHandle>, String>> actual = store.getAllAndLock(); assertThat(actual).hasSize(expected.length); // bring the elements in sort order Arrays.sort(expected); actual.sort(Comparator.comparing(o -> o.f1)); for (int i = 0; i < expected.length; i++) { assertThat((Long) actual.get(i).f0.retrieveState().getValue()).isEqualTo(expected[i]); } }
Tests that the state is returned sorted.
testGetAllSortedByName
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStoreTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStoreTest.java
Apache-2.0
@Test void testRemove() throws Exception { // Setup final TestingLongStateHandleHelper stateHandleProvider = new TestingLongStateHandleHelper(); ZooKeeperStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> store = new ZooKeeperStateHandleStore<>(getZooKeeperClient(), stateHandleProvider); // Config final String pathInZooKeeper = "/testRemove"; store.addAndLock( pathInZooKeeper, new TestingLongStateHandleHelper.LongStateHandle(27255442L)); final int numberOfGlobalDiscardCalls = TestingLongStateHandleHelper.getGlobalDiscardCount(); // Test store.releaseAndTryRemove(pathInZooKeeper); // Verify discarded assertThat(getZooKeeperClient().getChildren().forPath("/")).isEmpty(); assertThat(TestingLongStateHandleHelper.getGlobalDiscardCount()) .isEqualTo(numberOfGlobalDiscardCalls + 1); }
Tests that state handles are correctly removed.
testRemove
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStoreTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStoreTest.java
Apache-2.0
@Test void testCorruptedData() throws Exception { final TestingLongStateHandleHelper stateStorage = new TestingLongStateHandleHelper(); ZooKeeperStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> store = new ZooKeeperStateHandleStore<>(getZooKeeperClient(), stateStorage); final Collection<Long> input = new HashSet<>(); input.add(1L); input.add(2L); input.add(3L); for (Long aLong : input) { store.addAndLock("/" + aLong, new TestingLongStateHandleHelper.LongStateHandle(aLong)); } // corrupt one of the entries getZooKeeperClient().setData().forPath("/" + 2, new byte[2]); List<Tuple2<RetrievableStateHandle<TestingLongStateHandleHelper.LongStateHandle>, String>> allEntries = store.getAllAndLock(); Collection<Long> expected = new HashSet<>(input); expected.remove(2L); Collection<Long> actual = new HashSet<>(expected.size()); for (Tuple2<RetrievableStateHandle<TestingLongStateHandleHelper.LongStateHandle>, String> entry : allEntries) { actual.add(entry.f0.retrieveState().getValue()); } assertThat(actual).isEqualTo(expected); }
Tests that the ZooKeeperStateHandleStore can handle corrupted data by releasing and trying to remove the respective ZooKeeper ZNodes.
testCorruptedData
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStoreTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStoreTest.java
Apache-2.0
/**
 * FLINK-6612: verifies that a concurrent delete cannot succeed while another store instance
 * still holds a lock on the node — the ZNode survives the first removal attempt and is only
 * deleted once the second instance releases its lock as well.
 */
@Test
void testConcurrentDeleteOperation() throws Exception {
    final TestingLongStateHandleHelper longStateStorage = new TestingLongStateHandleHelper();

    ZooKeeperStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> zkStore1 =
            new ZooKeeperStateHandleStore<>(getZooKeeperClient(), longStateStorage);

    ZooKeeperStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> zkStore2 =
            new ZooKeeperStateHandleStore<>(getZooKeeperClient(), longStateStorage);

    final String statePath = "/state";

    zkStore1.addAndLock(statePath, new TestingLongStateHandleHelper.LongStateHandle(42L));
    RetrievableStateHandle<TestingLongStateHandleHelper.LongStateHandle> stateHandle =
            zkStore2.getAndLock(statePath);

    // this should not remove the referenced node because we are still holding a state handle
    // reference via zkStore2
    zkStore1.releaseAndTryRemove(statePath);

    // sanity check
    assertThat(stateHandle.retrieveState().getValue()).isEqualTo(42L);

    Stat nodeStat = getZooKeeperClient().checkExists().forPath(statePath);

    assertThat(nodeStat)
            .as("NodeStat should not be null, otherwise the referenced node does not exist.")
            .isNotNull();

    zkStore2.releaseAndTryRemove(statePath);

    nodeStat = getZooKeeperClient().checkExists().forPath(statePath);

    assertThat(nodeStat)
            .as("NodeState should be null, because the referenced node should no longer exist.")
            .isNull();
}
FLINK-6612 <p>Tests that a concurrent delete operation cannot succeed if another instance holds a lock on the specified node.
testConcurrentDeleteOperation
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStoreTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStoreTest.java
Apache-2.0
/**
 * FLINK-6612: verifies that {@code getAndLock} removes the lock node it created when the
 * {@code RetrievableStateHandle} cannot be retrieved (e.g. a deserialization problem), so a
 * failed read does not leak a lock that would block later removal.
 */
@Test
void testLockCleanupWhenGetAndLockFails() throws Exception {
    final TestingLongStateHandleHelper longStateStorage = new TestingLongStateHandleHelper();

    ZooKeeperStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> zkStore1 =
            new ZooKeeperStateHandleStore<>(getZooKeeperClient(), longStateStorage);

    ZooKeeperStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> zkStore2 =
            new ZooKeeperStateHandleStore<>(getZooKeeperClient(), longStateStorage);

    final String path = "/state";

    zkStore1.addAndLock(path, new TestingLongStateHandleHelper.LongStateHandle(42L));

    final byte[] corruptedData = {1, 2};

    // corrupt the data
    getZooKeeperClient().setData().forPath(path, corruptedData);

    assertThatExceptionOfType(IOException.class)
            .as("Should fail because we cannot deserialize the node's data")
            .isThrownBy(() -> zkStore2.getAndLock(path));

    // check that there is no lock node left
    String lockNodePath = zkStore2.getInstanceLockPath(path);

    Stat stat = getZooKeeperClient().checkExists().forPath(lockNodePath);

    // zkStore2 should not have created a lock node
    assertThat(stat).as("zkStore2 should not have created a lock node.").isNull();

    Collection<String> children = getZooKeeperClient().getChildren().forPath(path);

    // there should be exactly one lock node from zkStore1
    assertThat(children).hasSize(1);

    zkStore1.releaseAndTryRemove(path);

    stat = getZooKeeperClient().checkExists().forPath(path);

    assertThat(stat).as("The state node should have been removed.").isNull();
}
FLINK-6612 <p>Tests that getAndLock removes a created lock if the RetrievableStateHandle cannot be retrieved (e.g. deserialization problem).
testLockCleanupWhenGetAndLockFails
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStoreTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStoreTest.java
Apache-2.0
/**
 * FLINK-6612: verifies that lock nodes are released when the owning client dies. Lock nodes
 * are ephemeral, so closing the first client's session must remove its lock while leaving the
 * state node itself intact — observed here through a second, independent client.
 */
@Test
void testLockCleanupWhenClientTimesOut() throws Exception {
    final TestingLongStateHandleHelper longStateStorage = new TestingLongStateHandleHelper();

    Configuration configuration = new Configuration();
    configuration.set(
            HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, zooKeeperExtension.getConnectString());
    // short session timeout so the dead session is detected quickly
    configuration.set(
            HighAvailabilityOptions.ZOOKEEPER_SESSION_TIMEOUT, Duration.ofMillis(100));
    configuration.set(HighAvailabilityOptions.HA_ZOOKEEPER_ROOT, "timeout");

    try (CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper =
                    ZooKeeperUtils.startCuratorFramework(
                            configuration, NoOpFatalErrorHandler.INSTANCE);
            CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper2 =
                    ZooKeeperUtils.startCuratorFramework(
                            configuration, NoOpFatalErrorHandler.INSTANCE)) {
        CuratorFramework client = curatorFrameworkWrapper.asCuratorFramework();
        CuratorFramework client2 = curatorFrameworkWrapper2.asCuratorFramework();

        ZooKeeperStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> zkStore =
                new ZooKeeperStateHandleStore<>(client, longStateStorage);

        final String path = "/state";

        zkStore.addAndLock(path, new TestingLongStateHandleHelper.LongStateHandle(42L));

        // this should delete all ephemeral nodes
        client.close();

        Stat stat = client2.checkExists().forPath(path);

        // check that our state node still exists
        assertThat(stat).isNotNull();

        Collection<String> children =
                client2.getChildren().forPath(ZooKeeperStateHandleStore.getRootLockPath(path));

        // check that the lock node has been released
        assertThat(children).isEmpty();
    }
}
FLINK-6612 <p>Tests that lock nodes will be released if the client dies.
testLockCleanupWhenClientTimesOut
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStoreTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStoreTest.java
Apache-2.0
@Test void testRelease() throws Exception { final TestingLongStateHandleHelper longStateStorage = new TestingLongStateHandleHelper(); ZooKeeperStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> zkStore = new ZooKeeperStateHandleStore<>(getZooKeeperClient(), longStateStorage); final String path = "/state"; zkStore.addAndLock(path, new TestingLongStateHandleHelper.LongStateHandle(42L)); final String lockPath = zkStore.getInstanceLockPath(path); Stat stat = getZooKeeperClient().checkExists().forPath(lockPath); assertThat(stat).as("Expected an existing lock").isNotNull(); zkStore.release(path); stat = getZooKeeperClient() .checkExists() .forPath(ZooKeeperStateHandleStore.getRootLockPath(path)); // release should have removed the lock child assertThat(stat.getNumChildren()).as("Expected no lock nodes as children").isZero(); zkStore.releaseAndTryRemove(path); stat = getZooKeeperClient().checkExists().forPath(path); assertThat(stat).as("State node should have been removed.").isNull(); }
FLINK-6612 <p>Tests that we can release a locked state handles in the ZooKeeperStateHandleStore.
testRelease
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStoreTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/zookeeper/ZooKeeperStateHandleStoreTest.java
Apache-2.0
/** Tests the constructor path for new (FLIP-27) sources: parallelism is settable and applied. */
@Test
void testConstructor() {
    final int parallelism = 100;

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final DataStreamSource<Integer> stream =
            env.fromSource(
                    new MockSource(Boundedness.BOUNDED, 10),
                    WatermarkStrategy.noWatermarks(),
                    "TestingSource");

    stream.setParallelism(parallelism);

    assertThat(stream.isParallel()).isTrue();
    assertThat(stream.getParallelism()).isEqualTo(parallelism);
}
Test constructor for new Sources (FLIP-27).
testConstructor
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/datastream/DataStreamSourceTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/datastream/DataStreamSourceTest.java
Apache-2.0
/**
 * Verifies that firing event-time timers see the keyed state of the key that was active when
 * the timer was registered: the timer set for key 0 fires with key 0's state ("HELLO") and the
 * timer set for key 1 fires with key 1's state ("CIAO"), regardless of registration order.
 */
@Test
void testEventTimeTimersDontInterfere() throws Exception {
    try (KeyedOneInputStreamOperatorTestHarness<Integer, Tuple2<Integer, String>, String>
            testHarness = createTestHarness()) {
        testHarness.open();

        testHarness.processWatermark(0L);

        testHarness.processElement(new Tuple2<>(1, "SET_EVENT_TIME_TIMER:20"), 0);

        testHarness.processElement(new Tuple2<>(0, "SET_STATE:HELLO"), 0);
        testHarness.processElement(new Tuple2<>(1, "SET_STATE:CIAO"), 0);

        testHarness.processElement(new Tuple2<>(0, "SET_EVENT_TIME_TIMER:10"), 0);

        testHarness.processWatermark(10L);

        assertThat(extractResult(testHarness)).contains("ON_EVENT_TIME:HELLO");

        testHarness.processWatermark(20L);

        assertThat(extractResult(testHarness)).contains("ON_EVENT_TIME:CIAO");
    }
}
Verify that firing event-time timers see the state of the key that was active when the timer was set.
testEventTimeTimersDontInterfere
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/AbstractStreamOperatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/AbstractStreamOperatorTest.java
Apache-2.0
/**
 * Verifies that firing processing-time timers see the keyed state of the key that was active
 * when the timer was registered: the timer set for key 0 fires with key 0's state ("HELLO")
 * and the timer set for key 1 fires with key 1's state ("CIAO").
 */
@Test
void testProcessingTimeTimersDontInterfere() throws Exception {
    try (KeyedOneInputStreamOperatorTestHarness<Integer, Tuple2<Integer, String>, String>
            testHarness = createTestHarness()) {
        testHarness.open();

        testHarness.setProcessingTime(0L);

        testHarness.processElement(new Tuple2<>(1, "SET_PROC_TIME_TIMER:20"), 0);

        testHarness.processElement(new Tuple2<>(0, "SET_STATE:HELLO"), 0);
        testHarness.processElement(new Tuple2<>(1, "SET_STATE:CIAO"), 0);

        testHarness.processElement(new Tuple2<>(0, "SET_PROC_TIME_TIMER:10"), 0);

        testHarness.setProcessingTime(10L);

        assertThat(extractResult(testHarness)).contains("ON_PROC_TIME:HELLO");

        testHarness.setProcessingTime(20L);

        assertThat(extractResult(testHarness)).contains("ON_PROC_TIME:CIAO");
    }
}
Verify that firing processing-time timers see the state of the key that was active when the timer was set.
testProcessingTimeTimersDontInterfere
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/AbstractStreamOperatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/AbstractStreamOperatorTest.java
Apache-2.0
/**
 * Verifies that processing-time timers survive a snapshot/restore cycle: after restoring from
 * the snapshot, the low-level timer is re-registered so the previously registered timers still
 * fire with the correct per-key state.
 */
@Test
void testEnsureProcessingTimeTimerRegisteredOnRestore() throws Exception {
    OperatorSubtaskState snapshot;
    try (KeyedOneInputStreamOperatorTestHarness<Integer, Tuple2<Integer, String>, String>
            testHarness = createTestHarness()) {
        testHarness.open();

        testHarness.setProcessingTime(0L);

        testHarness.processElement(new Tuple2<>(1, "SET_PROC_TIME_TIMER:20"), 0);

        testHarness.processElement(new Tuple2<>(0, "SET_STATE:HELLO"), 0);
        testHarness.processElement(new Tuple2<>(1, "SET_STATE:CIAO"), 0);

        testHarness.processElement(new Tuple2<>(0, "SET_PROC_TIME_TIMER:10"), 0);

        snapshot = testHarness.snapshot(0, 0);
    }

    try (KeyedOneInputStreamOperatorTestHarness<Integer, Tuple2<Integer, String>, String>
            testHarness1 = createTestHarness()) {
        testHarness1.setProcessingTime(0L);

        testHarness1.setup();
        testHarness1.initializeState(snapshot);
        testHarness1.open();

        // advancing time must fire the restored timers without re-registering them manually
        testHarness1.setProcessingTime(10L);

        assertThat(extractResult(testHarness1)).contains("ON_PROC_TIME:HELLO");

        testHarness1.setProcessingTime(20L);

        assertThat(extractResult(testHarness1)).contains("ON_PROC_TIME:CIAO");
    }
}
Verify that a low-level timer is set for processing-time timers in case of restore.
testEnsureProcessingTimeTimerRegisteredOnRestore
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/AbstractStreamOperatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/AbstractStreamOperatorTest.java
Apache-2.0
/**
 * Verifies that timers for the two time domains do not clash: an event-time timer and a
 * processing-time timer registered for the same key each fire independently in their own
 * domain, both observing the key's state.
 */
@Test
void testProcessingTimeAndEventTimeDontInterfere() throws Exception {
    try (KeyedOneInputStreamOperatorTestHarness<Integer, Tuple2<Integer, String>, String>
            testHarness = createTestHarness()) {
        testHarness.open();

        testHarness.setProcessingTime(0L);
        testHarness.processWatermark(0L);

        testHarness.processElement(new Tuple2<>(0, "SET_PROC_TIME_TIMER:10"), 0);
        testHarness.processElement(new Tuple2<>(0, "SET_EVENT_TIME_TIMER:20"), 0);

        testHarness.processElement(new Tuple2<>(0, "SET_STATE:HELLO"), 0);

        testHarness.processWatermark(20L);

        assertThat(extractResult(testHarness)).contains("ON_EVENT_TIME:HELLO");

        testHarness.setProcessingTime(10L);

        assertThat(extractResult(testHarness)).contains("ON_PROC_TIME:HELLO");
    }
}
Verify that timers for the different time domains don't clash.
testProcessingTimeAndEventTimeDontInterfere
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/AbstractStreamOperatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/AbstractStreamOperatorTest.java
Apache-2.0
/**
 * Extracts the emitted result values from the test harness output and clears the output queue.
 *
 * <p>The harness's output list is already typed as {@code StreamRecord<? extends T>}, so no raw
 * types, unchecked casts, or {@code instanceof} checks are needed; null elements (which the old
 * {@code instanceof} check silently skipped) are still skipped.
 *
 * @param testHarness the harness whose emitted stream records are drained
 * @return the values of all emitted stream records, in emission order
 */
private <T> List<T> extractResult(OneInputStreamOperatorTestHarness<?, T> testHarness) {
    List<StreamRecord<? extends T>> streamRecords = testHarness.extractOutputStreamRecords();
    List<T> result = new ArrayList<>(streamRecords.size());
    for (StreamRecord<? extends T> record : streamRecords) {
        if (record != null) {
            result.add(record.getValue());
        }
    }
    testHarness.getOutput().clear();
    return result;
}
Extracts the result values form the test harness and clear the output queue.
extractResult
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/AbstractStreamOperatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/AbstractStreamOperatorTest.java
Apache-2.0
/**
 * Verifies that the restore procedure tries the given restore options in iterator order and
 * retries with the next option when an attempt fails: the first (failing) handle is opened,
 * the second (valid) handle succeeds, the third is never touched, and the restored backend
 * contains exactly the snapshotted list state.
 */
@Test
void testRestoreProcedureOrderAndFailure() throws Exception {
    CloseableRegistry closeableRegistry = new CloseableRegistry();
    CheckpointStreamFactory checkpointStreamFactory = new MemCheckpointStreamFactory(1024);

    ListStateDescriptor<Integer> stateDescriptor =
            new ListStateDescriptor<>("test-state", Integer.class);
    OperatorStateBackend originalBackend = backendSupplier.apply(Collections.emptyList());
    SnapshotResult<OperatorStateHandle> snapshotResult;

    try {
        // create a valid snapshot to restore from
        ListState<Integer> listState = originalBackend.getListState(stateDescriptor);

        listState.add(0);
        listState.add(1);
        listState.add(2);
        listState.add(3);

        RunnableFuture<SnapshotResult<OperatorStateHandle>> snapshot =
                originalBackend.snapshot(
                        0L,
                        0L,
                        checkpointStreamFactory,
                        CheckpointOptions.forCheckpointWithDefaultLocation());

        snapshot.run();
        snapshotResult = snapshot.get();

    } finally {
        originalBackend.close();
        originalBackend.dispose();
    }

    // a mock handle without stubbing throws on openInputStream -> the first attempt fails
    OperatorStateHandle firstFailHandle = mock(OperatorStateHandle.class);
    OperatorStateHandle secondSuccessHandle = spy(snapshotResult.getJobManagerOwnedSnapshot());
    OperatorStateHandle thirdNotUsedHandle = mock(OperatorStateHandle.class);

    List<StateObjectCollection<OperatorStateHandle>> sortedRestoreOptions =
            Arrays.asList(
                    new StateObjectCollection<>(Collections.singletonList(firstFailHandle)),
                    new StateObjectCollection<>(Collections.singletonList(secondSuccessHandle)),
                    new StateObjectCollection<>(Collections.singletonList(thirdNotUsedHandle)));

    BackendRestorerProcedure<OperatorStateBackend, OperatorStateHandle> restorerProcedure =
            new BackendRestorerProcedure<>(
                    backendSupplier, closeableRegistry, "test op state backend");

    OperatorStateBackend restoredBackend =
            restorerProcedure.createAndRestore(
                    sortedRestoreOptions, StateObject.StateObjectSizeStatsCollector.create());
    assertThat(restoredBackend).isNotNull();

    try {
        // first and second options were attempted in order, the third never
        verify(firstFailHandle).openInputStream();
        verify(secondSuccessHandle).openInputStream();
        verify(thirdNotUsedHandle, times(0)).openInputStream();

        ListState<Integer> listState = restoredBackend.getListState(stateDescriptor);

        Iterator<Integer> stateIterator = listState.get().iterator();
        assertThat(stateIterator.next()).isZero();
        assertThat(stateIterator.next()).isOne();
        assertThat(stateIterator.next()).isEqualTo(2);
        assertThat(stateIterator.next()).isEqualTo(3);
        assertThat(stateIterator).isExhausted();

    } finally {
        restoredBackend.close();
        restoredBackend.dispose();
    }
}
Tests that the restore procedure follows the order of the iterator and will retry failed attempts if there are more options.
testRestoreProcedureOrderAndFailure
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/BackendRestorerProcedureTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/BackendRestorerProcedureTest.java
Apache-2.0
/**
 * Verifies that a {@code FlinkException} is thrown once every restore option has been attempted
 * and failed, and that each of the three options was actually tried (its handle opened) once.
 */
@Test
void testExceptionThrownIfAllRestoresFailed() throws Exception {
    CloseableRegistry closeableRegistry = new CloseableRegistry();

    // unstubbed mocks throw on openInputStream, so every restore attempt fails
    OperatorStateHandle firstFailHandle = mock(OperatorStateHandle.class);
    OperatorStateHandle secondFailHandle = mock(OperatorStateHandle.class);
    OperatorStateHandle thirdFailHandle = mock(OperatorStateHandle.class);

    List<StateObjectCollection<OperatorStateHandle>> sortedRestoreOptions =
            Arrays.asList(
                    new StateObjectCollection<>(Collections.singletonList(firstFailHandle)),
                    new StateObjectCollection<>(Collections.singletonList(secondFailHandle)),
                    new StateObjectCollection<>(Collections.singletonList(thirdFailHandle)));

    BackendRestorerProcedure<OperatorStateBackend, OperatorStateHandle> restorerProcedure =
            new BackendRestorerProcedure<>(
                    backendSupplier, closeableRegistry, "test op state backend");

    assertThatThrownBy(
                    () ->
                            restorerProcedure.createAndRestore(
                                    sortedRestoreOptions,
                                    StateObject.StateObjectSizeStatsCollector.create()))
            .isInstanceOf(FlinkException.class);

    verify(firstFailHandle).openInputStream();
    verify(secondFailHandle).openInputStream();
    verify(thirdFailHandle).openInputStream();
}
Tests that an exception is thrown if all restore attempts have been exhausted and failed.
testExceptionThrownIfAllRestoresFailed
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/BackendRestorerProcedureTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/BackendRestorerProcedureTest.java
Apache-2.0
/**
 * Verifies that an in-flight restore can be aborted through the provided closeable registry:
 * while the restore thread is blocked reading a state handle, closing the registry makes the
 * restore terminate with a {@code FlinkException} instead of hanging.
 */
@Test
void testCanBeCanceledViaRegistry() throws Exception {
    CloseableRegistry closeableRegistry = new CloseableRegistry();
    OneShotLatch waitForBlock = new OneShotLatch();
    OneShotLatch unblock = new OneShotLatch();
    OperatorStateHandle blockingRestoreHandle = mock(OperatorStateHandle.class);
    when(blockingRestoreHandle.openInputStream())
            .thenReturn(new BlockingFSDataInputStream(waitForBlock, unblock));

    List<StateObjectCollection<OperatorStateHandle>> sortedRestoreOptions =
            Collections.singletonList(
                    new StateObjectCollection<>(
                            Collections.singletonList(blockingRestoreHandle)));

    BackendRestorerProcedure<OperatorStateBackend, OperatorStateHandle> restorerProcedure =
            new BackendRestorerProcedure<>(
                    backendSupplier, closeableRegistry, "test op state backend");

    AtomicReference<Exception> exceptionReference = new AtomicReference<>(null);
    Thread restoreThread =
            new Thread(
                    () -> {
                        try {
                            restorerProcedure.createAndRestore(
                                    sortedRestoreOptions,
                                    StateObject.StateObjectSizeStatsCollector.create());
                        } catch (Exception e) {
                            exceptionReference.set(e);
                        }
                    });

    restoreThread.start();

    // wait until the restore thread is blocked inside the input stream
    waitForBlock.await();
    // closing the registry closes the registered stream and aborts the restore
    closeableRegistry.close();
    unblock.trigger();
    restoreThread.join();

    Exception exception = exceptionReference.get();
    assertThat(exception).isInstanceOf(FlinkException.class);
}
Test that the restore can be stopped via the provided closeable registry.
testCanBeCanceledViaRegistry
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/BackendRestorerProcedureTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/BackendRestorerProcedureTest.java
Apache-2.0
@TestTemplate
void testOnlySetsOnePhysicalProcessingTimeTimer() throws Exception {
    // The trigger target is irrelevant here; only the timer bookkeeping is checked.
    @SuppressWarnings("unchecked")
    Triggerable<Integer, String> triggerable = mock(Triggerable.class);

    TestKeyContext context = new TestKeyContext();
    TestProcessingTimeService timeService = new TestProcessingTimeService();
    PriorityQueueSetFactory queueFactory =
            new HeapPriorityQueueSetFactory(testKeyGroupRange, maxParallelism, 128);
    InternalTimerServiceImpl<Integer, String> service =
            createAndStartInternalTimerService(
                    triggerable, context, timeService, testKeyGroupRange, queueFactory);

    context.setCurrentKey(getKeyInKeyGroupRange(testKeyGroupRange, maxParallelism));

    // Register five logical timers across two namespaces.
    service.registerProcessingTimeTimer("ciao", 10);
    service.registerProcessingTimeTimer("ciao", 20);
    service.registerProcessingTimeTimer("ciao", 30);
    service.registerProcessingTimeTimer("hello", 10);
    service.registerProcessingTimeTimer("hello", 20);

    assertThat(service.numProcessingTimeTimers()).isEqualTo(5);
    assertThat(service.numProcessingTimeTimers("hello")).isEqualTo(2);
    assertThat(service.numProcessingTimeTimers("ciao")).isEqualTo(3);

    // Despite five logical timers, exactly one physical timer (earliest timestamp)
    // may be pending at the ProcessingTimeService.
    assertThat(timeService.getNumActiveTimers()).isOne();
    assertThat(timeService.getActiveTimerTimestamps()).contains(10L);

    // Advancing to t=10 fires both t=10 timers and re-arms a single timer for t=20.
    timeService.setCurrentTime(10);

    assertThat(service.numProcessingTimeTimers()).isEqualTo(3);
    assertThat(service.numProcessingTimeTimers("hello")).isOne();
    assertThat(service.numProcessingTimeTimers("ciao")).isEqualTo(2);
    assertThat(timeService.getNumActiveTimers()).isOne();
    assertThat(timeService.getActiveTimerTimestamps()).contains(20L);

    // Same invariant after firing the t=20 timers.
    timeService.setCurrentTime(20);

    assertThat(service.numProcessingTimeTimers()).isOne();
    assertThat(service.numProcessingTimeTimers("hello")).isZero();
    assertThat(service.numProcessingTimeTimers("ciao")).isOne();
    assertThat(timeService.getNumActiveTimers()).isOne();
    assertThat(timeService.getActiveTimerTimestamps()).contains(30L);

    // Once all timers have fired, no physical timer remains.
    timeService.setCurrentTime(30);

    assertThat(service.numProcessingTimeTimers()).isZero();
    assertThat(timeService.getNumActiveTimers()).isZero();

    // A fresh registration arms exactly one physical timer again.
    service.registerProcessingTimeTimer("ciao", 40);
    assertThat(timeService.getNumActiveTimers()).isOne();
}
Verify that we only ever have one processing-time task registered at the {@link ProcessingTimeService}.
testOnlySetsOnePhysicalProcessingTimeTimer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/InternalTimerServiceImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/InternalTimerServiceImplTest.java
Apache-2.0
@TestTemplate
void testRegisterEarlierProcessingTimerMovesPhysicalProcessingTimer() {
    // The trigger target is irrelevant; only physical-timer rescheduling is checked.
    @SuppressWarnings("unchecked")
    Triggerable<Integer, String> triggerable = mock(Triggerable.class);

    TestKeyContext context = new TestKeyContext();
    TestProcessingTimeService timeService = new TestProcessingTimeService();
    InternalTimerServiceImpl<Integer, String> service =
            createAndStartInternalTimerService(
                    triggerable, context, timeService, testKeyGroupRange, createQueueFactory());

    context.setCurrentKey(getKeyInKeyGroupRange(testKeyGroupRange, maxParallelism));

    // First registration arms one physical timer at t=20.
    service.registerProcessingTimeTimer("ciao", 20);

    assertThat(service.numProcessingTimeTimers()).isOne();
    assertThat(timeService.getNumActiveTimers()).isOne();
    assertThat(timeService.getActiveTimerTimestamps()).contains(20L);

    // Registering an earlier timer must move the single physical timer to t=10.
    service.registerProcessingTimeTimer("ciao", 10);

    assertThat(service.numProcessingTimeTimers()).isEqualTo(2L);
    assertThat(timeService.getNumActiveTimers()).isOne();
    assertThat(timeService.getActiveTimerTimestamps()).contains(10L);
}
Verify that registering a processing-time timer that is earlier than the existing timers removes the one physical timer and creates one for the earlier timestamp at the {@link ProcessingTimeService}.
testRegisterEarlierProcessingTimerMovesPhysicalProcessingTimer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/InternalTimerServiceImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/InternalTimerServiceImplTest.java
Apache-2.0
@Test
void fixConstants() {
    // These prefixes can affect backwards compatibility, so the literals are
    // pinned here to guard against accidental renames.
    String prefix = "_timer_state";

    assertThat(InternalTimeServiceManagerImpl.TIMER_STATE_PREFIX).isEqualTo(prefix);
    assertThat(InternalTimeServiceManagerImpl.PROCESSING_TIMER_PREFIX)
            .isEqualTo(prefix + "/processing_");
    assertThat(InternalTimeServiceManagerImpl.EVENT_TIMER_PREFIX)
            .isEqualTo(prefix + "/event_");
}
This test fixes some constants, because changing them can harm backwards compatibility.
fixConstants
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/InternalTimeServiceManagerImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/InternalTimeServiceManagerImplTest.java
Apache-2.0
@Test
void testCancelAndCleanup() throws Exception {
    // Cancelling an empty result must be a harmless no-op.
    OperatorSnapshotFutures operatorSnapshotResult = new OperatorSnapshotFutures();
    operatorSnapshotResult.cancel();

    // Build one already-completed (spied) future per snapshot slot, each wrapping
    // a mocked state handle so discardState() can be verified.
    KeyedStateHandle keyedManagedStateHandle = mock(KeyedStateHandle.class);
    SnapshotResult<KeyedStateHandle> keyedStateManagedResult =
            SnapshotResult.of(keyedManagedStateHandle);
    RunnableFuture<SnapshotResult<KeyedStateHandle>> keyedStateManagedFuture =
            spy(DoneFuture.of(keyedStateManagedResult));

    KeyedStateHandle keyedRawStateHandle = mock(KeyedStateHandle.class);
    SnapshotResult<KeyedStateHandle> keyedStateRawResult =
            SnapshotResult.of(keyedRawStateHandle);
    RunnableFuture<SnapshotResult<KeyedStateHandle>> keyedStateRawFuture =
            spy(DoneFuture.of(keyedStateRawResult));

    OperatorStateHandle operatorManagedStateHandle = mock(OperatorStreamStateHandle.class);
    SnapshotResult<OperatorStateHandle> operatorStateManagedResult =
            SnapshotResult.of(operatorManagedStateHandle);
    RunnableFuture<SnapshotResult<OperatorStateHandle>> operatorStateManagedFuture =
            spy(DoneFuture.of(operatorStateManagedResult));

    OperatorStateHandle operatorRawStateHandle = mock(OperatorStreamStateHandle.class);
    SnapshotResult<OperatorStateHandle> operatorStateRawResult =
            SnapshotResult.of(operatorRawStateHandle);
    RunnableFuture<SnapshotResult<OperatorStateHandle>> operatorStateRawFuture =
            spy(DoneFuture.of(operatorStateRawResult));

    InputChannelStateHandle inputChannelRawStateHandle = mock(InputChannelStateHandle.class);
    SnapshotResult<StateObjectCollection<InputStateHandle>> inputChannelStateRawResult =
            SnapshotResult.of(StateObjectCollection.singleton(inputChannelRawStateHandle));
    Future<SnapshotResult<StateObjectCollection<InputStateHandle>>>
            inputChannelStateRawFuture = spy(DoneFuture.of(inputChannelStateRawResult));

    ResultSubpartitionStateHandle resultSubpartitionRawStateHandle =
            mock(ResultSubpartitionStateHandle.class);
    SnapshotResult<StateObjectCollection<OutputStateHandle>>
            resultSubpartitionStateRawResult =
                    SnapshotResult.of(
                            StateObjectCollection.singleton(resultSubpartitionRawStateHandle));
    Future<SnapshotResult<StateObjectCollection<OutputStateHandle>>>
            resultSubpartitionStateRawFuture =
                    spy(DoneFuture.of(resultSubpartitionStateRawResult));

    // Assemble a fully populated result and cancel it.
    operatorSnapshotResult =
            new OperatorSnapshotFutures(
                    keyedStateManagedFuture,
                    keyedStateRawFuture,
                    operatorStateManagedFuture,
                    operatorStateRawFuture,
                    inputChannelStateRawFuture,
                    resultSubpartitionStateRawFuture);
    operatorSnapshotResult.cancel();

    // Every future must have been cancelled (with interruption) ...
    verify(keyedStateManagedFuture).cancel(true);
    verify(keyedStateRawFuture).cancel(true);
    verify(operatorStateManagedFuture).cancel(true);
    verify(operatorStateRawFuture).cancel(true);
    verify(inputChannelStateRawFuture).cancel(true);
    verify(resultSubpartitionStateRawFuture).cancel(true);

    // ... and, since the futures were already complete, every produced state
    // handle must have been discarded to avoid leaking checkpoint artifacts.
    verify(keyedManagedStateHandle).discardState();
    verify(keyedRawStateHandle).discardState();
    verify(operatorManagedStateHandle).discardState();
    verify(operatorRawStateHandle).discardState();
    verify(inputChannelRawStateHandle).discardState();
    verify(resultSubpartitionRawStateHandle).discardState();
}
Tests that all runnable futures in an OperatorSnapshotResult are properly cancelled and, if the StreamStateHandle result is retrievable, that the state handles are discarded.
testCancelAndCleanup
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/OperatorSnapshotFuturesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/OperatorSnapshotFuturesTest.java
Apache-2.0
@Test
void testProject() throws Exception {
    // Derive the 5-tuple input type from a sample object.
    TypeInformation<Tuple5<Integer, String, Integer, String, Integer>> inputType =
            TypeExtractor.getForObject(new Tuple5<>(2, "a", 3, "b", 4));

    // Project field 4 twice and field 3 once: output is (f4, f4, f3).
    int[] fieldIndexes = new int[] {4, 4, 3};
    TupleSerializer<Tuple3<Integer, Integer, String>> projectionSerializer =
            new TupleTypeInfo<Tuple3<Integer, Integer, String>>(
                            StreamProjection.extractFieldTypes(fieldIndexes, inputType))
                    .createSerializer(new SerializerConfigImpl());

    @SuppressWarnings("unchecked")
    StreamProject<
                    Tuple5<Integer, String, Integer, String, Integer>,
                    Tuple3<Integer, Integer, String>>
            projectOperator = new StreamProject<>(fieldIndexes, projectionSerializer);

    OneInputStreamOperatorTestHarness<
                    Tuple5<Integer, String, Integer, String, Integer>,
                    Tuple3<Integer, Integer, String>>
            harness = new OneInputStreamOperatorTestHarness<>(projectOperator);

    long baseTime = 0L;
    ConcurrentLinkedQueue<Object> expected = new ConcurrentLinkedQueue<>();

    harness.open();

    // Push timestamped elements and one watermark through the operator.
    harness.processElement(
            new StreamRecord<>(new Tuple5<>(2, "a", 3, "b", 4), baseTime + 1));
    harness.processElement(
            new StreamRecord<>(new Tuple5<>(2, "s", 3, "c", 2), baseTime + 2));
    harness.processElement(
            new StreamRecord<>(new Tuple5<>(2, "a", 3, "c", 2), baseTime + 3));
    harness.processWatermark(new Watermark(baseTime + 2));
    harness.processElement(
            new StreamRecord<>(new Tuple5<>(2, "a", 3, "a", 7), baseTime + 4));

    // Projected outputs keep the input timestamps; the watermark is forwarded in order.
    expected.add(new StreamRecord<>(new Tuple3<>(4, 4, "b"), baseTime + 1));
    expected.add(new StreamRecord<>(new Tuple3<>(2, 2, "c"), baseTime + 2));
    expected.add(new StreamRecord<>(new Tuple3<>(2, 2, "c"), baseTime + 3));
    expected.add(new Watermark(baseTime + 2));
    expected.add(new StreamRecord<>(new Tuple3<>(7, 7, "a"), baseTime + 4));

    TestHarnessUtil.assertOutputEquals(
            "Output was not correct.", expected, harness.getOutput());
}
Tests for {@link StreamProject}. These test that: <ul> <li>Timestamps of processed elements match the input timestamp <li>Watermarks are correctly forwarded </ul>
testProject
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/StreamProjectTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/StreamProjectTest.java
Apache-2.0