code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/** Tests that an AccessExecutionGraph is invalidated after its TTL expired. */
@Test
void testExecutionGraphEntryInvalidation() throws Exception {
    final Duration timeout = Duration.ofMillis(100L);
    final Duration timeToLive = Duration.ofMillis(1L);

    // gateway answers successfully twice; the call counter tells us how often it was asked
    final CountingRestfulGateway restfulGateway =
            createCountingRestfulGateway(
                    expectedJobId,
                    CompletableFuture.completedFuture(expectedExecutionGraphInfo),
                    CompletableFuture.completedFuture(expectedExecutionGraphInfo));

    try (ExecutionGraphCache executionGraphCache =
            new DefaultExecutionGraphCache(timeout, timeToLive)) {
        CompletableFuture<ExecutionGraphInfo> executionGraphInfoFuture =
                executionGraphCache.getExecutionGraphInfo(expectedJobId, restfulGateway);

        assertThatFuture(executionGraphInfoFuture)
                .eventuallySucceeds()
                .isEqualTo(expectedExecutionGraphInfo);

        // sleep for the TTL
        Thread.sleep(timeToLive.toMillis() * 5L);

        CompletableFuture<ExecutionGraphInfo> executionGraphInfoFuture2 =
                executionGraphCache.getExecutionGraphInfo(expectedJobId, restfulGateway);

        assertThatFuture(executionGraphInfoFuture2)
                .eventuallySucceeds()
                .isEqualTo(expectedExecutionGraphInfo);

        // the expired entry must have triggered a second gateway request
        assertThat(restfulGateway.getNumRequestJobCalls()).isEqualTo(2);
    }
}
Tests that an AccessExecutionGraph is invalidated after its TTL expired.
testExecutionGraphEntryInvalidation
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/DefaultExecutionGraphCacheTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/DefaultExecutionGraphCacheTest.java
Apache-2.0
/**
 * Tests that a failure in requesting an AccessExecutionGraph from the gateway, will not create a
 * cache entry --> another cache request will trigger a new gateway request.
 */
@Test
void testImmediateCacheInvalidationAfterFailure() throws Exception {
    final Duration timeout = Duration.ofMillis(100L);
    final Duration timeToLive = Duration.ofHours(1L);

    // let's first answer with a JobNotFoundException and then only with the correct result
    final CountingRestfulGateway restfulGateway =
            createCountingRestfulGateway(
                    expectedJobId,
                    FutureUtils.completedExceptionally(
                            new FlinkJobNotFoundException(expectedJobId)),
                    CompletableFuture.completedFuture(expectedExecutionGraphInfo));

    try (ExecutionGraphCache executionGraphCache =
            new DefaultExecutionGraphCache(timeout, timeToLive)) {
        CompletableFuture<ExecutionGraphInfo> executionGraphFuture =
                executionGraphCache.getExecutionGraphInfo(expectedJobId, restfulGateway);

        // the first request fails ...
        assertThatThrownBy(() -> executionGraphFuture.get())
                .hasCauseInstanceOf(FlinkException.class);

        CompletableFuture<ExecutionGraphInfo> executionGraphFuture2 =
                executionGraphCache.getExecutionGraphInfo(expectedJobId, restfulGateway);

        // ... but no entry was cached, so the second request reaches the gateway and succeeds
        assertThatFuture(executionGraphFuture2)
                .eventuallySucceeds()
                .isEqualTo(expectedExecutionGraphInfo);
    }
}
Tests that a failure in requesting an AccessExecutionGraph from the gateway, will not create a cache entry --> another cache request will trigger a new gateway request.
testImmediateCacheInvalidationAfterFailure
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/DefaultExecutionGraphCacheTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/DefaultExecutionGraphCacheTest.java
Apache-2.0
/**
 * Tests that cache entries are cleaned up when their TTL has expired upon calling {@link
 * DefaultExecutionGraphCache#cleanup()}.
 */
@Test
void testCacheEntryCleanup() throws Exception {
    final Duration timeout = Duration.ofMillis(100L);
    final Duration timeToLive = Duration.ofMillis(1L);
    final JobID expectedJobId2 = new JobID();
    final ExecutionGraphInfo expectedExecutionGraphInfo2 =
            new ExecutionGraphInfo(new ArchivedExecutionGraphBuilder().build());

    final AtomicInteger requestJobCalls = new AtomicInteger(0);
    // gateway serving two distinct jobs; any other job id is a test error
    final TestingRestfulGateway restfulGateway =
            new TestingRestfulGateway.Builder()
                    .setRequestExecutionGraphInfoFunction(
                            jobId -> {
                                requestJobCalls.incrementAndGet();
                                if (jobId.equals(expectedJobId)) {
                                    return CompletableFuture.completedFuture(
                                            expectedExecutionGraphInfo);
                                } else if (jobId.equals(expectedJobId2)) {
                                    return CompletableFuture.completedFuture(
                                            expectedExecutionGraphInfo2);
                                } else {
                                    throw new AssertionError("Invalid job id received.");
                                }
                            })
                    .build();

    try (ExecutionGraphCache executionGraphCache =
            new DefaultExecutionGraphCache(timeout, timeToLive)) {
        CompletableFuture<ExecutionGraphInfo> executionGraph1Future =
                executionGraphCache.getExecutionGraphInfo(expectedJobId, restfulGateway);
        CompletableFuture<ExecutionGraphInfo> executionGraph2Future =
                executionGraphCache.getExecutionGraphInfo(expectedJobId2, restfulGateway);

        assertThatFuture(executionGraph1Future)
                .eventuallySucceeds()
                .isEqualTo(expectedExecutionGraphInfo);
        assertThatFuture(executionGraph2Future)
                .eventuallySucceeds()
                .isEqualTo(expectedExecutionGraphInfo2);

        // each job was fetched from the gateway exactly once
        assertThat(requestJobCalls.get()).isEqualTo(2);

        // let both entries exceed their TTL before cleaning up
        Thread.sleep(timeToLive.toMillis());

        executionGraphCache.cleanup();

        assertThat(executionGraphCache.size()).isZero();
    }
}
Tests that cache entries are cleaned up when their TTL has expired upon calling {@link DefaultExecutionGraphCache#cleanup()}.
testCacheEntryCleanup
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/DefaultExecutionGraphCacheTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/DefaultExecutionGraphCacheTest.java
Apache-2.0
/** Tests that the {@link AbstractTaskManagerFileHandler} serves the requested file. */
@Test
void testFileServing() throws Exception {
    final Duration cacheEntryDuration = Duration.ofMillis(1000L);

    // exactly one pre-defined file upload is available for the handler to serve
    final Queue<CompletableFuture<TransientBlobKey>> requestFileUploads = new ArrayDeque<>(1);
    requestFileUploads.add(CompletableFuture.completedFuture(transientBlobKey1));

    final TestingTaskManagerFileHandler testingTaskManagerFileHandler =
            createTestTaskManagerFileHandler(
                    cacheEntryDuration, requestFileUploads, EXPECTED_TASK_MANAGER_ID);

    final File outputFile = TempDirUtils.newFile(temporaryFolder.toPath());
    final TestingChannelHandlerContext testingContext =
            new TestingChannelHandlerContext(outputFile);

    testingTaskManagerFileHandler.respondToRequest(
            testingContext, HTTP_REQUEST, handlerRequest, null);

    // the handler must have written the blob's content to the channel's output file
    assertThat(outputFile).isNotEmpty();
    assertThat(FileUtils.readFileUtf8(outputFile)).isEqualTo(fileContent1);
}
Tests that the {@link AbstractTaskManagerFileHandler} serves the requested file.
testFileServing
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/taskmanager/AbstractTaskManagerFileHandlerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/taskmanager/AbstractTaskManagerFileHandlerTest.java
Apache-2.0
/**
 * {@code taskName} and {@code location} should not be exposed if not set.
 *
 * @throws JsonProcessingException is not expected to be thrown
 */
@Test
void testNullFieldsNotSet() throws JsonProcessingException {
    final ObjectMapper mapper = RestMapperUtils.getStrictObjectMapper();
    final JobExceptionsInfoWithHistory.ExceptionInfo info =
            new JobExceptionsInfoWithHistory.ExceptionInfo("exception name", "stacktrace", 0L);

    final String serialized = mapper.writeValueAsString(info);

    // optional fields that were left null must be omitted from the JSON entirely
    assertThat(serialized).doesNotContain("taskName");
    assertThat(serialized).doesNotContain("location");
}
{@code taskName} and {@code location} should not be exposed if not set. @throws JsonProcessingException is not expected to be thrown
testNullFieldsNotSet
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rest/messages/JobExceptionsInfoWithHistoryTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rest/messages/JobExceptionsInfoWithHistoryTest.java
Apache-2.0
/**
 * Returns the type parameters of the test response type.
 *
 * <p>By default no type parameters are reported; subclasses testing generic response types are
 * expected to override this method.
 *
 * @return collection of type parameter classes, empty by default
 */
protected Collection<Class<?>> getTypeParameters() {
    return Collections.emptyList();
}
Returns the class of the test response. @return class of the test response type
getTypeParameters
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rest/messages/RestResponseMarshallingTestBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rest/messages/RestResponseMarshallingTestBase.java
Apache-2.0
/**
 * Serves the next pre-defined response future from the queue; once the queue is drained, every
 * further request is answered with a failed future.
 */
@Override
protected CompletableFuture<RES> handleRequest(
        @Nullable HandlerRequest<REQ> request, @Nullable G gateway)
        throws RestHandlerException {
    final CompletableFuture<RES> nextResponse = responseQueue.poll();
    if (nextResponse == null) {
        // queue exhausted: every pre-configured future has already been handed out
        return FutureUtils.completedExceptionally(
                new NoSuchElementException("No pre-defined Futures left."));
    }
    return nextResponse;
}
Utility {@link TestRestHandler} maintaining a queue of CompletableFuture's. @param <G> The RestfulGateway used by the handler. @param <REQ> The RequestBody type the handler is processing. @param <RES> The ResponseBody type the handler is returning. @param <M> The MessageParameters type utilized by this handler.
handleRequest
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rest/util/TestRestHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rest/util/TestRestHandler.java
Apache-2.0
/**
 * Creates a {@link Builder} for setting up a test rest server endpoint with the given
 * configuration.
 */
public static Builder builder(Configuration configuration) {
    return new Builder(configuration);
}
Utility {@link RestServerEndpoint} for setting up a rest server with a given set of handlers.
builder
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rest/util/TestRestServerEndpoint.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rest/util/TestRestServerEndpoint.java
Apache-2.0
/**
 * Escalates the given fatal error immediately by rethrowing it wrapped in a {@link
 * FlinkRuntimeException} instead of recording or handling it.
 */
@Override
public void onFatalError(Throwable exception) {
    throw new FlinkRuntimeException("Could not handle the fatal error, failing", exception);
}
Testing {@link FatalErrorHandler} implementation which directly failed.
onFatalError
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rpc/DirectlyFailingFatalErrorHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rpc/DirectlyFailingFatalErrorHandler.java
Apache-2.0
/** Tests that messages with the wrong fencing token are filtered out. */
@Test
void testFencing() throws Exception {
    final UUID fencingToken = UUID.randomUUID();
    final UUID wrongFencingToken = UUID.randomUUID();
    final String value = "barfoo";
    FencedTestingEndpoint fencedTestingEndpoint =
            new FencedTestingEndpoint(rpcService, value, fencingToken);

    try {
        fencedTestingEndpoint.start();

        // connect once with the correct and once with a wrong fencing token
        final FencedTestingGateway properFencedGateway =
                rpcService
                        .connect(
                                fencedTestingEndpoint.getAddress(),
                                fencingToken,
                                FencedTestingGateway.class)
                        .get(timeout.toMillis(), TimeUnit.MILLISECONDS);

        final FencedTestingGateway wronglyFencedGateway =
                rpcService
                        .connect(
                                fencedTestingEndpoint.getAddress(),
                                wrongFencingToken,
                                FencedTestingGateway.class)
                        .get(timeout.toMillis(), TimeUnit.MILLISECONDS);

        // the properly fenced gateway gets through
        assertThat(
                        properFencedGateway
                                .foobar(timeout)
                                .get(timeout.toMillis(), TimeUnit.MILLISECONDS))
                .isEqualTo(value);

        try {
            wronglyFencedGateway.foobar(timeout).get(timeout.toMillis(), TimeUnit.MILLISECONDS);
            fail("This should fail since we have the wrong fencing token.");
        } catch (ExecutionException e) {
            // the wrongly fenced call must be rejected with a FencingTokenException
            assertThat(ExceptionUtils.stripExecutionException(e))
                    .isInstanceOf(FencingTokenException.class);
        }
    } finally {
        RpcUtils.terminateRpcEndpoint(fencedTestingEndpoint);
        fencedTestingEndpoint.validateResourceClosed();
    }
}
Tests that messages with the wrong fencing token are filtered out.
testFencing
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rpc/FencedRpcEndpointTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rpc/FencedRpcEndpointTest.java
Apache-2.0
/**
 * Tests that we can obtain the self gateway from a RpcEndpoint and can interact with it via the
 * self gateway.
 */
@Test
void testSelfGateway() throws Exception {
    final int expectedValue = 1337;
    final BaseEndpoint endpoint = new BaseEndpoint(rpcService, expectedValue);

    try {
        endpoint.start();

        final BaseGateway selfGateway = endpoint.getSelfGateway(BaseGateway.class);
        final CompletableFuture<Integer> foobarFuture = selfGateway.foobar();

        assertThat(foobarFuture.get()).isEqualTo(expectedValue);
    } finally {
        // always tear the endpoint down, even if the assertion failed
        RpcUtils.terminateRpcEndpoint(endpoint);
        endpoint.validateResourceClosed();
    }
}
Tests that we can obtain the self gateway from a RpcEndpoint and can interact with it via the self gateway.
testSelfGateway
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rpc/RpcEndpointTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rpc/RpcEndpointTest.java
Apache-2.0
/**
 * Tests that we cannot accidentally obtain a wrong self gateway type which is not implemented
 * by the RpcEndpoint.
 */
@Test
void testWrongSelfGateway() throws ExecutionException, InterruptedException {
    int expectedValue = 1337;
    BaseEndpoint baseEndpoint = new BaseEndpoint(rpcService, expectedValue);

    try {
        baseEndpoint.start();

        // DifferentGateway is not implemented by BaseEndpoint, so this must fail
        assertThatThrownBy(() -> baseEndpoint.getSelfGateway(DifferentGateway.class))
                .withFailMessage(
                        "Expected to fail with a RuntimeException since we requested the wrong gateway type.")
                .isInstanceOf(RuntimeException.class);
    } finally {
        RpcUtils.terminateRpcEndpoint(baseEndpoint);
        baseEndpoint.validateResourceClosed();
    }
}
Tests that we cannot accidentally obtain a wrong self gateway type which is not implemented by the RpcEndpoint.
testWrongSelfGateway
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rpc/RpcEndpointTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rpc/RpcEndpointTest.java
Apache-2.0
/**
 * Tests that we can extend existing RpcEndpoints and can communicate with them via the self
 * gateways.
 */
@Test
void testEndpointInheritance() throws Exception {
    int foobar = 1;
    int barfoo = 2;
    String foo = "foobar";

    ExtendedEndpoint endpoint = new ExtendedEndpoint(rpcService, foobar, barfoo, foo);

    try {
        endpoint.start();

        // the extended endpoint implements all three gateway interfaces
        BaseGateway baseGateway = endpoint.getSelfGateway(BaseGateway.class);
        ExtendedGateway extendedGateway = endpoint.getSelfGateway(ExtendedGateway.class);
        DifferentGateway differentGateway = endpoint.getSelfGateway(DifferentGateway.class);

        assertThat(baseGateway.foobar().get()).isEqualTo(foobar);
        assertThat(extendedGateway.foobar().get()).isEqualTo(foobar);
        assertThat(extendedGateway.barfoo().get()).isEqualTo(barfoo);
        assertThat(differentGateway.foo().get()).isEqualTo(foo);
    } finally {
        RpcUtils.terminateRpcEndpoint(endpoint);
        endpoint.validateResourceClosed();
    }
}
Tests that we can extend existing RpcEndpoints and can communicate with them via the self gateways.
testEndpointInheritance
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rpc/RpcEndpointTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rpc/RpcEndpointTest.java
Apache-2.0
/** Tests that the RPC is running after it has been started. */
@Test
void testRunningState() throws InterruptedException, ExecutionException, TimeoutException {
    RunningStateTestingEndpoint endpoint =
            new RunningStateTestingEndpoint(
                    rpcService, CompletableFuture.completedFuture(null));
    RunningStateTestingEndpointGateway gateway =
            endpoint.getSelfGateway(RunningStateTestingEndpointGateway.class);

    try {
        endpoint.start();
        // the running flag is queried through the endpoint's self gateway
        assertThat(gateway.queryIsRunningFlag().get()).isTrue();
    } finally {
        RpcUtils.terminateRpcEndpoint(endpoint);
        endpoint.validateResourceClosed();
    }
}
Tests that the RPC is running after it has been started.
testRunningState
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rpc/RpcEndpointTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rpc/RpcEndpointTest.java
Apache-2.0
/**
 * Tests executing the callable in the main thread of the underlying RPC service, returning a
 * future for the result of the callable. If the callable is not completed within the given
 * timeout, then the future will be failed with a TimeoutException. This schedule method is
 * called directly from RpcEndpoint, MainThreadExecutor do not support this method.
 */
@Test
void testCallAsync() throws InterruptedException, ExecutionException, TimeoutException {
    final RpcEndpoint endpoint = new BaseEndpoint(rpcService);
    final Integer expectedInteger = 12345;

    try {
        endpoint.start();
        final CompletableFuture<Integer> integerFuture =
                endpoint.callAsync(
                        () -> {
                            // the callable must run in the endpoint's main thread
                            endpoint.validateRunsInMainThread();
                            return expectedInteger;
                        },
                        Duration.ofSeconds(10L));
        assertThat(integerFuture.get()).isEqualTo(expectedInteger);
    } finally {
        RpcUtils.terminateRpcEndpoint(endpoint);
        endpoint.validateResourceClosed();
    }
}
Tests executing the callable in the main thread of the underlying RPC service, returning a future for the result of the callable. If the callable is not completed within the given timeout, then the future will be failed with a TimeoutException. This schedule method is called directly from RpcEndpoint, MainThreadExecutor do not support this method.
testCallAsync
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/rpc/RpcEndpointTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/rpc/RpcEndpointTest.java
Apache-2.0
/**
 * Returns the collected {@link ScheduledTask ScheduledTasks}. This collection can be
 * re-organized in-place.
 *
 * @return The list of scheduled tasks.
 */
public List<ScheduledTask<?>> getCollectedScheduledTasks() {
    // intentionally exposes the internal list so tests can reorder or drop tasks
    return scheduledTasks;
}
Returns the collected {@link ScheduledTask ScheduledTasks}. This collection can be re-organized in-place. @return The list of scheduled tasks.
getCollectedScheduledTasks
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/DefaultSchedulerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/DefaultSchedulerTest.java
Apache-2.0
/** Actually schedules the collected {@link ScheduledTask ScheduledTasks}. */
void scheduleCollectedScheduledTasks() {
    for (ScheduledTask<?> scheduledTask : scheduledTasks) {
        // hand each collected task over to the real executor with its remaining delay
        super.schedule(
                scheduledTask.getCallable(),
                scheduledTask.getDelay(TimeUnit.MILLISECONDS),
                TimeUnit.MILLISECONDS);
    }
    scheduledTasks.clear();
}
Actually schedules the collected {@link ScheduledTask ScheduledTasks}.
scheduleCollectedScheduledTasks
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/DefaultSchedulerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/DefaultSchedulerTest.java
Apache-2.0
/**
 * Schedules all already collected tasks before actually triggering the actual scheduling of the
 * next task in the queue.
 */
@Override
public void triggerNonPeriodicScheduledTask() {
    scheduleCollectedScheduledTasks();
    super.triggerNonPeriodicScheduledTask();
}
Schedules all already collected tasks before actually triggering the actual scheduling of the next task in the queue.
triggerNonPeriodicScheduledTask
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/DefaultSchedulerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/DefaultSchedulerTest.java
Apache-2.0
/**
 * Schedules all already collected tasks before actually triggering the actual scheduling of all
 * tasks in the queue.
 */
@Override
public void triggerNonPeriodicScheduledTasks() {
    scheduleCollectedScheduledTasks();
    super.triggerNonPeriodicScheduledTasks();
}
Schedules all already collected tasks before actually triggering the actual scheduling of all tasks in the queue.
triggerNonPeriodicScheduledTasks
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/DefaultSchedulerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/DefaultSchedulerTest.java
Apache-2.0
/** Tests the proper initialization of {@link ExecutionGraphInfo} from a failed execution graph. */
@Test
void testExecutionGraphHistoryBeingDerivedFromFailedExecutionGraph() {
    final ArchivedExecutionGraph executionGraph =
            ArchivedExecutionGraph.createSparseArchivedExecutionGraph(
                    new JobID(),
                    "test job name",
                    JobStatus.FAILED,
                    JobType.STREAMING,
                    new RuntimeException("Expected RuntimeException"),
                    null,
                    System.currentTimeMillis());

    final ExecutionGraphInfo executionGraphInfo = new ExecutionGraphInfo(executionGraph);

    assertThat(executionGraphInfo.getArchivedExecutionGraph().getJobType())
            .isEqualTo(JobType.STREAMING);

    final ErrorInfo failureInfo =
            executionGraphInfo.getArchivedExecutionGraph().getFailureInfo();

    // the derived exception history must consist of exactly the graph's global failure
    final RootExceptionHistoryEntry actualEntry =
            Iterables.getOnlyElement(executionGraphInfo.getExceptionHistory());

    assertThat(failureInfo).isNotNull();
    assertThat(failureInfo.getException()).isEqualTo(actualEntry.getException());
    assertThat(failureInfo.getTimestamp()).isEqualTo(actualEntry.getTimestamp());
    assertThat(actualEntry.isGlobal()).isTrue();
    // a global failure carries no task-specific information
    assertThat(actualEntry.getFailingTaskName()).isNull();
    assertThat(actualEntry.getTaskManagerLocation()).isNull();
}
{@code ExecutionGraphInfoTest} tests the proper initialization of {@link ExecutionGraphInfo}.
testExecutionGraphHistoryBeingDerivedFromFailedExecutionGraph
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/ExecutionGraphInfoTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/ExecutionGraphInfoTest.java
Apache-2.0
/** Tests that it will get empty task manager location if vertex is not scheduled. */
@Test
void testGetEmptyTaskManagerLocationIfVertexNotScheduled() throws Exception {
    final JobVertex jobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);

    final ExecutionGraph eg =
            ExecutionGraphTestUtils.createExecutionGraph(
                    EXECUTOR_EXTENSION.getExecutor(), jobVertex);
    final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever =
            new ExecutionGraphToInputsLocationsRetrieverAdapter(eg);

    ExecutionVertexID executionVertexId = new ExecutionVertexID(jobVertex.getID(), 0);
    Optional<CompletableFuture<TaskManagerLocation>> taskManagerLocation =
            inputsLocationsRetriever.getTaskManagerLocation(executionVertexId);

    // nothing was deployed, so no location can be known yet
    assertThat(taskManagerLocation).isNotPresent();
}
Tests that it will get empty task manager location if vertex is not scheduled.
testGetEmptyTaskManagerLocationIfVertexNotScheduled
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/ExecutionGraphToInputsLocationsRetrieverAdapterTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/ExecutionGraphToInputsLocationsRetrieverAdapterTest.java
Apache-2.0
/** Tests that it can get the task manager location in an Execution. */
@Test
void testGetTaskManagerLocationWhenScheduled() throws Exception {
    final JobVertex jobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);

    final TestingLogicalSlot testingLogicalSlot =
            new TestingLogicalSlotBuilder().createTestingLogicalSlot();
    final ExecutionGraph eg =
            ExecutionGraphTestUtils.createExecutionGraph(
                    EXECUTOR_EXTENSION.getExecutor(), jobVertex);
    final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever =
            new ExecutionGraphToInputsLocationsRetrieverAdapter(eg);

    final ExecutionVertex onlyExecutionVertex = eg.getAllExecutionVertices().iterator().next();
    // deploy the single vertex so that its task manager location becomes known
    onlyExecutionVertex.getCurrentExecutionAttempt().transitionState(ExecutionState.SCHEDULED);
    onlyExecutionVertex.deployToSlot(testingLogicalSlot);

    ExecutionVertexID executionVertexId = new ExecutionVertexID(jobVertex.getID(), 0);
    Optional<CompletableFuture<TaskManagerLocation>> taskManagerLocationOptional =
            inputsLocationsRetriever.getTaskManagerLocation(executionVertexId);

    assertThat(taskManagerLocationOptional).isPresent();

    final CompletableFuture<TaskManagerLocation> taskManagerLocationFuture =
            taskManagerLocationOptional.get();
    assertThat(taskManagerLocationFuture.get())
            .isEqualTo(testingLogicalSlot.getTaskManagerLocation());
}
Tests that it can get the task manager location in an Execution.
testGetTaskManagerLocationWhenScheduled
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/ExecutionGraphToInputsLocationsRetrieverAdapterTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/ExecutionGraphToInputsLocationsRetrieverAdapterTest.java
Apache-2.0
/**
 * Tests that it will throw exception when getting the task manager location of a non existing
 * execution.
 */
@Test
void testGetNonExistingExecutionVertexWillThrowException() throws Exception {
    final JobVertex jobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);

    final ExecutionGraph eg =
            ExecutionGraphTestUtils.createExecutionGraph(
                    EXECUTOR_EXTENSION.getExecutor(), jobVertex);
    final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever =
            new ExecutionGraphToInputsLocationsRetrieverAdapter(eg);

    // a fresh JobVertexID cannot exist in the graph built above
    ExecutionVertexID invalidExecutionVertexId = new ExecutionVertexID(new JobVertexID(), 0);
    assertThatThrownBy(
                    () ->
                            inputsLocationsRetriever.getTaskManagerLocation(
                                    invalidExecutionVertexId),
                    "Should throw exception if execution vertex doesn't exist!")
            .isInstanceOf(IllegalStateException.class);
}
Tests that it will throw exception when getting the task manager location of a non existing execution.
testGetNonExistingExecutionVertexWillThrowException
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/ExecutionGraphToInputsLocationsRetrieverAdapterTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/ExecutionGraphToInputsLocationsRetrieverAdapterTest.java
Apache-2.0
/** Tests that the configured maximum number of retained checkpoints is propagated to the store. */
@Test
void testSettingMaxNumberOfCheckpointsToRetain() throws Exception {
    final int maxNumberOfCheckpointsToRetain = 10;
    final Configuration jobManagerConfig = new Configuration();
    jobManagerConfig.set(
            CheckpointingOptions.MAX_RETAINED_CHECKPOINTS, maxNumberOfCheckpointsToRetain);

    final CompletedCheckpointStore completedCheckpointStore =
            SchedulerUtils.createCompletedCheckpointStore(
                    jobManagerConfig,
                    new StandaloneCheckpointRecoveryFactory(),
                    Executors.directExecutor(),
                    log,
                    new JobID(),
                    RecoveryClaimMode.CLAIM);

    assertThat(completedCheckpointStore.getMaxNumberOfRetainedCheckpoints())
            .isEqualTo(maxNumberOfCheckpointsToRetain);
}
Tests for the {@link SchedulerUtils} utilities.
testSettingMaxNumberOfCheckpointsToRetain
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/SchedulerUtilsTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/SchedulerUtilsTest.java
Apache-2.0
/**
 * Tests if the consumed inputs of the pipelined regions are computed correctly using the Job
 * graph below.
 *
 * <pre>
 *          c
 *        /   X
 * a -+- b     e
 *        \   /
 *          d
 * </pre>
 *
 * <p>Pipelined regions: {a}, {b, c, d, e}
 */
@Test
void returnsIncidentBlockingPartitions() throws Exception {
    final JobVertex a = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex b = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex c = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex d = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex e = ExecutionGraphTestUtils.createNoOpVertex(1);

    // a -> b is BLOCKING, splitting {a} from the pipelined region {b, c, d, e}
    connectNewDataSetAsInput(b, a, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    connectNewDataSetAsInput(
            c, b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    connectNewDataSetAsInput(
            d, b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    connectNewDataSetAsInput(e, c, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    connectNewDataSetAsInput(
            e, d, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    final DefaultExecutionGraph simpleTestGraph =
            ExecutionGraphTestUtils.createExecutionGraph(
                    EXECUTOR_EXTENSION.getExecutor(), a, b, c, d, e);
    final DefaultExecutionTopology topology =
            DefaultExecutionTopology.fromExecutionGraph(simpleTestGraph);

    final DefaultSchedulingPipelinedRegion firstPipelinedRegion =
            topology.getPipelinedRegionOfVertex(new ExecutionVertexID(a.getID(), 0));
    final DefaultSchedulingPipelinedRegion secondPipelinedRegion =
            topology.getPipelinedRegionOfVertex(new ExecutionVertexID(e.getID(), 0));

    final DefaultExecutionVertex vertexB0 =
            topology.getVertex(new ExecutionVertexID(b.getID(), 0));
    final IntermediateResultPartitionID b0ConsumedResultPartition =
            Iterables.getOnlyElement(vertexB0.getConsumedResults()).getId();

    // collect all blocking partitions the second region consumes from outside itself
    final Set<IntermediateResultPartitionID> secondPipelinedRegionConsumedResults =
            new HashSet<>();
    for (ConsumedPartitionGroup consumedPartitionGroup :
            secondPipelinedRegion.getAllNonPipelinedConsumedPartitionGroups()) {
        for (IntermediateResultPartitionID partitionId : consumedPartitionGroup) {
            if (!secondPipelinedRegion.contains(
                    topology.getResultPartition(partitionId).getProducer().getId())) {
                secondPipelinedRegionConsumedResults.add(partitionId);
            }
        }
    }

    // the first region has no blocking inputs ...
    assertThat(
                    firstPipelinedRegion
                            .getAllNonPipelinedConsumedPartitionGroups()
                            .iterator()
                            .hasNext())
            .isFalse();
    // ... while the second one consumes b's blocking result partition
    assertThat(secondPipelinedRegionConsumedResults).contains(b0ConsumedResultPartition);
}
Tests if the consumed inputs of the pipelined regions are computed correctly using the Job graph below. <pre> c / X a -+- b e \ / d </pre> <p>Pipelined regions: {a}, {b, c, d, e}
returnsIncidentBlockingPartitions
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adapter/DefaultSchedulingPipelinedRegionTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adapter/DefaultSchedulingPipelinedRegionTest.java
Apache-2.0
/**
 * Returns a JobGraph that requires slot sharing to work in order to be able to run with a
 * single slot.
 */
private static JobGraph createJobGraphWithSlotSharingGroup() {
    final SlotSharingGroup sharedGroup = new SlotSharingGroup();

    final JobVertex source = new JobVertex("Source");
    final JobVertex sink = new JobVertex("sink");
    // both vertices get identical configuration and share the same slot sharing group
    for (JobVertex vertex : new JobVertex[] {source, sink}) {
        vertex.setInvokableClass(NoOpInvokable.class);
        vertex.setParallelism(PARALLELISM);
        vertex.setSlotSharingGroup(sharedGroup);
    }

    connectNewDataSetAsInput(
            sink, source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    return JobGraphTestUtils.streamingJobGraph(source, sink);
}
Returns a JobGraph that requires slot sharing to work in order to be able to run with a single slot.
createJobGraphWithSlotSharingGroup
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/AdaptiveSchedulerSlotSharingITCase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/AdaptiveSchedulerSlotSharingITCase.java
Apache-2.0
/**
 * Creates the {@code DefaultStateTransitionManager} test instance and advances into a period in
 * time where the instance is in cooldown phase.
 */
public DefaultStateTransitionManager createTestInstanceInCooldownPhase() {
    return createTestInstance(ignored -> this.transitionIntoCooldownTimeframe());
}
Creates the {@code DefaultStateTransitionManager} test instance and advances into a period in time where the instance is in cooldown phase.
createTestInstanceInCooldownPhase
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/DefaultStateTransitionManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/DefaultStateTransitionManagerTest.java
Apache-2.0
/**
 * Creates the {@code DefaultStateTransitionManager} test instance and advances into a period in
 * time where the instance has left the cooldown phase (i.e. is in stabilizing phase).
 */
public DefaultStateTransitionManager createTestInstanceThatPassedCooldownPhase() {
    return createTestInstance(ignored -> this.transitionOutOfCooldownPhase());
}
Creates the {@code DefaultStateTransitionManager} test instance and advances into a period in time where the instance is in stabilizing phase.
createTestInstanceThatPassedCooldownPhase
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/DefaultStateTransitionManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/DefaultStateTransitionManagerTest.java
Apache-2.0
/**
 * Creates the {@code DefaultStateTransitionManager} test instance and advances into a period in
 * time where the instance is in stabilized phase.
 */
public DefaultStateTransitionManager createTestInstanceInStabilizedPhase() {
    return createTestInstance(
            manager -> {
                // a change followed by the stabilization timeout moves into stabilized phase
                manager.onChange();
                passResourceStabilizationTimeout();
            });
}
Creates the {@code DefaultStateTransitionManager} test instance and advances into a period in time where the instance is in stabilized phase.
createTestInstanceInStabilizedPhase
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/DefaultStateTransitionManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/DefaultStateTransitionManagerTest.java
Apache-2.0
/**
 * Creates the {@code DefaultStateTransitionManager} test instance in terminal transitioning
 * phase.
 */
public DefaultStateTransitionManager createTestInstanceInTransitioningPhase() {
    return createTestInstance(
            manager -> {
                manager.onChange();
                passResourceStabilizationTimeout();
                // the trigger starts the transition; reset the recorded transition afterwards
                manager.onTrigger();
                clearStateTransition();
            });
}
Creates the {@code DefaultStateTransitionManager} test instance in terminal transitioning phase.
createTestInstanceInTransitioningPhase
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/DefaultStateTransitionManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/DefaultStateTransitionManagerTest.java
Apache-2.0
/**
 * Expect an input, and validate it with the given asserter (if the state transition hasn't been
 * validated, it will fail in the close method).
 *
 * @param asserter Consumer which validates the input to the state transition.
 */
public void expectInput(Consumer<T> asserter) {
    consumer = Preconditions.checkNotNull(asserter);
    // arm the trap: closing without having seen the expected transition fails the test
    trap =
            () -> {
                throw new AssertionError("No transition to " + stateName);
            };
}
Expect an input, and validate it with the given asserter (if the state transition hasn't been validated, it will fail in the close method). @param asserter Consumer which validates the input to the state transition.
expectInput
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/StateValidator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/StateValidator.java
Apache-2.0
public void validateInput(T input) { trap = () -> {}; consumer.accept(input); expectNoStateTransition(); }
Call this method on the state transition, to register the transition, and validate the passed arguments. @param input Argument(s) of the state transition. @throws NullPointerException If no consumer has been set (an unexpected state transition occurred)
validateInput
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/StateValidator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/StateValidator.java
Apache-2.0
public final void expectNoStateTransition() { consumer = (T) -> fail( "No consumer has been set for " + stateName + ". Unexpected state transition (duplicate?)"); }
If the validator has been activated, check if input has been provided (e.g. a state transition happened).
expectNoStateTransition
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/StateValidator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/StateValidator.java
Apache-2.0
private StreamGraph createStreamGraph() { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); env.disableOperatorChaining(); env.setRuntimeMode(RuntimeExecutionMode.BATCH); env.fromSequence(0, 1) .name("Source") .forward() .map(i -> i) .name("Map") .rescale() .print() .name("Sink") .disableChaining(); env.setParallelism(1); return env.getStreamGraph(); }
Create a stream graph with the following topology. <pre> Source -- forward --> Map -- rescale --> Sink </pre>
createStreamGraph
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptivebatch/DefaultAdaptiveExecutionHandlerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptivebatch/DefaultAdaptiveExecutionHandlerTest.java
Apache-2.0
public void setup(JobConfiguration jobConfiguration) throws Exception { super.setup(); jobVertices = createDefaultJobVertices(jobConfiguration); executionGraph = createAndInitExecutionGraph( jobVertices, jobConfiguration, scheduledExecutorService); final TestingLogicalSlotBuilder slotBuilder = new TestingLogicalSlotBuilder(); for (ExecutionJobVertex ejv : executionGraph.getVerticesTopologically()) { for (ExecutionVertex ev : ejv.getTaskVertices()) { final LogicalSlot slot = slotBuilder.createTestingLogicalSlot(); final Execution execution = ev.getCurrentExecutionAttempt(); execution.registerProducedPartitions(slot.getTaskManagerLocation()).get(); if (!execution.tryAssignResource(slot)) { throw new RuntimeException("Error when assigning slot to execution."); } } } }
The base class of benchmarks related to deploying tasks.
setup
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/deploying/DeployingTasksBenchmarkBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/deploying/DeployingTasksBenchmarkBase.java
Apache-2.0
@ParameterizedTest @EnumSource( value = JobConfiguration.class, names = {"STREAMING_TEST", "BATCH_TEST", "STREAMING_EVENLY_TEST", "BATCH_EVENLY_TEST"}) void deployAndRestarts(JobConfiguration jobConfiguration) throws Exception { HandleGlobalFailureAndRestartAllTasksBenchmark benchmark = new HandleGlobalFailureAndRestartAllTasksBenchmark(); benchmark.setup(jobConfiguration); benchmark.handleGlobalFailureAndRestartAllTasks(); benchmark.teardown(); }
The benchmark of restarting tasks in a STREAMING/BATCH job. The related method is {@link DefaultScheduler#handleGlobalFailure}.
deployAndRestarts
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/e2e/HandleGlobalFailureAndRestartAllTasksBenchmarkTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/e2e/HandleGlobalFailureAndRestartAllTasksBenchmarkTest.java
Apache-2.0
public void setup(JobConfiguration jobConfiguration) throws Exception { super.setup(); jobVertices = createDefaultJobVertices(jobConfiguration); source = jobVertices.get(0); executionGraph = createAndInitExecutionGraph( jobVertices, jobConfiguration, scheduledExecutorService); schedulingTopology = executionGraph.getSchedulingTopology(); strategy = new RestartPipelinedRegionFailoverStrategy(schedulingTopology); }
The base class of benchmarks related to failover.
setup
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/failover/FailoverBenchmarkBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/failover/FailoverBenchmarkBase.java
Apache-2.0
public void setup(JobConfiguration jobConfiguration) throws Exception { super.setup(); schedulerOperations = new TestingSchedulerOperations(); jobVertices = createDefaultJobVertices(jobConfiguration); scheduler = createAndInitScheduler(jobVertices, jobConfiguration, scheduledExecutorService); executionGraph = scheduler.getExecutionGraph(); schedulingTopology = executionGraph.getSchedulingTopology(); }
The base class of benchmarks related to scheduling tasks.
setup
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/scheduling/SchedulingBenchmarkBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/scheduling/SchedulingBenchmarkBase.java
Apache-2.0
@Test void testScheduleTopologyWithHybridAndBlockingEdge() throws Exception { final JobVertex v1 = createJobVertex("v1", 1); final JobVertex v2 = createJobVertex("v2", 1); final JobVertex v3 = createJobVertex("v3", 1); final JobVertex v4 = createJobVertex("v4", 1); connectNewDataSetAsInput( v2, v1, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_FULL); connectNewDataSetAsInput( v3, v2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING); connectNewDataSetAsInput( v4, v3, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_SELECTIVE); final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3, v4)); final JobGraph jobGraph = JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build(); final ExecutionGraph executionGraph = TestingDefaultExecutionGraphBuilder.newBuilder() .setJobGraph(jobGraph) .build(EXECUTOR_RESOURCE.getExecutor()); final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology(); PipelinedRegionSchedulingStrategy schedulingStrategy = startScheduling(schedulingTopology); // v1 & v2 will be scheduled as v1 is a source and v1 -> v2 is a hybrid downstream. assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(2); final ExecutionVertex v11 = executionGraph.getJobVertex(v1.getID()).getTaskVertices()[0]; final ExecutionVertex v21 = executionGraph.getJobVertex(v2.getID()).getTaskVertices()[0]; assertThat(testingSchedulerOperation.getScheduledVertices().get(0)) .containsExactly(v11.getID()); assertThat(testingSchedulerOperation.getScheduledVertices().get(1)) .containsExactly(v21.getID()); // finish v2 to trigger new round of scheduling. 
v21.finishPartitionsIfNeeded(); schedulingStrategy.onExecutionStateChange(v21.getID(), ExecutionState.FINISHED); assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(4); final ExecutionVertex v31 = executionGraph.getJobVertex(v3.getID()).getTaskVertices()[0]; final ExecutionVertex v41 = executionGraph.getJobVertex(v4.getID()).getTaskVertices()[0]; assertThat(testingSchedulerOperation.getScheduledVertices().get(2)) .containsExactly(v31.getID()); assertThat(testingSchedulerOperation.getScheduledVertices().get(3)) .containsExactly(v41.getID()); }
Source and its downstream with a hybrid edge will be scheduled. When the blocking result partition is finished, its downstream will be scheduled. <pre> V1 ----> V2 ----> V3 ----> V4 | | | Hybrid Blocking Hybrid </pre>
testScheduleTopologyWithHybridAndBlockingEdge
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/strategy/PipelinedRegionSchedulingStrategyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/strategy/PipelinedRegionSchedulingStrategyTest.java
Apache-2.0
@Test void testSchedulingRegionWithInnerNonPipelinedEdge() throws Exception { final JobVertex v1 = createJobVertex("v1", 1); final JobVertex v2 = createJobVertex("v2", 1); final JobVertex v3 = createJobVertex("v3", 1); final JobVertex v4 = createJobVertex("v4", 1); final JobVertex v5 = createJobVertex("v5", 1); connectNewDataSetAsInput( v2, v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED); connectNewDataSetAsInput( v3, v2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED); connectNewDataSetAsInput( v4, v2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED); connectNewDataSetAsInput( v5, v2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED); connectNewDataSetAsInput( v3, v1, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_FULL); connectNewDataSetAsInput( v4, v1, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_SELECTIVE); connectNewDataSetAsInput( v4, v1, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING); final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3, v4, v5)); final JobGraph jobGraph = JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build(); final ExecutionGraph executionGraph = TestingDefaultExecutionGraphBuilder.newBuilder() .setJobGraph(jobGraph) .build(EXECUTOR_RESOURCE.getExecutor()); final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology(); startScheduling(schedulingTopology); assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(1); List<ExecutionVertexID> executionVertexIds = testingSchedulerOperation.getScheduledVertices().get(0); assertThat(executionVertexIds).hasSize(5); }
An inner non-pipelined edge will not prevent its region from being scheduled.
testSchedulingRegionWithInnerNonPipelinedEdge
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/strategy/PipelinedRegionSchedulingStrategyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/strategy/PipelinedRegionSchedulingStrategyTest.java
Apache-2.0
@Override protected List<TestingSchedulingResultPartition> connect() { final List<TestingSchedulingResultPartition> resultPartitions = new ArrayList<>(); final IntermediateDataSetID intermediateDataSetId = new IntermediateDataSetID(); for (int idx = 0; idx < producers.size(); idx++) { resultPartitions.addAll( connectConsumersToProducers( Collections.singletonList(consumers.get(idx)), Collections.singletonList(producers.get(idx)), intermediateDataSetId, resultPartitionType, resultPartitionState)); } return resultPartitions; }
Builder for {@link TestingSchedulingResultPartition} of {@link DistributionPattern#POINTWISE}.
connect
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/strategy/TestingSchedulingTopology.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/strategy/TestingSchedulingTopology.java
Apache-2.0
@Override protected List<TestingSchedulingResultPartition> connect() { return connectConsumersToProducers( consumers, producers, new IntermediateDataSetID(), resultPartitionType, resultPartitionState); }
Builder for {@link TestingSchedulingResultPartition} of {@link DistributionPattern#ALL_TO_ALL}.
connect
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/strategy/TestingSchedulingTopology.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/strategy/TestingSchedulingTopology.java
Apache-2.0
@Test public void testSecurityContextShouldFallbackToSecond() throws Exception { Configuration testFlinkConf = new Configuration(); testFlinkConf.set( SecurityOptions.SECURITY_CONTEXT_FACTORY_CLASSES, Lists.newArrayList( IncompatibleTestSecurityContextFactory.class.getCanonicalName(), TestSecurityContextFactory.class.getCanonicalName())); SecurityConfiguration testSecurityConf = new SecurityConfiguration(testFlinkConf); SecurityUtils.install(testSecurityConf); assertEquals( TestSecurityContextFactory.TestSecurityContext.class, SecurityUtils.getInstalledContext().getClass()); SecurityUtils.uninstall(); assertEquals(NoOpSecurityContext.class, SecurityUtils.getInstalledContext().getClass()); }
Verify that we fall back to a second configuration if the first one is incompatible.
testSecurityContextShouldFallbackToSecond
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/security/SecurityUtilsTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/security/SecurityUtilsTest.java
Apache-2.0
private void testJaasModuleFilePath(String workingDir) throws IOException { Configuration configuration = new Configuration(); // set the string for CoreOptions.TMP_DIRS to mock the working directory. configuration.set(CoreOptions.TMP_DIRS, workingDir); SecurityConfiguration sc = new SecurityConfiguration(configuration); JaasModule module = new JaasModule(sc); module.install(); assertJaasFileLocateInRightDirectory(workingDir); }
Test that the jaas config file is created in the working directory.
testJaasModuleFilePath
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/security/modules/JaasModuleTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/security/modules/JaasModuleTest.java
Apache-2.0
@Override public SecurityModule createModule(SecurityConfiguration securityConfig) { return new TestSecurityModule(); }
Test security module factory class for service provider discovery.
createModule
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/security/modules/TestSecurityModuleFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/security/modules/TestSecurityModuleFactory.java
Apache-2.0
@Override public NettyShuffleMaster createShuffleMaster(ShuffleMasterContext shuffleMasterContext) { return new TestShuffleMaster(shuffleMasterContext.getConfiguration()); }
An {@link TestShuffleServiceFactory} implementation for testing.
createShuffleMaster
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/shuffle/ShuffleMasterTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/shuffle/ShuffleMasterTest.java
Apache-2.0
static SplitsAssignment<MockSourceSplit> getSplitsAssignment( int numSubtasks, int startingSplitId) { Map<Integer, List<MockSourceSplit>> assignments = new HashMap<>(); int splitId = startingSplitId; for (int subtaskIndex = 0; subtaskIndex < numSubtasks; subtaskIndex++) { List<MockSourceSplit> subtaskAssignment = new ArrayList<>(); for (int j = 0; j < subtaskIndex + 1; j++) { subtaskAssignment.add(new MockSourceSplit(splitId++)); } assignments.put(subtaskIndex, subtaskAssignment); } return new SplitsAssignment<>(assignments); }
Create a SplitsAssignment. The assignments look like the following: Subtask 0: Splits {0} Subtask 1: Splits {1, 2} Subtask 2: Splits {3, 4, 5}
getSplitsAssignment
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/source/coordinator/CoordinatorTestUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/source/coordinator/CoordinatorTestUtils.java
Apache-2.0
@Test void testLoadJobManagerStorageNoParameters() throws Exception { // we configure with the explicit string (rather than // AbstractStateBackend#X_STATE_BACKEND_NAME) // to guard against config-breaking changes of the name final Configuration config = new Configuration(); config.set(CheckpointingOptions.CHECKPOINT_STORAGE, "jobmanager"); CheckpointStorage storage = CheckpointStorageLoader.fromConfig(config, cl, null).get(); assertThat(storage).isInstanceOf(JobManagerCheckpointStorage.class); }
Validates loading a job manager checkpoint storage from the cluster configuration.
testLoadJobManagerStorageNoParameters
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java
Apache-2.0
@Test void testLoadJobManagerStorageWithParameters() throws Exception { final String savepointDir = new Path(TempDirUtils.newFolder(tmp).toURI()).toString(); final Path expectedSavepointPath = new Path(savepointDir); // we configure with the explicit string (rather than // AbstractStateBackend#X_STATE_BACKEND_NAME) // to guard against config-breaking changes of the name final Configuration config1 = new Configuration(); config1.set(CheckpointingOptions.CHECKPOINT_STORAGE, "jobmanager"); config1.set(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir); CheckpointStorage storage1 = CheckpointStorageLoader.fromConfig(config1, cl, null).get(); assertThat(storage1).isInstanceOf(JobManagerCheckpointStorage.class); assertThat(((JobManagerCheckpointStorage) storage1).getSavepointPath()) .isEqualTo(expectedSavepointPath); }
Validates loading a job manager checkpoint storage with additional parameters from the cluster configuration.
testLoadJobManagerStorageWithParameters
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java
Apache-2.0
@Test void testConfigureJobManagerStorage() throws Exception { final String savepointDir = new Path(TempDirUtils.newFolder(tmp).toURI()).toString(); final Path expectedSavepointPath = new Path(savepointDir); final int maxSize = 100; final Configuration config = new Configuration(); config.set( CheckpointingOptions.CHECKPOINT_STORAGE, "filesystem"); // check that this is not accidentally picked up config.set(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir); CheckpointStorage storage1 = CheckpointStorageLoader.load( new JobManagerCheckpointStorage(maxSize), new ModernStateBackend(), new Configuration(), config, cl, LOG); assertThat(storage1).isInstanceOf(JobManagerCheckpointStorage.class); JobManagerCheckpointStorage jmStorage = (JobManagerCheckpointStorage) storage1; assertThat(jmStorage.getSavepointPath()) .is(matching(normalizedPath(expectedSavepointPath))); assertThat(jmStorage.getMaxStateSize()).isEqualTo(maxSize); }
Validates taking the application-defined job manager checkpoint storage and adding additional parameters from the cluster configuration.
testConfigureJobManagerStorage
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java
Apache-2.0
@Test void testConfigureJobManagerStorageWithParameters() throws Exception { final String savepointDirCluster = new Path(TempDirUtils.newFolder(tmp).toURI()).toString(); final String savepointDirJob = new Path(TempDirUtils.newFolder(tmp).toURI()).toString(); final Configuration clusterConfig = new Configuration(); final Configuration jobConfig = new Configuration(); clusterConfig.set(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDirCluster); jobConfig.set(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDirJob); CheckpointStorage storage = CheckpointStorageLoader.load( new JobManagerCheckpointStorage(), new ModernStateBackend(), jobConfig, clusterConfig, cl, LOG); assertThat(storage).isInstanceOf(JobManagerCheckpointStorage.class); JobManagerCheckpointStorage jmStorage = (JobManagerCheckpointStorage) storage; assertThat(jmStorage.getSavepointPath()).isEqualTo(new Path(savepointDirCluster)); }
Tests that job parameters take precedence over cluster configurations.
testConfigureJobManagerStorageWithParameters
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java
Apache-2.0
@Test void testLoadFileSystemCheckpointStorage() throws Exception { final String checkpointDir = new Path(TempDirUtils.newFolder(tmp).toURI()).toString(); final String savepointDir = new Path(TempDirUtils.newFolder(tmp).toURI()).toString(); final Path expectedCheckpointsPath = new Path(checkpointDir); final Path expectedSavepointsPath = new Path(savepointDir); final MemorySize threshold = MemorySize.parse("900kb"); final int minWriteBufferSize = 1024; // we configure with the explicit string (rather than // AbstractStateBackend#X_STATE_BACKEND_NAME) // to guard against config-breaking changes of the name final Configuration config1 = new Configuration(); config1.set(CheckpointingOptions.CHECKPOINT_STORAGE, "filesystem"); config1.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, checkpointDir); config1.set(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir); config1.set(CheckpointingOptions.FS_SMALL_FILE_THRESHOLD, threshold); config1.set(CheckpointingOptions.FS_WRITE_BUFFER_SIZE, minWriteBufferSize); CheckpointStorage storage1 = CheckpointStorageLoader.fromConfig(config1, cl, null).get(); assertThat(storage1).isInstanceOf(FileSystemCheckpointStorage.class); FileSystemCheckpointStorage fs1 = (FileSystemCheckpointStorage) storage1; assertThat(fs1.getCheckpointPath()).is(matching(normalizedPath(expectedCheckpointsPath))); assertThat(fs1.getSavepointPath()).is(matching(normalizedPath(expectedSavepointsPath))); assertThat(fs1.getMinFileSizeThreshold()).isEqualTo(threshold.getBytes()); assertThat(fs1.getWriteBufferSize()) .isEqualTo(Math.max(threshold.getBytes(), minWriteBufferSize)); }
Validates loading a file system checkpoint storage with additional parameters from the cluster configuration.
testLoadFileSystemCheckpointStorage
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java
Apache-2.0
@Test void testLoadFileSystemCheckpointStorageMixed() throws Exception { final Path appCheckpointDir = new Path(TempDirUtils.newFolder(tmp).toURI()); final String checkpointDir = new Path(TempDirUtils.newFolder(tmp).toURI()).toString(); final String savepointDir = new Path(TempDirUtils.newFolder(tmp).toURI()).toString(); final Path expectedSavepointsPath = new Path(savepointDir); final int threshold = 1000000; final int writeBufferSize = 4000000; final FileSystemCheckpointStorage storage = new FileSystemCheckpointStorage(appCheckpointDir, threshold, writeBufferSize); final Configuration config = new Configuration(); config.set( CheckpointingOptions.CHECKPOINT_STORAGE, "jobmanager"); // this should not be picked up config.set( CheckpointingOptions.CHECKPOINTS_DIRECTORY, checkpointDir); // this should not be picked up config.set(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir); config.set( CheckpointingOptions.FS_SMALL_FILE_THRESHOLD, MemorySize.parse("20")); // this should not be picked up config.set( CheckpointingOptions.FS_WRITE_BUFFER_SIZE, 3000000); // this should not be picked up final CheckpointStorage loadedStorage1 = CheckpointStorageLoader.load( storage, new ModernStateBackend(), new Configuration(), config, cl, LOG); assertThat(loadedStorage1).isInstanceOf(FileSystemCheckpointStorage.class); final FileSystemCheckpointStorage fs1 = (FileSystemCheckpointStorage) loadedStorage1; assertThat(fs1.getCheckpointPath()).is(matching(normalizedPath(appCheckpointDir))); assertThat(fs1.getSavepointPath()).is(matching(normalizedPath(expectedSavepointsPath))); assertThat(fs1.getMinFileSizeThreshold()).isEqualTo(threshold); assertThat(fs1.getWriteBufferSize()).isEqualTo(writeBufferSize); }
Validates taking the application-defined file system state backend and adding additional parameters from the cluster configuration, but giving precedence to application-defined parameters over configuration-defined parameters.
testLoadFileSystemCheckpointStorageMixed
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java
Apache-2.0
static void verifyDiscard(StateObject stateHandle, TernaryBoolean expected) { if (stateHandle instanceof DiscardRecordedStateObject) { DiscardRecordedStateObject testingHandle = (DiscardRecordedStateObject) stateHandle; if (expected.getAsBoolean() != null) { assertThat(testingHandle.isDisposed()).isEqualTo(expected.getAsBoolean()); } } else { throw new IllegalStateException("stateHandle must be DiscardRecordedStateObject !"); } }
A test mock of {@link StateObject} which records whether it has been discarded.
verifyDiscard
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/DiscardRecordedStateObject.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/DiscardRecordedStateObject.java
Apache-2.0
@Test void testDuplicatedWrite() throws Exception { int streamCapacity = 1024 * 1024; TestMemoryCheckpointOutputStream primaryStream = new TestMemoryCheckpointOutputStream(streamCapacity); TestMemoryCheckpointOutputStream secondaryStream = new TestMemoryCheckpointOutputStream(streamCapacity); TestMemoryCheckpointOutputStream referenceStream = new TestMemoryCheckpointOutputStream(streamCapacity); DuplicatingCheckpointOutputStream duplicatingStream = new DuplicatingCheckpointOutputStream(primaryStream, secondaryStream, 64); Random random = new Random(42); for (int i = 0; i < 500; ++i) { int choice = random.nextInt(3); if (choice == 0) { int val = random.nextInt(); referenceStream.write(val); duplicatingStream.write(val); } else { byte[] bytes = new byte[random.nextInt(128)]; random.nextBytes(bytes); if (choice == 1) { referenceStream.write(bytes); duplicatingStream.write(bytes); } else { int off = bytes.length > 0 ? random.nextInt(bytes.length) : 0; int len = bytes.length > 0 ? random.nextInt(bytes.length - off) : 0; referenceStream.write(bytes, off, len); duplicatingStream.write(bytes, off, len); } } assertThat(duplicatingStream.getPos()).isEqualTo(referenceStream.getPos()); } StreamStateHandle refStateHandle = referenceStream.closeAndGetHandle(); StreamStateHandle primaryStateHandle = duplicatingStream.closeAndGetPrimaryHandle(); StreamStateHandle secondaryStateHandle = duplicatingStream.closeAndGetSecondaryHandle(); assertThat( CommonTestUtils.isStreamContentEqual( refStateHandle.openInputStream(), primaryStateHandle.openInputStream())) .isTrue(); assertThat( CommonTestUtils.isStreamContentEqual( refStateHandle.openInputStream(), secondaryStateHandle.openInputStream())) .isTrue(); refStateHandle.discardState(); primaryStateHandle.discardState(); secondaryStateHandle.discardState(); }
Test that all writes are duplicated to both streams and that the state reflects what was written.
testDuplicatedWrite
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/DuplicatingCheckpointOutputStreamTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/DuplicatingCheckpointOutputStreamTest.java
Apache-2.0
private void testFailingSecondaryStream( DuplicatingCheckpointOutputStream duplicatingStream, StreamTestMethod testMethod) throws Exception { testMethod.call(); duplicatingStream.write(42); FailingCheckpointOutStream secondary = (FailingCheckpointOutStream) duplicatingStream.getSecondaryOutputStream(); assertThat(secondary.isClosed()).isTrue(); long pos = duplicatingStream.getPos(); StreamStateHandle primaryHandle = duplicatingStream.closeAndGetPrimaryHandle(); if (primaryHandle != null) { assertThat(primaryHandle.getStateSize()).isEqualTo(pos); } assertThatThrownBy(duplicatingStream::closeAndGetSecondaryHandle) .isInstanceOf(IOException.class) .hasCause(duplicatingStream.getSecondaryStreamException()); }
Tests that an exception from interacting with the secondary stream does not affect duplicating to the primary stream, but is reflected later when we want the secondary state handle.
testFailingSecondaryStream
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/DuplicatingCheckpointOutputStreamTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/DuplicatingCheckpointOutputStreamTest.java
Apache-2.0
private void testFailingPrimaryStream( DuplicatingCheckpointOutputStream duplicatingStream, StreamTestMethod testMethod) throws Exception { try { assertThatThrownBy(testMethod::call).isInstanceOf(IOException.class); } finally { IOUtils.closeQuietly(duplicatingStream); } }
Test that a failing primary stream brings up an exception.
testFailingPrimaryStream
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/DuplicatingCheckpointOutputStreamTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/DuplicatingCheckpointOutputStreamTest.java
Apache-2.0
@Test void testUnalignedStreamsException() throws IOException { int streamCapacity = 1024 * 1024; TestMemoryCheckpointOutputStream primaryStream = new TestMemoryCheckpointOutputStream(streamCapacity); TestMemoryCheckpointOutputStream secondaryStream = new TestMemoryCheckpointOutputStream(streamCapacity); primaryStream.write(42); DuplicatingCheckpointOutputStream stream = new DuplicatingCheckpointOutputStream(primaryStream, secondaryStream); assertThat(stream.getSecondaryStreamException()).isNotNull(); assertThat(secondaryStream.isClosed()).isTrue(); stream.write(23); assertThatThrownBy(stream::closeAndGetSecondaryHandle) .isInstanceOf(IOException.class) .hasCause(stream.getSecondaryStreamException()); StreamStateHandle primaryHandle = stream.closeAndGetPrimaryHandle(); try (FSDataInputStream inputStream = primaryHandle.openInputStream(); ) { assertThat(inputStream.read()).isEqualTo(42); assertThat(inputStream.read()).isEqualTo(23); assertThat(inputStream.read()).isEqualTo(-1); } }
Tests that in case of unaligned stream positions, the secondary stream is closed and the primary still works. This is important because some code may rely on seeking to stream offsets in the created state files and if the streams are not aligned this code could fail.
testUnalignedStreamsException
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/DuplicatingCheckpointOutputStreamTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/DuplicatingCheckpointOutputStreamTest.java
Apache-2.0
@Test void testUnregisteredDiscarding() throws Exception { IncrementalRemoteKeyedStateHandle stateHandle = create(new Random(42)); stateHandle.discardState(); for (HandleAndLocalPath handleAndLocalPath : stateHandle.getPrivateState()) { verifyDiscard(handleAndLocalPath.getHandle(), TernaryBoolean.TRUE); } for (HandleAndLocalPath handleAndLocalPath : stateHandle.getSharedState()) { verifyDiscard(handleAndLocalPath.getHandle(), TernaryBoolean.TRUE); } verify(stateHandle.getMetaDataStateHandle()).discardState(); }
This test checks, that for an unregistered {@link IncrementalRemoteKeyedStateHandle} all state (including shared) is discarded.
testUnregisteredDiscarding
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/IncrementalRemoteKeyedStateHandleTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/IncrementalRemoteKeyedStateHandleTest.java
Apache-2.0
/**
 * Checks that for a registered {@link IncrementalRemoteKeyedStateHandle}, discarding
 * respects shared state: shared entries are only physically discarded once all references
 * have been released, while private state and meta data are discarded per handle.
 */
@Test
void testSharedStateDeRegistration() throws Exception {
    SharedStateRegistry registry = spy(new SharedStateRegistryImpl());

    // Create two state handles with overlapping shared state
    // (same Random seed -> identical shared entries, distinct private/meta handles).
    IncrementalRemoteKeyedStateHandle stateHandle1 = create(new Random(42));
    IncrementalRemoteKeyedStateHandle stateHandle2 = create(new Random(42));

    // Both handles should not be registered and not discarded by now.
    for (HandleAndLocalPath handleAndLocalPath : stateHandle1.getSharedState()) {
        verifyDiscard(handleAndLocalPath.getHandle(), TernaryBoolean.FALSE);
    }

    for (HandleAndLocalPath handleAndLocalPath : stateHandle2.getSharedState()) {
        verifyDiscard(handleAndLocalPath.getHandle(), TernaryBoolean.FALSE);
    }

    // Now we register both ...
    stateHandle1.registerSharedStates(registry, 0L);
    registry.checkpointCompleted(0L);
    stateHandle2.registerSharedStates(registry, 0L);

    for (HandleAndLocalPath handleAndLocalPath : stateHandle1.getSharedState()) {
        StreamStateHandle handle = handleAndLocalPath.getHandle();
        SharedStateRegistryKey registryKey =
                SharedStateRegistryKey.forStreamStateHandle(handle);
        // stateHandle1 and stateHandle2 has same shared states, so same key register 2 times
        verify(registry, times(2)).registerReference(registryKey, handle, 0L);
    }

    for (HandleAndLocalPath handleAndLocalPath : stateHandle2.getSharedState()) {
        StreamStateHandle handle = handleAndLocalPath.getHandle();
        SharedStateRegistryKey registryKey =
                SharedStateRegistryKey.forStreamStateHandle(handle);
        // stateHandle1 and stateHandle2 has same shared states, so same key register 2 times
        verify(registry, times(2)).registerReference(registryKey, handle, 0L);
    }

    // We discard the first
    stateHandle1.discardState();

    // Should be unregistered, non-shared discarded, shared not discarded
    for (HandleAndLocalPath handleAndLocalPath : stateHandle1.getSharedState()) {
        verifyDiscard(handleAndLocalPath.getHandle(), TernaryBoolean.FALSE);
    }

    for (HandleAndLocalPath handleAndLocalPath : stateHandle2.getSharedState()) {
        verifyDiscard(handleAndLocalPath.getHandle(), TernaryBoolean.FALSE);
    }

    // Private state belongs exclusively to stateHandle1 and must be gone now ...
    for (HandleAndLocalPath handleAndLocalPath : stateHandle1.getPrivateState()) {
        verify(handleAndLocalPath.getHandle(), times(1)).discardState();
    }

    // ... while stateHandle2's private state is untouched.
    for (HandleAndLocalPath handleAndLocalPath : stateHandle2.getPrivateState()) {
        verify(handleAndLocalPath.getHandle(), times(0)).discardState();
    }

    verify(stateHandle1.getMetaDataStateHandle(), times(1)).discardState();
    verify(stateHandle2.getMetaDataStateHandle(), times(0)).discardState();

    // We discard the second
    stateHandle2.discardState();

    // Now everything should be unregistered and discarded
    registry.unregisterUnusedState(Long.MAX_VALUE);

    for (HandleAndLocalPath handleAndLocalPath : stateHandle1.getSharedState()) {
        verifyDiscard(handleAndLocalPath.getHandle(), TernaryBoolean.TRUE);
    }
    for (HandleAndLocalPath handleAndLocalPath : stateHandle2.getSharedState()) {
        verifyDiscard(handleAndLocalPath.getHandle(), TernaryBoolean.TRUE);
    }
    verify(stateHandle1.getMetaDataStateHandle(), times(1)).discardState();
    verify(stateHandle2.getMetaDataStateHandle(), times(1)).discardState();
}
This test checks that, for a registered {@link IncrementalRemoteKeyedStateHandle}, discarding respects all shared state and only discards it once all references are released.
testSharedStateDeRegistration
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/IncrementalRemoteKeyedStateHandleTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/IncrementalRemoteKeyedStateHandleTest.java
Apache-2.0
@Override
public int compare(TestElement o1, TestElement o2) {
    // Order test elements by the lexicographic order of their serialized bytes,
    // reusing a single buffer for both serializations.
    try {
        ByteArrayOutputStreamWithPos buffer = new ByteArrayOutputStreamWithPos();
        DataOutputViewStreamWrapper outView = new DataOutputViewStreamWrapper(buffer);

        TestElementSerializer.INSTANCE.serialize(o1, outView);
        byte[] firstBytes = buffer.toByteArray();

        buffer.reset();

        TestElementSerializer.INSTANCE.serialize(o2, outView);
        byte[] secondBytes = buffer.toByteArray();

        return UnsignedBytes.lexicographicalComparator().compare(firstBytes, secondBytes);
    } catch (Exception e) {
        // Serialization failure of a test element is a test bug -> fail fast.
        throw new RuntimeException(e);
    }
}
Comparator for test elements, operating on the serialized bytes of the elements.
compare
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/InternalPriorityQueueTestBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/InternalPriorityQueueTestBase.java
Apache-2.0
/**
 * Computes the slice of each state's elements that subtask {@code idx} is expected to
 * receive after rescaling to {@code newParallelism}.
 *
 * <p>This is a simplified version of what the round-robin partitioner does, so it only
 * works when each state size is evenly divisible by the new parallelism (no remainder).
 *
 * @param states map from state name to the full list of state elements
 * @param newParallelism the parallelism after rescaling
 * @param idx the index of the subtask to compute the expected slice for
 * @return map from state name to the sub-list of elements expected on the given subtask
 */
static Map<String, List<String>> getExpectedSplit(
        Map<String, List<String>> states, int newParallelism, int idx) {
    final Map<String, List<String>> newStates = new HashMap<>();
    // Iterate over entries instead of keySet() to avoid a second map lookup per key.
    for (Map.Entry<String, List<String>> entry : states.entrySet()) {
        final List<String> allElements = entry.getValue();
        final int stateSize = allElements.size();
        newStates.put(
                entry.getKey(),
                allElements.subList(
                        idx * stateSize / newParallelism,
                        (idx + 1) * stateSize / newParallelism));
    }
    return newStates;
}
This is a simplified version of what RR partitioner does, so it only works in case there is no remainder.
getExpectedSplit
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/OperatorStateRestoreOperationTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/OperatorStateRestoreOperationTest.java
Apache-2.0
@Test void testRegistryNormal() { SharedStateRegistry sharedStateRegistry = new SharedStateRegistryImpl(); // register one state TestSharedState firstState = new TestSharedState("first"); StreamStateHandle result = sharedStateRegistry.registerReference( firstState.getRegistrationKey(), firstState, 0L); assertThat(result).isSameAs(firstState); assertThat(firstState.isDiscarded()).isFalse(); // register another state TestSharedState secondState = new TestSharedState("second"); result = sharedStateRegistry.registerReference( secondState.getRegistrationKey(), secondState, 0L); assertThat(result).isSameAs(secondState); assertThat(firstState.isDiscarded()).isFalse(); assertThat(secondState.isDiscarded()).isFalse(); sharedStateRegistry.unregisterUnusedState(1L); assertThat(secondState.isDiscarded()).isTrue(); assertThat(firstState.isDiscarded()).isTrue(); }
Validate that all states can be correctly registered at the registry.
testRegistryNormal
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/SharedStateRegistryTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/SharedStateRegistryTest.java
Apache-2.0
@Test
void mkdirs() throws Exception {
    // mkdirs() must create the snapshot directory including any missing parents.
    final File root = temporaryFolder.toFile();
    final File parent = new File(root, String.valueOf(UUID.randomUUID()));
    final File child = new File(parent, String.valueOf(UUID.randomUUID()));
    final Path targetPath = child.toPath();

    assertThat(parent).doesNotExist();
    assertThat(child).doesNotExist();

    final SnapshotDirectory snapshotDirectory = SnapshotDirectory.permanent(targetPath);

    // Creating the SnapshotDirectory instance alone must not touch the file system.
    assertThat(snapshotDirectory.exists()).isFalse();
    assertThat(parent).doesNotExist();
    assertThat(child).doesNotExist();

    // mkdirs() creates both levels and the directory now reports as existing.
    assertThat(snapshotDirectory.mkdirs()).isTrue();
    assertThat(parent).isDirectory();
    assertThat(child).isDirectory();
    assertThat(snapshotDirectory.exists()).isTrue();
}
Tests if mkdirs for snapshot directories works.
mkdirs
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/SnapshotDirectoryTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/SnapshotDirectoryTest.java
Apache-2.0
@Test
void exists() throws Exception {
    // exists() must track the state of the directory on disk, not the Java object.
    final File root = temporaryFolder.toFile();
    final File folder = new File(root, String.valueOf(UUID.randomUUID()));
    assertThat(folder).doesNotExist();

    final SnapshotDirectory snapshotDirectory = SnapshotDirectory.permanent(folder.toPath());
    assertThat(snapshotDirectory.exists()).isFalse();

    // Creating the directory externally is reflected ...
    assertThat(folder.mkdirs()).isTrue();
    assertThat(snapshotDirectory.exists()).isTrue();

    // ... and so is deleting it again.
    assertThat(folder.delete()).isTrue();
    assertThat(snapshotDirectory.exists()).isFalse();
}
Tests if indication of directory existence works.
exists
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/SnapshotDirectoryTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/SnapshotDirectoryTest.java
Apache-2.0
/**
 * Tests that listing files through the snapshot directory yields the same result as
 * listing the underlying path directly.
 *
 * <p>Fix: the original first assertion compared {@code snapshotDirectory.listDirectory()}
 * against itself, which is trivially true and verified nothing; only the comparison
 * against {@link FileUtils#listDirectory} is meaningful.
 */
@Test
void listStatus() throws Exception {
    // Populate a directory with a nested sub-directory and a regular file.
    File folderRoot = temporaryFolder.toFile();
    File folderA = new File(folderRoot, String.valueOf(UUID.randomUUID()));
    File folderB = new File(folderA, String.valueOf(UUID.randomUUID()));
    assertThat(folderB.mkdirs()).isTrue();
    File file = new File(folderA, "test.txt");
    assertThat(file.createNewFile()).isTrue();

    Path path = folderA.toPath();
    SnapshotDirectory snapshotDirectory = SnapshotDirectory.permanent(path);
    assertThat(snapshotDirectory.exists()).isTrue();

    // Listing through the snapshot directory must match listing the path directly.
    assertThat(Arrays.toString(snapshotDirectory.listDirectory()))
            .isEqualTo(Arrays.toString(FileUtils.listDirectory(path)));
}
Tests listing of file statuses works like listing on the path directly.
listStatus
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/SnapshotDirectoryTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/SnapshotDirectoryTest.java
Apache-2.0
@Test void completeSnapshotAndGetHandle() throws Exception { File folderRoot = temporaryFolder.toFile(); File folderA = new File(folderRoot, String.valueOf(UUID.randomUUID())); assertThat(folderA.mkdirs()).isTrue(); Path folderAPath = folderA.toPath(); SnapshotDirectory snapshotDirectory = SnapshotDirectory.permanent(folderAPath); // check that completed checkpoint dirs are not deleted as incomplete. DirectoryStateHandle handle = snapshotDirectory.completeSnapshotAndGetHandle(); assertThat(handle).isNotNull(); assertThat(snapshotDirectory.cleanup()).isTrue(); assertThat(folderA).isDirectory(); assertThat(handle.getDirectory()).isEqualTo(folderAPath); handle.discardState(); assertThat(folderA).doesNotExist(); assertThat(folderA.mkdirs()).isTrue(); SnapshotDirectory newSnapshotDirectory = SnapshotDirectory.permanent(folderAPath); assertThat(newSnapshotDirectory.cleanup()).isTrue(); assertThatThrownBy(newSnapshotDirectory::completeSnapshotAndGetHandle) .isInstanceOf(IOException.class); }
Tests that reporting the handle of a completed snapshot works as expected and that the directory for completed snapshot is not deleted by {@link #deleteIfNotCompeltedSnapshot()}.
completeSnapshotAndGetHandle
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/SnapshotDirectoryTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/SnapshotDirectoryTest.java
Apache-2.0
@Test
void deleteIfNotCompeltedSnapshot() throws Exception {
    // NOTE: the "Compelted" typo in the method name is kept deliberately (external name).
    final File root = temporaryFolder.toFile();
    final File snapshotFolder = new File(root, String.valueOf(UUID.randomUUID()));
    final File nestedFolder = new File(snapshotFolder, String.valueOf(UUID.randomUUID()));
    assertThat(nestedFolder.mkdirs()).isTrue();
    final File file = new File(snapshotFolder, "test.txt");
    assertThat(file.createNewFile()).isTrue();
    final Path snapshotPath = snapshotFolder.toPath();

    // An ongoing (not completed) snapshot directory is deleted by cleanup().
    SnapshotDirectory snapshotDirectory = SnapshotDirectory.permanent(snapshotPath);
    assertThat(snapshotDirectory.cleanup()).isTrue();
    assertThat(snapshotFolder).doesNotExist();

    // A completed snapshot directory must survive cleanup(), contents included.
    assertThat(snapshotFolder.mkdirs()).isTrue();
    assertThat(file.createNewFile()).isTrue();
    snapshotDirectory = SnapshotDirectory.permanent(snapshotPath);
    snapshotDirectory.completeSnapshotAndGetHandle();
    assertThat(snapshotDirectory.cleanup()).isTrue();
    assertThat(snapshotFolder).isDirectory();
    assertThat(file).exists();
}
Tests that the snapshot directory behaves correctly for delete calls: completed snapshots should not be deleted; only ongoing snapshots can be.
deleteIfNotCompeltedSnapshot
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/SnapshotDirectoryTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/SnapshotDirectoryTest.java
Apache-2.0
@Test
void isSnapshotOngoing() throws Exception {
    final File root = temporaryFolder.toFile();
    final File snapshotFolder = new File(root, String.valueOf(UUID.randomUUID()));
    assertThat(snapshotFolder.mkdirs()).isTrue();
    final Path snapshotPath = snapshotFolder.toPath();

    // Completing the snapshot flips the completed flag.
    SnapshotDirectory snapshotDirectory = SnapshotDirectory.permanent(snapshotPath);
    assertThat(snapshotDirectory.isSnapshotCompleted()).isFalse();
    assertThat(snapshotDirectory.completeSnapshotAndGetHandle()).isNotNull();
    assertThat(snapshotDirectory.isSnapshotCompleted()).isTrue();

    // Cleaning up an ongoing snapshot must NOT mark it as completed.
    snapshotDirectory = SnapshotDirectory.permanent(snapshotPath);
    assertThat(snapshotDirectory.isSnapshotCompleted()).isFalse();
    snapshotDirectory.cleanup();
    assertThat(snapshotDirectory.isSnapshotCompleted()).isFalse();
}
This test checks that completing or deleting the snapshot influences the #isSnapshotOngoing() flag.
isSnapshotOngoing
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/SnapshotDirectoryTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/SnapshotDirectoryTest.java
Apache-2.0
@Test void temporary() throws Exception { File folderRoot = temporaryFolder.toFile(); File folder = new File(folderRoot, String.valueOf(UUID.randomUUID())); assertThat(folder.mkdirs()).isTrue(); SnapshotDirectory tmpSnapshotDirectory = SnapshotDirectory.temporary(folder); // temporary snapshot directories should not return a handle, because they will be deleted. assertThat(tmpSnapshotDirectory.completeSnapshotAndGetHandle()).isNull(); // check that the directory is deleted even after we called #completeSnapshotAndGetHandle. assertThat(tmpSnapshotDirectory.cleanup()).isTrue(); assertThat(folder).doesNotExist(); }
Tests that temporary directories have the right behavior on completion and deletion.
temporary
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/SnapshotDirectoryTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/SnapshotDirectoryTest.java
Apache-2.0
/**
 * Creates the {@link CheckpointStorage} used when taking snapshots in these tests.
 *
 * <p>Defaults to {@link JobManagerCheckpointStorage}; subclasses may override this to run
 * the test base against a different checkpoint storage implementation.
 *
 * @return the checkpoint storage to use
 * @throws Exception if creating the storage fails (possible in overriding subclasses)
 */
protected CheckpointStorage getCheckpointStorage() throws Exception {
    return new JobManagerCheckpointStorage();
}
Tests for the {@link KeyedStateBackend} and {@link OperatorStateBackend} as produced by various {@link StateBackend}s. <p>The tests in this test base focuses on the verification of state serializers usage when they are either compatible or requiring state migration after restoring the state backends.
getCheckpointStorage
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendMigrationTestBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendMigrationTestBase.java
Apache-2.0
/**
 * Verifies state restore resilience when the snapshot was taken without any Kryo
 * registrations, specific serializers or default serializers for the state type, and the
 * restore happens with the state type registered (but no specific serializer).
 *
 * <p>This must not fail, because de-/serialization of the state should not be performed
 * with Kryo's default FieldSerializer.
 */
@TestTemplate
void testKryoRegisteringRestoreResilienceWithRegisteredType() throws Exception {
    CheckpointStreamFactory streamFactory = createStreamFactory();
    SharedStateRegistry sharedStateRegistry = new SharedStateRegistryImpl();

    TypeInformation<TestPojo> pojoType = new GenericTypeInfo<>(TestPojo.class);

    // make sure that we are in fact using the KryoSerializer
    assertThat(pojoType.createSerializer(env.getExecutionConfig().getSerializerConfig()))
            .isInstanceOf(KryoSerializer.class);

    ValueStateDescriptor<TestPojo> kvId = new ValueStateDescriptor<>("id", pojoType);

    CheckpointableKeyedStateBackend<Integer> backend =
            createKeyedBackend(IntSerializer.INSTANCE, env);

    try {
        ValueState<TestPojo> state =
                backend.getPartitionedState(
                        VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);

        // ============== create snapshot - no Kryo registration or specific / default
        // serializers
        // ==============

        // make some more modifications
        backend.setCurrentKey(1);
        state.update(new TestPojo("u1", 1));

        backend.setCurrentKey(2);
        state.update(new TestPojo("u2", 2));

        KeyedStateHandle snapshot =
                runSnapshot(
                        backend.snapshot(
                                682375462378L,
                                2,
                                streamFactory,
                                CheckpointOptions.forCheckpointWithDefaultLocation()),
                        sharedStateRegistry);

        // dispose the first backend before restoring into a fresh one
        IOUtils.closeQuietly(backend);
        backend.dispose();

        // ====================================== restore snapshot
        // ======================================

        // register the type only AFTER the snapshot was taken
        ((SerializerConfigImpl) env.getExecutionConfig().getSerializerConfig())
                .registerKryoType(TestPojo.class);

        backend = restoreKeyedBackend(IntSerializer.INSTANCE, snapshot, env);

        snapshot.discardState();

        state =
                backend.getPartitionedState(
                        VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);
        backend.setCurrentKey(1);
        assertThat(state.value()).isEqualTo(new TestPojo("u1", 1));

        backend.setCurrentKey(2);
        assertThat(state.value()).isEqualTo(new TestPojo("u2", 2));
    } finally {
        // release (possibly native) backend resources even on exceptional exit
        IOUtils.closeQuietly(backend);
        backend.dispose();
    }
}
Verify state restore resilience when: - snapshot was taken without any Kryo registrations, specific serializers or default serializers for the state type - restored with the state type registered (no specific serializer) <p>This test should not fail, because de- / serialization of the state should not be performed with Kryo's default {@link com.esotericsoftware.kryo.serializers.FieldSerializer}.
testKryoRegisteringRestoreResilienceWithRegisteredType
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
Apache-2.0
/**
 * Verifies state restore resilience when the snapshot was taken without any Kryo
 * registrations, specific serializers or default serializers for the state type, and the
 * restore happens with a default Kryo serializer registered for the state type.
 *
 * <p>The default serializer used on restore is {@link CustomKryoTestSerializer}, which
 * deliberately fails only on deserialization; that deliberate failure is used to
 * acknowledge test success.
 *
 * @throws Exception expects {@link ExpectedKryoTestException} to be thrown
 */
@TestTemplate
@SuppressWarnings("unchecked")
void testKryoRegisteringRestoreResilienceWithDefaultSerializer() throws Exception {
    assumeThat(supportsMetaInfoVerification()).isTrue();
    CheckpointStreamFactory streamFactory = createStreamFactory();
    SharedStateRegistry sharedStateRegistry = new SharedStateRegistryImpl();
    CheckpointableKeyedStateBackend<Integer> backend =
            createKeyedBackend(IntSerializer.INSTANCE, env);

    try {
        TypeInformation<TestPojo> pojoType = new GenericTypeInfo<>(TestPojo.class);

        // make sure that we are in fact using the KryoSerializer
        assertThat(pojoType.createSerializer(env.getExecutionConfig().getSerializerConfig()))
                .isInstanceOf(KryoSerializer.class);

        ValueStateDescriptor<TestPojo> kvId = new ValueStateDescriptor<>("id", pojoType);
        ValueState<TestPojo> state =
                backend.getPartitionedState(
                        VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);

        // ============== create snapshot - no Kryo registration or specific / default
        // serializers ==============

        // make some more modifications
        backend.setCurrentKey(1);
        state.update(new TestPojo("u1", 1));

        backend.setCurrentKey(2);
        state.update(new TestPojo("u2", 2));

        KeyedStateHandle snapshot =
                runSnapshot(
                        backend.snapshot(
                                682375462378L,
                                2,
                                streamFactory,
                                CheckpointOptions.forCheckpointWithDefaultLocation()),
                        sharedStateRegistry);

        backend.dispose();

        // ========== restore snapshot - should use default serializer (ONLY SERIALIZATION)
        // ==========

        // cast because our test serializer is not typed to TestPojo
        ((SerializerConfigImpl) env.getExecutionConfig().getSerializerConfig())
                .addDefaultKryoSerializer(
                        TestPojo.class, (Class) CustomKryoTestSerializer.class);

        backend = restoreKeyedBackend(IntSerializer.INSTANCE, snapshot, env);

        // re-initialize to ensure that we create the KryoSerializer from scratch, otherwise
        // initializeSerializerUnlessSet would not pick up our new config
        kvId = new ValueStateDescriptor<>("id", pojoType);
        state =
                backend.getPartitionedState(
                        VoidNamespace.INSTANCE,
                        VoidNamespaceSerializer.INSTANCE,
                        kvId);

        backend.setCurrentKey(1);

        // update to test state backends that eagerly serialize, such as RocksDB
        state.update(new TestPojo("u1", 11));

        KeyedStateHandle snapshot2 =
                runSnapshot(
                        backend.snapshot(
                                682375462378L,
                                2,
                                streamFactory,
                                CheckpointOptions.forCheckpointWithDefaultLocation()),
                        sharedStateRegistry);

        snapshot.discardState();

        backend.dispose();

        // ========= restore snapshot - should use default serializer (FAIL ON DESERIALIZATION)
        // =========

        // cast because our test serializer is not typed to TestPojo
        ((SerializerConfigImpl) env.getExecutionConfig().getSerializerConfig())
                .addDefaultKryoSerializer(
                        TestPojo.class, (Class) CustomKryoTestSerializer.class);

        assertRestoreKeyedBackendFail(snapshot2, kvId);

        snapshot2.discardState();
    } finally {
        // ensure to release native resources even when we exit through exception
        IOUtils.closeQuietly(backend);
        backend.dispose();
    }
}
Verify state restore resilience when: - snapshot was taken without any Kryo registrations, specific serializers or default serializers for the state type - restored with a default serializer for the state type <p>The default serializer used on restore is {@link CustomKryoTestSerializer}, which deliberately fails only on deserialization. We use the deliberate deserialization failure to acknowledge test success. @throws Exception expects {@link ExpectedKryoTestException} to be thrown.
testKryoRegisteringRestoreResilienceWithDefaultSerializer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
Apache-2.0
/**
 * Tests {@link ValueState#value()} and {@link InternalKvState#getSerializedValue(byte[],
 * TypeSerializer, TypeSerializer, TypeSerializer)} accessing the state concurrently; they
 * must not get in the way of each other.
 */
@TestTemplate
@SuppressWarnings("unchecked")
void testValueStateRace() throws Exception {
    final Integer namespace = 1;

    final ValueStateDescriptor<String> kvId = new ValueStateDescriptor<>("id", String.class);

    final TypeSerializer<Integer> keySerializer = IntSerializer.INSTANCE;
    final TypeSerializer<Integer> namespaceSerializer = IntSerializer.INSTANCE;

    final CheckpointableKeyedStateBackend<Integer> backend =
            createKeyedBackend(IntSerializer.INSTANCE);
    try {
        final ValueState<String> state =
                backend.getPartitionedState(namespace, IntSerializer.INSTANCE, kvId);

        // this is only available after the backend initialized the serializer
        final TypeSerializer<String> valueSerializer = kvId.getSerializer();

        @SuppressWarnings("unchecked")
        final InternalKvState<Integer, Integer, String> kvState =
                (InternalKvState<Integer, Integer, String>) state;

        /**
         * 1) Test that ValueState#value() before and after KvState#getSerializedValue(byte[])
         * return the same value.
         */

        // set some key and namespace
        final int key1 = 1;
        backend.setCurrentKey(key1);
        kvState.setCurrentNamespace(2);
        state.update("2");
        assertThat(state.value()).isEqualTo("2");

        // query another key and namespace
        assertThat(
                        getSerializedValue(
                                kvState,
                                3,
                                keySerializer,
                                namespace,
                                IntSerializer.INSTANCE,
                                valueSerializer))
                .isNull();

        // the state should not have changed!
        assertThat(state.value()).isEqualTo("2");

        // re-set values
        kvState.setCurrentNamespace(namespace);

        /**
         * 2) Test two threads concurrently using ValueState#value() and
         * KvState#getSerializedValue(byte[]).
         */

        // some modifications to the state
        final int key2 = 10;
        backend.setCurrentKey(key2);
        assertThat(state.value()).isNull();
        assertThat(
                        getSerializedValue(
                                kvState,
                                key2,
                                keySerializer,
                                namespace,
                                namespaceSerializer,
                                valueSerializer))
                .isNull();
        state.update("1");

        // reads via the typed API in a tight loop until interrupted
        final CheckedThread getter =
                new CheckedThread("State getter") {
                    @Override
                    public void go() throws Exception {
                        while (!isInterrupted()) {
                            assertThat(state.value()).isEqualTo("1");
                        }
                    }
                };

        // concurrently reads the same entry via the serialized-value API
        final CheckedThread serializedGetter =
                new CheckedThread("Serialized state getter") {
                    @Override
                    public void go() throws Exception {
                        while (!isInterrupted() && getter.isAlive()) {
                            final String serializedValue =
                                    getSerializedValue(
                                            kvState,
                                            key2,
                                            keySerializer,
                                            namespace,
                                            namespaceSerializer,
                                            valueSerializer);
                            assertThat(serializedValue).isEqualTo("1");
                        }
                    }
                };

    getter.start();
        serializedGetter.start();

        // run both threads for max 100ms
        Timer t = new Timer("stopper");
        t.schedule(
                new TimerTask() {
                    @Override
                    public void run() {
                        getter.interrupt();
                        serializedGetter.interrupt();
                        this.cancel();
                    }
                },
                100);

        // wait for both threads to finish
        // serializedGetter will finish if its assertion fails or if
        // getter is not alive any more
        serializedGetter.sync();
        // if serializedGetter crashed, getter will not know -> interrupt just in case
        getter.interrupt();
        getter.sync();
        t.cancel(); // if not executed yet
    } finally {
        // clean up
        IOUtils.closeQuietly(backend);
        backend.dispose();
    }
}
Tests {@link ValueState#value()} and {@link InternalKvState#getSerializedValue(byte[], TypeSerializer, TypeSerializer, TypeSerializer)} accessing the state concurrently. They should not get in the way of each other.
testValueStateRace
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
Apache-2.0
@TestTemplate
public void testMapStateEntryCompare() throws Exception {
    // Entries returned by MapState iterators must compare equal by content, even when
    // they originate from two distinct map states.
    final MapStateDescriptor<Integer, Long> firstDescriptor =
            new MapStateDescriptor<>("map-state-1", Integer.class, Long.class);
    final MapStateDescriptor<Integer, Long> secondDescriptor =
            new MapStateDescriptor<>("map-state-2", Integer.class, Long.class);

    final CheckpointableKeyedStateBackend<Integer> backend =
            createKeyedBackend(IntSerializer.INSTANCE);
    try {
        final MapState<Integer, Long> firstState =
                backend.getPartitionedState(
                        VoidNamespace.INSTANCE,
                        VoidNamespaceSerializer.INSTANCE,
                        firstDescriptor);
        final MapState<Integer, Long> secondState =
                backend.getPartitionedState(
                        VoidNamespace.INSTANCE,
                        VoidNamespaceSerializer.INSTANCE,
                        secondDescriptor);

        final Map.Entry<Integer, Long> expectedEntry = new AbstractMap.SimpleEntry<>(0, 10L);

        backend.setCurrentKey(1);
        firstState.put(expectedEntry.getKey(), expectedEntry.getValue());
        secondState.put(expectedEntry.getKey(), expectedEntry.getValue());

        // Each state's iterator yields an entry equal to the expected one ...
        assertThat(firstState.entries().iterator().next()).isEqualTo(expectedEntry);
        assertThat(secondState.entries().iterator().next()).isEqualTo(expectedEntry);
        // ... and entries of different states compare equal to each other.
        assertThat(firstState.entries().iterator().next())
                .isEqualTo(secondState.entries().iterator().next());
    } finally {
        IOUtils.closeQuietly(backend);
        backend.dispose();
    }
}
Verify that iterator of {@link MapState} compares on the content.
testMapStateEntryCompare
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
Apache-2.0
/**
 * Verifies key-group assignment and restore when scaling DOWN: 4 source backends,
 * 2 target backends, 128 key groups.
 */
@TestTemplate
void testKeyGroupSnapshotRestoreScaleDown() throws Exception {
    testKeyGroupSnapshotRestore(4, 2, 128);
}
This test verifies that state is correctly assigned to key groups and that restore restores the relevant key groups in the backend. <p>We have 128 key groups. Initially, four backends with different states are responsible for all the key groups equally. Different backends for the same operator may contain different states if the state is created at runtime (such as in {@link DeltaTrigger#onElement}). Then we snapshot, split up the state and restore into 2 backends where each is responsible for 64 key groups. Then we make sure that the state is only available in the correct backend.
testKeyGroupSnapshotRestoreScaleDown
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
Apache-2.0
/**
 * Verifies key-group assignment and restore when scaling UP: 2 source backends,
 * 4 target backends, 128 key groups.
 */
@TestTemplate
void testKeyGroupSnapshotRestoreScaleUp() throws Exception {
    testKeyGroupSnapshotRestore(2, 4, 128);
}
This test verifies that state is correctly assigned to key groups and that restore restores the relevant key groups in the backend. <p>We have 128 key groups. Initially, two backends with different states are responsible for all the key groups equally. Different backends for the same operator may contain different states if the state is created at runtime (such as in {@link DeltaTrigger#onElement}). Then we snapshot, split up the state and restore into 4 backends where each is responsible for 32 key groups. Then we make sure that the state is only available in the correct backend.
testKeyGroupSnapshotRestoreScaleUp
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
Apache-2.0
/**
 * Verifies key-group assignment and restore WITHOUT rescaling: 2 source backends,
 * 2 target backends, 128 key groups.
 */
@TestTemplate
void testKeyGroupsSnapshotRestoreNoRescale() throws Exception {
    testKeyGroupSnapshotRestore(2, 2, 128);
}
This test verifies that state is correctly assigned to key groups and that restore restores the relevant key groups in the backend. <p>We have 128 key groups. Initially, two backends with different states are responsible for all the key groups equally. Different backends for the same operator may contain different states if the state is created at runtime (such as in {@link DeltaTrigger#onElement}). Then we snapshot, split up the state and restore into 2 backends where each is responsible for 64 key groups. Then we make sure that the state is only available in the correct backend.
testKeyGroupsSnapshotRestoreNoRescale
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
Apache-2.0
/**
 * Like {@code testKeyGroupSnapshotRestoreScaleUp}, but with parallelisms (15 -> 77) that
 * do not divide the 128 key groups evenly, so the key groups are distributed unevenly.
 */
@TestTemplate
void testKeyGroupsSnapshotRestoreScaleUpUnEvenDistribute() throws Exception {
    testKeyGroupSnapshotRestore(15, 77, 128);
}
Similar with testKeyGroupSnapshotRestoreScaleUp, but the KeyGroups were distributed unevenly.
testKeyGroupsSnapshotRestoreScaleUpUnEvenDistribute
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
Apache-2.0
@TestTemplate
void testRequireNonNullNamespace() throws Exception {
    // It used to be possible to create partitioned state with a null namespace;
    // this verifies that null namespaces and serializers are now rejected.
    final ValueStateDescriptor<IntValue> kvId =
            new ValueStateDescriptor<>("id", IntValue.class, new IntValue(-1));

    final CheckpointableKeyedStateBackend<Integer> backend =
            createKeyedBackend(IntSerializer.INSTANCE);
    try {
        // null namespace
        assertThatThrownBy(
                        () ->
                                backend.getPartitionedState(
                                        null, VoidNamespaceSerializer.INSTANCE, kvId))
                .isInstanceOf(NullPointerException.class);
        // null namespace serializer
        assertThatThrownBy(
                        () -> backend.getPartitionedState(VoidNamespace.INSTANCE, null, kvId))
                .isInstanceOf(NullPointerException.class);
        // both null
        assertThatThrownBy(() -> backend.getPartitionedState(null, null, kvId))
                .isInstanceOf(NullPointerException.class);
    } finally {
        IOUtils.closeQuietly(backend);
        backend.dispose();
    }
}
Previously, it was possible to create partitioned state with <code>null</code> namespace. This test makes sure that this is prohibited now.
testRequireNonNullNamespace
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
Apache-2.0
/**
 * Whether the state backend under test serializes state meta info in a way that supports
 * verification on restore.
 *
 * @return true if metadata serialization supports verification; if not, expected
 *     exceptions will likely not be thrown (backends may override to return false)
 */
protected boolean supportsMetaInfoVerification() {
    return true;
}
@return true if metadata serialization supports verification. If not, expected exceptions will likely not be thrown.
supportsMetaInfoVerification
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
Apache-2.0
@Test void testCreationFromConfig() throws Exception { final Configuration config = new Configuration(); File newFolder = TempDirUtils.newFolder(temporaryFolder.toPath()); String tmpDir = newFolder.getAbsolutePath() + File.separator; final String rootDirString = "__localStateRoot1,__localStateRoot2,__localStateRoot3".replaceAll("__", tmpDir); // test configuration of the local state directories config.set(CheckpointingOptions.LOCAL_RECOVERY_TASK_MANAGER_STATE_ROOT_DIRS, rootDirString); // test configuration of the local state mode config.set(StateRecoveryOptions.LOCAL_RECOVERY, true); final WorkingDirectory workingDirectory = WORKING_DIRECTORY_EXTENSION_WRAPPER .getCustomExtension() .createNewWorkingDirectory(); TaskManagerServices taskManagerServices = createTaskManagerServices( createTaskManagerServiceConfiguration(config, workingDirectory), workingDirectory); try { TaskExecutorLocalStateStoresManager taskStateManager = taskManagerServices.getTaskManagerStateStore(); // verify configured directories for local state String[] split = rootDirString.split(","); File[] rootDirectories = taskStateManager.getLocalStateRootDirectories(); for (int i = 0; i < split.length; ++i) { assertThat(rootDirectories[i].toPath()).startsWith(Paths.get(split[i])); } // verify local recovery mode assertThat(taskStateManager.isLocalRecoveryEnabled()).isTrue(); for (File rootDirectory : rootDirectories) { FileUtils.deleteFileOrDirectory(rootDirectory); } } finally { taskManagerServices.shutDown(); } }
This tests that the creation of {@link TaskManagerServices} correctly creates the local state root directory for the {@link TaskExecutorLocalStateStoresManager} with the configured root directory.
testCreationFromConfig
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/TaskExecutorLocalStateStoresManagerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/TaskExecutorLocalStateStoresManagerTest.java
Apache-2.0
@Test void getLocalRecoveryRootDirectoryProvider() { LocalRecoveryConfig directoryProvider = taskLocalStateStore.getLocalRecoveryConfig(); assertThat( directoryProvider .getLocalStateDirectoryProvider() .get() .allocationBaseDirsCount()) .isEqualTo(allocationBaseDirs.length); for (int i = 0; i < allocationBaseDirs.length; ++i) { assertThat( directoryProvider .getLocalStateDirectoryProvider() .get() .selectAllocationBaseDirectory(i)) .isEqualTo(allocationBaseDirs[i]); } }
Test that the instance delivers a correctly configured LocalRecoveryDirectoryProvider.
getLocalRecoveryRootDirectoryProvider
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/TaskLocalStateStoreImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/TaskLocalStateStoreImplTest.java
Apache-2.0
@Test void storeAndRetrieve() throws Exception { final int chkCount = 3; for (int i = 0; i < chkCount; ++i) { assertThat(taskLocalStateStore.retrieveLocalState(i)).isNull(); } List<TestingTaskStateSnapshot> taskStateSnapshots = storeStates(chkCount); checkStoredAsExpected(taskStateSnapshots, 0, chkCount); assertThat(taskLocalStateStore.retrieveLocalState(chkCount + 1)).isNull(); }
Tests basic store/retrieve of local state.
storeAndRetrieve
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/TaskLocalStateStoreImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/TaskLocalStateStoreImplTest.java
Apache-2.0
@Test void confirmCheckpoint() throws Exception { final int chkCount = 3; final int confirmed = chkCount - 1; List<TestingTaskStateSnapshot> taskStateSnapshots = storeStates(chkCount); taskLocalStateStore.confirmCheckpoint(confirmed); checkPrunedAndDiscarded(taskStateSnapshots, 0, confirmed); checkStoredAsExpected(taskStateSnapshots, confirmed, chkCount); }
Tests pruning of previous checkpoints if a new checkpoint is confirmed.
confirmCheckpoint
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/TaskLocalStateStoreImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/TaskLocalStateStoreImplTest.java
Apache-2.0
@Test void abortCheckpoint() throws Exception { final int chkCount = 4; final int aborted = chkCount - 2; List<TestingTaskStateSnapshot> taskStateSnapshots = storeStates(chkCount); taskLocalStateStore.abortCheckpoint(aborted); checkPrunedAndDiscarded(taskStateSnapshots, aborted, aborted + 1); checkStoredAsExpected(taskStateSnapshots, 0, aborted); checkStoredAsExpected(taskStateSnapshots, aborted + 1, chkCount); }
Tests pruning of target previous checkpoints if that checkpoint is aborted.
abortCheckpoint
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/TaskLocalStateStoreImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/TaskLocalStateStoreImplTest.java
Apache-2.0
@Test void testStateReportingAndRetrieving() { JobID jobID = new JobID(); ExecutionAttemptID executionAttemptID = createExecutionAttemptId(); TestCheckpointResponder testCheckpointResponder = new TestCheckpointResponder(); TestTaskLocalStateStore testTaskLocalStateStore = new TestTaskLocalStateStore(); InMemoryStateChangelogStorage changelogStorage = new InMemoryStateChangelogStorage(); TaskStateManager taskStateManager = taskStateManager( jobID, executionAttemptID, testCheckpointResponder, null, testTaskLocalStateStore, changelogStorage); // ---------------------------------------- test reporting // ----------------------------------------- CheckpointMetaData checkpointMetaData = new CheckpointMetaData(74L, 11L); CheckpointMetrics checkpointMetrics = new CheckpointMetrics(); TaskStateSnapshot jmTaskStateSnapshot = new TaskStateSnapshot(); OperatorID operatorID_1 = new OperatorID(1L, 1L); OperatorID operatorID_2 = new OperatorID(2L, 2L); OperatorID operatorID_3 = new OperatorID(3L, 3L); assertThat(taskStateManager.prioritizedOperatorState(operatorID_1).isRestored()).isFalse(); assertThat(taskStateManager.prioritizedOperatorState(operatorID_2).isRestored()).isFalse(); assertThat(taskStateManager.prioritizedOperatorState(operatorID_3).isRestored()).isFalse(); KeyGroupRange keyGroupRange = new KeyGroupRange(0, 1); // Remote state of operator 1 has only managed keyed state. OperatorSubtaskState jmOperatorSubtaskState_1 = OperatorSubtaskState.builder() .setManagedKeyedState( StateHandleDummyUtil.createNewKeyedStateHandle(keyGroupRange)) .build(); // Remote state of operator 1 has only raw keyed state. 
OperatorSubtaskState jmOperatorSubtaskState_2 = OperatorSubtaskState.builder() .setRawKeyedState( StateHandleDummyUtil.createNewKeyedStateHandle(keyGroupRange)) .build(); jmTaskStateSnapshot.putSubtaskStateByOperatorID(operatorID_1, jmOperatorSubtaskState_1); jmTaskStateSnapshot.putSubtaskStateByOperatorID(operatorID_2, jmOperatorSubtaskState_2); TaskStateSnapshot tmTaskStateSnapshot = new TaskStateSnapshot(); // Only operator 1 has a local alternative for the managed keyed state. OperatorSubtaskState tmOperatorSubtaskState_1 = OperatorSubtaskState.builder() .setManagedKeyedState( StateHandleDummyUtil.createNewKeyedStateHandle(keyGroupRange)) .build(); tmTaskStateSnapshot.putSubtaskStateByOperatorID(operatorID_1, tmOperatorSubtaskState_1); taskStateManager.reportTaskStateSnapshots( checkpointMetaData, checkpointMetrics, jmTaskStateSnapshot, tmTaskStateSnapshot); TestCheckpointResponder.AcknowledgeReport acknowledgeReport = testCheckpointResponder.getAcknowledgeReports().get(0); // checks that the checkpoint responder and the local state store received state as // expected. 
assertThat(acknowledgeReport.getCheckpointId()) .isEqualTo(checkpointMetaData.getCheckpointId()); assertThat(acknowledgeReport.getCheckpointMetrics()).isEqualTo(checkpointMetrics); assertThat(acknowledgeReport.getExecutionAttemptID()).isEqualTo(executionAttemptID); assertThat(acknowledgeReport.getJobID()).isEqualTo(jobID); assertThat(acknowledgeReport.getSubtaskState()).isEqualTo(jmTaskStateSnapshot); assertThat(testTaskLocalStateStore.retrieveLocalState(checkpointMetaData.getCheckpointId())) .isEqualTo(tmTaskStateSnapshot); // -------------------------------------- test prio retrieving // --------------------------------------- JobManagerTaskRestore taskRestore = new JobManagerTaskRestore( checkpointMetaData.getCheckpointId(), acknowledgeReport.getSubtaskState()); taskStateManager = taskStateManager( jobID, executionAttemptID, testCheckpointResponder, taskRestore, testTaskLocalStateStore, changelogStorage); // this has remote AND local managed keyed state. PrioritizedOperatorSubtaskState prioritized_1 = taskStateManager.prioritizedOperatorState(operatorID_1); // this has only remote raw keyed state. PrioritizedOperatorSubtaskState prioritized_2 = taskStateManager.prioritizedOperatorState(operatorID_2); // not restored. PrioritizedOperatorSubtaskState prioritized_3 = taskStateManager.prioritizedOperatorState(operatorID_3); assertThat(prioritized_1.isRestored()).isTrue(); assertThat(prioritized_2.isRestored()).isTrue(); assertThat(prioritized_3.isRestored()).isTrue(); assertThat(taskStateManager.prioritizedOperatorState(new OperatorID()).isRestored()) .isTrue(); // checks for operator 1. 
Iterator<StateObjectCollection<KeyedStateHandle>> prioritizedManagedKeyedState_1 = prioritized_1.getPrioritizedManagedKeyedState().iterator(); assertThat(prioritizedManagedKeyedState_1).hasNext(); StateObjectCollection<KeyedStateHandle> current = prioritizedManagedKeyedState_1.next(); KeyedStateHandle keyedStateHandleExp = tmOperatorSubtaskState_1.getManagedKeyedState().iterator().next(); KeyedStateHandle keyedStateHandleAct = current.iterator().next(); assertThat(keyedStateHandleExp).isSameAs(keyedStateHandleAct); assertThat(prioritizedManagedKeyedState_1).hasNext(); current = prioritizedManagedKeyedState_1.next(); keyedStateHandleExp = jmOperatorSubtaskState_1.getManagedKeyedState().iterator().next(); keyedStateHandleAct = current.iterator().next(); assertThat(keyedStateHandleExp).isSameAs(keyedStateHandleAct); assertThat(prioritizedManagedKeyedState_1).isExhausted(); // checks for operator 2. Iterator<StateObjectCollection<KeyedStateHandle>> prioritizedRawKeyedState_2 = prioritized_2.getPrioritizedRawKeyedState().iterator(); assertThat(prioritizedRawKeyedState_2).hasNext(); current = prioritizedRawKeyedState_2.next(); keyedStateHandleExp = jmOperatorSubtaskState_2.getRawKeyedState().iterator().next(); keyedStateHandleAct = current.iterator().next(); assertThat(keyedStateHandleExp).isSameAs(keyedStateHandleAct); assertThat(prioritizedRawKeyedState_2).isExhausted(); }
Test reporting and retrieving prioritized local and remote state.
testStateReportingAndRetrieving
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/TaskStateManagerImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/TaskStateManagerImplTest.java
Apache-2.0
default PhysicalStateHandleID getStreamStateHandleID() { return new PhysicalStateHandleID(Integer.toString(System.identityHashCode(this))); }
Test {@link StreamStateHandle} that implements {@link #getStreamStateHandleID()} using {@link System#identityHashCode(Object)}.
getStreamStateHandleID
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/TestStreamStateHandle.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/TestStreamStateHandle.java
Apache-2.0
@Parameters public static Collection<CheckpointStateOutputStreamType> getCheckpointStateOutputStreamType() { return Arrays.asList(CheckpointStateOutputStreamType.values()); }
Abstract base class for tests against checkpointing streams.
getCheckpointStateOutputStreamType
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/filesystem/CheckpointStateOutputStreamTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/filesystem/CheckpointStateOutputStreamTest.java
Apache-2.0
@TestTemplate void testEmptyState() throws Exception { final FileSystem fs = FileSystem.getLocalFileSystem(); final Path folder = baseFolder(); final String fileName = "myFileName"; final Path filePath = new Path(folder, fileName); final FileStateHandle handle; try (FSDataOutputStream stream = createTestStream(fs, folder, fileName)) { handle = closeAndGetResult(stream); } // must have created a handle assertThat(handle).isNotNull(); assertThat(handle.getFilePath()).isEqualTo(filePath); // the pointer path should exist as a directory assertThat(fs.exists(handle.getFilePath())).isTrue(); assertThat(fs.getFileStatus(filePath).isDir()).isFalse(); // the contents should be empty try (FSDataInputStream in = handle.openInputStream()) { assertThat(in.read()).isEqualTo(-1); } }
Validates that even empty streams create a file and a file state handle.
testEmptyState
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/filesystem/CheckpointStateOutputStreamTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/filesystem/CheckpointStateOutputStreamTest.java
Apache-2.0
@Tag("org.apache.flink.testutils.junit.FailsInGHAContainerWithRootUser") @TestTemplate void testStreamDoesNotTryToCleanUpParentOnError() throws Exception { final File directory = TempDirUtils.newFolder(tempDir); // prevent creation of files in that directory // this operation does not work reliably on Windows, so we use an "assume" to skip the test // is this prerequisite operation is not supported. assumeThat(directory.setWritable(false, true)).isTrue(); checkDirectoryNotWritable(directory); FileSystem fs = spy(FileSystem.getLocalFileSystem()); FsCheckpointStateOutputStream stream1 = new FsCheckpointStateOutputStream( Path.fromLocalFile(directory), fs, 1024, 1, relativePaths); FsCheckpointStateOutputStream stream2 = new FsCheckpointStateOutputStream( Path.fromLocalFile(directory), fs, 1024, 1, relativePaths); stream1.write(new byte[61]); stream2.write(new byte[61]); assertThatThrownBy(stream1::closeAndGetHandle).isInstanceOf(IOException.class); stream2.close(); // no delete call must have happened verify(fs, times(0)).delete(any(Path.class), anyBoolean()); // the directory must still exist as a proper directory assertThat(directory).exists(); assertThat(directory).isDirectory(); }
This test checks that the stream does not check and clean the parent directory when encountering a write error.
testStreamDoesNotTryToCleanUpParentOnError
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/filesystem/FsCheckpointStateOutputStreamTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/filesystem/FsCheckpointStateOutputStreamTest.java
Apache-2.0
@Override public FSDataOutputStream create(Path filePath, WriteMode overwrite) throws IOException { return streamFactory.apply(filePath); }
Test {@link LocalFileSystem} for testing purposes.
create
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/filesystem/TestFs.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/filesystem/TestFs.java
Apache-2.0
@Test void testIncrementalRehash() { final CopyOnWriteStateMap<Integer, Integer, ArrayList<Integer>> stateMap = new CopyOnWriteStateMap<>(new ArrayListSerializer<>(IntSerializer.INSTANCE)); int insert = 0; int remove = 0; while (!stateMap.isRehashing()) { stateMap.put(insert++, 0, new ArrayList<>()); if (insert % 8 == 0) { stateMap.remove(remove++, 0); } } assertThat(stateMap).hasSize(insert - remove); while (stateMap.isRehashing()) { stateMap.put(insert++, 0, new ArrayList<>()); if (insert % 8 == 0) { stateMap.remove(remove++, 0); } } assertThat(stateMap).hasSize(insert - remove); for (int i = 0; i < insert; ++i) { if (i < remove) { assertThat(stateMap.containsKey(i, 0)).isFalse(); } else { assertThat(stateMap.containsKey(i, 0)).isTrue(); } } }
This test triggers incremental rehash and tests for corruptions.
testIncrementalRehash
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMapTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMapTest.java
Apache-2.0
@Test void testRandomModificationsAndCopyOnWriteIsolation() throws Exception { final CopyOnWriteStateMap<Integer, Integer, ArrayList<Integer>> stateMap = new CopyOnWriteStateMap<>(new ArrayListSerializer<>(IntSerializer.INSTANCE)); final HashMap<Tuple2<Integer, Integer>, ArrayList<Integer>> referenceMap = new HashMap<>(); final Random random = new Random(42); // holds snapshots from the map under test CopyOnWriteStateMap.StateMapEntry<Integer, Integer, ArrayList<Integer>>[] snapshot = null; int snapshotSize = 0; // holds a reference snapshot from our reference map that we compare against Tuple3<Integer, Integer, ArrayList<Integer>>[] reference = null; int val = 0; int snapshotCounter = 0; int referencedSnapshotId = 0; final StateTransformationFunction<ArrayList<Integer>, Integer> transformationFunction = (previousState, value) -> { if (previousState == null) { previousState = new ArrayList<>(); } previousState.add(value); // we give back the original, attempting to spot errors in to copy-on-write return previousState; }; StateIncrementalVisitor<Integer, Integer, ArrayList<Integer>> updatingIterator = stateMap.getStateIncrementalVisitor(5); // the main loop for modifications for (int i = 0; i < 10_000_000; ++i) { int key = random.nextInt(20); int namespace = random.nextInt(4); Tuple2<Integer, Integer> compositeKey = new Tuple2<>(key, namespace); int op = random.nextInt(10); ArrayList<Integer> state = null; ArrayList<Integer> referenceState = null; switch (op) { case 0: case 1: { state = stateMap.get(key, namespace); referenceState = referenceMap.get(compositeKey); if (null == state) { state = new ArrayList<>(); stateMap.put(key, namespace, state); referenceState = new ArrayList<>(); referenceMap.put(compositeKey, referenceState); } break; } case 2: { stateMap.put(key, namespace, new ArrayList<>()); referenceMap.put(compositeKey, new ArrayList<>()); break; } case 3: { state = stateMap.putAndGetOld(key, namespace, new ArrayList<>()); referenceState = 
referenceMap.put(compositeKey, new ArrayList<>()); break; } case 4: { stateMap.remove(key, namespace); referenceMap.remove(compositeKey); break; } case 5: { state = stateMap.removeAndGetOld(key, namespace); referenceState = referenceMap.remove(compositeKey); break; } case 6: { final int updateValue = random.nextInt(1000); stateMap.transform(key, namespace, updateValue, transformationFunction); referenceMap.put( compositeKey, transformationFunction.apply( referenceMap.remove(compositeKey), updateValue)); break; } case 7: case 8: case 9: if (!updatingIterator.hasNext()) { updatingIterator = stateMap.getStateIncrementalVisitor(5); if (!updatingIterator.hasNext()) { break; } } testStateIteratorWithUpdate( updatingIterator, stateMap, referenceMap, op == 8, op == 9); break; default: { fail("Unknown op-code " + op); } } assertThat(stateMap).hasSize(referenceMap.size()); if (state != null) { assertThat(referenceState).isNotNull(); // mutate the states a bit... if (random.nextBoolean() && !state.isEmpty()) { state.remove(state.size() - 1); referenceState.remove(referenceState.size() - 1); } else { state.add(val); referenceState.add(val); ++val; } } assertThat(state).isEqualTo(referenceState); // snapshot triggering / comparison / release if (i > 0 && i % 500 == 0) { if (snapshot != null) { // check our referenced snapshot deepCheck(reference, convert(snapshot, snapshotSize)); if (i % 1_000 == 0) { // draw and release some other snapshot while holding on the old snapshot ++snapshotCounter; stateMap.snapshotMapArrays(); stateMap.releaseSnapshot(snapshotCounter); } // release the snapshot after some time if (i % 5_000 == 0) { snapshot = null; reference = null; snapshotSize = 0; stateMap.releaseSnapshot(referencedSnapshotId); } } else { // if there is no more referenced snapshot, we create one ++snapshotCounter; referencedSnapshotId = snapshotCounter; snapshot = stateMap.snapshotMapArrays(); snapshotSize = stateMap.size(); reference = manualDeepDump(referenceMap); } } } }
This test does some random modifications to a state map and a reference (hash map). Then draws snapshots, performs more modifications and checks snapshot integrity.
testRandomModificationsAndCopyOnWriteIsolation
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMapTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMapTest.java
Apache-2.0
private static void testStateIteratorWithUpdate( StateIncrementalVisitor<Integer, Integer, ArrayList<Integer>> updatingIterator, CopyOnWriteStateMap<Integer, Integer, ArrayList<Integer>> stateMap, HashMap<Tuple2<Integer, Integer>, ArrayList<Integer>> referenceMap, boolean update, boolean remove) { for (StateEntry<Integer, Integer, ArrayList<Integer>> stateEntry : updatingIterator.nextEntries()) { Integer key = stateEntry.getKey(); Integer namespace = stateEntry.getNamespace(); Tuple2<Integer, Integer> compositeKey = new Tuple2<>(key, namespace); assertThat(stateEntry.getState()).isEqualTo(referenceMap.get(compositeKey)); if (update) { ArrayList<Integer> newState = new ArrayList<>(stateEntry.getState()); if (!newState.isEmpty()) { newState.remove(0); } updatingIterator.update(stateEntry, newState); referenceMap.put(compositeKey, new ArrayList<>(newState)); assertThat(stateMap.get(key, namespace)).isEqualTo(newState); } if (remove) { updatingIterator.remove(stateEntry); referenceMap.remove(compositeKey); } } }
Test operations specific for StateIncrementalVisitor in {@code testRandomModificationsAndCopyOnWriteIsolation()}. <p>Check next, update and remove during global iteration of StateIncrementalVisitor.
testStateIteratorWithUpdate
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMapTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMapTest.java
Apache-2.0
@Test void testSerializerAfterMetaInfoChanged() { RegisteredKeyValueStateBackendMetaInfo<Integer, TestType> originalMetaInfo = new RegisteredKeyValueStateBackendMetaInfo<>( StateDescriptor.Type.VALUE, "test", IntSerializer.INSTANCE, new TestType.V1TestTypeSerializer()); InternalKeyContext<Integer> mockKeyContext = new InternalKeyContextImpl<>(KeyGroupRange.of(0, 9), 10); CopyOnWriteStateTable<Integer, Integer, TestType> table = new CopyOnWriteStateTable<>( mockKeyContext, originalMetaInfo, IntSerializer.INSTANCE); RegisteredKeyValueStateBackendMetaInfo<Integer, TestType> newMetaInfo = new RegisteredKeyValueStateBackendMetaInfo<>( StateDescriptor.Type.VALUE, "test", IntSerializer.INSTANCE, new TestType.V2TestTypeSerializer()); table.setMetaInfo(newMetaInfo); Preconditions.checkState(table.getState().length > 0); for (StateMap<?, ?, ?> stateEntries : table.getState()) { assertThat(((CopyOnWriteStateMap<?, ?, ?>) stateEntries).getStateSerializer()) .isEqualTo(table.getStateSerializer()); } }
This tests that Whether serializers are consistent between {@link StateTable} and {@link StateMap}.
testSerializerAfterMetaInfoChanged
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateTableTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateTableTest.java
Apache-2.0
@Test void testSerializerDuplicationInSnapshot() throws IOException { final TestDuplicateSerializer namespaceSerializer = new TestDuplicateSerializer(); final TestDuplicateSerializer stateSerializer = new TestDuplicateSerializer(); final TestDuplicateSerializer keySerializer = new TestDuplicateSerializer(); RegisteredKeyValueStateBackendMetaInfo<Integer, Integer> metaInfo = new RegisteredKeyValueStateBackendMetaInfo<>( StateDescriptor.Type.VALUE, "test", namespaceSerializer, stateSerializer); InternalKeyContext<Integer> mockKeyContext = new MockInternalKeyContext<>(); CopyOnWriteStateTable<Integer, Integer, Integer> table = new CopyOnWriteStateTable<>(mockKeyContext, metaInfo, keySerializer); table.put(0, 0, 0, 0); table.put(1, 0, 0, 1); table.put(2, 0, 1, 2); final CopyOnWriteStateTableSnapshot<Integer, Integer, Integer> snapshot = table.stateSnapshot(); final StateSnapshot.StateKeyGroupWriter partitionedSnapshot = snapshot.getKeyGroupWriter(); namespaceSerializer.disable(); keySerializer.disable(); stateSerializer.disable(); partitionedSnapshot.writeStateInKeyGroup( new DataOutputViewStreamWrapper(new ByteArrayOutputStreamWithPos(1024)), 0); }
This tests that serializers used for snapshots are duplicates of the ones used in processing to avoid race conditions in stateful serializers.
testSerializerDuplicationInSnapshot
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateTableTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateTableTest.java
Apache-2.0
@Test void testReleaseForSuccessfulSnapshot() throws IOException { int numberOfKeyGroups = 10; CopyOnWriteStateTable<Integer, Integer, Float> table = createStateTableForSnapshotRelease(numberOfKeyGroups); ByteArrayOutputStreamWithPos byteArrayOutputStreamWithPos = new ByteArrayOutputStreamWithPos(); DataOutputView dataOutputView = new DataOutputViewStreamWrapper(byteArrayOutputStreamWithPos); CopyOnWriteStateTableSnapshot<Integer, Integer, Float> snapshot = table.stateSnapshot(); for (int group = 0; group < numberOfKeyGroups; group++) { snapshot.writeStateInKeyGroup(dataOutputView, group); // resource used by one key group should be released after the snapshot is successful assertThat(isResourceReleasedForKeyGroup(table, group)).isTrue(); } snapshot.release(); verifyResourceIsReleasedForAllKeyGroup(table, 1); }
This tests that resource can be released for a successful snapshot.
testReleaseForSuccessfulSnapshot
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateTableTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateTableTest.java
Apache-2.0
@Test void testStorageLocationMkdirs() throws Exception { MemoryBackendCheckpointStorageAccess storage = new MemoryBackendCheckpointStorageAccess( new JobID(), new Path(randomTempPath(), "chk"), null, true, DEFAULT_MAX_STATE_SIZE); File baseDir = new File(storage.getCheckpointsDirectory().getPath()); assertThat(baseDir).doesNotExist(); // mkdirs only be called when initializeLocationForCheckpoint storage.initializeLocationForCheckpoint(177L); assertThat(baseDir).exists(); }
This test checks that the expected mkdirs action for checkpoint storage, only called when initializeLocationForCheckpoint.
testStorageLocationMkdirs
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/memory/MemoryCheckpointStorageAccessTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/memory/MemoryCheckpointStorageAccessTest.java
Apache-2.0
@Override public void mergeNamespaces(N target, Collection<N> sources) throws Exception { ACC acc = null; for (N n : sources) { setCurrentNamespace(n); ACC nAcc = getInternal(); acc = nAcc == null ? acc : (acc == null ? nAcc : mergeState(acc, nAcc)); } if (acc != null) { setCurrentNamespace(target); updateInternal(acc); } }
In memory mock internal merging state base class.
mergeNamespaces
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/ttl/mock/MockInternalMergingState.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/state/ttl/mock/MockInternalMergingState.java
Apache-2.0