code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/**
 * Builds the state-migration test specifications for the given Flink version.
 *
 * @param flinkVersion the historical Flink version whose serializer snapshot is migrated
 * @return a mutable list containing the single {@code BufferEntrySerializer} specification
 * @throws Exception if a specification cannot be instantiated
 */
public Collection<TestSpecification<?, ?>> createTestSpecifications(FlinkVersion flinkVersion)
        throws Exception {
    final ArrayList<TestSpecification<?, ?>> specifications = new ArrayList<>();
    specifications.add(
            new TestSpecification<>(
                    "buffer-entry-serializer",
                    flinkVersion,
                    BufferEntrySerializerSetup.class,
                    BufferEntrySerializerVerifier.class));
    return specifications;
}
State migration tests for {@link BufferEntrySerializer}.
createTestSpecifications
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/co/BufferEntrySerializerUpgradeTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/co/BufferEntrySerializerUpgradeTest.java
Apache-2.0
/**
 * Verifies iteration over the keyed state from the broadcast side of the operator.
 * Keyed elements ("test1" x2, "test2" x4, "test3" x3) are sent first to populate per-key
 * state; the single broadcast element then triggers
 * StatefulFunctionWithKeyedStateAccessedOnBroadcast#processBroadcastElement(), which
 * checks the accumulated state against {@code expectedState}.
 */
@Test void testAccessToKeyedStateIt() throws Exception { final List<String> test1content = new ArrayList<>(); test1content.add("test1"); test1content.add("test1"); final List<String> test2content = new ArrayList<>(); test2content.add("test2"); test2content.add("test2"); test2content.add("test2"); test2content.add("test2"); final List<String> test3content = new ArrayList<>(); test3content.add("test3"); test3content.add("test3"); test3content.add("test3"); final Map<String, List<String>> expectedState = new HashMap<>(); expectedState.put("test1", test1content); expectedState.put("test2", test2content); expectedState.put("test3", test3content); try (TwoInputStreamOperatorTestHarness<String, Integer, String> testHarness = getInitializedTestHarness( BasicTypeInfo.STRING_TYPE_INFO, new IdentityKeySelector<>(), new StatefulFunctionWithKeyedStateAccessedOnBroadcast(expectedState))) { // send elements to the keyed state testHarness.processElement1(new StreamRecord<>("test1", 12L)); testHarness.processElement1(new StreamRecord<>("test1", 12L)); testHarness.processElement1(new StreamRecord<>("test2", 13L)); testHarness.processElement1(new StreamRecord<>("test2", 13L)); testHarness.processElement1(new StreamRecord<>("test2", 13L)); testHarness.processElement1(new StreamRecord<>("test3", 14L)); testHarness.processElement1(new StreamRecord<>("test3", 14L)); testHarness.processElement1(new StreamRecord<>("test3", 14L)); testHarness.processElement1(new StreamRecord<>("test2", 13L)); // this is the element on the broadcast side that will trigger the verification // check the StatefulFunctionWithKeyedStateAccessedOnBroadcast#processBroadcastElement() testHarness.processElement2(new StreamRecord<>(1, 13L)); } }
Test the iteration over the keyed state on the broadcast side.
testAccessToKeyedStateIt
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/co/CoBroadcastWithKeyedOperatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/co/CoBroadcastWithKeyedOperatorTest.java
Apache-2.0
/**
 * Converts the integer record itself into its string form to be used as the key.
 *
 * @param value the record; its decimal string representation becomes the key
 * @return the string form of {@code value} ("null" for a null value, matching the
 *     original {@code "" + value} behavior)
 */
@Override
public String getKey(Integer value) throws Exception {
    // idiomatic replacement for the string-concatenation trick ("" + value);
    // String.valueOf(Object) yields "null" for null, exactly like concatenation
    return String.valueOf(value);
}
A key selector which converts an integer key to a string.
getKey
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/co/LegacyKeyedCoProcessOperatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/co/LegacyKeyedCoProcessOperatorTest.java
Apache-2.0
/**
 * Creates a {@code TestingSinkFunction} and registers it with the passed
 * {@link CollectSinkOperatorCoordinator}.
 *
 * @param coordinator the coordinator the new sink function registers itself with
 * @return the registered sink function
 * @throws Exception if creation or registration fails
 */
public static TestingSinkFunction createSinkFunctionAndInitializeCoordinator(
        CollectSinkOperatorCoordinator coordinator) throws Exception {
    final TestingSinkFunction sinkFunction = new TestingSinkFunction();
    sinkFunction.registerSinkFunctionWith(coordinator);
    return sinkFunction;
}
Creates a {@code TestingSinkFunction} and connects it with the passed {@link CollectSinkOperatorCoordinator}.
createSinkFunctionAndInitializeCoordinator
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
Apache-2.0
/**
 * Creates a {@code TestingSinkFunction} whose server-socket factory returns {@code null},
 * so no connection can ever be accepted; requests sent to this function block while
 * trying to connect.
 */
public static TestingSinkFunction createTestingSinkFunctionWithoutConnection() throws IOException { return new TestingSinkFunction(ignoredServerSocket -> null); }
Creates a {@code TestingSinkFunction} that doesn't listen on the configured address. Sending requests to this function will block forever when trying to connect to it.
createTestingSinkFunctionWithoutConnection
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
Apache-2.0
/** Returns the loopback address/port pair on which this sink function's server socket listens. */
private InetSocketAddress getSocketAddress() {
    final int localPort = serverSocket.getLocalPort();
    return new InetSocketAddress(InetAddress.getLoopbackAddress(), localPort);
}
Returns the {@link InetSocketAddress} of the {@code TestingSinkFunction}.
getSocketAddress
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
Apache-2.0
/**
 * Closes the established connection from the {@code SinkFunction}'s side, if one exists;
 * a no-op when no connection future is pending.
 */
public void closeAcceptingSocket() throws Exception {
    if (connectionFuture == null) {
        return;
    }
    connectionFuture.get().close();
    connectionFuture = null;
}
Closes the established connection from the {@code SinkFunction}'s side.
closeAcceptingSocket
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
Apache-2.0
/**
 * Blocks until the connection between the coordinator and this {@code SinkFunction} is
 * established, i.e. until a request was sent by the coordinator and a {@code handle*}
 * call was initiated on this instance.
 *
 * @throws ExecutionException if establishing the connection failed
 * @throws InterruptedException if the waiting thread is interrupted
 */
public void waitForConnectionToBeEstablished() throws ExecutionException, InterruptedException { getConnectionFuture().get(); }
Waits for the connection to be established between the {@code coordinator} and the {@code SinkFunction}. This method will block until a request is sent by the coordinator and a {@code handle*} call is initiated by this instance.
waitForConnectionToBeEstablished
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
Apache-2.0
/**
 * Synchronously handles a request with the given data, attaching the function's default
 * version and offset to the response.
 *
 * @param actualData the rows forwarded to the response
 */
public void handleRequest(List<Row> actualData) { handleRequest( DEFAULT_SINK_FUNCTION_RESPONSE_VERSION, DEFAULT_SINK_FUNCTION_RESPONSE_OFFSET, actualData); }
Handles a request with the given data in a synchronous fashion. The {@code TestingSinkFunction}'s default meta information is attached to the response. @see #DEFAULT_SINK_FUNCTION_RESPONSE_VERSION @see #DEFAULT_SINK_FUNCTION_RESPONSE_OFFSET
handleRequest
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
Apache-2.0
/**
 * Handles the next request synchronously by delegating to the async variant with an
 * already-completed data future and joining on the result.
 *
 * @param actualVersion the version attached to the response
 * @param actualOffset the offset attached to the response
 * @param actualData the rows forwarded to the response
 */
public void handleRequest(String actualVersion, int actualOffset, List<Row> actualData) { handleRequestAsync( actualVersion, actualOffset, CompletableFuture.completedFuture(actualData)) .join(); }
Handles the next request synchronously. The passed {@code actualData} will be forwarded to the response.
handleRequest
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
Apache-2.0
/**
 * Handles the request by serializing a {@link CollectCoordinationResponse} onto the
 * socket once both the connection is established and the data future completes. Fails
 * with an {@link IllegalStateException} wrapped in a {@link java.util.concurrent.CompletionException}
 * when no socket connection could be established; IOExceptions during serialization are
 * likewise rethrown as CompletionExceptions.
 *
 * @return future completing when the asynchronous processing on the sink side finished
 */
public CompletableFuture<Void> handleRequestAsync( String actualVersion, int actualOffset, CompletableFuture<List<Row>> actualDataAsync) { return internalConnectWithRequestHandlingAsync() .thenCombineAsync( actualDataAsync, (socketConnection, data) -> { if (socketConnection == null) { throw new CompletionException( new IllegalStateException( "No SocketConnection established.")); } try { // serialize generic response (only the data is relevant) new CollectCoordinationResponse( actualVersion, actualOffset, CollectTestUtils.toBytesList(data, serializer)) .serialize(socketConnection.getDataOutputView()); } catch (IOException e) { throw new CompletionException(e); } return null; }); }
Handles the request by sending a {@link CollectCoordinationResponse} via the socket. @return {@code CompletableFuture} that indicates whether the asynchronous processing on the {@code SinkFunction}'s side finished.
handleRequestAsync
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/CollectSinkOperatorCoordinatorTest.java
Apache-2.0
/**
 * Randomized single step of the checkpointed request handler: runs 1-3 iterations, each
 * of which (a) counts down and possibly completes an in-flight checkpoint snapshot,
 * then with 60% probability moves up to BATCH_SIZE*2 records from {@code data} into
 * {@code buffered} (finishing and closing when data runs out), with 30% probability
 * starts a new delayed checkpoint (snapshotting data/buffered/offset), and with 10%
 * probability simulates a failure: restores the last completed checkpoint, bumps the
 * version, and shuffles the restored data to model nondeterministic result order.
 * NOTE(review): the loop bound and probabilities are intentional randomness for the
 * test; statement order (checkpoint countdown before the random action) is significant.
 */
@Override protected void updateBufferedResults() { for (int i = random.nextInt(3) + 1; i > 0; i--) { if (checkpointCountDown > 0) { // countdown on-going checkpoint checkpointCountDown--; if (checkpointCountDown == 0) { // complete a checkpoint checkpointedData = checkpointingData; checkpointedBuffered = checkpointingBuffered; checkpointedOffset = checkpointingOffset; } } int r = random.nextInt(10); if (r < 6) { // with 60% chance we add data int size = Math.min(data.size(), BATCH_SIZE * 2 - buffered.size()); if (size > 0) { size = random.nextInt(size) + 1; } for (int j = 0; j < size; j++) { buffered.add(data.removeFirst()); } if (data.isEmpty()) { buildAccumulatorResults(); closed = true; break; } } else if (r < 9) { // with 30% chance we do a checkpoint completed in the future if (checkpointCountDown == 0) { checkpointCountDown = random.nextInt(5) + 1; checkpointingData = new ArrayList<>(data); checkpointingBuffered = new ArrayList<>(buffered); checkpointingOffset = offset; } } else { // with 10% chance we fail checkpointCountDown = 0; version = UUID.randomUUID().toString(); // we shuffle data to simulate jobs whose result order is undetermined Collections.shuffle(checkpointedData); data = new LinkedList<>(checkpointedData); buffered = new LinkedList<>(checkpointedBuffered); offset = checkpointedOffset; } } }
A {@link CoordinationRequestHandler} to test fetching SELECT query results. It will randomly do checkpoint or restart from checkpoint.
updateBufferedResults
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/utils/TestCheckpointedCoordinationRequestHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/utils/TestCheckpointedCoordinationRequestHandler.java
Apache-2.0
/**
 * Randomized single step of the uncheckpointed request handler: runs 1-3 iterations.
 * With 95% probability (or always once the fail budget is exhausted) it moves up to
 * BATCH_SIZE*2 records from {@code data} into {@code buffered}, finishing and closing
 * when data runs out. With 5% probability, and at most {@code failCount} times total,
 * it simulates a failure: resets data to a shuffled copy of {@code originalData},
 * clears the buffer, bumps the version, and rewinds the offset to 0 — i.e. no
 * checkpoint, all results are produced again after a failure.
 */
@Override protected void updateBufferedResults() { for (int i = random.nextInt(3) + 1; i > 0; i--) { int r = random.nextInt(20); if (r < 19 || failCount <= 0) { // with 95% chance we add data int size = Math.min(data.size(), BATCH_SIZE * 2 - buffered.size()); if (size > 0) { size = random.nextInt(size) + 1; } for (int j = 0; j < size; j++) { buffered.add(data.removeFirst()); } if (data.isEmpty()) { buildAccumulatorResults(); closed = true; break; } } else { // with 5% chance we fail, we fail at most `failCount` times failCount--; // we shuffle data to simulate jobs whose result order is undetermined data = new LinkedList<>(originalData); Collections.shuffle(data); buffered = new LinkedList<>(); version = UUID.randomUUID().toString(); offset = 0; } } }
A {@link CoordinationRequestHandler} to test fetching SELECT query results. It does not do checkpoint and will produce all results again when failure occurs.
updateBufferedResults
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/utils/TestUncheckpointedCoordinationRequestHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/collect/utils/TestUncheckpointedCoordinationRequestHandler.java
Apache-2.0
/**
 * Creates a {@code BatchExecutionKeyedStateBackend} spanning key groups [0, 9] with a
 * default execution config, keyed by the given serializer.
 *
 * @param keySerializer serializer for the backend's key type
 * @return the freshly created batch-execution keyed state backend
 */
private <K> CheckpointableKeyedStateBackend<K> createKeyedBackend(
        TypeSerializer<K> keySerializer) {
    final KeyGroupRange keyGroupRange = new KeyGroupRange(0, 9);
    return new BatchExecutionKeyedStateBackend<>(
            keySerializer, keyGroupRange, new ExecutionConfig());
}
Tests copied over from {@link StateBackendTestBase} and adjusted to make sense for a single key state backend. <p>Some of the tests in {@link StateBackendTestBase} do not make sense for {@link BatchExecutionKeyedStateBackend}, e.g. checkpointing tests, tests verifying methods used by the queryable state etc. Moreover the tests had to be adjusted as the state backend assumes keys are grouped.
createKeyedBackend
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/sorted/state/BatchExecutionStateBackendTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/sorted/state/BatchExecutionStateBackendTest.java
Apache-2.0
/**
 * Verifies that {@code ListState#update(List)} rejects a list containing a {@code null}
 * entry with a {@link NullPointerException}, keeping the batch backend consistent with
 * the other ListState implementations. The backend is closed and disposed in the
 * finally block since no try-with-resources is used here.
 */
@Test void testListStateUpdateNullEntries() throws Exception { CheckpointableKeyedStateBackend<String> keyedBackend = createKeyedBackend(StringSerializer.INSTANCE); final ListStateDescriptor<Long> stateDescr = new ListStateDescriptor<>("my-state", Long.class); try { ListState<Long> state = keyedBackend.getPartitionedState( VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, stateDescr); keyedBackend.setCurrentKey("abc"); assertThat(state.get()).isNull(); List<Long> adding = new ArrayList<>(); adding.add(3L); adding.add(null); adding.add(5L); assertThatThrownBy(() -> state.update(adding)).isInstanceOf(NullPointerException.class); } finally { keyedBackend.close(); keyedBackend.dispose(); } }
This test verifies that all ListState implementations are consistent in not allowing {@link ListState#update(List)} to be called with {@code null} entries in the list of entries to add.
testListStateUpdateNullEntries
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/sorted/state/BatchExecutionStateBackendTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/sorted/state/BatchExecutionStateBackendTest.java
Apache-2.0
/**
 * Verifies that {@code snapshot(...)} on the batch-execution keyed state backend throws
 * {@link UnsupportedOperationException} with the expected message, since snapshotting
 * is not supported in BATCH runtime mode.
 */
@Test void verifySnapshotNotSupported() { BatchExecutionKeyedStateBackend<Long> stateBackend = new BatchExecutionKeyedStateBackend<>( LONG_SERIALIZER, new KeyGroupRange(0, 9), new ExecutionConfig()); long checkpointId = 0L; CheckpointStreamFactory streamFactory = new MemCheckpointStreamFactory(10); assertThatThrownBy( () -> stateBackend.snapshot( checkpointId, 0L, streamFactory, CheckpointOptions.forCheckpointWithDefaultLocation())) .isInstanceOf(UnsupportedOperationException.class) .hasMessageContaining("Snapshotting is not supported in BATCH runtime mode."); }
Tests that verify an exception is thrown in methods that are not supported in the BATCH runtime mode.
verifySnapshotNotSupported
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/sorted/state/BatchExecutionStateBackendVerificationTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/sorted/state/BatchExecutionStateBackendVerificationTest.java
Apache-2.0
/**
 * Installs the data-skew channel selector on the writer builder. Broadcast mode is
 * rejected up front because broadcasting to all channels would make skewed channel
 * selection meaningless.
 */
@Override protected void setChannelSelector( RecordWriterBuilder recordWriterBuilder, boolean broadcastMode) { checkArgument(!broadcastMode, "Combining broadcasting with data skew doesn't make sense"); recordWriterBuilder.setChannelSelector(new DataSkewChannelSelector()); }
Network throughput benchmarks for data skew scenario executed by the external <a href="https://github.com/dataArtisans/flink-benchmarks">flink-benchmarks</a> project.
setChannelSelector
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/DataSkewStreamNetworkThroughputBenchmark.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/DataSkewStreamNetworkThroughputBenchmark.java
Apache-2.0
/** Factory hook: supplies the data-skew variant of the throughput benchmark under test. */
@Override protected StreamNetworkThroughputBenchmark createBenchmark() { return new DataSkewStreamNetworkThroughputBenchmark(); }
Tests for various network benchmarks based on {@link DataSkewStreamNetworkThroughputBenchmark}.
createBenchmark
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/DataSkewStreamNetworkThroughputBenchmarkTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/DataSkewStreamNetworkThroughputBenchmarkTest.java
Apache-2.0
/**
 * Stops the writer thread: clears the running flag and completes the records future
 * with 0 so that any thread blocked waiting on it returns immediately.
 */
public synchronized void shutdown() { running = false; recordsToSend.complete(0L); }
Future to wait on a definition of the number of records to send.
shutdown
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/LongRecordWriterThread.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/LongRecordWriterThread.java
Apache-2.0
/**
 * Drains deserialized records from the reader until the sentinel value
 * {@code lastExpectedRecord} has been observed the expected number of times, or until
 * the thread is no longer running / the reader is exhausted.
 */
@Override
protected void readRecords(long lastExpectedRecord) throws Exception {
    LOG.debug("readRecords(lastExpectedRecord = {})", lastExpectedRecord);
    final LongValue record = new LongValue();
    while (running && reader.next(record)) {
        // only occurrences of the sentinel value count towards the repetition target
        if (record.getValue() == lastExpectedRecord
                && ++expectedRecordCounter == expectedRepetitionsOfExpectedRecord) {
            break;
        }
    }
}
{@link ReceiverThread} that deserialize incoming messages.
readRecords
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/SerializingLongReceiver.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/SerializingLongReceiver.java
Apache-2.0
/**
 * Creates a benchmark-specific local or remote input channel depending on whether the
 * descriptor is local to this task executor. NOTE(review): both branches pass
 * {@code new ResultSubpartitionIndexSet(index)} (the channel index) instead of the
 * {@code subpartitionIndexSet} parameter — this appears deliberate here (the factory
 * exists to request specific subpartitions per channel), but confirm against the
 * benchmark's partition setup.
 */
@Override protected InputChannel createKnownInputChannel( SingleInputGate inputGate, int index, int buffersPerChannel, NettyShuffleDescriptor inputChannelDescriptor, ResultSubpartitionIndexSet subpartitionIndexSet, ChannelStatistics channelStatistics, InputChannelMetrics metrics) { ResultPartitionID partitionId = inputChannelDescriptor.getResultPartitionID(); if (inputChannelDescriptor.isLocalTo(taskExecutorResourceId)) { return new TestLocalInputChannel( inputGate, index, partitionId, new ResultSubpartitionIndexSet(index), partitionManager, taskEventPublisher, partitionRequestInitialBackoff, partitionRequestMaxBackoff, metrics); } else { return new TestRemoteInputChannel( inputGate, index, partitionId, new ResultSubpartitionIndexSet(index), inputChannelDescriptor.getConnectionId(), connectionManager, partitionRequestInitialBackoff, partitionRequestMaxBackoff, partitionRequestListenerTimeout, configuredNetworkBuffersPerChannel, metrics); } }
A benchmark-specific input gate factory which overrides the respective methods of creating {@link RemoteInputChannel} and {@link LocalInputChannel} for requesting specific subpartitions.
createKnownInputChannel
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/SingleInputGateBenchmarkFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/SingleInputGateBenchmarkFactory.java
Apache-2.0
/**
 * Convenience overload: sets up the benchmark environment with a default (empty)
 * {@code Configuration}, delegating to the full {@code setUp} variant.
 */
public void setUp( int writers, int channels, boolean localMode, int senderBufferPoolSize, int receiverBufferPoolSize) throws Exception { setUp( writers, channels, localMode, senderBufferPoolSize, receiverBufferPoolSize, new Configuration()); }
Context for network benchmarks executed by the external <a href="https://github.com/dataArtisans/flink-benchmarks">flink-benchmarks</a> project.
setUp
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/StreamNetworkBenchmarkEnvironment.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/StreamNetworkBenchmarkEnvironment.java
Apache-2.0
/**
 * Same as the base throughput-benchmark setUp but additionally enables broadcast mode:
 * the hard-coded {@code true} argument is presumably the broadcast flag of the full
 * setUp overload (per this class's documented purpose) — confirm against the delegate's
 * signature. A default (empty) {@code Configuration} is used.
 */
@Override public void setUp( int recordWriters, int channels, int flushTimeout, boolean localMode, int senderBufferPoolSize, int receiverBufferPoolSize) throws Exception { setUp( recordWriters, channels, flushTimeout, true, localMode, senderBufferPoolSize, receiverBufferPoolSize, new Configuration()); }
Same as {@link StreamNetworkThroughputBenchmark#setUp(int, int, int, boolean, int, int)} but also setups broadcast mode.
setUp
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/StreamNetworkBroadcastThroughputBenchmark.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/StreamNetworkBroadcastThroughputBenchmark.java
Apache-2.0
/** Factory hook: supplies the broadcast variant of the throughput benchmark under test. */
@Override protected StreamNetworkThroughputBenchmark createBenchmark() { return new StreamNetworkBroadcastThroughputBenchmark(); }
Tests for various network benchmarks based on {@link StreamNetworkBroadcastThroughputBenchmark}.
createBenchmark
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/StreamNetworkBroadcastThroughputBenchmarkTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/StreamNetworkBroadcastThroughputBenchmarkTest.java
Apache-2.0
/**
 * Shuts down a benchmark previously set up via {@code setUp}: tears down the network
 * environment first, then stops the receiver.
 */
public void tearDown() { environment.tearDown(); receiver.shutdown(); }
Shuts down a benchmark previously set up via {@link #setUp}.
tearDown
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/StreamNetworkPointToPointBenchmark.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/StreamNetworkPointToPointBenchmark.java
Apache-2.0
/**
 * Runs the benchmark for the given number of records with an effectively unbounded
 * timeout ({@code Long.MAX_VALUE} ms), delegating to the two-argument overload.
 */
public void executeBenchmark(long records) throws Exception { executeBenchmark(records, Long.MAX_VALUE); }
Network throughput benchmarks executed by the external <a href="https://github.com/dataArtisans/flink-benchmarks">flink-benchmarks</a> project.
executeBenchmark
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/StreamNetworkThroughputBenchmark.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/StreamNetworkThroughputBenchmark.java
Apache-2.0
/**
 * Shuts down a benchmark previously set up via {@code setUp}.
 *
 * <p>Each sender thread is stopped and awaited with a 5 second timeout (an exception is
 * raised on timeout); afterwards the network environment and the receiver are torn down.
 */
public void tearDown() throws Exception {
    for (LongRecordWriterThread writer : writerThreads) {
        writer.shutdown();
        // wait at most 5s for the sender to drain; throws on timeout
        writer.sync(5000);
    }
    environment.tearDown();
    receiver.shutdown();
}
Shuts down a benchmark previously set up via {@link #setUp}. <p>This will wait for all senders to finish but timeout with an exception after 5 seconds.
tearDown
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/StreamNetworkThroughputBenchmark.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/StreamNetworkThroughputBenchmark.java
Apache-2.0
/**
 * Validates barrier alignment across three input channels for four checkpoints: one with
 * interleaved data on several channels, one where all barriers arrive together, one with
 * data on a single channel only, and one empty checkpoint — followed by trailing data and
 * end-of-partition events. After each completed alignment the handler's next expected
 * checkpoint id advances and no channel remains blocked.
 */
@Test void testMultiChannelWithBarriers() throws Exception { BufferOrEvent[] sequence = { // checkpoint with data from multi channels createBuffer(0), createBuffer(2), createBuffer(0), createBarrier(1, 1), createBarrier(1, 2), createBuffer(0), createBarrier(1, 0), // another checkpoint createBuffer(0), createBuffer(0), createBuffer(1), createBuffer(1), createBuffer(2), createBarrier(2, 0), createBarrier(2, 1), createBarrier(2, 2), // checkpoint with data only from one channel createBuffer(2), createBuffer(2), createBarrier(3, 2), createBuffer(0), createBuffer(0), createBarrier(3, 0), createBarrier(3, 1), // empty checkpoint createBarrier(4, 1), createBarrier(4, 2), createBarrier(4, 0), // some trailing data createBuffer(0), createEndOfPartition(0), createEndOfPartition(1), createEndOfPartition(2) }; ValidatingCheckpointHandler handler = new ValidatingCheckpointHandler(); inputGate = createCheckpointedInputGate(3, sequence, handler); handler.setNextExpectedCheckpointId(1L); // pre checkpoint 1 check(sequence[0], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[1], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[2], inputGate.pollNext().get(), PAGE_SIZE); assertThat(handler.getNextExpectedCheckpointId()).isOne(); long startTs = System.nanoTime(); // checkpoint 1 done check(sequence[3], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[4], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[5], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[6], inputGate.pollNext().get(), PAGE_SIZE); assertThat(handler.getNextExpectedCheckpointId()).isEqualTo(2L); validateAlignmentTime(startTs, inputGate); assertThat(mockInputGate.getBlockedChannels()).isEmpty(); // pre checkpoint 2 check(sequence[7], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[8], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[9], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[10], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[11], 
inputGate.pollNext().get(), PAGE_SIZE); assertThat(handler.getNextExpectedCheckpointId()).isEqualTo(2L); // checkpoint 2 barriers come together startTs = System.nanoTime(); check(sequence[12], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[13], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[14], inputGate.pollNext().get(), PAGE_SIZE); assertThat(handler.getNextExpectedCheckpointId()).isEqualTo(3L); validateAlignmentTime(startTs, inputGate); assertThat(mockInputGate.getBlockedChannels()).isEmpty(); check(sequence[15], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[16], inputGate.pollNext().get(), PAGE_SIZE); // checkpoint 3 check(sequence[17], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[18], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[19], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[20], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[21], inputGate.pollNext().get(), PAGE_SIZE); assertThat(handler.getNextExpectedCheckpointId()).isEqualTo(4L); assertThat(mockInputGate.getBlockedChannels()).isEmpty(); // checkpoint 4 check(sequence[22], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[23], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[24], inputGate.pollNext().get(), PAGE_SIZE); assertThat(handler.getNextExpectedCheckpointId()).isEqualTo(5L); assertThat(mockInputGate.getBlockedChannels()).isEmpty(); // remaining data check(sequence[25], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[26], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[27], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[28], inputGate.pollNext().get(), PAGE_SIZE); }
Validates that the buffer correctly aligns the streams for inputs with multiple input channels.
testMultiChannelWithBarriers
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/checkpointing/AlignedCheckpointsTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/checkpointing/AlignedCheckpointsTest.java
Apache-2.0
/**
 * Tests that a barrier arriving for an already-canceled checkpoint does not cancel it a
 * second time: checkpoint 1 is canceled by a cancellation barrier on channel 0, its late
 * barrier on channel 2 is ignored, and checkpoint 2 subsequently completes normally.
 * Exactly one checkpoint is triggered and one aborted overall.
 */
@Test void testAbortOnCanceledBarriers() throws Exception { BufferOrEvent[] sequence = { // starting a checkpoint /* 0 */ createBuffer(1), /* 1 */ createBarrier(1, 1), /* 2 */ createBuffer(2), createBuffer(0), // cancel the initial checkpoint /* 4 */ createCancellationBarrier(1, 0), // receiving a buffer /* 5 */ createBuffer(1), // starting a new checkpoint /* 6 */ createBarrier(2, 1), // some more buffers /* 7 */ createBuffer(2), createBuffer(0), // ignored barrier - already canceled and moved to next checkpoint /* 9 */ createBarrier(1, 2), // some more buffers /* 10 */ createBuffer(0), createBuffer(2), // complete next checkpoint regularly /* 12 */ createBarrier(2, 0), createBarrier(2, 2), // some more buffers /* 14 */ createBuffer(0), createBuffer(1), createBuffer(2) }; ValidatingCheckpointHandler toNotify = new ValidatingCheckpointHandler(); inputGate = createCheckpointedInputGate(3, sequence, toNotify); long startTs; check(sequence[0], inputGate.pollNext().get(), PAGE_SIZE); // starting first checkpoint check(sequence[1], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[2], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[3], inputGate.pollNext().get(), PAGE_SIZE); // cancelled by cancellation barrier check(sequence[4], inputGate.pollNext().get(), 0); check(sequence[5], inputGate.pollNext().get(), PAGE_SIZE); assertThat(toNotify.getLastCanceledCheckpointId()).isOne(); assertThat(toNotify.getCheckpointFailureReason()) .isEqualTo(CheckpointFailureReason.CHECKPOINT_DECLINED_ON_CANCELLATION_BARRIER); assertThat(mockInputGate.getBlockedChannels()).isEmpty(); // the next checkpoint alignment startTs = System.nanoTime(); check(sequence[6], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[7], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[8], inputGate.pollNext().get(), PAGE_SIZE); // ignored barrier and unblock channel directly check(sequence[9], inputGate.pollNext().get(), PAGE_SIZE); 
assertThat(mockInputGate.getBlockedChannels()).containsExactly(1); check(sequence[10], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[11], inputGate.pollNext().get(), PAGE_SIZE); // checkpoint 2 done toNotify.setNextExpectedCheckpointId(2); check(sequence[12], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[13], inputGate.pollNext().get(), PAGE_SIZE); validateAlignmentTime(startTs, inputGate); assertThat(mockInputGate.getBlockedChannels()).isEmpty(); // trailing data check(sequence[14], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[15], inputGate.pollNext().get(), PAGE_SIZE); check(sequence[16], inputGate.pollNext().get(), PAGE_SIZE); assertThat(toNotify.getTriggeredCheckpointCounter()).isOne(); assertThat(toNotify.getAbortedCheckpointCounter()).isOne(); }
This tests where a checkpoint barriers meets a canceled checkpoint. <p>The newer checkpoint barrier must not try to cancel the already canceled checkpoint.
testAbortOnCanceledBarriers
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/checkpointing/AlignedCheckpointsTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/checkpointing/AlignedCheckpointsTest.java
Apache-2.0
/**
 * Verifies that when a new (unaligned) barrier subsumes a previous savepoint, channels
 * are notified so that in-flight buffers are captured by the channel state writer —
 * regardless of whether the unaligned-checkpoint controller was active beforehand.
 * Asserts that the recording state writer saw added input afterwards.
 */
@Test void testChannelResetOnNewBarrier() throws Exception { RecordingChannelStateWriter stateWriter = new RecordingChannelStateWriter(); try (CheckpointedInputGate gate = new TestCheckpointedInputGateBuilder( 2, getTestBarrierHandlerFactory(new ValidatingCheckpointHandler())) .withChannelStateWriter(stateWriter) .withRemoteChannels() .withMailboxExecutor() .build()) { sendBarrier( 0, clock.relativeTimeMillis(), SavepointType.savepoint(SavepointFormatType.CANONICAL), gate, 0); // using AC because UC would require ordering in gate while polling ((RemoteInputChannel) gate.getChannel(0)) .onBuffer(createBuffer(1024), 1, 0, 0); // to be captured send( toBuffer( new CheckpointBarrier( 1, clock.relativeTimeMillis(), unaligned(CheckpointType.CHECKPOINT, getDefault())), true), 1, gate); assertThat(stateWriter.getAddedInput().isEmpty()).isFalse(); } }
Upon subsuming (or canceling) a checkpoint, channels should be notified regardless of whether UC controller is currently being used or not. Otherwise, channels may not capture in-flight buffers.
testChannelResetOnNewBarrier
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/checkpointing/AlternatingCheckpointsTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/checkpointing/AlternatingCheckpointsTest.java
Apache-2.0
private void testPersisting(boolean drainGate) throws Exception { int numberOfChannels = 3; NetworkBufferPool bufferPool = new NetworkBufferPool(numberOfChannels * 3, 1024); try { long checkpointId = 2L; long obsoleteCheckpointId = 1L; ValidatingCheckpointHandler validatingHandler = new ValidatingCheckpointHandler(checkpointId); RecordingChannelStateWriter stateWriter = new RecordingChannelStateWriter(); CheckpointedInputGate gate = setupInputGateWithAlternatingController( numberOfChannels, bufferPool, validatingHandler, stateWriter); // enqueue first checkpointId before obsoleteCheckpointId, so that we never trigger // and also never cancel the obsoleteCheckpointId enqueue(gate, 0, buildSomeBuffer()); enqueue(gate, 0, barrier(checkpointId)); enqueue(gate, 0, buildSomeBuffer()); enqueue(gate, 1, buildSomeBuffer()); enqueue(gate, 1, barrier(obsoleteCheckpointId)); enqueue(gate, 1, buildSomeBuffer()); enqueue(gate, 2, buildSomeBuffer()); assertThat(validatingHandler.getTriggeredCheckpointCounter()).isZero(); // trigger checkpoint gate.pollNext(); assertThat(validatingHandler.getTriggeredCheckpointCounter()).isOne(); assertAddedInputSize(stateWriter, 0, 1); assertAddedInputSize(stateWriter, 1, 2); assertAddedInputSize(stateWriter, 2, 1); enqueue(gate, 0, buildSomeBuffer()); enqueue(gate, 1, buildSomeBuffer()); enqueue(gate, 2, buildSomeBuffer()); while (drainGate && gate.pollNext().isPresent()) {} assertAddedInputSize(stateWriter, 0, 1); assertAddedInputSize(stateWriter, 1, 3); assertAddedInputSize(stateWriter, 2, 2); enqueue(gate, 1, barrier(checkpointId)); enqueue(gate, 1, buildSomeBuffer()); // Another obsoleted barrier that should be ignored enqueue(gate, 2, barrier(obsoleteCheckpointId)); enqueue(gate, 2, buildSomeBuffer()); while (drainGate && gate.pollNext().isPresent()) {} assertAddedInputSize(stateWriter, 0, 1); assertAddedInputSize(stateWriter, 1, 3); assertAddedInputSize(stateWriter, 2, 3); enqueue(gate, 2, barrier(checkpointId)); enqueue(gate, 2, 
buildSomeBuffer()); while (drainGate && gate.pollNext().isPresent()) {} assertAddedInputSize(stateWriter, 0, 1); assertAddedInputSize(stateWriter, 1, 3); assertAddedInputSize(stateWriter, 2, 3); } finally { bufferPool.destroy(); } }
This tests a scenario where an older triggered checkpoint was cancelled and a newer checkpoint was triggered very quickly after the cancellation. It can happen that a task can receive first the more recent checkpoint barrier and later the obsoleted one. This can happen for many reasons (for example Source tasks not running, or just a race condition with notifyCheckpointAborted RPCs) and Task should be able to handle this properly. In FLINK-21104 the problem was that this obsoleted checkpoint barrier was causing a checkState to fail.
testPersisting
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/checkpointing/CheckpointedInputGateTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/checkpointing/CheckpointedInputGateTest.java
Apache-2.0
/**
 * Mixes buffers from 4 different virtual channels (a 2x2 upscale mapping) and verifies that
 * each {@link SubtaskConnectionDescriptor} is demultiplexed into its own deserializer stream.
 */
@Test
void testUpscale() throws IOException {
    DemultiplexingRecordDeserializer<Long> deserializer =
            DemultiplexingRecordDeserializer.create(
                    new InputChannelInfo(2, 0),
                    rescalingDescriptor(
                            to(0, 1),
                            array(mappings(), mappings(), mappings(to(2, 3), to(4, 5))),
                            emptySet()),
                    unused ->
                            new SpillingAdaptiveSpanningRecordDeserializer<>(
                                    ioManager.getSpillingDirectoriesPaths()),
                    unused -> RecordFilter.all());

    assertThat(deserializer.getVirtualChannelSelectors())
            .containsOnly(
                    new SubtaskConnectionDescriptor(0, 2),
                    new SubtaskConnectionDescriptor(0, 3),
                    new SubtaskConnectionDescriptor(1, 2),
                    new SubtaskConnectionDescriptor(1, 3));

    for (int attempt = 0; attempt < 100; attempt++) {
        SubtaskConnectionDescriptor selector =
                Iterables.get(deserializer.getVirtualChannelSelectors(), random.nextInt(4));
        // Encode the channel pair into the payload so every channel carries distinct values.
        long base = selector.getInputSubtaskIndex() << 4 | selector.getOutputSubtaskIndex();

        MemorySegment segment = allocateUnpooledSegment(128);
        try (BufferBuilder bufferBuilder = createBufferBuilder(segment)) {
            Buffer buffer = writeLongs(bufferBuilder, base + 1L, base + 2L, base + 3L);
            deserializer.select(selector);
            deserializer.setNextBuffer(buffer);
        }
        assertThat(readLongs(deserializer)).containsExactly(base + 1L, base + 2L, base + 3L);
        assertThat(segment.isFreed()).isTrue();
    }
}
Tests {@link SubtaskConnectionDescriptor} by mixing buffers from 4 different virtual channels.
testUpscale
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/recovery/DemultiplexingRecordDeserializerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/recovery/DemultiplexingRecordDeserializerTest.java
Apache-2.0
@Test void testAmbiguousChannels() throws IOException { DemultiplexingRecordDeserializer<Long> deserializer = DemultiplexingRecordDeserializer.create( new InputChannelInfo(1, 0), rescalingDescriptor( to(41, 42), array(mappings(), mappings(to(2, 3), to(4, 5))), set(42)), unused -> new SpillingAdaptiveSpanningRecordDeserializer<>( ioManager.getSpillingDirectoriesPaths()), unused -> new RecordFilter(new ModSelector(2), LongSerializer.INSTANCE, 1)); assertThat(deserializer.getVirtualChannelSelectors()) .containsOnly( new SubtaskConnectionDescriptor(41, 2), new SubtaskConnectionDescriptor(41, 3), new SubtaskConnectionDescriptor(42, 2), new SubtaskConnectionDescriptor(42, 3)); for (int i = 0; i < 100; i++) { MemorySegment memorySegment = allocateUnpooledSegment(128); try (BufferBuilder bufferBuilder = createBufferBuilder(memorySegment)) { // add one even and one odd number Buffer buffer = writeLongs(bufferBuilder, i, i + 1L); SubtaskConnectionDescriptor selector = Iterables.get(deserializer.getVirtualChannelSelectors(), i / 10 % 2); deserializer.select(selector); deserializer.setNextBuffer(buffer); if (selector.getInputSubtaskIndex() == 41) { assertThat(readLongs(deserializer)).containsExactly((long) i, i + 1L); } else { // only odd should occur in output assertThat(readLongs(deserializer)).containsExactly(i / 2 * 2 + 1L); } } assertThat(memorySegment.isFreed()).isTrue(); } }
Tests that {@link RecordFilter} are used correctly.
testAmbiguousChannels
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/recovery/DemultiplexingRecordDeserializerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/recovery/DemultiplexingRecordDeserializerTest.java
Apache-2.0
@Test void testReaderScalingDown() throws Exception { HarnessWithFormat[] beforeRescale = {}; try { beforeRescale = buildAndStart(5, 15); try (HarnessWithFormat afterRescale = buildAndStart(1, 0, 5, snapshotAndMergeState(beforeRescale))) { afterRescale.awaitEverythingProcessed(); for (HarnessWithFormat i : beforeRescale) { i.getHarness() .getOutput() .clear(); // we only want output from the 2nd chunk (after the // "checkpoint") i.awaitEverythingProcessed(); } assertThat(collectOutput(afterRescale)).isEqualTo(collectOutput(beforeRescale)); } } finally { for (HarnessWithFormat harness : beforeRescale) { harness.close(); } } }
Simulates the scenario of scaling down from 2 to 1 instances.
testReaderScalingDown
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/ContinuousFileProcessingRescalingTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/ContinuousFileProcessingRescalingTest.java
Apache-2.0
/**
 * Feeds an integer into the trigger for the given window, using the integer value itself as
 * the {@link StreamRecord} timestamp.
 */
public TriggerResult processElement(Integer element, W window) throws Exception {
    // The element value doubles as the record's event timestamp.
    StreamRecord<Integer> record = new StreamRecord<>(element, element);
    return super.processElement(record, window);
}
Simple {@link TriggerTestHarness} that accepts integers and takes the value as the timestamp for the {@link StreamRecord}.
processElement
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/SimpleTriggerTestHarness.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/SimpleTriggerTestHarness.java
Apache-2.0
/**
 * Injects one element into the trigger for the given window and returns the result of {@link
 * Trigger#onElement(Object, long, Window, Trigger.TriggerContext)}.
 */
public TriggerResult processElement(StreamRecord<T> element, W window) throws Exception {
    TestTriggerContext<Integer, W> context =
            new TestTriggerContext<>(
                    KEY, window, internalTimerService, stateBackend, windowSerializer);
    return trigger.onElement(element.getValue(), element.getTimestamp(), window, context);
}
Injects one element into the trigger for the given window and returns the result of {@link Trigger#onElement(Object, long, Window, Trigger.TriggerContext)}.
processElement
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/TriggerTestHarness.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/TriggerTestHarness.java
Apache-2.0
/**
 * Advances processing time and verifies that exactly one timer fired, for exactly the given
 * window; returns the {@link TriggerResult} of that firing.
 *
 * @throws IllegalStateException if the number of firings is not one or the firing belongs to
 *     a different window
 */
public TriggerResult advanceProcessingTime(long time, W window) throws Exception {
    Collection<Tuple2<W, TriggerResult>> firings = advanceProcessingTime(time);

    if (firings.size() != 1) {
        throw new IllegalStateException(
                "Must have exactly one timer firing. Fired timers: " + firings);
    }

    Tuple2<W, TriggerResult> onlyFiring = firings.iterator().next();
    if (!onlyFiring.f0.equals(window)) {
        throw new IllegalStateException("Trigger fired for another window.");
    }
    return onlyFiring.f1;
}
Advances processing time and checks whether we have exactly one firing for the given window. The result of {@link Trigger#onProcessingTime(long, Window, Trigger.TriggerContext)} is returned for that firing.
advanceProcessingTime
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/TriggerTestHarness.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/TriggerTestHarness.java
Apache-2.0
/**
 * Advances the watermark and verifies that exactly one timer fired, for exactly the given
 * window; returns the {@link TriggerResult} of that firing.
 *
 * @throws IllegalStateException if the number of firings is not one or the firing belongs to
 *     a different window
 */
public TriggerResult advanceWatermark(long time, W window) throws Exception {
    Collection<Tuple2<W, TriggerResult>> firings = advanceWatermark(time);

    if (firings.size() != 1) {
        throw new IllegalStateException(
                "Must have exactly one timer firing. Fired timers: " + firings);
    }

    Tuple2<W, TriggerResult> onlyFiring = firings.iterator().next();
    if (!onlyFiring.f0.equals(window)) {
        throw new IllegalStateException("Trigger fired for another window.");
    }
    return onlyFiring.f1;
}
Advances the watermark and checks whether we have exactly one firing for the given window. The result of {@link Trigger#onEventTime(long, Window, Trigger.TriggerContext)} is returned for that firing.
advanceWatermark
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/TriggerTestHarness.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/TriggerTestHarness.java
Apache-2.0
/**
 * Advances processing time and processes every timer that fires as a result, returning the
 * window and {@link TriggerResult} of each firing.
 */
public Collection<Tuple2<W, TriggerResult>> advanceProcessingTime(long time) throws Exception {
    Collection<TestInternalTimerService.Timer<Integer, W>> firedTimers =
            internalTimerService.advanceProcessingTime(time);

    Collection<Tuple2<W, TriggerResult>> results = new ArrayList<>();
    for (TestInternalTimerService.Timer<Integer, W> firedTimer : firedTimers) {
        // Each firing gets its own context scoped to the timer's window (namespace).
        TestTriggerContext<Integer, W> context =
                new TestTriggerContext<>(
                        KEY,
                        firedTimer.getNamespace(),
                        internalTimerService,
                        stateBackend,
                        windowSerializer);
        TriggerResult triggerResult =
                trigger.onProcessingTime(
                        firedTimer.getTimestamp(), firedTimer.getNamespace(), context);
        results.add(new Tuple2<>(firedTimer.getNamespace(), triggerResult));
    }
    return results;
}
Advances processing time and processes any timers that fire because of this. The window and {@link TriggerResult} for each firing are returned.
advanceProcessingTime
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/TriggerTestHarness.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/TriggerTestHarness.java
Apache-2.0
/**
 * Manually invokes {@link Trigger#onEventTime(long, Window, Trigger.TriggerContext)} with the
 * given timestamp and window.
 */
public TriggerResult invokeOnEventTime(long timestamp, W window) throws Exception {
    return invokeOnEventTime(new TestInternalTimerService.Timer<>(timestamp, KEY, window));
}
Manually invoke {@link Trigger#onEventTime(long, Window, Trigger.TriggerContext)} with the given parameters.
invokeOnEventTime
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/TriggerTestHarness.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/TriggerTestHarness.java
Apache-2.0
/**
 * Calls {@link Trigger#onMerge(Window, Trigger.OnMergeContext)} for the given target window
 * and merged windows. Mirroring {@link WindowOperator}, this also clears the trigger state of
 * every window that was merged away.
 */
public void mergeWindows(W targetWindow, Collection<W> mergedWindows) throws Exception {
    TestOnMergeContext<Integer, W> mergeContext =
            new TestOnMergeContext<>(
                    KEY,
                    targetWindow,
                    mergedWindows,
                    internalTimerService,
                    stateBackend,
                    windowSerializer);
    trigger.onMerge(targetWindow, mergeContext);

    for (W merged : mergedWindows) {
        clearTriggerState(merged);
    }
}
Calls {@link Trigger#onMerge(Window, Trigger.OnMergeContext)} with the given parameters. This also calls {@link Trigger#clear(Window, Trigger.TriggerContext)} on the merged windows as does {@link WindowOperator}.
mergeWindows
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/TriggerTestHarness.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/TriggerTestHarness.java
Apache-2.0
/**
 * Builds the upgrade-test specifications for {@link TimeWindow.Serializer} and {@link
 * GlobalWindow.Serializer} against the given Flink version.
 */
public Collection<TestSpecification<?, ?>> createTestSpecifications(FlinkVersion flinkVersion)
        throws Exception {
    ArrayList<TestSpecification<?, ?>> specifications = new ArrayList<>();
    specifications.add(
            new TestSpecification<>(
                    "time-window-serializer",
                    flinkVersion,
                    TimeWindowSerializerSetup.class,
                    TimeWindowSerializerVerifier.class));
    specifications.add(
            new TestSpecification<>(
                    "global-window-serializer",
                    flinkVersion,
                    GlobalWindowSerializerSetup.class,
                    GlobalWindowSerializerVerifier.class));
    return specifications;
}
A {@link TypeSerializerUpgradeTestBase} for {@link TimeWindow.Serializer} and {@link GlobalWindow.Serializer}.
createTestSpecifications
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowSerializerUpgradeTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowSerializerUpgradeTest.java
Apache-2.0
/** Marks the head config as the start of the operator chain with the given operator id. */
private void head(OperatorID headOperatorID) {
    headConfig.setOperatorID(headOperatorID);
    headConfig.setChainStart();
    headConfig.setChainIndex(chainIndex);
}
Helper class to build StreamConfig for chain of operators.
head
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamConfigChainer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamConfigChainer.java
Apache-2.0
@Test void testConcurrentAsyncCheckpointCannotFailFinishedStreamTask() throws Exception { final Configuration taskConfiguration = new Configuration(); final StreamConfig streamConfig = new StreamConfig(taskConfiguration); final NoOpStreamOperator<Long> noOpStreamOperator = new NoOpStreamOperator<>(); final StateBackend blockingStateBackend = new BlockingStateBackend(); streamConfig.setStreamOperator(noOpStreamOperator); streamConfig.setOperatorID(new OperatorID()); streamConfig.setStateBackend(blockingStateBackend); streamConfig.serializeAllConfigs(); final long checkpointId = 0L; final long checkpointTimestamp = 0L; final JobInformation jobInformation = new JobInformation( new JobID(), JobType.STREAMING, "Test Job", new SerializedValue<>(new ExecutionConfig()), new Configuration(), Collections.emptyList(), Collections.emptyList()); final TaskInformation taskInformation = new TaskInformation( new JobVertexID(), "Test Task", 1, 1, BlockingStreamTask.class.getName(), taskConfiguration); final TaskManagerRuntimeInfo taskManagerRuntimeInfo = new TestingTaskManagerRuntimeInfo(); final ShuffleEnvironment<?, ?> shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build(); final Task task = new Task( jobInformation, taskInformation, createExecutionAttemptId(taskInformation.getJobVertexId()), new AllocationID(), Collections.<ResultPartitionDeploymentDescriptor>emptyList(), Collections.<InputGateDeploymentDescriptor>emptyList(), MemoryManagerBuilder.newBuilder().setMemorySize(32L * 1024L).build(), new SharedResources(), new IOManagerAsync(), shuffleEnvironment, new KvStateService(new KvStateRegistry(), null, null), mock(BroadcastVariableManager.class), new TaskEventDispatcher(), ExternalResourceInfoProvider.NO_EXTERNAL_RESOURCES, new TestTaskStateManager(), mock(TaskManagerActions.class), mock(InputSplitProvider.class), mock(CheckpointResponder.class), new NoOpTaskOperatorEventGateway(), new TestGlobalAggregateManager(), TestingClassLoaderLease.newBuilder().build(), 
mock(FileCache.class), taskManagerRuntimeInfo, UnregisteredMetricGroups.createUnregisteredTaskMetricGroup(), mock(PartitionProducerStateChecker.class), Executors.directExecutor(), new ChannelStateWriteRequestExecutorFactory(jobInformation.getJobId())); CompletableFuture<Void> taskRun = CompletableFuture.runAsync(() -> task.run(), EXECUTOR_RESOURCE.getExecutor()); // wait until the stream task started running RUN_LATCH.await(); // trigger a checkpoint task.triggerCheckpointBarrier( checkpointId, checkpointTimestamp, CheckpointOptions.forCheckpointWithDefaultLocation()); // wait until the task has completed execution taskRun.get(); // check that no failure occurred if (task.getFailureCause() != null) { throw new Exception("Task failed", task.getFailureCause()); } // check that we have entered the finished state assertThat(task.getExecutionState()).isEqualTo(ExecutionState.FINISHED); }
FLINK-6833 <p>Tests that a finished stream task cannot be failed by an asynchronous checkpointing operation after the stream task has stopped running.
testConcurrentAsyncCheckpointCannotFailFinishedStreamTask
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTerminationTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTerminationTest.java
Apache-2.0
/**
 * Tests that the cached thread pool used by the {@link Task} allows synchronous checkpoints
 * to complete successfully.
 */
@Test
void taskDispatcherThreadPoolAllowsForSynchronousCheckpoints() throws Exception {
    final Task task = createTask(SynchronousCheckpointTestingTask.class);

    try (TaskCleaner ignored = new TaskCleaner(task)) {
        task.startTaskThread();

        assertThat(EVENT_QUEUE.take()).isEqualTo(Event.TASK_IS_RUNNING);
        assertThat(EVENT_QUEUE).isEmpty();
        assertThat(task.getExecutionState()).isEqualTo(ExecutionState.RUNNING);

        task.triggerCheckpointBarrier(
                42,
                156865867234L,
                new CheckpointOptions(
                        SavepointType.suspend(SavepointFormatType.CANONICAL),
                        CheckpointStorageLocationReference.getDefault()));

        assertThat(EVENT_QUEUE.take()).isEqualTo(Event.PRE_TRIGGER_CHECKPOINT);
        assertThat(EVENT_QUEUE.take()).isEqualTo(Event.POST_TRIGGER_CHECKPOINT);
        assertThat(EVENT_QUEUE).isEmpty();

        task.notifyCheckpointComplete(42);

        assertThat(EVENT_QUEUE.take()).isEqualTo(Event.PRE_NOTIFY_CHECKPOINT_COMPLETE);
        assertThat(EVENT_QUEUE.take()).isEqualTo(Event.POST_NOTIFY_CHECKPOINT_COMPLETE);
        assertThat(EVENT_QUEUE).isEmpty();
        assertThat(task.getExecutionState()).isEqualTo(ExecutionState.RUNNING);
    }
}
Tests that the cached thread pool used by the {@link Task} allows synchronous checkpoints to complete successfully.
taskDispatcherThreadPoolAllowsForSynchronousCheckpoints
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/SynchronousCheckpointITCase.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/SynchronousCheckpointITCase.java
Apache-2.0
/**
 * Tests that {@code SystemProcessingTimeService#scheduleAtFixedRate} is actually triggered
 * multiple times (here: three times) without the callback raising an error.
 */
@Timeout(value = 10000, unit = TimeUnit.MILLISECONDS)
@Test
void testScheduleAtFixedRate() throws Exception {
    final AtomicReference<Throwable> errorRef = new AtomicReference<>();
    final long period = 10L;
    final int expectedFirings = 3;

    final SystemProcessingTimeService timer = createSystemProcessingTimeService(errorRef);
    final CountDownLatch latch = new CountDownLatch(expectedFirings);
    try {
        timer.scheduleAtFixedRate(timestamp -> latch.countDown(), 0L, period);
        // Blocks until the callback fired `expectedFirings` times.
        latch.await();
        assertThat(errorRef.get()).isNull();
    } finally {
        timer.shutdownService();
    }
}
Tests that SystemProcessingTimeService#scheduleAtFixedRate is actually triggered multiple times.
testScheduleAtFixedRate
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/SystemProcessingTimeServiceTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/SystemProcessingTimeServiceTest.java
Apache-2.0
/**
 * Executes a single step: one pending mail if available (returns {@code true}), otherwise one
 * invocation of the default action (returns {@code false}).
 */
@Override
public boolean runMailboxStep() throws Exception {
    assert mailbox.getState() == TaskMailbox.State.OPEN : "Mailbox must be opened!";

    if (isMailboxLoopRunning()) {
        Optional<Mail> mail = mailbox.tryTake(MIN_PRIORITY);
        if (mail.isPresent()) {
            mail.get().run();
            return true;
        }
    }

    // No mail to process — run the default action exactly once.
    mailboxDefaultAction.runDefaultAction(new MailboxController(this));
    return false;
}
A {@link MailboxProcessor} that allows to execute one mail at a time.
runMailboxStep
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/mailbox/SteppingMailboxProcessor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/mailbox/SteppingMailboxProcessor.java
Apache-2.0
/** Tests the producer-consumer pattern using the blocking {@code take} method on the mailbox. */
@Test
void testConcurrentPutTakeBlocking() throws Exception {
    testPutTake(mb -> mb.take(DEFAULT_PRIORITY));
}
Test the producer-consumer pattern using the blocking methods on the mailbox.
testConcurrentPutTakeBlocking
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/mailbox/TaskMailboxImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/mailbox/TaskMailboxImplTest.java
Apache-2.0
/**
 * Tests the producer-consumer pattern using the non-blocking {@code tryTake} method, spinning
 * until a mail becomes available.
 */
@Test
void testConcurrentPutTakeNonBlockingAndWait() throws Exception {
    testPutTake(
            mailbox -> {
                Optional<Mail> mail;
                do {
                    mail = mailbox.tryTake(DEFAULT_PRIORITY);
                } while (!mail.isPresent());
                return mail.get();
            });
}
Test the producer-consumer pattern using the non-blocking methods & waits on the mailbox.
testConcurrentPutTakeNonBlockingAndWait
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/mailbox/TaskMailboxImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/mailbox/TaskMailboxImplTest.java
Apache-2.0
/** Tests that closing the mailbox unblocks pending accesses with the correct exceptions. */
@Test
void testCloseUnblocks() throws InterruptedException {
    testAllPuttingUnblocksInternal(TaskMailbox::close);
}
Test that closing the mailbox unblocks pending accesses with correct exceptions.
testCloseUnblocks
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/mailbox/TaskMailboxImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/mailbox/TaskMailboxImplTest.java
Apache-2.0
@Test void testRunExclusively() throws InterruptedException { CountDownLatch exclusiveCodeStarted = new CountDownLatch(1); final int numMails = 10; // send 10 mails in an atomic operation new Thread( () -> taskMailbox.runExclusively( () -> { exclusiveCodeStarted.countDown(); for (int index = 0; index < numMails; index++) { try { taskMailbox.put(new Mail(() -> {}, 1, "mailD")); Thread.sleep(1); } catch (Exception e) { } } })) .start(); exclusiveCodeStarted.await(); // make sure that all 10 messages have been actually enqueued. assertThat(taskMailbox.close()).hasSize(numMails); }
Testing that we cannot close while running exclusively.
testRunExclusively
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/mailbox/TaskMailboxImplTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/mailbox/TaskMailboxImplTest.java
Apache-2.0
/** Tests that watermarks correctly advance with increasing watermarks for a single-input valve. */
@Test
void testSingleInputIncreasingWatermarks() throws Exception {
    StatusWatermarkOutput output = new StatusWatermarkOutput();
    StatusWatermarkValve valve = new StatusWatermarkValve(1);

    valve.inputWatermark(new Watermark(0), 0, output);
    assertThat(output.popLastSeenOutput()).isEqualTo(new Watermark(0));
    assertThat(output.popLastSeenOutput()).isNull();

    valve.inputWatermark(new Watermark(25), 0, output);
    assertThat(output.popLastSeenOutput()).isEqualTo(new Watermark(25));
    assertThat(output.popLastSeenOutput()).isNull();
}
Tests that watermarks correctly advance with increasing watermarks for a single input valve.
testSingleInputIncreasingWatermarks
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
Apache-2.0
/** Tests that watermarks do not advance on decreasing watermark inputs for a single-input valve. */
@Test
void testSingleInputDecreasingWatermarksYieldsNoOutput() throws Exception {
    StatusWatermarkOutput output = new StatusWatermarkOutput();
    StatusWatermarkValve valve = new StatusWatermarkValve(1);

    valve.inputWatermark(new Watermark(25), 0, output);
    assertThat(output.popLastSeenOutput()).isEqualTo(new Watermark(25));

    // A lower watermark must be swallowed.
    valve.inputWatermark(new Watermark(18), 0, output);
    assertThat(output.popLastSeenOutput()).isNull();

    valve.inputWatermark(new Watermark(42), 0, output);
    assertThat(output.popLastSeenOutput()).isEqualTo(new Watermark(42));
    assertThat(output.popLastSeenOutput()).isNull();
}
Tests that watermarks do not advance with decreasing watermark inputs for a single input valve.
testSingleInputDecreasingWatermarksYieldsNoOutput
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
Apache-2.0
@Test void testSingleInputWatermarkStatusToggling() throws Exception { StatusWatermarkOutput valveOutput = new StatusWatermarkOutput(); StatusWatermarkValve valve = new StatusWatermarkValve(1); valve.inputWatermarkStatus(WatermarkStatus.ACTIVE, 0, valveOutput); // this also implicitly verifies that input channels start as ACTIVE assertThat(valveOutput.popLastSeenOutput()).isNull(); valve.inputWatermarkStatus(WatermarkStatus.IDLE, 0, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isEqualTo(WatermarkStatus.IDLE); valve.inputWatermarkStatus(WatermarkStatus.IDLE, 0, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isNull(); valve.inputWatermarkStatus(WatermarkStatus.ACTIVE, 0, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isEqualTo(WatermarkStatus.ACTIVE); assertThat(valveOutput.popLastSeenOutput()).isNull(); }
Tests that watermark status toggling works correctly, as well as that non-toggling status inputs do not yield output for a single input valve.
testSingleInputWatermarkStatusToggling
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
Apache-2.0
/** Tests that the watermark of an input channel remains intact while it is in the IDLE status. */
@Test
void testSingleInputWatermarksIntactDuringIdleness() throws Exception {
    StatusWatermarkOutput output = new StatusWatermarkOutput();
    StatusWatermarkValve valve = new StatusWatermarkValve(1);

    valve.inputWatermark(new Watermark(25), 0, output);
    assertThat(output.popLastSeenOutput()).isEqualTo(new Watermark(25));
    assertThat(output.popLastSeenOutput()).isNull();

    valve.inputWatermarkStatus(WatermarkStatus.IDLE, 0, output);
    assertThat(output.popLastSeenOutput()).isEqualTo(WatermarkStatus.IDLE);

    // Watermarks arriving while IDLE are not forwarded and the stored watermark stays at 25.
    valve.inputWatermark(new Watermark(50), 0, output);
    assertThat(output.popLastSeenOutput()).isNull();
    assertThat(valve.getSubpartitionStatus(0).watermark).isEqualTo(25);

    valve.inputWatermarkStatus(WatermarkStatus.ACTIVE, 0, output);
    assertThat(output.popLastSeenOutput()).isEqualTo(WatermarkStatus.ACTIVE);
    assertThat(output.popLastSeenOutput()).isNull();

    valve.inputWatermark(new Watermark(50), 0, output);
    assertThat(output.popLastSeenOutput()).isEqualTo(new Watermark(50));
    assertThat(output.popLastSeenOutput()).isNull();
}
Tests that the watermark of an input channel remains intact while in the IDLE status.
testSingleInputWatermarksIntactDuringIdleness
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
Apache-2.0
@Test void testMultipleInputYieldsWatermarkOnlyWhenAllChannelsReceivesWatermarks() throws Exception { StatusWatermarkOutput valveOutput = new StatusWatermarkOutput(); StatusWatermarkValve valve = new StatusWatermarkValve(3); valve.inputWatermark(new Watermark(0), 0, valveOutput); valve.inputWatermark(new Watermark(0), 1, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isNull(); // now, all channels have watermarks valve.inputWatermark(new Watermark(0), 2, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(0)); assertThat(valveOutput.popLastSeenOutput()).isNull(); }
Tests that the valve yields a watermark only when all inputs have received a watermark.
testMultipleInputYieldsWatermarkOnlyWhenAllChannelsReceivesWatermarks
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
Apache-2.0
/** Tests that decreasing watermarks on a multiple-input valve yield no output. */
@Test
void testMultipleInputDecreasingWatermarksYieldsNoOutput() throws Exception {
    StatusWatermarkOutput output = new StatusWatermarkOutput();
    StatusWatermarkValve valve = new StatusWatermarkValve(3);

    valve.inputWatermark(new Watermark(25), 0, output);
    valve.inputWatermark(new Watermark(10), 1, output);
    valve.inputWatermark(new Watermark(17), 2, output);
    assertThat(output.popLastSeenOutput()).isEqualTo(new Watermark(10));

    // Every channel regresses, so the overall minimum cannot advance.
    valve.inputWatermark(new Watermark(12), 0, output);
    valve.inputWatermark(new Watermark(8), 1, output);
    valve.inputWatermark(new Watermark(15), 2, output);
    assertThat(output.popLastSeenOutput()).isNull();
}
Tests that for a multiple input valve, decreasing watermarks will yield no output.
testMultipleInputDecreasingWatermarksYieldsNoOutput
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
Apache-2.0
@Test void testMultipleInputWatermarkStatusToggling() throws Exception { StatusWatermarkOutput valveOutput = new StatusWatermarkOutput(); StatusWatermarkValve valve = new StatusWatermarkValve(2); // this also implicitly verifies that all input channels start as active valve.inputWatermarkStatus(WatermarkStatus.ACTIVE, 0, valveOutput); valve.inputWatermarkStatus(WatermarkStatus.ACTIVE, 1, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isNull(); valve.inputWatermarkStatus(WatermarkStatus.IDLE, 1, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isNull(); // now, all channels are IDLE valve.inputWatermarkStatus(WatermarkStatus.IDLE, 0, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isEqualTo(WatermarkStatus.IDLE); valve.inputWatermarkStatus(WatermarkStatus.IDLE, 0, valveOutput); valve.inputWatermarkStatus(WatermarkStatus.IDLE, 1, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isNull(); // as soon as at least one input becomes active again, the ACTIVE marker should be forwarded valve.inputWatermarkStatus(WatermarkStatus.ACTIVE, 1, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isEqualTo(WatermarkStatus.ACTIVE); valve.inputWatermarkStatus(WatermarkStatus.ACTIVE, 0, valveOutput); // already back to ACTIVE, should yield no output assertThat(valveOutput.popLastSeenOutput()).isNull(); }
Tests that watermark status toggling works correctly, as well as that non-toggling status inputs do not yield output for a multiple input valve.
testMultipleInputWatermarkStatusToggling
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
Apache-2.0
@Test void testMultipleInputWatermarkAdvancingWithPartiallyIdleChannels() throws Exception { StatusWatermarkOutput valveOutput = new StatusWatermarkOutput(); StatusWatermarkValve valve = new StatusWatermarkValve(3); valve.inputWatermark(new Watermark(15), 0, valveOutput); valve.inputWatermark(new Watermark(10), 1, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isNull(); valve.inputWatermarkStatus(WatermarkStatus.IDLE, 2, valveOutput); // min watermark should be computed from remaining ACTIVE channels assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(10)); assertThat(valveOutput.popLastSeenOutput()).isNull(); valve.inputWatermark(new Watermark(18), 1, valveOutput); // now, min watermark should be 15 from channel #0 assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(15)); assertThat(valveOutput.popLastSeenOutput()).isNull(); valve.inputWatermark(new Watermark(20), 0, valveOutput); // now, min watermark should be 18 from channel #1 assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(18)); assertThat(valveOutput.popLastSeenOutput()).isNull(); }
Tests that for multiple inputs, when some inputs are idle, the min watermark is correctly computed and advanced from the remaining active inputs.
testMultipleInputWatermarkAdvancingWithPartiallyIdleChannels
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
Apache-2.0
@Test void testMultipleInputWatermarkAdvancingAsChannelsIndividuallyBecomeIdle() throws Exception { StatusWatermarkOutput valveOutput = new StatusWatermarkOutput(); StatusWatermarkValve valve = new StatusWatermarkValve(3); valve.inputWatermark(new Watermark(25), 0, valveOutput); valve.inputWatermark(new Watermark(10), 1, valveOutput); valve.inputWatermark(new Watermark(17), 2, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(10)); valve.inputWatermarkStatus(WatermarkStatus.IDLE, 1, valveOutput); // only channel 0 & 2 is ACTIVE; 17 is the overall min watermark now assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(17)); valve.inputWatermarkStatus(WatermarkStatus.IDLE, 2, valveOutput); // only channel 0 is ACTIVE; 25 is the overall min watermark now assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(25)); assertThat(valveOutput.popLastSeenOutput()).isNull(); }
Tests that as input channels individually and gradually become idle, watermarks are output as soon as the remaining active channels can yield a new min watermark.
testMultipleInputWatermarkAdvancingAsChannelsIndividuallyBecomeIdle
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
Apache-2.0
@Test void testMultipleInputFlushMaxWatermarkAndWatermarkStatusOnceAllInputsBecomeIdle() throws Exception { StatusWatermarkOutput valveOutput = new StatusWatermarkOutput(); StatusWatermarkValve valve = new StatusWatermarkValve(3); // ------------------------------------------------------------------------------------------- // Setup valve for test case: // channel #1: Watermark 10, ACTIVE // channel #2: Watermark 5, ACTIVE // channel #3: Watermark 3, ACTIVE // Min Watermark across channels = 3 (from channel #3) // ------------------------------------------------------------------------------------------- valve.inputWatermark(new Watermark(10), 0, valveOutput); valve.inputWatermark(new Watermark(5), 1, valveOutput); valve.inputWatermark(new Watermark(3), 2, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(3)); // ------------------------------------------------------------------------------------------- // Order of becoming IDLE: // channel #1 ----------------> channel #2 ----------------> channel #3 // |-> (nothing emitted) |-> (nothing emitted) |-> Emit Watermark(10) & IDLE // ------------------------------------------------------------------------------------------- valve.inputWatermarkStatus(WatermarkStatus.IDLE, 0, valveOutput); valve.inputWatermarkStatus(WatermarkStatus.IDLE, 1, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isNull(); valve.inputWatermarkStatus(WatermarkStatus.IDLE, 2, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(10)); assertThat(valveOutput.popLastSeenOutput()).isEqualTo(WatermarkStatus.IDLE); assertThat(valveOutput.popLastSeenOutput()).isNull(); }
Tests that when all inputs become idle, the max watermark across all channels is correctly "flushed" from the valve, as well as the watermark status IDLE marker. <p>This test along with {@link #testMultipleInputWatermarkAdvancingAsChannelsIndividuallyBecomeIdle} should completely verify that the eventual watermark advancement result when all inputs become idle is independent of the order that the inputs become idle.
testMultipleInputFlushMaxWatermarkAndWatermarkStatusOnceAllInputsBecomeIdle
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
Apache-2.0
@Test void testMultipleInputWatermarkRealignmentAfterResumeActive() throws Exception { StatusWatermarkOutput valveOutput = new StatusWatermarkOutput(); StatusWatermarkValve valve = new StatusWatermarkValve(3); valve.inputWatermark(new Watermark(10), 0, valveOutput); valve.inputWatermark(new Watermark(7), 1, valveOutput); valve.inputWatermark(new Watermark(3), 2, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(3)); assertThat(valveOutput.popLastSeenOutput()).isNull(); valve.inputWatermarkStatus(WatermarkStatus.IDLE, 2, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(7)); assertThat(valveOutput.popLastSeenOutput()).isNull(); // let channel 2 become active again; since the min watermark has now advanced to 7, // channel 2 should have been marked as non-aligned. valve.inputWatermarkStatus(WatermarkStatus.ACTIVE, 2, valveOutput); assertThat(valve.getSubpartitionStatus(2).isWatermarkAligned).isFalse(); // during the realignment process, watermarks should still be accepted by channel 2 (but // shouldn't yield new watermarks) valve.inputWatermark(new Watermark(5), 2, valveOutput); assertThat(valve.getSubpartitionStatus(2).watermark).isEqualTo(5); assertThat(valveOutput.popLastSeenOutput()).isNull(); // let channel 2 catch up with the min watermark; now should be realigned valve.inputWatermark(new Watermark(9), 2, valveOutput); assertThat(valve.getSubpartitionStatus(2).isWatermarkAligned).isTrue(); assertThat(valveOutput.popLastSeenOutput()).isNull(); // check that realigned inputs is now taken into account for watermark advancement valve.inputWatermark(new Watermark(12), 1, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(9)); assertThat(valveOutput.popLastSeenOutput()).isNull(); }
Tests that when idle channels become active again, they need to "catch up" with the latest watermark before they are considered for min watermark computation again.
testMultipleInputWatermarkRealignmentAfterResumeActive
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValveTest.java
Apache-2.0
/**
 * Creates an {@link InternalTimeServiceManagerImpl} for the operator and captures the first
 * instance created in the {@code timeServiceManager} field so the harness can access it.
 */
@Override
public <K> InternalTimeServiceManager<K> create(
        TaskIOMetricGroup taskIOMetricGroup,
        PriorityQueueSetFactory factory,
        KeyGroupRange keyGroupRange,
        ClassLoader userClassloader,
        KeyContext keyContext,
        ProcessingTimeService processingTimeService,
        Iterable<KeyGroupStatePartitionStreamProvider> rawKeyedStates,
        StreamTaskCancellationContext cancellationContext)
        throws Exception {
    InternalTimeServiceManagerImpl<K> manager =
            InternalTimeServiceManagerImpl.create(
                    taskIOMetricGroup,
                    factory,
                    keyGroupRange,
                    userClassloader,
                    keyContext,
                    processingTimeService,
                    rawKeyedStates,
                    cancellationContext);
    // Only record the first manager; subsequent calls must not overwrite it.
    if (timeServiceManager == null) {
        timeServiceManager = manager;
    }
    return manager;
}
Base class for {@code AbstractStreamOperator} test harnesses.
create
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/AbstractStreamOperatorTestHarness.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/AbstractStreamOperatorTestHarness.java
Apache-2.0
public static OperatorSubtaskState repartitionOperatorState( final OperatorSubtaskState operatorStateHandles, final int numKeyGroups, final int oldParallelism, final int newParallelism, final int subtaskIndex) { Preconditions.checkNotNull( operatorStateHandles, "the previous operatorStateHandles should not be null."); // create a new OperatorStateHandles that only contains the state for our key-groups List<KeyGroupRange> keyGroupPartitions = StateAssignmentOperation.createKeyGroupPartitions(numKeyGroups, newParallelism); KeyGroupRange localKeyGroupRange = keyGroupPartitions.get(subtaskIndex); List<KeyedStateHandle> localManagedKeyGroupState = new ArrayList<>(); StateAssignmentOperation.extractIntersectingState( operatorStateHandles.getManagedKeyedState(), localKeyGroupRange, localManagedKeyGroupState); List<KeyedStateHandle> localRawKeyGroupState = new ArrayList<>(); StateAssignmentOperation.extractIntersectingState( operatorStateHandles.getRawKeyedState(), localKeyGroupRange, localRawKeyGroupState); StateObjectCollection<OperatorStateHandle> managedOperatorStates = operatorStateHandles.getManagedOperatorState(); Collection<OperatorStateHandle> localManagedOperatorState; if (!managedOperatorStates.isEmpty()) { List<List<OperatorStateHandle>> managedOperatorState = managedOperatorStates.stream() .map(Collections::singletonList) .collect(Collectors.toList()); localManagedOperatorState = operatorStateRepartitioner .repartitionState(managedOperatorState, oldParallelism, newParallelism) .get(subtaskIndex); } else { localManagedOperatorState = Collections.emptyList(); } StateObjectCollection<OperatorStateHandle> rawOperatorStates = operatorStateHandles.getRawOperatorState(); Collection<OperatorStateHandle> localRawOperatorState; if (!rawOperatorStates.isEmpty()) { List<List<OperatorStateHandle>> rawOperatorState = rawOperatorStates.stream() .map(Collections::singletonList) .collect(Collectors.toList()); localRawOperatorState = operatorStateRepartitioner 
.repartitionState(rawOperatorState, oldParallelism, newParallelism) .get(subtaskIndex); } else { localRawOperatorState = Collections.emptyList(); } return OperatorSubtaskState.builder() .setManagedOperatorState( new StateObjectCollection<>( nullToEmptyCollection(localManagedOperatorState))) .setRawOperatorState( new StateObjectCollection<>(nullToEmptyCollection(localRawOperatorState))) .setManagedKeyedState( new StateObjectCollection<>( nullToEmptyCollection(localManagedKeyGroupState))) .setRawKeyedState( new StateObjectCollection<>(nullToEmptyCollection(localRawKeyGroupState))) .build(); }
Returns the reshaped state handles, including only those key-group states in the local key-group range and the operator states that would be assigned to the local subtask.
repartitionOperatorState
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/AbstractStreamOperatorTestHarness.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/AbstractStreamOperatorTestHarness.java
Apache-2.0
/**
 * Merges the partial {@link OperatorSubtaskState}s produced by several subtask harnesses into
 * a single state object, so a test's parallelism can be changed arbitrarily afterwards.
 *
 * @param handles the partial states to merge, each assumed to come from a single subtask of a
 *     single operator
 * @return the merged state, the sole input if only one is given, or {@code null} if none
 * @throws Exception kept for signature compatibility; not thrown by this implementation
 */
public static OperatorSubtaskState repackageState(OperatorSubtaskState... handles)
        throws Exception {

    if (handles.length == 0) {
        return null;
    }
    if (handles.length == 1) {
        return handles[0];
    }

    List<OperatorStateHandle> managedOperator = new ArrayList<>(handles.length);
    List<OperatorStateHandle> rawOperator = new ArrayList<>(handles.length);
    List<KeyedStateHandle> managedKeyed = new ArrayList<>(handles.length);
    List<KeyedStateHandle> rawKeyed = new ArrayList<>(handles.length);

    // Concatenate each state category across all partial snapshots.
    for (OperatorSubtaskState handle : handles) {
        managedOperator.addAll(handle.getManagedOperatorState());
        rawOperator.addAll(handle.getRawOperatorState());
        managedKeyed.addAll(handle.getManagedKeyedState());
        rawKeyed.addAll(handle.getRawKeyedState());
    }

    return OperatorSubtaskState.builder()
            .setManagedOperatorState(new StateObjectCollection<>(managedOperator))
            .setRawOperatorState(new StateObjectCollection<>(rawOperator))
            .setManagedKeyedState(new StateObjectCollection<>(managedKeyed))
            .setRawKeyedState(new StateObjectCollection<>(rawKeyed))
            .build();
}
Takes the different {@link OperatorSubtaskState} created by calling {@link #snapshot(long, long)} on different instances of {@link AbstractStreamOperatorTestHarness} (each one representing one subtask) and repacks them into a single {@link OperatorSubtaskState} so that the parallelism of the test can change arbitrarily (i.e. be able to scale both up and down). <p>After repacking the partial states, remember to use {@link #repartitionOperatorState(OperatorSubtaskState, int, int, int, int)} to reshape the state handles to include only those key-group states in the local key-group range and the operator states that would be assigned to the local subtask. Bear in mind that for parallelism greater than one, you have to use the constructor {@link #AbstractStreamOperatorTestHarness(StreamOperator, int, int, int)}. <p><b>NOTE: </b> each of the {@code handles} in the argument list is assumed to be from a single task of a single operator (i.e. chain length of one). <p>For an example of how to use it, have a look at {@link AbstractStreamOperatorTest#testStateAndTimerStateShufflingScalingDown()}. @param handles the different states to be merged. @return the resulting state, or {@code null} if no partial states are specified.
repackageState
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/AbstractStreamOperatorTestHarness.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/AbstractStreamOperatorTestHarness.java
Apache-2.0
/**
 * Configures the environment to store checkpoints on the JobManager.
 *
 * @param env the StreamExecutionEnvironment to configure
 */
public static void configureJobManagerCheckpointStorage(StreamExecutionEnvironment env) {
    Configuration config = new Configuration();
    config.set(CheckpointingOptions.CHECKPOINT_STORAGE, "jobmanager");
    env.configure(config);
}
Configures the checkpoint storage to use the JobManager for storing checkpoints. @param env The StreamExecutionEnvironment to configure.
configureJobManagerCheckpointStorage
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/CheckpointStorageUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/CheckpointStorageUtils.java
Apache-2.0
/**
 * Configures file-system checkpoint storage at the given directory URI.
 *
 * @param env the StreamExecutionEnvironment to configure
 * @param checkpointDirectory URI of the checkpoint directory, must not be null
 * @throws NullPointerException if {@code checkpointDirectory} is null
 */
public static void configureFileSystemCheckpointStorage(
        StreamExecutionEnvironment env, URI checkpointDirectory) {
    String directory =
            Preconditions.checkNotNull(
                            checkpointDirectory, "Checkpoint directory must not be null")
                    .toString();
    configureFileSystemCheckpointStorage(env, directory);
}
Configures the checkpoint storage with a given directory as a URI. @param env The StreamExecutionEnvironment to configure. @param checkpointDirectory The URI of the directory where checkpoints will be stored, must not be null. @throws NullPointerException if checkpointDirectory is null.
configureFileSystemCheckpointStorage
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/CheckpointStorageUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/CheckpointStorageUtils.java
Apache-2.0
/**
 * Configures file-system checkpoint storage at the given directory path.
 *
 * @param env the StreamExecutionEnvironment to configure
 * @param checkpointDirectory path of the checkpoint directory, must not be null
 * @throws NullPointerException if {@code checkpointDirectory} is null
 */
public static void configureFileSystemCheckpointStorage(
        StreamExecutionEnvironment env, Path checkpointDirectory) {
    String directory =
            Preconditions.checkNotNull(
                            checkpointDirectory, "Checkpoint directory must not be null")
                    .toString();
    configureFileSystemCheckpointStorage(env, directory);
}
Sets the checkpoint storage with a given directory as a Path. @param env The StreamExecutionEnvironment to configure. @param checkpointDirectory The Path of the directory where checkpoints will be stored, must not be null. @throws NullPointerException if checkpointDirectory is null.
configureFileSystemCheckpointStorage
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/CheckpointStorageUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/CheckpointStorageUtils.java
Apache-2.0
/**
 * Configures the checkpoint storage using the given storage factory class name.
 *
 * @param env the StreamExecutionEnvironment to configure
 * @param checkpointStorageFactory fully qualified class name of the checkpoint storage
 *     factory, must not be null
 * @throws NullPointerException if {@code checkpointStorageFactory} is null
 */
public static void configureCheckpointStorageWithFactory(
        StreamExecutionEnvironment env, String checkpointStorageFactory) {
    // Bug fix: the original passed the boolean expression
    // (checkpointStorageFactory != null) to checkNotNull; a boxed Boolean is never
    // null, so the precondition could never fire. Check the reference itself.
    Preconditions.checkNotNull(
            checkpointStorageFactory, "Checkpoint storage factory must not be null.");
    env.configure(
            new Configuration()
                    .set(CheckpointingOptions.CHECKPOINT_STORAGE, checkpointStorageFactory));
}
Configures the checkpoint storage using a specified storage factory. @param env The StreamExecutionEnvironment to configure. @param checkpointStorageFactory The fully qualified class name of the checkpoint storage factory, must not be null or empty. @throws IllegalArgumentException if checkpointStorageFactory is null or empty.
configureCheckpointStorageWithFactory
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/CheckpointStorageUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/CheckpointStorageUtils.java
Apache-2.0
/**
 * Returns the number of key/value state entries currently held by the operator's keyed state
 * backend.
 *
 * @throws UnsupportedOperationException if the backend is not a {@link
 *     HeapKeyedStateBackend}, since only that backend exposes an entry count
 */
public int numKeyedStateEntries() {
    AbstractStreamOperator<?> streamOperator = (AbstractStreamOperator<?>) operator;
    KeyedStateBackend<Object> backend = streamOperator.getKeyedStateBackend();
    if (!(backend instanceof HeapKeyedStateBackend)) {
        throw new UnsupportedOperationException(
                String.format(
                        "Unsupported keyed state backend: %s",
                        backend.getClass().getCanonicalName()));
    }
    return ((HeapKeyedStateBackend) backend).numKeyValueStateEntries();
}
Extension of {@link TwoInputStreamOperatorTestHarness} that allows the operator to get a {@link KeyedStateBackend}.
numKeyedStateEntries
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/KeyedTwoInputStreamOperatorTestHarness.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/KeyedTwoInputStreamOperatorTestHarness.java
Apache-2.0
/**
 * Builds an initialized test harness around a {@link ProcessFunction}.
 *
 * @param function the {@link ProcessFunction} under test, must not be null
 * @param <IN> input element type
 * @param <OUT> output element type
 * @return an opened {@link OneInputStreamOperatorTestHarness} wrapping {@code function}
 */
public static <IN, OUT> OneInputStreamOperatorTestHarness<IN, OUT> forProcessFunction(
        final ProcessFunction<IN, OUT> function) throws Exception {
    final OneInputStreamOperatorTestHarness<IN, OUT> harness =
            new OneInputStreamOperatorTestHarness<>(
                    new ProcessOperator<>(Preconditions.checkNotNull(function)), 1, 1, 0);
    harness.setup();
    harness.open();
    return harness;
}
Returns an initialized test harness for {@link ProcessFunction}. @param function instance of a {@link ProcessFunction} under test @param <IN> type of input stream elements @param <OUT> type of output stream elements @return {@link OneInputStreamOperatorTestHarness} wrapped around {@code function}
forProcessFunction
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/ProcessFunctionTestHarnesses.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/ProcessFunctionTestHarnesses.java
Apache-2.0
/**
 * Builds an initialized test harness around a {@link KeyedProcessFunction}.
 *
 * @param function the {@link KeyedProcessFunction} under test, must not be null
 * @param keySelector extracts the key from each input element
 * @param keyType type information of the key
 * @param <K> key type
 * @param <IN> input element type
 * @param <OUT> output element type
 * @return an opened {@link KeyedOneInputStreamOperatorTestHarness} wrapping {@code function}
 */
public static <K, IN, OUT>
        KeyedOneInputStreamOperatorTestHarness<K, IN, OUT> forKeyedProcessFunction(
                final KeyedProcessFunction<K, IN, OUT> function,
                final KeySelector<IN, K> keySelector,
                final TypeInformation<K> keyType)
                throws Exception {
    final KeyedOneInputStreamOperatorTestHarness<K, IN, OUT> harness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    new KeyedProcessOperator<>(Preconditions.checkNotNull(function)),
                    keySelector,
                    keyType,
                    1,
                    1,
                    0);
    harness.open();
    return harness;
}
Returns an initialized test harness for {@link KeyedProcessFunction}. @param function instance of a {@link KeyedProcessFunction} under test @param <K> key type @param <IN> type of input stream elements @param <OUT> type of output stream elements @return {@link KeyedOneInputStreamOperatorTestHarness} wrapped around {@code function}
forKeyedProcessFunction
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/ProcessFunctionTestHarnesses.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/ProcessFunctionTestHarnesses.java
Apache-2.0
/**
 * Builds an initialized two-input test harness around a {@link CoProcessFunction}.
 *
 * @param function the {@link CoProcessFunction} under test, must not be null
 * @param <IN1> first input element type
 * @param <IN2> second input element type
 * @param <OUT> output element type
 * @return an opened {@link TwoInputStreamOperatorTestHarness} wrapping {@code function}
 */
public static <IN1, IN2, OUT>
        TwoInputStreamOperatorTestHarness<IN1, IN2, OUT> forCoProcessFunction(
                final CoProcessFunction<IN1, IN2, OUT> function) throws Exception {
    final TwoInputStreamOperatorTestHarness<IN1, IN2, OUT> harness =
            new TwoInputStreamOperatorTestHarness<>(
                    new CoProcessOperator<>(Preconditions.checkNotNull(function)), 1, 1, 0);
    harness.open();
    return harness;
}
Returns an initialized test harness for {@link CoProcessFunction} with two input streams. @param function instance of a {@link CoProcessFunction} under test @param <IN1> type of first input stream elements @param <IN2> type of second input stream elements @param <OUT> type of output stream elements @return {@link TwoInputStreamOperatorTestHarness} wrapped around {@code function}
forCoProcessFunction
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/ProcessFunctionTestHarnesses.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/ProcessFunctionTestHarnesses.java
Apache-2.0
/**
 * Builds an initialized keyed two-input test harness around a {@link KeyedCoProcessFunction}.
 *
 * @param function the {@link KeyedCoProcessFunction} under test, must not be null
 * @param keySelector1 extracts the key from first-input elements
 * @param keySelector2 extracts the key from second-input elements
 * @param keyType type information of the key
 * @param <K> key type
 * @param <IN1> first input element type
 * @param <IN2> second input element type
 * @param <OUT> output element type
 * @return an opened {@link KeyedTwoInputStreamOperatorTestHarness} wrapping {@code function}
 */
public static <K, IN1, IN2, OUT>
        KeyedTwoInputStreamOperatorTestHarness<K, IN1, IN2, OUT> forKeyedCoProcessFunction(
                final KeyedCoProcessFunction<K, IN1, IN2, OUT> function,
                final KeySelector<IN1, K> keySelector1,
                final KeySelector<IN2, K> keySelector2,
                final TypeInformation<K> keyType)
                throws Exception {
    final KeyedTwoInputStreamOperatorTestHarness<K, IN1, IN2, OUT> harness =
            new KeyedTwoInputStreamOperatorTestHarness<>(
                    new KeyedCoProcessOperator<>(Preconditions.checkNotNull(function)),
                    keySelector1,
                    keySelector2,
                    keyType,
                    1,
                    1,
                    0);
    harness.open();
    return harness;
}
Returns an initialized test harness for {@link KeyedCoProcessFunction} with two input streams. @param function instance of a {@link KeyedCoProcessFunction} under test @param <K> key type @param <IN1> type of first input stream elements @param <IN2> type of second input stream elements @param <OUT> type of output stream elements @return {@link KeyedTwoInputStreamOperatorTestHarness} wrapped around {@code function}
forKeyedCoProcessFunction
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/ProcessFunctionTestHarnesses.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/ProcessFunctionTestHarnesses.java
Apache-2.0
/**
 * Builds an initialized test harness around a {@link BroadcastProcessFunction}.
 *
 * @param function the {@link BroadcastProcessFunction} under test, must not be null
 * @param descriptors broadcast state descriptors used by {@code function}
 * @param <IN1> input element type
 * @param <IN2> broadcast element type
 * @param <OUT> output element type
 * @return an opened {@link BroadcastOperatorTestHarness} wrapping {@code function}
 */
public static <IN1, IN2, OUT>
        BroadcastOperatorTestHarness<IN1, IN2, OUT> forBroadcastProcessFunction(
                final BroadcastProcessFunction<IN1, IN2, OUT> function,
                final MapStateDescriptor<?, ?>... descriptors)
                throws Exception {
    final BroadcastOperatorTestHarness<IN1, IN2, OUT> harness =
            new BroadcastOperatorTestHarness<>(
                    new CoBroadcastWithNonKeyedOperator<>(
                            Preconditions.checkNotNull(function), Arrays.asList(descriptors)),
                    1,
                    1,
                    0);
    harness.open();
    return harness;
}
Returns an initialized test harness for {@link BroadcastProcessFunction}. @param function instance of a {@link BroadcastProcessFunction} under test @param descriptors broadcast state descriptors used in the {@code function} @param <IN1> type of input stream elements @param <IN2> type of broadcast stream elements @param <OUT> type of output elements @return {@link BroadcastOperatorTestHarness} wrapped around {@code function}
forBroadcastProcessFunction
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/ProcessFunctionTestHarnesses.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/ProcessFunctionTestHarnesses.java
Apache-2.0
/**
 * Builds an initialized keyed test harness around a {@link KeyedBroadcastProcessFunction}.
 *
 * @param function the {@link KeyedBroadcastProcessFunction} under test, must not be null
 * @param keySelector extracts the key from input elements
 * @param keyType type information of the key
 * @param descriptors broadcast state descriptors used by {@code function}
 * @param <K> key type
 * @param <IN1> input element type
 * @param <IN2> broadcast element type
 * @param <OUT> output element type
 * @return an opened {@link KeyedBroadcastOperatorTestHarness} wrapping {@code function}
 */
public static <K, IN1, IN2, OUT>
        KeyedBroadcastOperatorTestHarness<K, IN1, IN2, OUT> forKeyedBroadcastProcessFunction(
                final KeyedBroadcastProcessFunction<K, IN1, IN2, OUT> function,
                final KeySelector<IN1, K> keySelector,
                final TypeInformation<K> keyType,
                final MapStateDescriptor<?, ?>... descriptors)
                throws Exception {
    final KeyedBroadcastOperatorTestHarness<K, IN1, IN2, OUT> harness =
            new KeyedBroadcastOperatorTestHarness<>(
                    new CoBroadcastWithKeyedOperator<>(
                            Preconditions.checkNotNull(function), Arrays.asList(descriptors)),
                    keySelector,
                    keyType,
                    1,
                    1,
                    0);
    harness.open();
    return harness;
}
Returns an initialized test harness for {@link KeyedBroadcastProcessFunction}. @param function instance of a {@link KeyedBroadcastProcessFunction} under test @param descriptors broadcast state descriptors used in the {@code function} @param <K> key type @param <IN1> type of input stream elements @param <IN2> type of broadcast stream elements @param <OUT> type of output elements @return {@link KeyedBroadcastOperatorTestHarness} wrapped around {@code function}
forKeyedBroadcastProcessFunction
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/ProcessFunctionTestHarnesses.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/ProcessFunctionTestHarnesses.java
Apache-2.0
/**
 * Disables restarts for the given environment.
 *
 * @param env the StreamExecutionEnvironment to configure
 */
public static void configureNoRestartStrategy(StreamExecutionEnvironment env) {
    Configuration config = new Configuration();
    config.set(RestartStrategyOptions.RESTART_STRATEGY, "none");
    env.configure(config);
}
Disables the restart strategy for the given StreamExecutionEnvironment. @param env the StreamExecutionEnvironment to configure
configureNoRestartStrategy
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/RestartStrategyUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/RestartStrategyUtils.java
Apache-2.0
/**
 * Disables restarts for the given job graph.
 *
 * @param jobGraph the JobGraph to configure
 */
public static void configureNoRestartStrategy(JobGraph jobGraph) {
    Configuration jobConfig = jobGraph.getJobConfiguration();
    jobConfig.set(RestartStrategyOptions.RESTART_STRATEGY, "none");
}
Disables the restart strategy for the given JobGraph. @param jobGraph the JobGraph to configure
configureNoRestartStrategy
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/RestartStrategyUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/RestartStrategyUtils.java
Apache-2.0
/**
 * Sets a fixed-delay restart strategy on the given environment.
 *
 * @param env the StreamExecutionEnvironment to configure
 * @param restartAttempts number of restart attempts
 * @param delayBetweenAttempts delay between attempts, in milliseconds
 */
public static void configureFixedDelayRestartStrategy(
        StreamExecutionEnvironment env, int restartAttempts, long delayBetweenAttempts) {
    final Duration delay = Duration.ofMillis(delayBetweenAttempts);
    configureFixedDelayRestartStrategy(env, restartAttempts, delay);
}
Sets a fixed-delay restart strategy for the given StreamExecutionEnvironment. @param env the StreamExecutionEnvironment to configure @param restartAttempts the number of restart attempts @param delayBetweenAttempts the delay between restart attempts in milliseconds
configureFixedDelayRestartStrategy
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/RestartStrategyUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/RestartStrategyUtils.java
Apache-2.0
/**
 * Sets a fixed-delay restart strategy on the given environment.
 *
 * @param env the StreamExecutionEnvironment to configure
 * @param restartAttempts number of restart attempts
 * @param delayBetweenAttempts delay between attempts
 */
public static void configureFixedDelayRestartStrategy(
        StreamExecutionEnvironment env, int restartAttempts, Duration delayBetweenAttempts) {
    env.configure(
            new Configuration()
                    .set(RestartStrategyOptions.RESTART_STRATEGY, "fixed-delay")
                    .set(
                            RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_ATTEMPTS,
                            restartAttempts)
                    .set(
                            RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_DELAY,
                            delayBetweenAttempts));
}
Sets a fixed-delay restart strategy for the given StreamExecutionEnvironment. @param env the StreamExecutionEnvironment to configure @param restartAttempts the number of restart attempts @param delayBetweenAttempts the delay between restart attempts
configureFixedDelayRestartStrategy
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/RestartStrategyUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/RestartStrategyUtils.java
Apache-2.0
/**
 * Sets a fixed-delay restart strategy on the given job graph.
 *
 * @param jobGraph the JobGraph to configure
 * @param restartAttempts number of restart attempts
 * @param delayBetweenAttempts delay between attempts, in milliseconds
 */
public static void configureFixedDelayRestartStrategy(
        JobGraph jobGraph, int restartAttempts, long delayBetweenAttempts) {
    final Duration delay = Duration.ofMillis(delayBetweenAttempts);
    configureFixedDelayRestartStrategy(jobGraph, restartAttempts, delay);
}
Sets a fixed-delay restart strategy for the given JobGraph. @param jobGraph the JobGraph to configure @param restartAttempts the number of restart attempts @param delayBetweenAttempts the delay between restart attempts in milliseconds
configureFixedDelayRestartStrategy
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/RestartStrategyUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/RestartStrategyUtils.java
Apache-2.0
/**
 * Sets a fixed-delay restart strategy for the given JobGraph.
 *
 * @param jobGraph the JobGraph to configure
 * @param restartAttempts the number of restart attempts
 * @param delayBetweenAttempts the delay between restart attempts
 */
public static void configureFixedDelayRestartStrategy(
        JobGraph jobGraph, int restartAttempts, Duration delayBetweenAttempts) {
    // Write the restart-strategy settings directly into the job's own configuration.
    final Configuration jobConfig = jobGraph.getJobConfiguration();
    jobConfig.set(RestartStrategyOptions.RESTART_STRATEGY, "fixed-delay");
    jobConfig.set(RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_ATTEMPTS, restartAttempts);
    jobConfig.set(RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_DELAY, delayBetweenAttempts);
}
Sets a fixed-delay restart strategy for the given JobGraph. @param jobGraph the JobGraph to configure @param restartAttempts the number of restart attempts @param delayBetweenAttempts the delay between restart attempts
configureFixedDelayRestartStrategy
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/RestartStrategyUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/RestartStrategyUtils.java
Apache-2.0
/**
 * Configures the StreamExecutionEnvironment to use a HashMap-based state backend.
 *
 * @param env the StreamExecutionEnvironment to configure
 */
public static void configureHashMapStateBackend(StreamExecutionEnvironment env) {
    final Configuration config = new Configuration();
    config.set(StateBackendOptions.STATE_BACKEND, "hashmap");
    env.configure(config);
}
Configures the StreamExecutionEnvironment to use a HashMap-based state backend. @param env the StreamExecutionEnvironment to configure
configureHashMapStateBackend
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/StateBackendUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/StateBackendUtils.java
Apache-2.0
/**
 * Configures the StreamExecutionEnvironment to use a RocksDB state backend.
 *
 * @param env the StreamExecutionEnvironment to configure
 */
public static void configureRocksDBStateBackend(StreamExecutionEnvironment env) {
    final Configuration config = new Configuration();
    config.set(StateBackendOptions.STATE_BACKEND, "rocksdb");
    env.configure(config);
}
Configures the StreamExecutionEnvironment to use a RocksDB state backend. @param env the StreamExecutionEnvironment to configure
configureRocksDBStateBackend
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/StateBackendUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/StateBackendUtils.java
Apache-2.0
/**
 * Configures the StreamExecutionEnvironment to use a RocksDB state backend with the option for
 * incremental checkpoints.
 *
 * @param env the StreamExecutionEnvironment to configure
 * @param incrementalCheckpoints whether to enable incremental checkpoints
 */
public static void configureRocksDBStateBackend(
        StreamExecutionEnvironment env, boolean incrementalCheckpoints) {
    final Configuration config = new Configuration();
    config.set(StateBackendOptions.STATE_BACKEND, "rocksdb");
    config.set(CheckpointingOptions.INCREMENTAL_CHECKPOINTS, incrementalCheckpoints);
    env.configure(config);
}
Configures the StreamExecutionEnvironment to use a RocksDB state backend with the option for incremental checkpoints. @param env the StreamExecutionEnvironment to configure @param incrementalCheckpoints whether to enable incremental checkpoints
configureRocksDBStateBackend
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/StateBackendUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/StateBackendUtils.java
Apache-2.0
/**
 * Configures the StreamExecutionEnvironment to use a state backend defined by a factory.
 *
 * @param env the StreamExecutionEnvironment to configure
 * @param stateBackendFactory the fully qualified name of the state backend factory class
 */
public static void configureStateBackendWithFactory(
        StreamExecutionEnvironment env, String stateBackendFactory) {
    final Configuration config = new Configuration();
    config.set(StateBackendOptions.STATE_BACKEND, stateBackendFactory);
    env.configure(config);
}
Configures the StreamExecutionEnvironment to use a state backend defined by a factory. @param env the StreamExecutionEnvironment to configure @param stateBackendFactory the fully qualified name of the state backend factory class
configureStateBackendWithFactory
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/StateBackendUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/StateBackendUtils.java
Apache-2.0
/**
 * Configures the state backend for the given StreamExecutionEnvironment and executes the stream
 * job asynchronously.
 *
 * @param env the StreamExecutionEnvironment to configure
 * @param stateBackend the StateBackend to set for the execution environment
 * @return the JobClient of the submitted job
 */
public static JobClient configureStateBackendAndExecuteAsync(
        StreamExecutionEnvironment env, StateBackend stateBackend) throws Exception {
    // Set the backend on the stream graph before handing it back for async execution.
    final StreamGraph graph = env.getStreamGraph();
    graph.setStateBackend(stateBackend);
    return env.executeAsync(graph);
}
Configures the state backend for the given StreamExecutionEnvironment and executes the stream job asynchronously. @param env The StreamExecutionEnvironment to configure. @param stateBackend The StateBackend to set for the execution environment.
configureStateBackendAndExecuteAsync
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/StateBackendUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/StateBackendUtils.java
Apache-2.0
/**
 * Configures the state backend for the given StreamExecutionEnvironment and returns the
 * corresponding JobGraph without executing the job.
 *
 * @param env the StreamExecutionEnvironment to configure
 * @param stateBackend the StateBackend to set for the execution environment
 * @return the JobGraph representing the configured job
 */
public static JobGraph configureStateBackendAndGetJobGraph(
        StreamExecutionEnvironment env, StateBackend stateBackend) {
    final StreamGraph graph = env.getStreamGraph();
    graph.setStateBackend(stateBackend);
    return graph.getJobGraph();
}
Configures the state backend for the given StreamExecutionEnvironment and returns the corresponding JobGraph without executing the job. @param env The StreamExecutionEnvironment to configure. @param stateBackend The StateBackend to set for the execution environment. @return The JobGraph representing the configured job.
configureStateBackendAndGetJobGraph
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/StateBackendUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/StateBackendUtils.java
Apache-2.0
/**
 * Configures the state backend for the given StreamExecutionEnvironment and returns the
 * corresponding JobGraph without executing the job, using a specified ClassLoader and JobID.
 *
 * @param env the StreamExecutionEnvironment to configure
 * @param stateBackend the StateBackend to set for the execution environment
 * @param userClassLoader the ClassLoader to use for user-defined classes
 * @param jobId the JobID to associate with the JobGraph
 * @return the JobGraph representing the configured job
 */
public static JobGraph configureStateBackendAndGetJobGraph(
        StreamExecutionEnvironment env,
        StateBackend stateBackend,
        ClassLoader userClassLoader,
        JobID jobId) {
    final StreamGraph graph = env.getStreamGraph();
    graph.setStateBackend(stateBackend);
    return graph.getJobGraph(userClassLoader, jobId);
}
Configures the state backend for the given StreamExecutionEnvironment and returns the corresponding JobGraph without executing the job, using a specified ClassLoader and JobID. @param env The StreamExecutionEnvironment to configure. @param stateBackend The StateBackend to set for the execution environment. @param userClassLoader The ClassLoader to use for user-defined classes. @param jobId The JobID to associate with the JobGraph. @return The JobGraph representing the configured job.
configureStateBackendAndGetJobGraph
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/StateBackendUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/StateBackendUtils.java
Apache-2.0
/**
 * Uses {@link RemoteInputChannel RemoteInputChannels} and enables {@link #withMailboxExecutor()}
 * by default.
 */
public TestCheckpointedInputGateBuilder withRemoteChannels() {
    // Swap the gate factory to the remote-channel variant.
    gateBuilder = () -> buildRemoteGate();
    return this;
}
Uses {@link RemoteInputChannel RemoteInputChannels} and enables {@link #withMailboxExecutor()} by default.
withRemoteChannels
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/TestCheckpointedInputGateBuilder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/TestCheckpointedInputGateBuilder.java
Apache-2.0
/**
 * Uses all channels as {@link RemoteInputChannel RemoteInputChannels} except the channels from
 * testChannelIds, which are {@link TestInputChannel}s.
 */
public TestCheckpointedInputGateBuilder withMixedChannels(Integer... testChannelIds) {
    // Capture the channels to override before handing them to the deferred gate factory.
    final Integer[] channelsToOverride = testChannelIds;
    gateBuilder = () -> buildMixedGate(channelsToOverride);
    return this;
}
Uses all channels as {@link RemoteInputChannel RemoteInputChannels}, except for the channels listed in testChannelIds, which are {@link TestInputChannel}s.
withMixedChannels
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/TestCheckpointedInputGateBuilder.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/TestCheckpointedInputGateBuilder.java
Apache-2.0
/**
 * Extracts the raw elements from the given output list, skipping anything that is not a {@link
 * StreamRecord} (e.g. watermarks).
 *
 * @param output the operator/task output queue
 * @return the values carried by the StreamRecords, in encounter order
 */
@SuppressWarnings("unchecked")
public static <OUT> List<OUT> getRawElementsFromOutput(Queue<Object> output) {
    final List<OUT> resultElements = new LinkedList<>();
    for (Object element : output) {
        if (element instanceof StreamRecord) {
            resultElements.add(((StreamRecord<OUT>) element).getValue());
        }
    }
    return resultElements;
}
Extracts the raw elements from the given output list.
getRawElementsFromOutput
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/TestHarnessUtil.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/TestHarnessUtil.java
Apache-2.0
public static void assertOutputEqualsSorted( String message, Iterable<Object> expected, Iterable<Object> actual, Comparator<Object> comparator) { assertThat(actual).hasSameSizeAs(expected); // first, compare only watermarks, their position should be deterministic Iterator<Object> exIt = expected.iterator(); Iterator<Object> actIt = actual.iterator(); while (exIt.hasNext()) { Object nextEx = exIt.next(); Object nextAct = actIt.next(); if (nextEx instanceof Watermark) { assertThat(nextAct).isEqualTo(nextEx); } } List<Object> expectedRecords = new ArrayList<>(); List<Object> actualRecords = new ArrayList<>(); for (Object ex : expected) { if (ex instanceof StreamRecord) { expectedRecords.add(ex); } } for (Object act : actual) { if (act instanceof StreamRecord) { actualRecords.add(act); } } Object[] sortedExpected = expectedRecords.toArray(); Object[] sortedActual = actualRecords.toArray(); Arrays.sort(sortedExpected, comparator); Arrays.sort(sortedActual, comparator); assertThat(sortedActual).as(message).isEqualTo(sortedExpected); }
Compare the two queues containing operator/task output by converting them to an array first.
assertOutputEqualsSorted
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/TestHarnessUtil.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/TestHarnessUtil.java
Apache-2.0
public static void assertNoLateRecords(Iterable<Object> elements) { // check that no watermark is violated long highestWatermark = Long.MIN_VALUE; for (Object elem : elements) { if (elem instanceof Watermark) { highestWatermark = ((Watermark) elem).asWatermark().getTimestamp(); } else if (elem instanceof StreamRecord) { boolean dataIsOnTime = highestWatermark < ((StreamRecord) elem).getTimestamp(); assertThat(dataIsOnTime).as("Late data was emitted after join").isTrue(); } } }
Verifies that no StreamRecord carries a timestamp at or before the most recently emitted watermark. The check follows the iteration order of the elements. @param elements An iterable containing StreamRecords and watermarks
assertNoLateRecords
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/TestHarnessUtil.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/TestHarnessUtil.java
Apache-2.0
/**
 * Gets the operator's state after processing the given inputs.
 *
 * @param testHarness an operator whose state is computed
 * @param input a list of inputs
 * @return the operator's snapshot
 */
public static <InputT, CommT> OperatorSubtaskState buildSubtaskState(
        OneInputStreamOperatorTestHarness<InputT, CommT> testHarness, List<InputT> input)
        throws Exception {
    testHarness.initializeEmptyState();
    testHarness.open();
    // Wrap each raw input element in a StreamRecord before feeding it to the harness.
    final List<StreamRecord<InputT>> records = new ArrayList<>();
    for (InputT element : input) {
        records.add(new StreamRecord<>(element));
    }
    testHarness.processElements(records);
    testHarness.prepareSnapshotPreBarrier(1);
    final OperatorSubtaskState snapshot = testHarness.snapshot(1, 1);
    testHarness.close();
    return snapshot;
}
Get the operator's state after processing given inputs. @param testHarness A operator whose state is computed @param input A list of inputs @return The operator's snapshot
buildSubtaskState
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/TestHarnessUtil.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/TestHarnessUtil.java
Apache-2.0
/** Accessing a field of a nested row type must fail with InvalidFieldReferenceException, not a ClassCastException (FLINK-8255). */
@Test
void testRowTypeInfo() {
    final RowTypeInfo nestedRow =
            new RowTypeInfo(BasicTypeInfo.SHORT_TYPE_INFO, BasicTypeInfo.BIG_DEC_TYPE_INFO);
    final RowTypeInfo rowTypeInfo =
            new RowTypeInfo(new TypeInformation<?>[] {nestedRow}, new String[] {"row"});

    assertThatThrownBy(() -> FieldAccessorFactory.getAccessor(rowTypeInfo, "row.0", null))
            .isInstanceOf(CompositeType.InvalidFieldReferenceException.class);
}
Validates that accessing a field of a nested row type fails with an InvalidFieldReferenceException rather than a ClassCastException (regression test for FLINK-8255).
testRowTypeInfo
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/streaming/util/typeutils/FieldAccessorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/streaming/util/typeutils/FieldAccessorTest.java
Apache-2.0
/**
 * Checks whether the given directory exists and is writable. If it doesn't exist this method
 * will attempt to create it.
 *
 * @param uploadDir directory to check
 * @throws IOException if the directory does not exist and cannot be created, or if the
 *     directory isn't writable
 */
public static synchronized void checkAndCreateUploadDir(File uploadDir) throws IOException {
    if (uploadDir.exists() && uploadDir.canWrite()) {
        LOG.info("Using directory {} for web frontend JAR file uploads.", uploadDir);
    } else if (uploadDir.mkdirs() && uploadDir.canWrite()) {
        LOG.info("Created directory {} for web frontend JAR file uploads.", uploadDir);
    } else if (uploadDir.isDirectory() && uploadDir.canWrite()) {
        // mkdirs() returns false when the directory already exists: an external process may
        // have created it between the exists() check and the mkdirs() call. Re-check instead
        // of failing spuriously.
        LOG.info("Using directory {} for web frontend JAR file uploads.", uploadDir);
    } else {
        // Build the message once instead of duplicating it for the log and the exception.
        final String message =
                String.format(
                        "Jar upload directory %s cannot be created or is not writable.",
                        uploadDir.getAbsolutePath());
        LOG.warn(message);
        throw new IOException(message);
    }
}
Checks whether the given directory exists and is writable. If it doesn't exist this method will attempt to create it. @param uploadDir directory to check @throws IOException if the directory does not exist and cannot be created, or if the directory isn't writable
checkAndCreateUploadDir
java
apache/flink
flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/HttpRequestHandler.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/HttpRequestHandler.java
Apache-2.0
/** Handles requests for deletion of jars. */
@Override
protected CompletableFuture<EmptyResponseBody> handleRequest(
        @Nonnull final HandlerRequest<EmptyRequestBody> request,
        @Nonnull final RestfulGateway gateway)
        throws RestHandlerException {
    final String jarId = request.getPathParameter(JarIdPathParameter.class);
    return CompletableFuture.supplyAsync(
            () -> {
                final Path jarToDelete = jarDir.resolve(jarId);
                try {
                    // deleteIfExists avoids the check-then-delete race that
                    // Files.exists() followed by Files.delete() would introduce.
                    if (!Files.deleteIfExists(jarToDelete)) {
                        throw new CompletionException(
                                new RestHandlerException(
                                        String.format(
                                                "File %s does not exist in %s.", jarId, jarDir),
                                        HttpResponseStatus.BAD_REQUEST));
                    }
                    return EmptyResponseBody.getInstance();
                } catch (final IOException e) {
                    throw new CompletionException(
                            new RestHandlerException(
                                    String.format("Failed to delete jar %s.", jarToDelete),
                                    HttpResponseStatus.INTERNAL_SERVER_ERROR,
                                    e));
                }
            },
            executor);
}
Handles requests for deletion of jars.
handleRequest
java
apache/flink
flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/JarDeleteHandler.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/JarDeleteHandler.java
Apache-2.0
/** Handles requests to fetch the execution plan for a jar. */
@Override
protected CompletableFuture<JobPlanInfo> handleRequest(
        @Nonnull final HandlerRequest<JarPlanRequestBody> request,
        @Nonnull final RestfulGateway gateway)
        throws RestHandlerException {
    final JarHandlerContext context = JarHandlerContext.fromRequest(request, jarDir, log);
    // Work on a copy of the handler's configuration so request-specific settings
    // do not leak into subsequent requests.
    final Configuration effectiveConfiguration = new Configuration(this.configuration);
    context.applyToConfiguration(effectiveConfiguration, request);

    return CompletableFuture.supplyAsync(
            () -> {
                // PackagedProgram is AutoCloseable; release it once the plan is generated.
                try (PackagedProgram packagedProgram =
                        context.toPackagedProgram(effectiveConfiguration)) {
                    final JobGraph jobGraph =
                            context.toJobGraph(packagedProgram, effectiveConfiguration, true);
                    return planGenerator.apply(jobGraph);
                }
            },
            executor);
}
This handler handles requests to fetch the plan for a jar.
handleRequest
java
apache/flink
flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/JarPlanHandler.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/JarPlanHandler.java
Apache-2.0
/** Describes the query parameter that specifies the savepoint to restore from. */
@Override
public String getDescription() {
    final String description =
            "String value that specifies the path of the savepoint to restore the job from.";
    return description;
}
Query parameter that specifies the savepoint to restore from.
getDescription
java
apache/flink
flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/SavepointPathQueryParameter.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/SavepointPathQueryParameter.java
Apache-2.0