Dataset columns: code (string, length 25 to 201k), docstring (string, length 19 to 96.2k), func_name (string, length 0 to 235), language (1 class), repo (string, length 8 to 51), path (string, length 11 to 314), url (string, length 62 to 377), license (7 classes).
public void notifyAvailable() { lock.lock(); try { moveToAvailable(); } finally { lock.unlock(); } }
Makes sure the availability future is complete, if it is not complete already. All futures returned by previous calls to {@link #getAvailabilityFuture()} are guaranteed to be completed. <p>All future calls to the method will return a completed future, until the point that the availability is reset via calls to {@link #poll()} that leave the queue empty.
notifyAvailable
java
apache/flink
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
Apache-2.0
@GuardedBy("lock") private void moveToAvailable() { final CompletableFuture<Void> current = currentFuture; if (current != AVAILABLE) { currentFuture = AVAILABLE; current.complete(null); } }
Internal utility to make sure that the current future is complete (until reset).
moveToAvailable
java
apache/flink
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
Apache-2.0
@GuardedBy("lock") private void moveToUnAvailable() { if (currentFuture == AVAILABLE) { currentFuture = new CompletableFuture<>(); } }
Makes sure the availability future is incomplete, if it was complete before.
moveToUnAvailable
java
apache/flink
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
Apache-2.0
public boolean put(int threadIndex, T element) throws InterruptedException { if (element == null) { throw new NullPointerException(); } lock.lockInterruptibly(); try { while (queue.size() >= capacity) { if (getAndResetWakeUpFlag(threadIndex)) { return false; } waitOnPut(threadIndex); } enqueue(element); return true; } finally { lock.unlock(); } }
Put an element into the queue. The thread blocks if the queue is full. @param threadIndex the index of the thread. @param element the element to put. @return true if the element has been successfully put into the queue, false otherwise. @throws InterruptedException when the thread is interrupted.
put
java
apache/flink
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
Apache-2.0
@VisibleForTesting public T take() throws InterruptedException { T next; while ((next = poll()) == null) { // use the future to wait for availability to avoid busy waiting try { getAvailabilityFuture().get(); } catch (ExecutionException | CompletionException e) { // this should never happen, but we propagate just in case throw new FlinkRuntimeException("exception in queue future completion", e); } } return next; }
<b>Warning:</b> This is a dangerous method and should only be used for testing convenience. A method that blocks until availability does not go together well with the concept of asynchronous notifications and non-blocking polling. <p>Get and remove the first element from the queue. The call blocks if the queue is empty. The problem with this method is that it may loop internally until an element is available and that way eagerly reset the availability future. If a consumer thread is blocked in taking an element, it will receive availability notifications from {@link #notifyAvailable()} and immediately reset them by calling {@link #poll()} and finding the queue empty. @return the first element in the queue. @throws InterruptedException when the thread is interrupted.
take
java
apache/flink
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
Apache-2.0
public T poll() { lock.lock(); try { if (queue.size() == 0) { moveToUnAvailable(); return null; } return dequeue(); } finally { lock.unlock(); } }
Get and remove the first element from the queue. Null is returned if the queue is empty. If this makes the queue empty (takes the last element) or finds the queue already empty, then this resets the availability notifications. The next call to {@link #getAvailabilityFuture()} will then return a non-complete future that completes only the next time that the queue becomes non-empty or the {@link #notifyAvailable()} method is called. @return the first element from the queue, or null if the queue is empty.
poll
java
apache/flink
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
Apache-2.0
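The queue snippets above (notifyAvailable, poll and the availability future) combine into a non-blocking consumption pattern. Below is a minimal hedged usage sketch, not an example from the Flink code base; the no-argument FutureCompletingBlockingQueue constructor and the producer thread index 0 are assumptions for illustration.

import org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue;

public class AvailabilityPollingSketch {

    public static void main(String[] args) throws InterruptedException {
        FutureCompletingBlockingQueue<String> queue = new FutureCompletingBlockingQueue<>();

        // Producer side: put() blocks when the queue is full; thread index 0 is illustrative.
        queue.put(0, "record-1");

        // Consumer side: poll() never blocks. When it returns null the availability future
        // has been reset, so register a callback instead of busy-waiting.
        String next = queue.poll();
        if (next == null) {
            queue.getAvailabilityFuture().thenRun(() -> {
                // Runs once notifyAvailable() fires or an element is enqueued.
                System.out.println("became available: " + queue.poll());
            });
        } else {
            System.out.println("got: " + next);
        }
    }
}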
public T peek() { lock.lock(); try { return queue.peek(); } finally { lock.unlock(); } }
Get the first element from the queue without removing it. @return the first element in the queue, or null if the queue is empty.
peek
java
apache/flink
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
Apache-2.0
public int size() { lock.lock(); try { return queue.size(); } finally { lock.unlock(); } }
Gets the size of the queue.
size
java
apache/flink
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
Apache-2.0
public boolean isEmpty() { lock.lock(); try { return queue.isEmpty(); } finally { lock.unlock(); } }
Checks whether the queue is empty.
isEmpty
java
apache/flink
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
Apache-2.0
public int remainingCapacity() { lock.lock(); try { return capacity - queue.size(); } finally { lock.unlock(); } }
Checks the remaining capacity in the queue. That is the difference between the maximum capacity and the current number of elements in the queue.
remainingCapacity
java
apache/flink
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
Apache-2.0
public static <SplitT extends SourceSplit, C extends Collection<SplitT>> byte[] serializeSplitAssignments( Map<Integer, C> splitAssignments, SimpleVersionedSerializer<SplitT> splitSerializer) throws IOException { try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream out = new DataOutputStream(baos)) { out.writeInt(splitAssignments.size()); // Split serializer version. out.writeInt(splitSerializer.getVersion()); // Write assignments for subtasks. for (Map.Entry<Integer, C> entry : splitAssignments.entrySet()) { // Subtask ID int subtaskId = entry.getKey(); Collection<SplitT> splitsForSubtask = entry.getValue(); // Number of the splits. out.writeInt(subtaskId); out.writeInt(splitsForSubtask.size()); for (SplitT split : splitsForSubtask) { byte[] serializedSplit = splitSerializer.serialize(split); out.writeInt(serializedSplit.length); out.write(serializedSplit); } } return baos.toByteArray(); } }
Serialize a mapping from subtask ids to lists of assigned splits. The serialized format is as follows: <pre> 4 bytes - number of subtasks 4 bytes - split serializer version N bytes - [assignment_for_subtask] 4 bytes - subtask id 4 bytes - number of assigned splits N bytes - [assigned_splits] 4 bytes - serialized split length N bytes - serialized splits </pre> @param splitAssignments a mapping from subtask ids to lists of assigned splits. @param splitSerializer the serializer of the split. @param <SplitT> the type of the splits. @param <C> the type of the collection to hold the assigned splits for a subtask. @return the serialized bytes of the given subtask to splits assignment mapping. @throws IOException when serialization failed.
serializeSplitAssignments
java
apache/flink
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/utils/SerdeUtils.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/utils/SerdeUtils.java
Apache-2.0
public static <SplitT extends SourceSplit, C extends Collection<SplitT>> Map<Integer, C> deserializeSplitAssignments( byte[] serialized, SimpleVersionedSerializer<SplitT> splitSerializer, Function<Integer, C> collectionSupplier) throws IOException { try (ByteArrayInputStream bais = new ByteArrayInputStream(serialized); DataInputStream in = new DataInputStream(bais)) { int numSubtasks = in.readInt(); Map<Integer, C> splitsAssignments = new HashMap<>(numSubtasks); int serializerVersion = in.readInt(); for (int i = 0; i < numSubtasks; i++) { int subtaskId = in.readInt(); int numAssignedSplits = in.readInt(); C assignedSplits = collectionSupplier.apply(numAssignedSplits); for (int j = 0; j < numAssignedSplits; j++) { int serializedSplitSize = in.readInt(); byte[] serializedSplit = new byte[serializedSplitSize]; in.readFully(serializedSplit); SplitT split = splitSerializer.deserialize(serializerVersion, serializedSplit); assignedSplits.add(split); } splitsAssignments.put(subtaskId, assignedSplits); } return splitsAssignments; } }
Deserialize the given bytes returned by {@link #serializeSplitAssignments(Map, SimpleVersionedSerializer)}. @param serialized the serialized bytes returned by {@link #serializeSplitAssignments(Map, SimpleVersionedSerializer)}. @param splitSerializer the split serializer for the splits. @param collectionSupplier the supplier for the {@link Collection} instance to hold the assigned splits for a subtask. @param <SplitT> the type of the splits. @param <C> the type of the collection to hold the assigned splits for a subtask. @return A mapping from subtask id to its assigned splits. @throws IOException when deserialization failed.
deserializeSplitAssignments
java
apache/flink
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/utils/SerdeUtils.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/utils/SerdeUtils.java
Apache-2.0
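Taken together, the two SerdeUtils entries describe a symmetric serialize/deserialize pair. The following hedged round-trip sketch exercises both; the StringSplit split type and its serializer are hypothetical and exist only for illustration.

import org.apache.flink.api.connector.source.SourceSplit;
import org.apache.flink.connector.base.source.utils.SerdeUtils;
import org.apache.flink.core.io.SimpleVersionedSerializer;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SerdeUtilsRoundTripSketch {

    // Hypothetical split type used only to exercise the helpers.
    static final class StringSplit implements SourceSplit {
        final String id;
        StringSplit(String id) { this.id = id; }
        @Override public String splitId() { return id; }
    }

    // Hypothetical serializer: encodes the split id as UTF-8 bytes.
    static final class StringSplitSerializer implements SimpleVersionedSerializer<StringSplit> {
        @Override public int getVersion() { return 1; }
        @Override public byte[] serialize(StringSplit split) { return split.id.getBytes(StandardCharsets.UTF_8); }
        @Override public StringSplit deserialize(int version, byte[] serialized) {
            return new StringSplit(new String(serialized, StandardCharsets.UTF_8));
        }
    }

    public static void main(String[] args) throws IOException {
        Map<Integer, List<StringSplit>> assignments = new HashMap<>();
        assignments.put(0, Arrays.asList(new StringSplit("split-a"), new StringSplit("split-b")));
        assignments.put(1, Arrays.asList(new StringSplit("split-c")));

        byte[] bytes = SerdeUtils.serializeSplitAssignments(assignments, new StringSplitSerializer());

        // The collection supplier receives the number of splits for a subtask and returns
        // the collection that will hold them.
        Map<Integer, List<StringSplit>> restored =
                SerdeUtils.deserializeSplitAssignments(bytes, new StringSplitSerializer(), ArrayList::new);

        restored.forEach((subtask, splits) ->
                splits.forEach(s -> System.out.println(subtask + " -> " + s.splitId())));
    }
}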
public ConcreteBuilderT setMaxBufferSizeInBytes(long maxBufferSizeInBytes) { this.maxBufferSizeInBytes = maxBufferSizeInBytes; return (ConcreteBuilderT) this; }
@param maxBufferSizeInBytes a flush will be attempted if the most recent call to write introduces an element to the buffer such that the total size of the buffer is greater than or equal to this threshold value. @return {@link ConcreteBuilderT} itself
setMaxBufferSizeInBytes
java
apache/flink
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/table/sink/AsyncDynamicTableSinkBuilder.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/table/sink/AsyncDynamicTableSinkBuilder.java
Apache-2.0
protected static List<Integer> putRecords(List<Integer> newRecords) { store.addAll( newRecords.stream().filter(record -> record <= 1000).collect(Collectors.toList())); if (newRecords.contains(1_000_000)) { throw new RuntimeException( "Intentional error on persisting 1_000_000 to ArrayListDestination"); } return newRecords.stream().filter(record -> record > 1000).collect(Collectors.toList()); }
Returns a list of indices of elements that failed to insert; an element fails to insert if the integer value of the {@code newRecord} is greater than 1000.
putRecords
java
apache/flink
flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/ArrayListDestination.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/ArrayListDestination.java
Apache-2.0
@Override protected void submitRequestEntries( List<Integer> requestEntries, ResultHandler<Integer> resultHandler) { if (requestEntries.size() == 3) { try { delayedStartLatch.countDown(); if (blockForLimitedTime) { assertThat(blockedThreadLatch.await(500, TimeUnit.MILLISECONDS)) .as( "The countdown latch was released before the full amount" + "of time was reached.") .isFalse(); } else { blockedThreadLatch.await(); } } catch (InterruptedException e) { fail("The unit test latch must not have been interrupted by another thread."); } } res.addAll(requestEntries); resultHandler.complete(); }
This SinkWriter releases the lock on existing threads blocked by {@code delayedStartLatch} and blocks itself until {@code blockedThreadLatch} is unblocked.
submitRequestEntries
java
apache/flink
flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/AsyncSinkWriterTest.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/AsyncSinkWriterTest.java
Apache-2.0
@Test void shouldAddEntriesInFifoOrder() { RequestEntryWrapper<String> entry1 = new RequestEntryWrapper<>("Entry1", 10); RequestEntryWrapper<String> entry2 = new RequestEntryWrapper<>("Entry2", 20); bufferWrapper.add(entry1, false); bufferWrapper.add(entry2, false); assertThat(bufferWrapper.size()).isEqualTo(2); assertThat(bufferWrapper.peek()).isEqualTo(entry1); assertThat(bufferWrapper.poll()).isEqualTo(entry1); assertThat(bufferWrapper.poll()).isEqualTo(entry2); assertThat(bufferWrapper.isEmpty()).isTrue(); }
Tests that entries are added in FIFO fashion.
shouldAddEntriesInFifoOrder
java
apache/flink
flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/DequeRequestBufferTest.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/DequeRequestBufferTest.java
Apache-2.0
@Test void shouldPrioritizeEntriesAddedAtHead() { RequestEntryWrapper<String> entry1 = new RequestEntryWrapper<>("Entry1", 10); RequestEntryWrapper<String> entry2 = new RequestEntryWrapper<>("Entry2", 20); RequestEntryWrapper<String> priorityEntry = new RequestEntryWrapper<>("PriorityEntry", 30); bufferWrapper.add(entry1, false); bufferWrapper.add(entry2, false); bufferWrapper.add(priorityEntry, true); // Should be added at the front assertThat(bufferWrapper.size()).isEqualTo(3); assertThat(bufferWrapper.peek()).isEqualTo(priorityEntry); assertThat(bufferWrapper.poll()).isEqualTo(priorityEntry); assertThat(bufferWrapper.poll()).isEqualTo(entry1); assertThat(bufferWrapper.poll()).isEqualTo(entry2); }
Tests that priority entries are added at the head of the buffer.
shouldPrioritizeEntriesAddedAtHead
java
apache/flink
flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/DequeRequestBufferTest.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/DequeRequestBufferTest.java
Apache-2.0
@Test public void testCreatNextBatchWithEmptyBuffer() { SimpleBatchCreator<String> creator = new SimpleBatchCreator<>(100L); RequestBuffer<String> buffer = new DequeRequestBuffer<>(); // No entries in the buffer RequestInfo requestInfo = () -> 10; Batch<String> result = creator.createNextBatch(requestInfo, buffer); assertThat(result.getBatchEntries()).isEmpty(); assertThat(result.getRecordCount()).isEqualTo(0); assertThat(result.getSizeInBytes()).isEqualTo(0L); }
Ensures no entries are returned when the buffer is empty.
testCreatNextBatchWithEmptyBuffer
java
apache/flink
flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/SimpleBatchCreatorTest.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/SimpleBatchCreatorTest.java
Apache-2.0
@Test public void testCreateNextBatchRespectsBatchCountLimit() { SimpleBatchCreator<String> creator = new SimpleBatchCreator<>(100L); RequestBuffer<String> buffer = new DequeRequestBuffer<>(); // Add multiple items to the buffer for (int i = 0; i < 10; i++) { buffer.add(new RequestEntryWrapper<>("elem-" + i, 10L), false); } RequestInfo requestInfo = () -> { return 5; // limit to 5 items }; Batch<String> result = creator.createNextBatch(requestInfo, buffer); // Should only take 5 items, ignoring the size limit because each item is 10 bytes assertThat(result.getBatchEntries().size()).isEqualTo(5); assertThat(result.getRecordCount()).isEqualTo(5); assertThat(result.getSizeInBytes()).isEqualTo(50L); // Check the buffer was drained of exactly 5 elements assertThat(buffer.size()).isEqualTo(5); }
Verifies that the maximum batch size (count of entries) is observed even when the size in bytes would allow more entries.
testCreateNextBatchRespectsBatchCountLimit
java
apache/flink
flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/SimpleBatchCreatorTest.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/SimpleBatchCreatorTest.java
Apache-2.0
@Test public void testCreateNextBatchSizeLimitFits() { // The total size limit for a batch is 20 SimpleBatchCreator<String> creator = new SimpleBatchCreator<>(20L); RequestBuffer<String> buffer = new DequeRequestBuffer<>(); buffer.add(new RequestEntryWrapper<>("A", 10L), false); buffer.add(new RequestEntryWrapper<>("B", 10L), false); buffer.add(new RequestEntryWrapper<>("C", 10L), false); RequestInfo requestInfo = () -> { return 10; // large enough that size becomes the limiting factor }; Batch<String> result = creator.createNextBatch(requestInfo, buffer); // We can fit exactly two items: A and B assertThat(result.getBatchEntries()).isEqualTo(Arrays.asList("A", "B")); assertThat(result.getRecordCount()).isEqualTo(2); assertThat(result.getSizeInBytes()).isEqualTo(20L); // C should still remain in the buffer. assertThat(buffer.size()).isEqualTo(1); assertThat(buffer.peek().getRequestEntry()).isEqualTo("C"); }
Tests an exact boundary condition (filling up exactly to maxBatchSizeInBytes).
testCreateNextBatchSizeLimitFits
java
apache/flink
flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/SimpleBatchCreatorTest.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/SimpleBatchCreatorTest.java
Apache-2.0
@Test void testHybridSource(@InjectMiniCluster MiniCluster miniCluster) throws Exception { testHybridSource(FailoverType.NONE, sourceWithFixedSwitchPosition(), miniCluster); }
Test the source in the happy path.
testHybridSource
java
apache/flink
flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceITCase.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceITCase.java
Apache-2.0
@Test void testHybridSourceWithDynamicSwitchPosition(@InjectMiniCluster MiniCluster miniCluster) throws Exception { testHybridSource(FailoverType.NONE, sourceWithDynamicSwitchPosition(), miniCluster); }
Test the source in the happy path with runtime position transfer.
testHybridSourceWithDynamicSwitchPosition
java
apache/flink
flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceITCase.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceITCase.java
Apache-2.0
@Test void testHybridSourceWithTaskManagerFailover(@InjectMiniCluster MiniCluster miniCluster) throws Exception { testHybridSource(FailoverType.TM, sourceWithFixedSwitchPosition(), miniCluster); }
Test the source with TaskManager restart.
testHybridSourceWithTaskManagerFailover
java
apache/flink
flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceITCase.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceITCase.java
Apache-2.0
@ParameterizedTest(name = "Individual reader per split: {0}") @ValueSource(booleans = {false, true}) void testPauseResumeSplitReaders(boolean individualReader) throws Exception { final AtomicInteger numSplitReaders = new AtomicInteger(); final MockSplitReader.Builder readerBuilder = SteppingSourceReaderTestHarness.createSplitReaderBuilder(); final SteppingSourceReaderTestHarness testHarness = new SteppingSourceReaderTestHarness( () -> { numSplitReaders.getAndIncrement(); return readerBuilder.build(); }, new Configuration()); if (individualReader) { testHarness.addPrefilledSplitsIndividualReader(2, 5); assertThat(numSplitReaders.get()).isEqualTo(2); } else { testHarness.addPrefilledSplitsSingleReader(2, 5); assertThat(numSplitReaders.get()).isEqualTo(1); } TestingReaderOutput output = new TestingReaderOutput<>(); testHarness.runUntilRecordsEmitted(output, 10, 2); Set<Integer> recordSet = new HashSet<>(output.getEmittedRecords()); assertThat(recordSet).containsExactlyInAnyOrder(0, 1); testHarness.pauseOrResumeSplits(Collections.singleton("0"), Collections.emptyList()); testHarness.runUntilRecordsEmitted(output, 10, 5); Set<Integer> recordSet2 = new HashSet<>(output.getEmittedRecords()); assertThat(recordSet2).containsExactlyInAnyOrder(0, 1, 3, 5, 7); testHarness.pauseOrResumeSplits(Collections.emptyList(), Collections.singleton("0")); testHarness.runUntilAllRecordsEmitted(output, 10); Set<Integer> recordSet3 = new HashSet<>(output.getEmittedRecords()); assertThat(recordSet3).containsExactlyInAnyOrder(0, 1, 2, 3, 4, 5, 6, 7, 8, 9); }
Tests if pause or resume shows expected behavior which requires creation and execution of {@link SplitFetcher} tasks.
testPauseResumeSplitReaders
java
apache/flink
flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherPauseResumeSplitReaderTest.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherPauseResumeSplitReaderTest.java
Apache-2.0
@Test void testQueueUsesShortCircuitFuture() { assertThat(FutureCompletingBlockingQueue.AVAILABLE) .isSameAs(AvailabilityProvider.AVAILABLE); }
This test is to guard that our reflection is not broken and we do not lose the performance advantage. This is possible, because the tests depend on the runtime modules while the main scope does not.
testQueueUsesShortCircuitFuture
java
apache/flink
flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueueTest.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueueTest.java
Apache-2.0
public static <OUT> void checkIterable(Iterable<OUT> elements, Class<?> viewedAs) { for (OUT elem : elements) { if (elem == null) { throw new IllegalArgumentException("The collection contains a null element"); } if (!viewedAs.isAssignableFrom(elem.getClass())) { throw new IllegalArgumentException( "The elements in the collection are not all subclasses of " + viewedAs.getCanonicalName()); } } }
Verifies that all elements in the iterable are non-null, and are of the given class, or a subclass thereof. @param elements The iterable to check. @param viewedAs The class to which the elements must be assignable. @param <OUT> The generic type of the iterable to be checked.
checkIterable
java
apache/flink
flink-connectors/flink-connector-datagen/src/main/java/org/apache/flink/connector/datagen/functions/FromElementsGeneratorFunction.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-datagen/src/main/java/org/apache/flink/connector/datagen/functions/FromElementsGeneratorFunction.java
Apache-2.0
public FileCompactStrategy.Builder enableCompactionOnCheckpoint( int numCheckpointsBeforeCompaction) { checkArgument( numCheckpointsBeforeCompaction > 0, "Number of checkpoints before compaction should be more than 0."); this.numCheckpointsBeforeCompaction = numCheckpointsBeforeCompaction; return this; }
Optional: compaction will be triggered when N checkpoints have passed since the last triggering; -1 (the default) indicates no compaction on checkpoint.
enableCompactionOnCheckpoint
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/compactor/FileCompactStrategy.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/compactor/FileCompactStrategy.java
Apache-2.0
public void compact(List<Path> inputFiles, OutputStream outputStream) throws Exception { checkState(inputFiles.size() == 1, "IdenticalFileCompactor can only copy one input file"); super.compact(inputFiles, outputStream); }
A simple {@link OutputStreamBasedFileCompactor} implementation that directly copies the content of the single input file to the output.
compact
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/compactor/IdenticalFileCompactor.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/compactor/IdenticalFileCompactor.java
Apache-2.0
public void compact(List<Path> inputFiles, OutputStream outputStream) throws Exception { // The outputStream should not be closed here. CloseShieldOutputStream shieldOutputStream = new CloseShieldOutputStream(outputStream); doCompact(inputFiles, shieldOutputStream); }
Base class for {@link FileCompactor} implementations that write the compacted file via an output stream.
compact
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/compactor/OutputStreamBasedFileCompactor.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/compactor/OutputStreamBasedFileCompactor.java
Apache-2.0
public void compact(List<Path> inputFiles, Writer<IN> writer) throws Exception { for (Path input : inputFiles) { try (Reader<IN> reader = readerFactory.createFor(input)) { IN elem; while ((elem = reader.read()) != null) { writer.write(elem); } } } }
A {@link FileCompactor} implementation that reads input files with a {@link Reader} and writes with a {@link Writer}.
compact
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/compactor/RecordWiseFileCompactor.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/compactor/RecordWiseFileCompactor.java
Apache-2.0
static <IN> FileWriterBucket<IN> getNew( final String bucketId, final Path bucketPath, final BucketWriter<IN, String> bucketWriter, final RollingPolicy<IN, String> rollingPolicy, final OutputFileConfig outputFileConfig) { return new FileWriterBucket<>( bucketId, bucketPath, bucketWriter, rollingPolicy, outputFileConfig); }
Creates a new empty {@code Bucket}. @param bucketId the identifier of the bucket, as returned by the {@link BucketAssigner}. @param bucketPath the path where the part files for the bucket will be written. @param bucketWriter the {@link BucketWriter} used to write part files in the bucket. @param <IN> the type of input elements to the sink. @param outputFileConfig the part file configuration. @return The new Bucket.
getNew
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/writer/FileWriterBucket.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/writer/FileWriterBucket.java
Apache-2.0
static <IN> FileWriterBucket<IN> restore( final BucketWriter<IN, String> bucketWriter, final RollingPolicy<IN, String> rollingPolicy, final FileWriterBucketState bucketState, final OutputFileConfig outputFileConfig) throws IOException { return new FileWriterBucket<>(bucketWriter, rollingPolicy, bucketState, outputFileConfig); }
Restores a {@code Bucket} from the state included in the provided {@link FileWriterBucketState}. @param bucketWriter the {@link BucketWriter} used to write part files in the bucket. @param bucketState the initial state of the restored bucket. @param <IN> the type of input elements to the sink. @param outputFileConfig the part file configuration. @return The restored Bucket.
restore
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/writer/FileWriterBucket.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/writer/FileWriterBucket.java
Apache-2.0
public SELF monitorContinuously(Duration discoveryInterval) { checkNotNull(discoveryInterval, "discoveryInterval"); checkArgument( !(discoveryInterval.isNegative() || discoveryInterval.isZero()), "discoveryInterval must be > 0"); this.continuousSourceSettings = new ContinuousEnumerationSettings(discoveryInterval); return self(); }
Sets this source to streaming ("continuous monitoring") mode. <p>This makes the source a "continuous streaming" source that keeps running, monitoring for new files, and reads these files when they appear and are discovered by the monitoring. <p>The interval in which the source checks for new files is the {@code discoveryInterval}. Shorter intervals mean that files are discovered more quickly, but also imply more frequent listing or directory traversal of the file system / object store.
monitorContinuously
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/AbstractFileSource.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/AbstractFileSource.java
Apache-2.0
public SELF processStaticFileSet() { this.continuousSourceSettings = null; return self(); }
Sets this source to bounded (batch) mode. <p>In this mode, the source processes the files that are under the given paths when the application is started. Once all files are processed, the source will finish. <p>This setting is also the default behavior. This method is mainly here to "switch back" to bounded (batch) mode, or to make it explicit in the source construction.
processStaticFileSet
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/AbstractFileSource.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/AbstractFileSource.java
Apache-2.0
public SELF setFileEnumerator(FileEnumerator.Provider fileEnumerator) { this.fileEnumerator = checkNotNull(fileEnumerator); return self(); }
Configures the {@link FileEnumerator} for the source. The File Enumerator is responsible for selecting from the input path the set of files that should be processed (and which to filter out). Furthermore, the File Enumerator may split the files further into sub-regions, to enable parallelization beyond the number of files.
setFileEnumerator
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/AbstractFileSource.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/AbstractFileSource.java
Apache-2.0
public static <T> FileSourceBuilder<T> forRecordStreamFormat( final StreamFormat<T> streamFormat, final Path... paths) { return forBulkFileFormat(new StreamFormatAdapter<>(streamFormat), paths); }
Builds a new {@code FileSource} using a {@link StreamFormat} to read record-by-record from a file stream. <p>When possible, stream-based formats are generally preferable to file-based formats, because they support better default behavior around I/O batching or progress tracking (checkpoints). <p>Stream formats also automatically de-compress files based on the file extension. This supports files ending in ".deflate" (Deflate), ".xz" (XZ), ".bz2" (BZip2), ".gz", ".gzip" (GZip).
forRecordStreamFormat
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSource.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSource.java
Apache-2.0
public static <T> FileSourceBuilder<T> forBulkFileFormat( final BulkFormat<T, FileSourceSplit> bulkFormat, final Path... paths) { checkNotNull(bulkFormat, "reader"); checkNotNull(paths, "paths"); checkArgument(paths.length > 0, "paths must not be empty"); return new FileSourceBuilder<>(paths, bulkFormat); }
Builds a new {@code FileSource} using a {@link BulkFormat} to read batches of records from files. <p>Examples for bulk readers are compressed and vectorized formats such as ORC or Parquet.
forBulkFileFormat
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSource.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSource.java
Apache-2.0
@Override public FileSource<T> build() { return new FileSource<>( inputPaths, fileEnumerator, splitAssigner, readerFormat, continuousSourceSettings); }
The builder for the {@code FileSource}, to configure the various behaviors. <p>Start building the source via one of the following methods: <ul> <li>{@link FileSource#forRecordStreamFormat(StreamFormat, Path...)} <li>{@link FileSource#forBulkFileFormat(BulkFormat, Path...)} </ul>
build
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSource.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSource.java
Apache-2.0
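The builder entries above (forRecordStreamFormat, forBulkFileFormat, monitorContinuously, processStaticFileSet, build) fit together as in this hedged sketch. It assumes TextLineInputFormat is the line-based StreamFormat shipped with flink-connector-files; the input path is illustrative.

import org.apache.flink.connector.file.src.FileSource;
import org.apache.flink.connector.file.src.reader.TextLineInputFormat;
import org.apache.flink.core.fs.Path;

import java.time.Duration;

public class FileSourceBuilderSketch {

    public static void main(String[] args) {
        // Bounded (batch) mode: process the files that exist when the job starts, then finish.
        FileSource<String> boundedSource =
                FileSource.forRecordStreamFormat(new TextLineInputFormat(), new Path("/tmp/input"))
                        .processStaticFileSet()
                        .build();

        // Streaming mode: keep monitoring the given paths for new files every 10 seconds.
        FileSource<String> streamingSource =
                FileSource.forRecordStreamFormat(new TextLineInputFormat(), new Path("/tmp/input"))
                        .monitorContinuously(Duration.ofSeconds(10))
                        .build();

        System.out.println(boundedSource + " / " + streamingSource);
    }
}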
@Override public String splitId() { return id; }
Package private constructor, used by the serializers to directly cache the serialized form.
splitId
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java
Apache-2.0
public long offset() { return offset; }
Returns the start of the file region referenced by this source split. The position is inclusive, the value indicates the first byte that is part of the split.
offset
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java
Apache-2.0
public long length() { return length; }
Returns the number of bytes in the file region described by this source split.
length
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java
Apache-2.0
public long fileModificationTime() { return fileModificationTime; }
Returns the modification time of the file, from {@link FileStatus#getModificationTime()}.
fileModificationTime
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java
Apache-2.0
public long fileSize() { return fileSize; }
Returns the full file size in bytes, from {@link FileStatus#getLen()}.
fileSize
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java
Apache-2.0
public String[] hostnames() { return hostnames; }
Gets the hostnames of the nodes storing the file range described by this split. The returned array is empty if no host information is available. <p>Host information is typically only available on specific file systems, like HDFS.
hostnames
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java
Apache-2.0
public Optional<CheckpointedPosition> getReaderPosition() { return Optional.ofNullable(readerPosition); }
Gets the (checkpointed) position of the reader, if set. This value is typically absent for splits when assigned from the enumerator to the readers, and present when the splits are recovered from a checkpoint.
getReaderPosition
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java
Apache-2.0
@SuppressWarnings("unchecked") public SplitT toFileSourceSplit() { final CheckpointedPosition position = (offset == CheckpointedPosition.NO_OFFSET && recordsToSkipAfterOffset == 0) ? null : new CheckpointedPosition(offset, recordsToSkipAfterOffset); final FileSourceSplit updatedSplit = split.updateWithCheckpointedPosition(position); // some sanity checks to avoid surprises and not accidentally lose split information if (updatedSplit == null) { throw new FlinkRuntimeException( "Split returned 'null' in updateWithCheckpointedPosition(): " + split); } if (updatedSplit.getClass() != split.getClass()) { throw new FlinkRuntimeException( String.format( "Split returned different type in updateWithCheckpointedPosition(). " + "Split type is %s, returned type is %s", split.getClass().getName(), updatedSplit.getClass().getName())); } return (SplitT) updatedSplit; }
Use the current row count as the starting row count to create a new FileSourceSplit.
toFileSourceSplit
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplitState.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplitState.java
Apache-2.0
public static Collection<String> getCommonSuffixes() { return COMMON_SUFFIXES; }
Gets all common file extensions of supported file compression formats.
getCommonSuffixes
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/compression/StandardDeCompressors.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/compression/StandardDeCompressors.java
Apache-2.0
@Nullable public static InflaterInputStreamFactory<?> getDecompressorForExtension(String extension) { return DECOMPRESSORS.get(extension); }
Gets the decompressor for a file extension. Returns null if there is no decompressor for this file extension.
getDecompressorForExtension
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/compression/StandardDeCompressors.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/compression/StandardDeCompressors.java
Apache-2.0
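A small hedged sketch of the two StandardDeCompressors lookups above. Whether the extension is passed with or without the leading dot, and the import location of InflaterInputStreamFactory, are assumptions.

import org.apache.flink.api.common.io.compression.InflaterInputStreamFactory;
import org.apache.flink.connector.file.src.compression.StandardDeCompressors;

public class DecompressorLookupSketch {

    public static void main(String[] args) {
        // All file extensions for which a standard decompressor is registered.
        System.out.println("known suffixes: " + StandardDeCompressors.getCommonSuffixes());

        // Lookup by extension; null means the file is read uncompressed.
        InflaterInputStreamFactory<?> gzip = StandardDeCompressors.getDecompressorForExtension("gz");
        System.out.println("decompressor for gz: " + (gzip == null ? "none" : gzip.getClass().getSimpleName()));
    }
}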
@Override public boolean test(Path path) { final String fileName = path.getName(); if (fileName == null || fileName.length() == 0) { return true; } final char first = fileName.charAt(0); return first != '.' && first != '_'; }
A file filter that filters out hidden files based on common naming patterns, i.e., files where the filename starts with '.' or with '_'.
test
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/enumerate/DefaultFileFilter.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/enumerate/DefaultFileFilter.java
Apache-2.0
@Override protected void addSplitsForPath( FileStatus fileStatus, FileSystem fs, ArrayList<FileSourceSplit> target) throws IOException { if (fileStatus.isDir()) { if (!hiddenDirFilter.test(fileStatus.getPath())) { return; } final FileStatus[] containedFiles = fs.listStatus(fileStatus.getPath()); for (FileStatus containedStatus : containedFiles) { addSplitsForPath(containedStatus, fs, target); } } else if (fileFilter.test(fileStatus.getPath())) { convertToSourceSplits(fileStatus, fs, target); return; } }
Creates a NonSplittingRegexEnumerator that enumerates all files whose file path matches the regex. Supports using a given custom predicate as a filter for file paths.
addSplitsForPath
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/enumerate/NonSplittingRecursiveAllDirEnumerator.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/enumerate/NonSplittingRecursiveAllDirEnumerator.java
Apache-2.0
@Override public boolean test(Path path) { return defaultFileFilter.test(path) && pattern.matcher(path.getPath()).matches(); }
A file filter that filters out hidden files (see {@link DefaultFileFilter}) and files whose path doesn't match the given regex pattern.
test
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/enumerate/RegexFileFilter.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/enumerate/RegexFileFilter.java
Apache-2.0
@Nullable default CheckpointedPosition getCheckpointedPosition() { return null; }
Optionally returns the current position of the reader. This can be implemented by readers that want to speed up recovery from a checkpoint. <p>The current position of the reader is the position of the next record that will be returned in a call to {@link #read()}. <p>See the {@link StreamFormat top-level class comment} (section "Checkpointing") for details.
getCheckpointedPosition
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/reader/StreamFormat.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/reader/StreamFormat.java
Apache-2.0
public long getOffset() { return offset; }
Gets the offset that the reader will seek to when restored from this checkpoint.
getOffset
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/CheckpointedPosition.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/CheckpointedPosition.java
Apache-2.0
public void set(E record, long offset, long recordSkipCount) { this.record = record; this.offset = offset; this.recordSkipCount = recordSkipCount; }
Updates the record and position in this object.
set
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/MutableRecordAndPosition.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/MutableRecordAndPosition.java
Apache-2.0
public void setPosition(long offset, long recordSkipCount) { this.offset = offset; this.recordSkipCount = recordSkipCount; }
Sets the position without setting a record.
setPosition
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/MutableRecordAndPosition.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/MutableRecordAndPosition.java
Apache-2.0
public void setNext(E record) { this.record = record; this.recordSkipCount++; }
Sets the next record of a sequence. This increments the {@code recordSkipCount} by one.
setNext
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/MutableRecordAndPosition.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/MutableRecordAndPosition.java
Apache-2.0
public Recycler<T> recycler() { return recycler; }
Gets the recycler for this pool. The recycler returns its given objects back to this pool.
recycler
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/Pool.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/Pool.java
Apache-2.0
public synchronized void add(T object) { if (poolSize >= poolCapacity) { throw new IllegalStateException("No space left in pool"); } poolSize++; addBack(object); }
Adds an entry to the pool with an optional payload. This method fails if called more often than the pool capacity specified during construction.
add
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/Pool.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/Pool.java
Apache-2.0
public T pollEntry() throws InterruptedException { return pool.take(); }
Gets the next cached entry. This blocks until the next entry is available.
pollEntry
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/Pool.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/Pool.java
Apache-2.0
@Nullable public T tryPollEntry() { return pool.poll(); }
Tries to get the next cached entry. If the pool is empty, this method returns null.
tryPollEntry
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/Pool.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/Pool.java
Apache-2.0
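The Pool entries above (recycler, add, pollEntry, tryPollEntry) suggest the following usage sketch. The capacity constructor and the Recycler's recycle(T) method name are assumptions about the utility; only the four methods shown in the snippets are documented here.

import org.apache.flink.connector.file.src.util.Pool;

public class PoolUsageSketch {

    public static void main(String[] args) throws InterruptedException {
        Pool<byte[]> bufferPool = new Pool<>(2); // assumed capacity constructor
        bufferPool.add(new byte[1024]);
        bufferPool.add(new byte[1024]);
        // A third add() would throw IllegalStateException: the pool is at capacity.

        byte[] buffer = bufferPool.pollEntry(); // blocks until an entry is available
        // ... use the buffer, then hand it back so that it can be polled again ...
        bufferPool.recycler().recycle(buffer); // recycle(T) is an assumed method name

        byte[] maybe = bufferPool.tryPollEntry(); // non-blocking; null if the pool is empty
        System.out.println(maybe != null ? "reused pooled buffer" : "pool empty");
    }
}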
@Override public void releaseBatch() { if (recycler != null) { recycler.run(); } }
Creates a {@code RecyclableIterator} with the given optional recycler.
releaseBatch
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/RecyclableIterator.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/RecyclableIterator.java
Apache-2.0
public static <E> E doWithCleanupOnException( final Closeable toCleanUp, final SupplierWithException<E, IOException> code) throws IOException { try { return code.get(); } catch (Throwable t) { IOUtils.closeQuietly(toCleanUp); ExceptionUtils.rethrowIOException(t); return null; // silence the compiler } }
Runs the given {@code SupplierWithException} (a piece of code producing a result). If an exception happens while running it, the given Closeable is quietly closed.
doWithCleanupOnException
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/Utils.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/Utils.java
Apache-2.0
public static void doWithCleanupOnException( final Closeable toCleanUp, final ThrowingRunnable<IOException> code) throws IOException { doWithCleanupOnException( toCleanUp, (SupplierWithException<Void, IOException>) () -> { code.run(); return null; }); }
Runs the given {@code ThrowingRunnable}. If an exception happens while running it, the given Closeable is quietly closed.
doWithCleanupOnException
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/Utils.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/Utils.java
Apache-2.0
public static <T> void forEachRemaining( final BulkFormat.Reader<T> reader, final Consumer<? super T> action) throws IOException { BulkFormat.RecordIterator<T> batch; RecordAndPosition<T> record; try { while ((batch = reader.readBatch()) != null) { while ((record = batch.next()) != null) { action.accept(record.getRecord()); } batch.releaseBatch(); } } finally { reader.close(); } }
Performs the given action for each remaining element in {@link BulkFormat.Reader} until all elements have been processed or the action throws an exception.
forEachRemaining
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/Utils.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/util/Utils.java
Apache-2.0
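A hedged sketch of doWithCleanupOnException from the Utils entries above: if the supplied code throws, the passed Closeable is quietly closed before the exception is rethrown. The file path is illustrative.

import org.apache.flink.connector.file.src.util.Utils;

import java.io.FileInputStream;
import java.io.IOException;

public class CleanupOnExceptionSketch {

    static int readFirstByte(String path) throws IOException {
        final FileInputStream in = new FileInputStream(path);
        try {
            // If the supplier throws, 'in' is closed quietly and the IOException is rethrown.
            return Utils.doWithCleanupOnException(in, () -> {
                int first = in.read();
                if (first < 0) {
                    throw new IOException("empty file: " + path);
                }
                return first;
            });
        } finally {
            in.close(); // also closes on the success path; closing a FileInputStream twice is harmless
        }
    }

    public static void main(String[] args) throws IOException {
        System.out.println(readFirstByte("/tmp/some-file"));
    }
}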
public void set(final int num, final long recordSkipCount) { set(num, CheckpointedPosition.NO_OFFSET, recordSkipCount); }
@param num the number of rows in this batch. @param recordSkipCount The number of rows that have been returned before this batch.
set
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/ColumnarRowIterator.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/ColumnarRowIterator.java
Apache-2.0
public static EnrichedRowData from( RowData fixedRow, List<String> producedRowFields, List<String> mutableRowFields, List<String> fixedRowFields) { return new EnrichedRowData( fixedRow, computeIndexMapping(producedRowFields, mutableRowFields, fixedRowFields)); }
Creates a new {@link EnrichedRowData} with the provided {@code fixedRow} as the immutable static row, and uses the {@code producedRowFields}, {@code fixedRowFields} and {@code mutableRowFields} arguments to compute the index mapping. <p>The {@code producedRowFields} should include the names of the fields of the full row once mutable and fixed rows are merged, while {@code fixedRowFields} and {@code mutableRowFields} should contain respectively the field names of the fixed row and the mutable row. All the lists are ordered with indexes matching the position of the field in the row. As an example, for a complete row {@code (a, b, c)} the mutable row might be {@code (a, c)} and the fixed row might be {@code (b)}.
from
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/EnrichedRowData.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/EnrichedRowData.java
Apache-2.0
public static int[] computeIndexMapping( List<String> producedRowFields, List<String> mutableRowFields, List<String> fixedRowFields) { int[] indexMapping = new int[producedRowFields.size()]; for (int i = 0; i < producedRowFields.size(); i++) { String fieldName = producedRowFields.get(i); int newIndex = mutableRowFields.indexOf(fieldName); if (newIndex < 0) { newIndex = -(fixedRowFields.indexOf(fieldName) + 1); } indexMapping[i] = newIndex; } return indexMapping; }
This method computes the index mapping for {@link EnrichedRowData}. @see EnrichedRowData#from(RowData, List, List, List)
computeIndexMapping
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/EnrichedRowData.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/EnrichedRowData.java
Apache-2.0
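A hedged worked example for computeIndexMapping, reusing the row layout from the EnrichedRowData javadoc above (produced row (a, b, c), mutable row (a, c), fixed row (b)). Non-negative entries index into the mutable row; negative entries encode a fixed-row index as -(index + 1).

import org.apache.flink.connector.file.table.EnrichedRowData;

import java.util.Arrays;
import java.util.List;

public class IndexMappingSketch {

    public static void main(String[] args) {
        List<String> produced = Arrays.asList("a", "b", "c");
        List<String> mutable = Arrays.asList("a", "c");
        List<String> fixed = Arrays.asList("b");

        int[] mapping = EnrichedRowData.computeIndexMapping(produced, mutable, fixed);

        // Expected: [0, -1, 1] -> 'a' from mutable[0], 'b' from fixed[0], 'c' from mutable[1].
        System.out.println(Arrays.toString(mapping));
    }
}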
private boolean formatFactoryExists(Context context, Class<?> factoryClass) { Configuration options = Configuration.fromMap(context.getCatalogTable().getOptions()); String identifier = options.get(FactoryUtil.FORMAT); if (identifier == null) { throw new ValidationException( String.format( "Table options do not contain an option key '%s' for discovering a format.", FactoryUtil.FORMAT.key())); } final List<Factory> factories = new LinkedList<>(); ServiceLoader.load(Factory.class, context.getClassLoader()) .iterator() .forEachRemaining(factories::add); final List<Factory> foundFactories = factories.stream() .filter(f -> factoryClass.isAssignableFrom(f.getClass())) .collect(Collectors.toList()); final List<Factory> matchingFactories = foundFactories.stream() .filter(f -> f.factoryIdentifier().equals(identifier)) .collect(Collectors.toList()); return !matchingFactories.isEmpty(); }
Returns true if the format factory can be found using the given factory base class and identifier.
formatFactoryExists
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/FileSystemTableFactory.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/FileSystemTableFactory.java
Apache-2.0
protected void validateTimeZone(String zone) { boolean isValid; try { // We enforce a zone string that is compatible with both java.util.TimeZone and // java.time.ZoneId to avoid bugs. // In general, advertising either TZDB ID, GMT+xx:xx, or UTC is the best we can do. isValid = java.util.TimeZone.getTimeZone(zone).toZoneId().equals(ZoneId.of(zone)); } catch (Exception e) { isValid = false; } if (!isValid) { throw new ValidationException( String.format( "Invalid time zone for '%s'. The value should be a Time Zone Database (TZDB) ID " + "such as 'America/Los_Angeles' to include daylight saving time. Fixed " + "offsets are supported using 'GMT-03:00' or 'GMT+03:00'. Or use 'UTC' " + "without time zone and daylight saving time.", FileSystemConnectorOptions.SINK_PARTITION_COMMIT_WATERMARK_TIME_ZONE .key())); } }
Similar logic as for {@link TableConfig}.
validateTimeZone
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/FileSystemTableFactory.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/FileSystemTableFactory.java
Apache-2.0
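As an illustration of the round-trip check in the validateTimeZone record above, here is a hedged sketch (class name and sample zone strings are assumptions, and results may vary slightly by JDK): TZDB IDs, 'GMT+xx:xx' offsets and 'UTC' typically pass, while three-letter abbreviations such as 'PST' do not.

import java.time.ZoneId;
import java.util.TimeZone;

public class TimeZoneRoundTripSketch {
    // Same check as in validateTimeZone: the zone must be understood identically by
    // java.util.TimeZone and java.time.ZoneId.
    static boolean isValid(String zone) {
        try {
            return TimeZone.getTimeZone(zone).toZoneId().equals(ZoneId.of(zone));
        } catch (Exception e) {
            return false;
        }
    }

    public static void main(String[] args) {
        // Typically the first three print true and "PST" prints false (exact results depend on the JDK).
        for (String zone : new String[] {"America/Los_Angeles", "GMT+08:00", "UTC", "PST"}) {
            System.out.println(zone + " -> " + isValid(zone));
        }
    }
}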
default LinkedHashMap<String, String> partitionSpec() { LinkedHashMap<String, String> res = new LinkedHashMap<>(); for (int i = 0; i < partitionKeys().size(); i++) { res.put(partitionKeys().get(i), partitionValues().get(i)); } return res; }
Partition spec in the form of a map from partition keys to values.
partitionSpec
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionCommitPolicy.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionCommitPolicy.java
Apache-2.0
static PartitionFieldExtractor<FileSourceSplit> forFileSystem(String defaultPartValue) {
    return (split, fieldName, fieldType) -> {
        LinkedHashMap<String, String> partitionSpec =
                PartitionPathUtils.extractPartitionSpecFromPath(split.path());
        if (!partitionSpec.containsKey(fieldName)) {
            throw new RuntimeException(
                    "Cannot find the partition value from path for partition: " + fieldName);
        }

        String valueStr = partitionSpec.get(fieldName);
        valueStr = valueStr.equals(defaultPartValue) ? null : valueStr;
        return RowPartitionComputer.restorePartValueFromType(valueStr, fieldType);
    };
}
Interface to extract partition field from split.
forFileSystem
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionFieldExtractor.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionFieldExtractor.java
Apache-2.0
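A small, hypothetical sketch of the path-based extraction the lambda above relies on (the sample path, the printed result and the import location of PartitionPathUtils are assumptions; the helper itself is the one already referenced in the code).

import org.apache.flink.core.fs.Path;
import org.apache.flink.table.utils.PartitionPathUtils; // import location assumed

import java.util.LinkedHashMap;

public class PartitionSpecFromPathSketch {
    public static void main(String[] args) {
        // Hive-style partitioned file path (illustrative).
        Path file = new Path("/warehouse/my_table/dt=2021-01-01/hour=10/part-0");
        LinkedHashMap<String, String> spec =
                PartitionPathUtils.extractPartitionSpecFromPath(file);
        // Expected, roughly: {dt=2021-01-01, hour=10}
        System.out.println(spec);
    }
}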
public void loadPartition(
        LinkedHashMap<String, String> partSpec, List<Path> srcPaths, boolean srcPathIsDir)
        throws Exception {
    Optional<Path> pathFromMeta = metaStore.getPartition(partSpec);
    Path path =
            pathFromMeta.orElseGet(
                    () ->
                            new Path(
                                    metaStore.getLocationPath(),
                                    generatePartitionPath(partSpec)));

    overwriteAndMoveFiles(srcPaths, path, srcPathIsDir);
    commitPartition(partSpec, path);
}
Load a single partition. @param partSpec the specification for the single partition @param srcPaths the paths for the files used to load to the single partition @param srcPathIsDir whether every path in {@code srcPaths} is a directory or not. If true, the files under each path's directory will be loaded. If false, every path in {@code srcPaths} is considered a single file, and that single file will be loaded for every path.
loadPartition
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionLoader.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionLoader.java
Apache-2.0
public void loadNonPartition(List<Path> srcPaths, boolean srcPathIsDir) throws Exception {
    Path tableLocation = metaStore.getLocationPath();
    overwriteAndMoveFiles(srcPaths, tableLocation, srcPathIsDir);
    commitPartition(new LinkedHashMap<>(), tableLocation);
    metaStore.finishWritingTable(tableLocation);
}
Load non-partitioned files to the output path. @param srcPaths the paths for the files used to load to the output path @param srcPathIsDir whether every path in {@code srcPaths} is a directory or not. If true, the files under each path's directory will be loaded. If false, every path in {@code srcPaths} is considered a single file, and that single file will be loaded for every path.
loadNonPartition
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionLoader.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionLoader.java
Apache-2.0
public static List<Path> listTaskTemporaryPaths(
        FileSystem fs, Path basePath, BiPredicate<Integer, Integer> taskAttemptFilter)
        throws Exception {
    List<Path> taskTmpPaths = new ArrayList<>();

    if (fs.exists(basePath)) {
        for (FileStatus taskStatus : fs.listStatus(basePath)) {
            final String taskDirName = taskStatus.getPath().getName();
            final Matcher matcher = TASK_DIR_PATTERN.matcher(taskDirName);
            if (matcher.matches()) {
                final int subtaskIndex = Integer.parseInt(matcher.group(1));
                final int attemptNumber = Integer.parseInt(matcher.group(2));
                if (taskAttemptFilter.test(subtaskIndex, attemptNumber)) {
                    taskTmpPaths.add(taskStatus.getPath());
                }
            }
        }
    } else {
        LOG.warn(
                "The path {} doesn't exist. Maybe no data is generated in the path and the path is not created.",
                basePath);
    }
    return taskTmpPaths;
}
Returns task temporary paths in this checkpoint.
listTaskTemporaryPaths
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionTempFileManager.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionTempFileManager.java
Apache-2.0
public static Map<LinkedHashMap<String, String>, List<Path>> collectPartSpecToPaths(
        FileSystem fs, List<Path> taskPaths, int partColSize) {
    Map<LinkedHashMap<String, String>, List<Path>> specToPaths = new HashMap<>();
    for (Path taskPath : taskPaths) {
        searchPartSpecAndPaths(fs, taskPath, partColSize)
                .forEach(
                        tuple2 ->
                                specToPaths.compute(
                                        tuple2.f0,
                                        (spec, paths) -> {
                                            paths = paths == null ? new ArrayList<>() : paths;
                                            paths.add(tuple2.f1);
                                            return paths;
                                        }));
    }
    return specToPaths;
}
Collects all partitioned paths and aggregates them according to the partition spec.
collectPartSpecToPaths
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionTempFileManager.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionTempFileManager.java
Apache-2.0
static PartitionTimeExtractor create(
        ClassLoader userClassLoader,
        String extractorKind,
        String extractorClass,
        String extractorPattern,
        String formatterPattern) {
    switch (extractorKind) {
        case DEFAULT:
            return new DefaultPartTimeExtractor(extractorPattern, formatterPattern);
        case CUSTOM:
            try {
                return (PartitionTimeExtractor)
                        userClassLoader.loadClass(extractorClass).newInstance();
            } catch (ClassNotFoundException
                    | IllegalAccessException
                    | InstantiationException e) {
                throw new RuntimeException(
                        "Can not new instance for custom class from " + extractorClass, e);
            }
        default:
            throw new UnsupportedOperationException(
                    "Unsupported extractor kind: " + extractorKind);
    }
}
Extract time from partition keys and values.
create
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionTimeExtractor.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionTimeExtractor.java
Apache-2.0
OutputFormat<T> createNewOutputFormat(Path path) throws IOException {
    OutputFormat<T> format = factory.createOutputFormat(path);
    format.configure(conf);
    // Here we just think of it as a single file format, so there can only be a single task.
    format.open(FirstAttemptInitializationContext.of(0, 1));
    return format;
}
Create a new output format with path, configure it and open it.
createNewOutputFormat
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionWriter.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionWriter.java
Apache-2.0
public static Object restorePartValueFromType(String valStr, LogicalType type) {
    if (valStr == null) {
        return null;
    }

    LogicalTypeRoot typeRoot = type.getTypeRoot();
    switch (typeRoot) {
        case CHAR:
        case VARCHAR:
            return valStr;
        case BOOLEAN:
            return Boolean.parseBoolean(valStr);
        case TINYINT:
            return Integer.valueOf(valStr).byteValue();
        case SMALLINT:
            return Short.valueOf(valStr);
        case INTEGER:
            return Integer.valueOf(valStr);
        case BIGINT:
            return Long.valueOf(valStr);
        case FLOAT:
            return Float.valueOf(valStr);
        case DOUBLE:
            return Double.valueOf(valStr);
        case DATE:
            return LocalDate.parse(valStr);
        case TIMESTAMP_WITHOUT_TIME_ZONE:
            return LocalDateTime.parse(valStr);
        case DECIMAL:
            return new BigDecimal(valStr);
        default:
            throw new RuntimeException(
                    String.format(
                            "Can not convert %s to type %s for partition value", valStr, type));
    }
}
Restore partition value from string and type. This method is the opposite of method {@link #generatePartValues}. @param valStr string partition value. @param type type of partition field. @return partition value.
restorePartValueFromType
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/RowPartitionComputer.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/RowPartitionComputer.java
Apache-2.0
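A minimal usage sketch for the conversion in the restorePartValueFromType record above (class name and sample values are illustrative): DATE strings come back as LocalDate, INTEGER strings as Integer.

import org.apache.flink.connector.file.table.RowPartitionComputer;
import org.apache.flink.table.types.logical.DateType;
import org.apache.flink.table.types.logical.IntType;

public class RestorePartValueSketch {
    public static void main(String[] args) {
        // Partition values arrive as strings from the path and are restored to typed objects.
        Object day = RowPartitionComputer.restorePartValueFromType("2021-01-01", new DateType());
        Object hour = RowPartitionComputer.restorePartValueFromType("10", new IntType());
        System.out.println(day.getClass().getSimpleName() + ": " + day);   // LocalDate: 2021-01-01
        System.out.println(hour.getClass().getSimpleName() + ": " + hour); // Integer: 10
    }
}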
@Override
public boolean isPartitionCommittable(PredicateContext predicateContext) {
    LocalDateTime partitionTime =
            extractor.extract(
                    partitionKeys,
                    extractPartitionValues(new Path(predicateContext.partition())));
    return watermarkHasPassedWithDelay(
            predicateContext.currentWatermark(), partitionTime, commitDelay);
}
Checks whether a partition is committable: the partition time is extracted from the partition path, and the partition becomes committable once the current watermark (interpreted in the configured watermark time zone) has passed the partition time plus the commit delay.
isPartitionCommittable
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/stream/PartitionTimeCommitPredicate.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/stream/PartitionTimeCommitPredicate.java
Apache-2.0
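To make the "watermark has passed with delay" idea in the record above concrete, here is a hedged arithmetic sketch (class name, numbers and the exact boundary comparison are assumptions; the real comparison lives in the predicate's private helper).

import java.time.LocalDateTime;
import java.time.ZoneId;

public class CommitDelaySketch {
    public static void main(String[] args) {
        // Time zone configured via FileSystemConnectorOptions.SINK_PARTITION_COMMIT_WATERMARK_TIME_ZONE.
        ZoneId watermarkZone = ZoneId.of("UTC");
        long commitDelayMillis = 60 * 60 * 1000L; // 1 hour commit delay

        // Partition time extracted from a path such as .../dt=2021-01-01/hour=10/
        LocalDateTime partitionTime = LocalDateTime.parse("2021-01-01T10:00:00");
        long partitionEpochMillis =
                partitionTime.atZone(watermarkZone).toInstant().toEpochMilli();

        // The partition becomes committable roughly once the watermark passes 11:00 UTC.
        long currentWatermark = partitionEpochMillis + commitDelayMillis + 1;
        boolean committable = currentWatermark > partitionEpochMillis + commitDelayMillis;
        System.out.println("committable = " + committable); // true
    }
}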
static CompactContext create( Configuration config, FileSystem fileSystem, String partition, Path path) { return new CompactContextImpl(config, fileSystem, partition, path); }
Context for {@link CompactReader} and {@link CompactWriter}.
create
java
apache/flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/stream/compact/CompactContext.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/stream/compact/CompactContext.java
Apache-2.0
protected JobGraph createJobGraph(boolean triggerFailover, String path) {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    Configuration config = new Configuration();
    config.set(ExecutionOptions.RUNTIME_MODE, RuntimeExecutionMode.BATCH);
    env.configure(config, getClass().getClassLoader());

    if (triggerFailover) {
        RestartStrategyUtils.configureFixedDelayRestartStrategy(env, 1, 100);
    } else {
        RestartStrategyUtils.configureNoRestartStrategy(env);
    }

    // Create a testing job with a bounded legacy source in a bit hacky way.
    StreamSource<Integer, ?> sourceOperator =
            new StreamSource<>(new BatchExecutionTestSource(NUM_RECORDS));
    DataStreamSource<Integer> source =
            new DataStreamSource<>(
                    env,
                    BasicTypeInfo.INT_TYPE_INFO,
                    sourceOperator,
                    true,
                    "Source",
                    Boundedness.BOUNDED);

    DataStreamSink<Integer> sink =
            source.setParallelism(NUM_SOURCES)
                    .rebalance()
                    .map(new BatchExecutionOnceFailingMap(NUM_RECORDS, triggerFailover))
                    .setParallelism(NUM_SINKS)
                    .sinkTo(createFileSink(path))
                    .setParallelism(NUM_SINKS);
    configureSink(sink);

    StreamGraph streamGraph = env.getStreamGraph();
    return streamGraph.getJobGraph();
}
Creates the testing job graph in batch mode. The graph created is [Source] -> [Failover Map -> File Sink]. The Failover Map is introduced to ensure the failover always restarts the file writer, so the data is re-written.
createJobGraph
java
apache/flink
flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/BatchExecutionFileSinkITCase.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/BatchExecutionFileSinkITCase.java
Apache-2.0
@Override
public Integer map(Integer value) {
    if (triggerFailover
            && getRuntimeContext().getTaskInfo().getIndexOfThisSubtask() == 0
            && getRuntimeContext().getTaskInfo().getAttemptNumber() == 0
            && value >= FAILOVER_RATIO * maxNumber) {
        throw new RuntimeException("Designated Failure");
    }
    return value;
}
A {@link RichMapFunction} that throws an exception to fail the job iff {@code triggerFailover} is {@code true}, the subtask index is 0, and the execution attempt number is 0.
map
java
apache/flink
flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/BatchExecutionFileSinkITCase.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/BatchExecutionFileSinkITCase.java
Apache-2.0
@Override
protected JobGraph createJobGraph(boolean triggerFailover, String path) {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    Configuration config = new Configuration();
    config.set(ExecutionOptions.RUNTIME_MODE, RuntimeExecutionMode.STREAMING);
    env.configure(config, getClass().getClassLoader());
    env.enableCheckpointing(10, CheckpointingMode.EXACTLY_ONCE);

    if (triggerFailover) {
        RestartStrategyUtils.configureFixedDelayRestartStrategy(env, 1, 100);
    } else {
        RestartStrategyUtils.configureNoRestartStrategy(env);
    }

    DataStreamSink<Integer> sink =
            env.addSource(
                            new StreamingExecutionTestSource(
                                    latchId, NUM_RECORDS, triggerFailover))
                    .setParallelism(NUM_SOURCES)
                    .sinkTo(createFileSink(path))
                    .setParallelism(NUM_SINKS);
    configureSink(sink);

    StreamGraph streamGraph = env.getStreamGraph();
    return streamGraph.getJobGraph();
}
Creates the testing job graph in streaming mode. The graph created is [Source] -> [File Sink]. The source triggers a failover if required.
createJobGraph
java
apache/flink
flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/StreamingExecutionFileSinkITCase.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/StreamingExecutionFileSinkITCase.java
Apache-2.0
@Override public void encode(Integer element, OutputStream stream) throws IOException { stream.write(ByteBuffer.allocate(4).putInt(element).array()); stream.flush(); }
Testing sink {@link Encoder} that writes an integer with its binary representation.
encode
java
apache/flink
flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/utils/IntegerFileSinkTestDataUtils.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/utils/IntegerFileSinkTestDataUtils.java
Apache-2.0
public static void checkIntegerSequenceSinkOutput(
        String path, int numRecords, int numBuckets, int numSources) throws Exception {
    File dir = new File(path);
    String[] subDirNames = dir.list();
    assertThat(subDirNames).isNotNull();

    Arrays.sort(subDirNames, Comparator.comparingInt(Integer::parseInt));
    assertThat(subDirNames).hasSize(numBuckets);
    for (int i = 0; i < numBuckets; ++i) {
        assertThat(subDirNames[i]).isEqualTo(Integer.toString(i));

        // now check its content
        File bucketDir = new File(path, subDirNames[i]);
        assertThat(bucketDir)
                .as(bucketDir.getAbsolutePath() + " should be an existing directory")
                .isDirectory();

        Map<Integer, Integer> counts = new HashMap<>();
        File[] files = bucketDir.listFiles(f -> !f.getName().startsWith("."));
        assertThat(files).isNotNull();

        for (File file : files) {
            assertThat(file).isFile();

            try (DataInputStream dataInputStream =
                    new DataInputStream(new FileInputStream(file))) {
                while (true) {
                    int value = dataInputStream.readInt();
                    counts.compute(value, (k, v) -> v == null ? 1 : v + 1);
                }
            } catch (EOFException e) {
                // End the reading
            }
        }

        int expectedCount = numRecords / numBuckets + (i < numRecords % numBuckets ? 1 : 0);
        assertThat(counts).hasSize(expectedCount);

        for (int j = i; j < numRecords; j += numBuckets) {
            assertThat(counts.getOrDefault(j, 0).intValue())
                    .as(
                            "The record "
                                    + j
                                    + " should occur "
                                    + numSources
                                    + " times, but only occurs "
                                    + counts.getOrDefault(j, 0)
                                    + " times")
                    .isEqualTo(numSources);
        }
    }
}
Verifies that the files written by the sink contain the expected integer sequences. The integers are partitioned into different buckets according to modulo, and each integer is repeated <tt>numSources</tt> times. @param path The directory to check. @param numRecords The total number of records. @param numBuckets The number of buckets to assign. @param numSources The parallelism of the sources generating the sequences. Each integer is repeated <tt>numSources</tt> times.
checkIntegerSequenceSinkOutput
java
apache/flink
flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/utils/IntegerFileSinkTestDataUtils.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/utils/IntegerFileSinkTestDataUtils.java
Apache-2.0
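A hypothetical worked example of the bucket arithmetic the check above asserts (class name and concrete numbers chosen for illustration): records are bucketed by value modulo the bucket count, and each value is written once per source.

public class BucketCountSketch {
    public static void main(String[] args) {
        int numRecords = 10, numBuckets = 4, numSources = 2;
        // bucket 0 -> {0, 4, 8}, bucket 1 -> {1, 5, 9}, bucket 2 -> {2, 6}, bucket 3 -> {3, 7}
        for (int bucket = 0; bucket < numBuckets; bucket++) {
            int expectedDistinct =
                    numRecords / numBuckets + (bucket < numRecords % numBuckets ? 1 : 0);
            System.out.println(
                    "bucket " + bucket + ": " + expectedDistinct
                            + " distinct values, each occurring " + numSources + " times");
        }
    }
}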
@Test
void noNullHostsAllowed() {
    assertThatThrownBy(
                    () ->
                            new FileSourceSplit(
                                    "id",
                                    new Path("file:/some/random/path"),
                                    0,
                                    10,
                                    0,
                                    10,
                                    "host1",
                                    null,
                                    "host2"))
            .isInstanceOf(IllegalArgumentException.class);
}
Unit tests for the {@link FileSourceSplit}.
noNullHostsAllowed
java
apache/flink
flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/src/FileSourceSplitTest.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/src/FileSourceSplitTest.java
Apache-2.0
@Test void testBoundedTextFileSource( @TempDir java.nio.file.Path tmpTestDir, @InjectMiniCluster MiniCluster miniCluster) throws Exception { testBoundedTextFileSource(tmpTestDir, FailoverType.NONE, miniCluster); }
This test runs a job reading bounded input with a stream record format (text lines).
testBoundedTextFileSource
java
apache/flink
flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/src/FileSourceTextLinesITCase.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/src/FileSourceTextLinesITCase.java
Apache-2.0
@Test
void testBoundedTextFileSourceWithTaskManagerFailover(@TempDir java.nio.file.Path tmpTestDir)
        throws Exception {
    // This test will kill TM, so we run it in a new cluster to avoid affecting other tests
    runTestWithNewMiniCluster(
            miniCluster -> testBoundedTextFileSource(tmpTestDir, FailoverType.TM, miniCluster));
}
This test runs a job reading bounded input with a stream record format (text lines) and restarts TaskManager.
testBoundedTextFileSourceWithTaskManagerFailover
java
apache/flink
flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/src/FileSourceTextLinesITCase.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/src/FileSourceTextLinesITCase.java
Apache-2.0
@Test void testContinuousTextFileSource( @TempDir java.nio.file.Path tmpTestDir, @InjectMiniCluster MiniCluster miniCluster) throws Exception { testContinuousTextFileSource(tmpTestDir, FailoverType.NONE, miniCluster); }
This test runs a job reading continuous input (files appearing over time) with a stream record format (text lines).
testContinuousTextFileSource
java
apache/flink
flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/src/FileSourceTextLinesITCase.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/src/FileSourceTextLinesITCase.java
Apache-2.0
@Test
void testContinuousTextFileSourceWithTaskManagerFailover(@TempDir java.nio.file.Path tmpTestDir)
        throws Exception {
    // This test will kill TM, so we run it in a new cluster to avoid affecting other tests
    runTestWithNewMiniCluster(
            miniCluster ->
                    testContinuousTextFileSource(tmpTestDir, FailoverType.TM, miniCluster));
}
This test runs a job reading continuous input (files appearing over time) with a stream record format (text lines) and restarts TaskManager.
testContinuousTextFileSourceWithTaskManagerFailover
java
apache/flink
flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/src/FileSourceTextLinesITCase.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/src/FileSourceTextLinesITCase.java
Apache-2.0
default CompactingFileWriter openNewCompactingFile( final CompactingFileWriter.Type type, final BucketID bucketID, final Path path, final long creationTime) throws IOException { if (type == CompactingFileWriter.Type.RECORD_WISE) { return openNewInProgressFile(bucketID, path, creationTime); } throw new UnsupportedOperationException(); }
Used to create a new {@link CompactingFileWriter} of the requested type. Requesting a writer of an unsupported type will result in UnsupportedOperationException. By default, only the RECORD_WISE type is supported, for which an {@link InProgressFileWriter} will be created. @param type the type of this writer. @param bucketID the id of the bucket this writer is writing to. @param path the path this writer will write to. @param creationTime the creation time of the file. @return the new {@link InProgressFileWriter} @throws IOException Thrown if creating a writer fails. @throws UnsupportedOperationException Thrown if the bucket writer doesn't support the requested type.
openNewCompactingFile
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/BucketWriter.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/BucketWriter.java
Apache-2.0
public String getPartPrefix() { return partPrefix; }
The prefix for the part name.
getPartPrefix
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/OutputFileConfig.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/OutputFileConfig.java
Apache-2.0
@Override public void write(final IN element, final long currentTime) throws IOException { ensureWriteType(Type.RECORD_WISE); encoder.encode(element, currentPartStream); markWrite(currentTime); }
An {@link InProgressFileWriter} for row-wise formats that use an {@link Encoder}. This also implements the {@link PartFileInfo} and the {@link OutputStreamBasedCompactingFileWriter}.
write
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/RowWisePartWriter.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/RowWisePartWriter.java
Apache-2.0
public boolean supportsResume() { return supportsResume; }
@return Whether the {@link BucketWriter} supports appending data to the restored in-progress file or not.
supportsResume
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/WriterProperties.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/WriterProperties.java
Apache-2.0
public SimpleVersionedSerializer<InProgressFileWriter.PendingFileRecoverable> getPendingFileRecoverableSerializer() { return pendingFileRecoverableSerializer; }
@return the serializer for the {@link InProgressFileWriter.PendingFileRecoverable}.
getPendingFileRecoverableSerializer
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/WriterProperties.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/WriterProperties.java
Apache-2.0
public SimpleVersionedSerializer<InProgressFileWriter.InProgressFileRecoverable> getInProgressFileRecoverableSerializer() { return inProgressFileRecoverableSerializer; }
@return the serializer for the {@link InProgressFileWriter.InProgressFileRecoverable}.
getInProgressFileRecoverableSerializer
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/WriterProperties.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/WriterProperties.java
Apache-2.0
public boolean shouldRollOnCheckpoint(PartFileInfo<BucketID> partFileState) { return true; }
An abstract {@link RollingPolicy} which rolls on every checkpoint.
shouldRollOnCheckpoint
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/CheckpointRollingPolicy.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/CheckpointRollingPolicy.java
Apache-2.0
@SuppressWarnings("unchecked") protected T self() { return (T) this; }
The base abstract builder class for {@link CheckpointRollingPolicy}.
self
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/CheckpointRollingPolicy.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/CheckpointRollingPolicy.java
Apache-2.0
public long getMaxPartSize() { return partSize; }
Returns the maximum part file size before rolling. @return Max size in bytes
getMaxPartSize
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.java
Apache-2.0
public long getRolloverInterval() { return rolloverInterval; }
Returns the maximum time duration a part file can stay open before rolling. @return Time duration in milliseconds
getRolloverInterval
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.java
Apache-2.0