code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/**
 * Initializes the two-input task: splits the network input gates into the two logical inputs
 * according to the type number (1 or 2) of each physical in-edge, creates the input processor,
 * and registers the minimum-input-watermark gauge on both the head operator's metric group and
 * the task's metric group (the latter wrapped in a method reference so the registered metric
 * objects stay unique).
 *
 * @throws Exception on initialization failure; an edge type number other than 1 or 2 surfaces
 *     as a RuntimeException.
 */
@Override public void init() throws Exception { StreamConfig configuration = getConfiguration(); ClassLoader userClassLoader = getUserCodeClassLoader(); int numberOfInputs = configuration.getNumberOfNetworkInputs(); ArrayList<IndexedInputGate> inputList1 = new ArrayList<>(); ArrayList<IndexedInputGate> inputList2 = new ArrayList<>(); List<StreamEdge> inEdges = configuration.getInPhysicalEdges(userClassLoader); for (int i = 0; i < numberOfInputs; i++) { int inputType = inEdges.get(i).getTypeNumber(); IndexedInputGate reader = getEnvironment().getInputGate(i); switch (inputType) { case 1: inputList1.add(reader); break; case 2: inputList2.add(reader); break; default: throw new RuntimeException("Invalid input type number: " + inputType); } } createInputProcessor( inputList1, inputList2, gateIndex -> inEdges.get(gateIndex).getPartitioner()); mainOperator .getMetricGroup() .gauge(MetricNames.IO_CURRENT_INPUT_WATERMARK, minInputWatermarkGauge); // wrap watermark gauge since registered metrics must be unique getEnvironment() .getMetricGroup() .gauge(MetricNames.IO_CURRENT_INPUT_WATERMARK, minInputWatermarkGauge::getValue); }
Constructor for initialization, possibly with initial state (recovery / savepoint / etc). @param env The task environment for this task.
init
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/AbstractTwoInputStreamTask.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/AbstractTwoInputStreamTask.java
Apache-2.0
/** Returns an {@link Iterable} that traverses all operators in forward topological order (delegates with {@code reverse = false}). */
@VisibleForTesting public Iterable<StreamOperatorWrapper<?, ?>> getAllOperators() { return getAllOperators(false); }
Returns an {@link Iterable} which traverses all operators in forward topological order.
getAllOperators
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/OperatorChain.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/OperatorChain.java
Apache-2.0
/**
 * Flushes every record-writer output so any still-buffered data is sent before record emission
 * finishes; this also surfaces any data-sending exception that occurred.
 *
 * @throws IOException if the buffered data cannot be pushed into the output streams
 */
public void flushOutputs() throws IOException {
    for (RecordWriterOutput<?> output : getStreamOutputs()) {
        output.flush();
    }
}
This method should be called before finishing the record emission, to make sure any data that is still buffered will be sent. It also ensures that all data sending related exceptions are recognized. @throws IOException Thrown, if the buffered data cannot be pushed into the output streams.
flushOutputs
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/OperatorChain.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/OperatorChain.java
Apache-2.0
/**
 * Recursively creates the chain of operators starting from {@code operatorConfig}. The output
 * the operator writes to is created first (which may recursively create downstream chained
 * operators), then the operator itself, and finally the operator is wrapped into a
 * {@code WatermarkGaugeExposingOutput} that the previous operator in the chain writes to.
 */
private <IN, OUT> WatermarkGaugeExposingOutput<StreamRecord<IN>> createOperatorChain( StreamTask<OUT, ?> containingTask, StreamConfig prevOperatorConfig, StreamConfig operatorConfig, Map<Integer, StreamConfig> chainedConfigs, ClassLoader userCodeClassloader, Map<IntermediateDataSetID, RecordWriterOutput<?>> recordWriterOutputs, List<StreamOperatorWrapper<?, ?>> allOperatorWrappers, OutputTag<IN> outputTag, MailboxExecutorFactory mailboxExecutorFactory, boolean shouldAddMetricForPrevOperator) { // create the output that the operator writes to first. this may recursively create more // operators WatermarkGaugeExposingOutput<StreamRecord<OUT>> chainedOperatorOutput = createOutputCollector( containingTask, operatorConfig, chainedConfigs, userCodeClassloader, recordWriterOutputs, allOperatorWrappers, mailboxExecutorFactory, true); OneInputStreamOperator<IN, OUT> chainedOperator = createOperator( containingTask, operatorConfig, userCodeClassloader, chainedOperatorOutput, allOperatorWrappers, false); return wrapOperatorIntoOutput( chainedOperator, containingTask, prevOperatorConfig, operatorConfig, userCodeClassloader, outputTag, shouldAddMetricForPrevOperator); }
Recursively create chain of operators that starts from the given {@param operatorConfig}. Operators are created tail to head and wrapped into an {@link WatermarkGaugeExposingOutput}.
createOperatorChain
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/OperatorChain.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/OperatorChain.java
Apache-2.0
/** Returns the current processing time, taken from this service's {@link #getClock()} in absolute milliseconds. */
@Override default long getCurrentProcessingTime() { return getClock().absoluteTimeMillis(); }
Defines the current processing time and handles all related actions, such as register timers for tasks to be executed in the future. <p>The access to the time via {@link #getCurrentProcessingTime()} is always available, regardless of whether the timer service has been shut down.
getCurrentProcessingTime
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/ProcessingTimeService.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/ProcessingTimeService.java
Apache-2.0
public static long getProcessingTimeDelay(long processingTimestamp, long currentTimestamp) { // Two cases of timers here: // (1) future/now timers(processingTimestamp >= currentTimestamp): delay the firing of the // timer by 1 ms to align the semantics with watermark. A watermark T says we // won't see elements in the future with a timestamp smaller or equal to T. Without this // 1ms delay, if we had fired the timer for T at the timestamp T, it would be possible // that we would process another record for timestamp == T in the same millisecond, but // after the timer for the timsetamp T has already been fired. // (2) past timers(processingTimestamp < currentTimestamp): do not need to delay the firing // because currentTimestamp is larger than processingTimestamp pluses the 1ms offset. // TODO. The processing timers' performance can be further improved. // see FLINK-23690 and https://github.com/apache/flink/pull/16744 if (processingTimestamp >= currentTimestamp) { return processingTimestamp - currentTimestamp + 1; } else { return 0; } }
Returns the remaining delay of the processing time specified by {@code processingTimestamp}. This delay guarantees that the timer will be fired at least 1ms after the time it's registered for. @param processingTimestamp the processing time in milliseconds @param currentTimestamp the current processing timestamp; it usually uses {@link ProcessingTimeService#getCurrentProcessingTime()} to get @return the remaining delay of the processing time
getProcessingTimeDelay
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/ProcessingTimeServiceUtil.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/ProcessingTimeServiceUtil.java
Apache-2.0
/**
 * Cleans up bookkeeping for any orphaned checkpoint with an id strictly below the given
 * currently-triggered checkpoint id, then resumes processing if nothing is pending anymore.
 * Must run on the mailbox thread (asserted).
 */
private void cleanupOldCheckpoints(long checkpointId) { assert (mailboxProcessor.isMailboxThread()); triggeredCheckpoints.headSet(checkpointId).clear(); untriggeredCheckpoints.headMap(checkpointId).clear(); maybeResumeProcessing(); }
Clean up any orphaned checkpoint before the given currently triggered checkpoint. These checkpoints may occur when a checkpoint is cancelled but the RPC is lost. Note, to be safe, checkpoint X is only removed when both RPC and trigger for a checkpoint Y>X is received.
cleanupOldCheckpoints
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/SourceOperatorStreamTask.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/SourceOperatorStreamTask.java
Apache-2.0
/**
 * Completes the RPC-wait future (resuming processing) when no triggered checkpoints remain;
 * otherwise a no-op. Must run on the mailbox thread (asserted).
 */
private void maybeResumeProcessing() { assert (mailboxProcessor.isMailboxThread()); if (triggeredCheckpoints.isEmpty()) { waitForRPC.complete(null); } }
Resumes processing if it was blocked before or else is a no-op.
maybeResumeProcessing
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/SourceOperatorStreamTask.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/SourceOperatorStreamTask.java
Apache-2.0
/**
 * Removes the temporary bookkeeping for a canceled checkpoint from both the triggered and
 * untriggered sets, then resumes processing if possible. Must run on the mailbox thread
 * (asserted).
 */
private void cleanupCheckpoint(long checkpointId) { assert (mailboxProcessor.isMailboxThread()); triggeredCheckpoints.remove(checkpointId); untriggeredCheckpoints.remove(checkpointId); maybeResumeProcessing(); }
Remove temporary data about a canceled checkpoint.
cleanupCheckpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/SourceOperatorStreamTask.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/SourceOperatorStreamTask.java
Apache-2.0
/**
 * Returns the future that is completed once this thread completes. If the task is failing and
 * the thread is not alive (e.g. it was never started), an already-completed future is returned
 * instead, so callers do not wait on a thread that will never run.
 */
CompletableFuture<Void> getCompletionFuture() {
    if (isFailing() && !isAlive()) {
        return CompletableFuture.completedFuture(null);
    }
    return completionFuture;
}
@return future that is completed once this thread completes. If this task {@link #isFailing()} and this thread is not alive (e.g. not started) returns a normally completed future.
getCompletionFuture
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/SourceStreamTask.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/SourceStreamTask.java
Apache-2.0
/**
 * Creates the identification string with which the head and tail tasks of an iteration find
 * their shared blocking back-channel queue. The string is unique per parallel head/tail pair,
 * per iteration, per job.
 *
 * @param jid the job ID
 * @param iterationID the id of the iteration in the job
 * @param subtaskIndex the parallel subtask number
 * @return the identification string, "jobId-iterationId-subtaskIndex"
 */
public static String createBrokerIdString(JobID jid, String iterationID, int subtaskIndex) {
    // StringBuilder.append(Object) applies String.valueOf, matching plain string concatenation.
    return new StringBuilder()
            .append(jid)
            .append('-')
            .append(iterationID)
            .append('-')
            .append(subtaskIndex)
            .toString();
}
Creates the identification string with which head and tail task find the shared blocking queue for the back channel. The identification string is unique per parallel head/tail pair per iteration per job. @param jid The job ID. @param iterationID The id of the iteration in the job. @param subtaskIndex The parallel subtask number @return The identification string.
createBrokerIdString
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamIterationHead.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamIterationHead.java
Apache-2.0
/**
 * Finishes the wrapped operator and propagates the finish operation to the {@link #next}
 * wrapper. For a non-head operator under StopMode.DRAIN, input 1 is ended first (non-head
 * operators on a chain are always one-input operators); then the operator's time service is
 * quiesced and the operator finished; finally the call cascades down the chain.
 * Must be called from the task thread so pending mails/timers can be yielded to.
 */
public void finish(StreamTaskActionExecutor actionExecutor, StopMode stopMode) throws Exception { if (!isHead && stopMode == StopMode.DRAIN) { // NOTE: This only do for the case where the operator is one-input operator. At present, // any non-head operator on the operator chain is one-input operator. actionExecutor.runThrowing(() -> endOperatorInput(1)); } quiesceTimeServiceAndFinishOperator(actionExecutor, stopMode); // propagate the close operation to the next wrapper if (next != null) { next.finish(actionExecutor, stopMode); } }
Finishes the wrapped operator and propagates the finish operation to the next wrapper that the {@link #next} points to. <p>Note that this method must be called in the task thread, because we need to call {@link MailboxExecutor#yield()} to take the mails of closing operator and running timers and run them.
finish
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamOperatorWrapper.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamOperatorWrapper.java
Apache-2.0
/**
 * Closes all operators of the chain if they have not been closed before. The flag is set
 * before delegating so the close is attempted at most once even if it throws.
 */
private void closeAllOperators() throws Exception { if (operatorChain != null && !closedOperators) { closedOperators = true; operatorChain.closeAllOperators(); } }
Closes all the operators if not closed before.
closeAllOperators
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
Apache-2.0
/**
 * Gets the name of the task, in the form "taskname (2/5)".
 *
 * @return the task name including the subtask indicator
 */
public final String getName() { return getEnvironment().getTaskInfo().getTaskNameWithSubtasks(); }
Gets the name of the task, in the form "taskname (2/5)". @return The name of the task.
getName
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
Apache-2.0
/** Returns an executor that synchronizes each invocation on a fresh private mutex. */
static SynchronizedStreamTaskActionExecutor synchronizedExecutor() { return synchronizedExecutor(new Object()); }
Returns an ExecutionDecorator that synchronizes each invocation.
synchronizedExecutor
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTaskActionExecutor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTaskActionExecutor.java
Apache-2.0
/** Returns an executor that synchronizes each invocation on the given mutex object. */
static SynchronizedStreamTaskActionExecutor synchronizedExecutor(Object mutex) { return new SynchronizedStreamTaskActionExecutor(mutex); }
Returns an ExecutionDecorator that synchronizes each invocation on a given object.
synchronizedExecutor
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTaskActionExecutor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTaskActionExecutor.java
Apache-2.0
/**
 * @return the object used for mutual exclusion of all operations involving data and state
 *     mutation (a.k.a. the checkpoint lock)
 */
public Object getMutex() { return mutex; }
@return an object used for mutual exclusion of all operations that involve data and state mutation. (a.k.a. checkpoint lock).
getMutex
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTaskActionExecutor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTaskActionExecutor.java
Apache-2.0
/**
 * Factory for a context whose {@code isCancelled()} always returns {@code false}, i.e. a task
 * that is never considered cancelled.
 */
static StreamTaskCancellationContext alwaysRunning() { return () -> false; }
Factory for a context that always returns {@code false} when {@link #isCancelled()} is called. @return context
alwaysRunning
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTaskCancellationContext.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTaskCancellationContext.java
Apache-2.0
/** Returns the shared {@code SystemClock} singleton used as this time service's clock. */
@Override public Clock getClock() { return SystemClock.getInstance(); }
The executor service that schedules and calls the triggers of this task.
getClock
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/SystemProcessingTimeService.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/SystemProcessingTimeService.java
Apache-2.0
/** Renders this exception as {@code TimerException{<cause>}} for logs and error messages. */
@Override
public String toString() {
    // %s applies String.valueOf, so a null cause prints as "null", same as concatenation.
    return String.format("TimerException{%s}", getCause());
}
{@code RuntimeException} for wrapping exceptions that are thrown in the timer callback of the timer service in {@link StreamTask}.
toString
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/TimerException.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/TimerException.java
Apache-2.0
/**
 * Sets up latency measurement with the required {@link TimerService} and
 * {@link MailboxExecutor}. Can be called at most once per instance; a second call fails the
 * state check.
 *
 * @param timerService timer service used for latency measurement
 * @param mailboxExecutor mailbox executor used for latency measurement
 */
public void setupLatencyMeasurement( TimerService timerService, MailboxExecutor mailboxExecutor) { checkState( !isLatencyMeasurementSetup(), "latency measurement has already been setup and cannot be setup twice"); this.timerService = timerService; this.mailboxExecutor = mailboxExecutor; }
Sets up latency measurement with required {@link TimerService} and {@link MailboxExecutor}. <p>Note: For each instance, latency measurement can be set up only once. @param timerService {@link TimerService} used for latency measurement. @param mailboxExecutor {@link MailboxExecutor} used for latency measurement.
setupLatencyMeasurement
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxMetricsController.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxMetricsController.java
Apache-2.0
/**
 * Starts mailbox latency measurement by scheduling the first measurement mail. Requires that
 * {@code setupLatencyMeasurement} was called first, and can itself be called at most once;
 * both preconditions are enforced with state checks. The started flag is set only after
 * scheduling succeeds.
 */
public void startLatencyMeasurement() { checkState(!isLatencyMeasurementStarted(), "latency measurement has already been started"); checkState( isLatencyMeasurementSetup(), "timer service and mailbox executor must be setup for latency measurement"); scheduleLatencyMeasurement(); started = true; }
Starts mailbox latency measurement. This requires setup of latency measurement via {@link MailboxMetricsController#setupLatencyMeasurement(TimerService, MailboxExecutor)}. Latency is measured through execution of a mail that is triggered by default in the interval defined by {@link MailboxMetricsController#defaultLatencyMeasurementInterval}. <p>Note: For each instance, latency measurement can be started only once.
startLatencyMeasurement
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxMetricsController.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxMetricsController.java
Apache-2.0
/**
 * Indicates whether latency measurement has been started.
 *
 * @return true if latency measurement has been started
 */
public boolean isLatencyMeasurementStarted() { return started; }
Indicates if latency measurement has been started. @return True if latency measurement has been started.
isLatencyMeasurementStarted
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxMetricsController.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxMetricsController.java
Apache-2.0
/**
 * Indicates whether latency measurement has been set up, i.e. both the timer service and the
 * mailbox executor have been provided.
 *
 * @return true if latency measurement has been set up
 */
public boolean isLatencyMeasurementSetup() {
    // De Morgan form of: timerService != null && mailboxExecutor != null
    return !(this.timerService == null || this.mailboxExecutor == null);
}
Indicates if latency measurement has been setup. @return True if latency measurement has been setup.
isLatencyMeasurementSetup
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxMetricsController.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxMetricsController.java
Apache-2.0
/**
 * Returns a new mailbox executor over the task mailbox at minimum priority, running actions
 * through this processor's action executor.
 */
public MailboxExecutor getMainMailboxExecutor() { return new MailboxExecutorImpl(mailbox, MIN_PRIORITY, actionExecutor); }
Remembers a currently active suspension of the default action. Serves as flag to indicate a suspended default action (suspended if not-null) and to reuse the object as return value in consecutive suspend attempts. Must only be accessed from mailbox thread.
getMainMailboxExecutor
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
Apache-2.0
/**
 * Gets the {@link MailboxMetricsController} for control of and access to mailbox metrics.
 * Exposed for testing only.
 *
 * @return the {@link MailboxMetricsController}
 */
@VisibleForTesting public MailboxMetricsController getMailboxMetricsControl() { return this.mailboxMetricsControl; }
Gets {@link MailboxMetricsController} for control and access to mailbox metrics. @return {@link MailboxMetricsController}.
getMailboxMetricsControl
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
Apache-2.0
/**
 * Lifecycle method that closes the mailbox for further action submission/retrieval and cancels
 * every mail still enqueued (including {@link java.util.concurrent.RunnableFuture} instances).
 * If cancelling a dropped mail throws, cancellation still continues for the remaining mails;
 * the first {@code RuntimeException} is rethrown at the end with later ones attached as
 * suppressed exceptions.
 */
@Override
public void close() {
    List<Mail> droppedMails = mailbox.close();
    if (!droppedMails.isEmpty()) {
        LOG.debug("Closing the mailbox dropped mails {}.", droppedMails);
        // Accumulate failures in a plain nullable local instead of a mutable Optional
        // (Optional is meant for return values, not as a mutable accumulator), so that every
        // dropped mail gets a cancellation attempt before the first failure is rethrown.
        RuntimeException firstError = null;
        for (Mail droppedMail : droppedMails) {
            try {
                droppedMail.tryCancel(false);
            } catch (RuntimeException x) {
                firstError = ExceptionUtils.firstOrSuppressed(x, firstError);
            }
        }
        if (firstError != null) {
            throw firstError;
        }
    }
}
Lifecycle method to close the mailbox for action submission/retrieval. This will cancel all instances of {@link java.util.concurrent.RunnableFuture} that are still contained in the mailbox.
close
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
Apache-2.0
/**
 * Runs every mail currently in the mailbox to completion. If no concurrent writes occur, the
 * mailbox is empty afterwards.
 */
public void drain() throws Exception { for (final Mail mail : mailbox.drain()) { runMail(mail); } }
Finishes running all mails in the mailbox. If no concurrent write operations occurred, the mailbox must be empty after this method.
drain
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
Apache-2.0
/**
 * Runs the mailbox processing loop: alternately processes pending mails and runs the default
 * action until the loop is suspended (via {@link #suspend()}) or stopped. Clears any previous
 * suspension on entry so a suspended loop can be resumed by calling this method again. Must be
 * executed by the declared mailbox thread on an open mailbox (both checked).
 */
public void runMailboxLoop() throws Exception { suspended = !mailboxLoopRunning; final TaskMailbox localMailbox = mailbox; checkState( localMailbox.isMailboxThread(), "Method must be executed by declared mailbox thread!"); assert localMailbox.getState() == TaskMailbox.State.OPEN : "Mailbox must be opened!"; final MailboxController mailboxController = new MailboxController(this); while (isNextLoopPossible()) { // The blocking `processMail` call will not return until default action is available. processMail(localMailbox, false); if (isNextLoopPossible()) { mailboxDefaultAction.runDefaultAction( mailboxController); // lock is acquired inside default action as needed } } }
Runs the mailbox processing loop. This is where the main work is done. This loop can be suspended at any time by calling {@link #suspend()}. For resuming the loop this method should be called again.
runMailboxLoop
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
Apache-2.0
/** Suspends the loop started by {@link #runMailboxLoop()} by enqueuing a poison mail that sets the suspended flag. */
public void suspend() { sendPoisonMail(() -> suspended = true); }
Suspend the running of the loop which was started by {@link #runMailboxLoop()}. For resuming the loop this method should be called again.
suspend
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
Apache-2.0
/**
 * Checks whether the current thread is the mailbox thread.
 *
 * @return true only if called from the mailbox thread
 */
public boolean isMailboxThread() { return mailbox.isMailboxThread(); }
Check if the current thread is the mailbox thread. @return only true if called from the mailbox thread.
isMailboxThread
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
Apache-2.0
/**
 * Reports a throwable for rethrowing from the mailbox thread by enqueuing a control mail that
 * rethrows it as-is (Exception or Error) or wrapped in a runtime exception otherwise.
 *
 * @param throwable the throwable to rethrow from the mailbox loop
 */
public void reportThrowable(Throwable throwable) { sendControlMail( () -> { if (throwable instanceof Exception) { throw (Exception) throwable; } else if (throwable instanceof Error) { throw (Error) throwable; } else { throw WrappingRuntimeException.wrapIfNecessary(throwable); } }, "Report throwable %s", throwable); }
Reports a throwable for rethrowing from the mailbox thread. This will clear and cancel all other pending mails. @param throwable to report by rethrowing from the mailbox loop.
reportThrowable
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
Apache-2.0
/**
 * Ends the stream task once all its actions have been performed: a poison mail stops the
 * mailbox loop and marks it suspended.
 */
public void allActionsCompleted() { sendPoisonMail( () -> { mailboxLoopRunning = false; suspended = true; }); }
This method must be called to end the stream task when all actions for the tasks have been performed.
allActionsCompleted
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
Apache-2.0
/**
 * Sends an internal first-priority (poison) mail. The state check and enqueue run atomically
 * under {@code runExclusively} so a concurrent close cannot slip in between and cause a
 * mailbox-state exception; if the mailbox is no longer OPEN the mail is silently dropped.
 */
private void sendPoisonMail(RunnableWithException mail) { mailbox.runExclusively( () -> { // keep state check and poison mail enqueuing atomic, such that no intermediate // #close may cause a // MailboxStateException in #sendPriorityMail. if (mailbox.getState() == TaskMailbox.State.OPEN) { sendControlMail(mail, "poison mail"); } }); }
Send mail in first priority for internal needs.
sendPoisonMail
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
Apache-2.0
/**
 * Sends the given mail to the head of the mailbox via {@code TaskMailbox#putFirst(Mail)}.
 * Intended only for controlling this processor; the priority value is unused with putFirst.
 */
private void sendControlMail( RunnableWithException mail, String descriptionFormat, Object... descriptionArgs) { mailbox.putFirst( new Mail( mail, Integer.MAX_VALUE /*not used with putFirst*/, descriptionFormat, descriptionArgs)); }
Sends the given <code>mail</code> using {@link TaskMailbox#putFirst(Mail)} . Intended use is to control this <code>MailboxProcessor</code>; no interaction with tasks should be performed;
sendControlMail
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java
Apache-2.0
/** Returns whether this mailbox lifecycle state still accepts new mails. */
public boolean isAcceptingMails() { return acceptingMails; }
This enum represents the states of the mailbox lifecycle.
isAcceptingMails
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/TaskMailbox.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/TaskMailbox.java
Apache-2.0
/**
 * Configures the common stream-graph properties for a one-input transformation: adds the
 * operator node, applies attributes, sets the state key serializer when keyed, resolves the
 * parallelism (falling back to the execution config's default), sets max parallelism, wires
 * exactly one upstream input via edges, and records concurrent-execution-attempt support for
 * physical transformations.
 *
 * @return the singleton collection containing the id of the added stream node
 */
protected Collection<Integer> translateInternal( final Transformation<OUT> transformation, final StreamOperatorFactory<OUT> operatorFactory, final TypeInformation<IN> inputType, @Nullable final KeySelector<IN, ?> stateKeySelector, @Nullable final TypeInformation<?> stateKeyType, final Context context) { checkNotNull(transformation); checkNotNull(operatorFactory); checkNotNull(inputType); checkNotNull(context); final StreamGraph streamGraph = context.getStreamGraph(); final String slotSharingGroup = context.getSlotSharingGroup(); final int transformationId = transformation.getId(); final ExecutionConfig executionConfig = streamGraph.getExecutionConfig(); streamGraph.addOperator( transformationId, slotSharingGroup, transformation.getCoLocationGroupKey(), operatorFactory, inputType, transformation.getOutputType(), transformation.getName()); streamGraph.setAttribute(transformationId, transformation.getAttribute()); if (stateKeySelector != null) { TypeSerializer<?> keySerializer = stateKeyType.createSerializer(executionConfig.getSerializerConfig()); streamGraph.setOneInputStateKey(transformationId, stateKeySelector, keySerializer); } int parallelism = transformation.getParallelism() != ExecutionConfig.PARALLELISM_DEFAULT ? 
transformation.getParallelism() : executionConfig.getParallelism(); streamGraph.setParallelism( transformationId, parallelism, transformation.isParallelismConfigured()); streamGraph.setMaxParallelism(transformationId, transformation.getMaxParallelism()); final List<Transformation<?>> parentTransformations = transformation.getInputs(); checkState( parentTransformations.size() == 1, "Expected exactly one input transformation but found " + parentTransformations.size()); for (Integer inputId : context.getStreamNodeIds(parentTransformations.get(0))) { streamGraph.addEdge(inputId, transformationId, 0); } if (transformation instanceof PhysicalTransformation) { streamGraph.setSupportsConcurrentExecutionAttempts( transformationId, ((PhysicalTransformation<OUT>) transformation) .isSupportsConcurrentExecutionAttempts()); } return Collections.singleton(transformationId); }
A utility base class for one input {@link Transformation transformations} that provides a function for configuring common graph properties.
translateInternal
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/translators/AbstractOneInputTransformationTranslator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/translators/AbstractOneInputTransformationTranslator.java
Apache-2.0
/**
 * Configures the common stream-graph properties for a two-input transformation: adds the
 * co-operator node, applies attributes, sets the two-input state key serializer when either
 * key selector is present (requiring key type information), resolves parallelism (falling back
 * to the execution config's default), sets max parallelism, wires the first and second inputs
 * with edge type numbers 1 and 2 respectively, and records concurrent-execution-attempt
 * support for physical transformations.
 *
 * @return the singleton collection containing the id of the added stream node
 */
protected Collection<Integer> translateInternal( final Transformation<OUT> transformation, final Transformation<IN1> firstInputTransformation, final Transformation<IN2> secondInputTransformation, final StreamOperatorFactory<OUT> operatorFactory, @Nullable final TypeInformation<?> keyTypeInfo, @Nullable final KeySelector<IN1, ?> firstKeySelector, @Nullable final KeySelector<IN2, ?> secondKeySelector, final Context context) { checkNotNull(transformation); checkNotNull(firstInputTransformation); checkNotNull(secondInputTransformation); checkNotNull(operatorFactory); checkNotNull(context); final StreamGraph streamGraph = context.getStreamGraph(); final String slotSharingGroup = context.getSlotSharingGroup(); final int transformationId = transformation.getId(); final ExecutionConfig executionConfig = streamGraph.getExecutionConfig(); streamGraph.addCoOperator( transformationId, slotSharingGroup, transformation.getCoLocationGroupKey(), operatorFactory, firstInputTransformation.getOutputType(), secondInputTransformation.getOutputType(), transformation.getOutputType(), transformation.getName()); streamGraph.setAttribute(transformationId, transformation.getAttribute()); if (firstKeySelector != null || secondKeySelector != null) { checkState( keyTypeInfo != null, "Keyed Transformation without provided key type information."); final TypeSerializer<?> keySerializer = keyTypeInfo.createSerializer(executionConfig.getSerializerConfig()); streamGraph.setTwoInputStateKey( transformationId, firstKeySelector, secondKeySelector, keySerializer); } final int parallelism = transformation.getParallelism() != ExecutionConfig.PARALLELISM_DEFAULT ? 
transformation.getParallelism() : executionConfig.getParallelism(); streamGraph.setParallelism( transformationId, parallelism, transformation.isParallelismConfigured()); streamGraph.setMaxParallelism(transformationId, transformation.getMaxParallelism()); for (Integer inputId : context.getStreamNodeIds(firstInputTransformation)) { streamGraph.addEdge(inputId, transformationId, 1); } for (Integer inputId : context.getStreamNodeIds(secondInputTransformation)) { streamGraph.addEdge(inputId, transformationId, 2); } if (transformation instanceof PhysicalTransformation) { streamGraph.setSupportsConcurrentExecutionAttempts( transformationId, ((PhysicalTransformation<OUT>) transformation) .isSupportsConcurrentExecutionAttempts()); } return Collections.singleton(transformationId); }
A base class with functionality used during translating {@link Transformation transformations} with two inputs.
translateInternal
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/translators/AbstractTwoInputTransformationTranslator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/translators/AbstractTwoInputTransformationTranslator.java
Apache-2.0
/** Discards the element: this operator acts as a dummy sink so the upstream produces the intermediate dataset to be cached. */
@Override public void processElement(StreamRecord<T> element) throws Exception { // do nothing }
The {@link NoOpStreamOperator} acts as a dummy sink so that the upstream can produce the intermediate dataset to be cached. @param <T> The output type of the operator, which is the type of the cached intermediate dataset as well.
processElement
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/translators/CacheTransformationTranslator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/translators/CacheTransformationTranslator.java
Apache-2.0
// Pure passthrough: forwards each record unchanged so the cached intermediate
// dataset can be consumed downstream.
@Override public void processElement(StreamRecord<T> element) throws Exception { output.collect(element); }
The {@link IdentityStreamOperator} acts as a dummy source to consume cached intermediate dataset. @param <T> The output type of the operator, which is the type of the cached intermediate * dataset as well.
processElement
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/translators/CacheTransformationTranslator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/translators/CacheTransformationTranslator.java
Apache-2.0
/**
 * Wraps the given stream in a forward-partitioned exchange with BATCH exchange mode,
 * so the intermediate result is materialized before being consumed (creating a
 * failover-region boundary in batch execution).
 */
private <I> DataStream<I> addFailOverRegion(DataStream<I> input) {
    final PartitionTransformation<I> batchExchange =
            new PartitionTransformation<>(
                    input.getTransformation(),
                    new ForwardPartitioner<>(),
                    StreamExchangeMode.BATCH);
    return new DataStream<>(executionEnvironment, batchExchange);
}
Adds a batch exchange that materializes the output first. This is a no-op in STREAMING.
addFailOverRegion
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/translators/SinkTransformationTranslator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/translators/SinkTransformationTranslator.java
Apache-2.0
/**
 * Adapts a user-facing {@link WatermarkDeclaration} into its internal representation.
 *
 * @param watermarkDeclaration the declaration to adapt; may already be internal
 * @return the internal declaration
 * @throws IllegalArgumentException if the declaration type is not recognized
 */
public static AbstractInternalWatermarkDeclaration<?> from(
        WatermarkDeclaration watermarkDeclaration) {
    // Already internal: pass through unchanged.
    if (watermarkDeclaration instanceof AbstractInternalWatermarkDeclaration) {
        return (AbstractInternalWatermarkDeclaration<?>) watermarkDeclaration;
    }
    if (watermarkDeclaration instanceof BoolWatermarkDeclaration) {
        BoolWatermarkDeclaration boolDeclaration =
                (BoolWatermarkDeclaration) watermarkDeclaration;
        return new InternalBoolWatermarkDeclaration(boolDeclaration);
    }
    if (watermarkDeclaration instanceof LongWatermarkDeclaration) {
        LongWatermarkDeclaration longDeclaration =
                (LongWatermarkDeclaration) watermarkDeclaration;
        return new InternalLongWatermarkDeclaration(longDeclaration);
    }
    throw new IllegalArgumentException(
            "Unknown watermark declaration type: " + watermarkDeclaration.getClass());
}
Converts a user-oriented {@link WatermarkDeclaration} to an internal-oriented {@link AbstractInternalWatermarkDeclaration}.
from
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/watermark/AbstractInternalWatermarkDeclaration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/watermark/AbstractInternalWatermarkDeclaration.java
Apache-2.0
// Routes EventTimeExtension watermarks from an input channel into the combiner:
// event-time watermarks are converted to legacy Watermark instances, idle-status
// watermarks to WatermarkStatus. The emitter is installed on the output first so
// any combined result produced by inputWatermark/inputWatermarkStatus is forwarded.
// Watermarks of other identifiers are ignored here.
@Override public void combineWatermark( Watermark watermark, int channelIndex, Consumer<Watermark> watermarkEmitter) throws Exception { output.setWatermarkEmitter(watermarkEmitter); if (EventTimeExtension.isEventTimeWatermark(watermark)) { inputWatermark( new org.apache.flink.streaming.api.watermark.Watermark( ((LongWatermark) watermark).getValue()), channelIndex, output); } else if (EventTimeExtension.isIdleStatusWatermark(watermark.getIdentifier())) { inputWatermarkStatus( new WatermarkStatus( ((BoolWatermark) watermark).getValue() ? WatermarkStatus.IDLE_STATUS : WatermarkStatus.ACTIVE_STATUS), channelIndex, output); } }
A {@link WatermarkCombiner} used to combine {@link EventTimeExtension} related watermarks in input channels.
combineWatermark
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/watermark/extension/eventtime/EventTimeWatermarkCombiner.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/watermark/extension/eventtime/EventTimeWatermarkCombiner.java
Apache-2.0
/**
 * Processes an event-time or idle-status watermark for the given input.
 *
 * <p>The caller is responsible for only passing EventTimeWatermark/IdleStatusWatermark
 * instances; watermarks with other identifiers are ignored.
 *
 * @return the event-time update status; always {@code NO_UPDATE} for idle-status or
 *     unrecognized watermarks
 */
public EventTimeUpdateStatus processWatermark(
        org.apache.flink.api.common.watermark.Watermark watermark, int inputIndex)
        throws Exception {
    final String identifier = watermark.getIdentifier();
    if (EventTimeExtension.isEventTimeWatermark(identifier)) {
        // Only event-time watermarks can advance the combined event time.
        return processEventTime(((LongWatermark) watermark).getValue(), inputIndex);
    }
    if (EventTimeExtension.isIdleStatusWatermark(identifier)) {
        processEventTimeIdleStatus(((BoolWatermark) watermark).getValue(), inputIndex);
    }
    return EventTimeUpdateStatus.NO_UPDATE;
}
Process EventTimeWatermark/IdleStatusWatermark. <p>It's caller's responsibility to check whether the watermark is EventTimeWatermark/IdleStatusWatermark. @return the status of event time watermark update.
processWatermark
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/watermark/extension/eventtime/EventTimeWatermarkHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/watermark/extension/eventtime/EventTimeWatermarkHandler.java
Apache-2.0
/**
 * Marks the given {@link SubpartitionStatus} as watermark-aligned and starts tracking
 * it in {@code alignedSubpartitionStatuses}. No-op if it is already aligned.
 *
 * @param subpartitionStatus the subpartition status to mark
 */
private void markWatermarkAligned(SubpartitionStatus subpartitionStatus) {
    if (subpartitionStatus.isWatermarkAligned) {
        return; // already tracked as aligned, nothing to do
    }
    subpartitionStatus.isWatermarkAligned = true;
    subpartitionStatus.addTo(alignedSubpartitionStatuses);
}
Mark the {@link SubpartitionStatus} as watermark-aligned and add it to the {@link #alignedSubpartitionStatuses}. @param subpartitionStatus the subpartition status to be marked
markWatermarkAligned
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValve.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValve.java
Apache-2.0
/**
 * Marks the given {@link SubpartitionStatus} as watermark-unaligned and stops tracking
 * it in {@code alignedSubpartitionStatuses}. No-op if it is already unaligned.
 *
 * @param subpartitionStatus the subpartition status to mark
 */
private void markWatermarkUnaligned(SubpartitionStatus subpartitionStatus) {
    if (!subpartitionStatus.isWatermarkAligned) {
        return; // already untracked, nothing to do
    }
    subpartitionStatus.isWatermarkAligned = false;
    subpartitionStatus.removeFrom(alignedSubpartitionStatuses);
}
Mark the {@link SubpartitionStatus} as watermark-unaligned and remove it from the {@link #alignedSubpartitionStatuses}. @param subpartitionStatus the subpartition status to be marked
markWatermarkUnaligned
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValve.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValve.java
Apache-2.0
/**
 * Scans every subpartition status (aligned or not) for the largest watermark seen so
 * far and emits it via the given output, but only if it advances past the watermark
 * emitted last.
 */
private void findAndOutputMaxWatermarkAcrossAllSubpartitions(DataOutput<?> output)
        throws Exception {
    long newMax = Long.MIN_VALUE;
    for (Map<Integer, SubpartitionStatus> statusesPerGate : subpartitionStatuses) {
        for (SubpartitionStatus status : statusesPerGate.values()) {
            if (status.watermark > newMax) {
                newMax = status.watermark;
            }
        }
    }
    // Watermarks must be strictly increasing; suppress non-advancing values.
    if (newMax > lastOutputWatermark) {
        lastOutputWatermark = newMax;
        output.emitWatermark(new Watermark(lastOutputWatermark));
    }
}
Finds the maximum watermark across all subpartition statuses (aligned or not) and emits it through the given output if it is larger than the last emitted watermark. @param output the data output used to emit the advanced watermark
findAndOutputMaxWatermarkAcrossAllSubpartitions
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValve.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/runtime/watermarkstatus/StatusWatermarkValve.java
Apache-2.0
/**
 * Records the end-to-end latency of the given marker in the histogram that belongs to
 * the marker's source/operator combination (as determined by the configured
 * granularity). The histogram is lazily created and registered with the metric group
 * the first time a marker for that combination is seen.
 *
 * @param marker the latency marker whose travel time should be recorded
 */
public void reportLatency(LatencyMarker marker) {
    final String uniqueName =
            granularity.createUniqueHistogramName(marker, operatorId, subtaskIndex);
    // computeIfAbsent replaces the original get-then-put pattern: the histogram is
    // created and registered exactly once per unique name.
    final DescriptiveStatisticsHistogram latencyHistogram =
            this.latencyStats.computeIfAbsent(
                    uniqueName,
                    name -> {
                        DescriptiveStatisticsHistogram histogram =
                                new DescriptiveStatisticsHistogram(this.historySize);
                        granularity
                                .createSourceMetricGroups(
                                        metricGroup, marker, operatorId, subtaskIndex)
                                .addGroup("operator_id", String.valueOf(operatorId))
                                .addGroup(
                                        "operator_subtask_index",
                                        String.valueOf(subtaskIndex))
                                .histogram("latency", histogram);
                        return histogram;
                    });
    // Latency is wall-clock now minus the marker's emission timestamp.
    latencyHistogram.update(System.currentTimeMillis() - marker.getMarkedTime());
}
The {@link LatencyStats} objects are used to track and report on the behavior of latencies across measurements.
reportLatency
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/util/LatencyStats.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/util/LatencyStats.java
Apache-2.0
/**
 * Throws {@link IllegalStateException} if auto-generated UIDs are disabled and the
 * given {@link PhysicalTransformation} has neither a UID nor a user-provided hash.
 *
 * @param streamGraph the graph the transformation is added to
 * @param transformation the transformation to validate
 */
public static void validateTransformationUid(
        StreamGraph streamGraph, Transformation<?> transformation) {
    if (streamGraph.getExecutionConfig().hasAutoGeneratedUIDsEnabled()) {
        return; // auto-generated UIDs are fine, nothing to validate
    }
    if (!(transformation instanceof PhysicalTransformation)) {
        return; // only physical transformations require an explicit UID/hash
    }
    if (transformation.getUserProvidedNodeHash() != null || transformation.getUid() != null) {
        return;
    }
    throw new IllegalStateException(
            "Auto generated UIDs have been disabled "
                    + "but no UID or hash has been assigned to operator "
                    + transformation.getName());
}
Throw {@link IllegalStateException} if the {@link PhysicalTransformation}'s uid or hash is not set when auto generate uid is disabled. @param streamGraph The given graph that the transformation is added to @param transformation The transformation needed to validate
validateTransformationUid
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/util/graph/StreamGraphUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/util/graph/StreamGraphUtils.java
Apache-2.0
/**
 * Configures a stream node's buffer timeout from the given transformation, falling
 * back to the supplied default when the transformation does not set one.
 *
 * @param streamGraph the StreamGraph the node belongs to
 * @param nodeId the node's id
 * @param transformation the transformation to read the timeout from
 * @param defaultBufferTimeout the fallback buffer timeout value
 */
public static <T> void configureBufferTimeout(
        StreamGraph streamGraph,
        int nodeId,
        Transformation<T> transformation,
        long defaultBufferTimeout) {
    final long bufferTimeout = transformation.getBufferTimeout();
    // A negative value means "not configured on the transformation".
    streamGraph.setBufferTimeout(
            nodeId, bufferTimeout >= 0 ? bufferTimeout : defaultBufferTimeout);
}
Configure a stream node's buffer timeout according to the given transformation. @param streamGraph The StreamGraph the node belongs to @param nodeId The node's id @param transformation A given transformation @param defaultBufferTimeout The default buffer timeout value
configureBufferTimeout
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/util/graph/StreamGraphUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/util/graph/StreamGraphUtils.java
Apache-2.0
// Creates a FieldAccessor for a positional (zero-based) field reference. Dispatches
// on the concrete TypeInformation: arrays get an ArrayFieldAccessor; basic types only
// accept position 0 (selecting the whole value); Scala case classes delegate to the
// Scala field-accessor factory (failing if the Scala API is not on the classpath);
// other tuple types use a SimpleTupleFieldAccessor; anything else (e.g. POJOs) is
// rejected with InvalidFieldReferenceException.
@Internal public static <T, F> FieldAccessor<T, F> getAccessor( TypeInformation<T> typeInfo, int pos, ExecutionConfig config) { // In case of arrays if (typeInfo instanceof BasicArrayTypeInfo || typeInfo instanceof PrimitiveArrayTypeInfo) { return new FieldAccessor.ArrayFieldAccessor<>(pos, typeInfo); // In case of basic types } else if (typeInfo instanceof BasicTypeInfo) { if (pos != 0) { throw new CompositeType.InvalidFieldReferenceException( "The " + ((Integer) pos).toString() + ". field selected on a " + "basic type (" + typeInfo.toString() + "). A field expression on a basic type can only select " + "the 0th field (which means selecting the entire basic type)."); } @SuppressWarnings("unchecked") FieldAccessor<T, F> result = (FieldAccessor<T, F>) new FieldAccessor.SimpleFieldAccessor<>(typeInfo); return result; // In case of case classes } else if (typeInfo.isTupleType() && ((TupleTypeInfoBase) typeInfo).isCaseClass()) { TupleTypeInfoBase tupleTypeInfo = (TupleTypeInfoBase) typeInfo; @SuppressWarnings("unchecked") TypeInformation<F> fieldTypeInfo = (TypeInformation<F>) tupleTypeInfo.getTypeAt(pos); if (scalaProductFieldAccessorFactory != null) { return scalaProductFieldAccessorFactory.createRecursiveProductFieldAccessor( pos, typeInfo, new FieldAccessor.SimpleFieldAccessor<>(fieldTypeInfo), config); } else { throw new IllegalStateException( "Scala products are used but Scala API is not on the classpath."); } // In case of tuples } else if (typeInfo.isTupleType()) { @SuppressWarnings("unchecked") FieldAccessor<T, F> result = new FieldAccessor.SimpleTupleFieldAccessor(pos, typeInfo); return result; // Default case, PojoType is directed to this statement } else { throw new CompositeType.InvalidFieldReferenceException( "Cannot reference field by position on " + typeInfo.toString() + "Referencing a field by position is supported on tuples, case classes, and arrays. " + "Additionally, you can select the 0th field of a primitive/basic type (e.g. int)."); } }
Creates a {@link FieldAccessor} for the given field position, which can be used to get and set the specified field on instances of this type. @param pos The field position (zero-based) @param config Configuration object @param <F> The type of the field to access @return The created FieldAccessor
getAccessor
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/util/typeutils/FieldAccessorFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/util/typeutils/FieldAccessorFactory.java
Apache-2.0
/**
 * Loads the default Scala product field-accessor implementation via reflection, if it
 * is on the classpath.
 *
 * @param log logger used (at debug level) when loading fails
 * @return the loaded implementation, or {@code null} if it is not accessible
 */
static ScalaProductFieldAccessorFactory load(Logger log) {
    try {
        final Class<?> implementationClass =
                Class.forName(
                        "org.apache.flink.streaming.util.typeutils.DefaultScalaProductFieldAccessorFactory");
        final Object instance = implementationClass.getDeclaredConstructor().newInstance();
        return (ScalaProductFieldAccessorFactory) instance;
    } catch (Exception e) {
        // Expected when the Scala API is absent; callers treat null as "unavailable".
        log.debug("Unable to load Scala API extension.", e);
        return null;
    }
}
Loads the implementation, if it is accessible. @param log Logger to be used in case the loading fails @return Loaded implementation, if it is accessible.
load
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/util/typeutils/ScalaProductFieldAccessorFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/util/typeutils/ScalaProductFieldAccessorFactory.java
Apache-2.0
// Gathers all watermark declarations from the StreamGraph's nodes — ProcessFunction
// declarations from SimpleOperatorFactory operators and source declarations from
// SourceOperatorFactory — deduplicates them into a Set, and converts each to its
// internal representation. Factories of other kinds are skipped.
public static Set<AbstractInternalWatermarkDeclaration<?>> getInternalWatermarkDeclarationsFromStreamGraph(StreamGraph streamGraph) { Collection<StreamNode> streamNodes = streamGraph.getStreamNodes(); Set<WatermarkDeclaration> declarations = streamNodes.stream() .map(StreamNode::getOperatorFactory) .filter( factory -> factory instanceof SimpleOperatorFactory || factory instanceof SourceOperatorFactory) .map( factory -> { if (factory instanceof SimpleOperatorFactory) { return getWatermarkDeclarations( ((SimpleOperatorFactory<?>) factory).getOperator()); } else { return ((SourceOperatorFactory<?>) factory) .getSourceWatermarkDeclarations(); } }) .flatMap(Collection::stream) .collect(Collectors.toSet()); return convertToInternalWatermarkDeclarations(declarations); }
Retrieve the declared watermarks from StreamGraph and convert them into {@code InternalWatermarkDeclaration}.
getInternalWatermarkDeclarationsFromStreamGraph
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/util/watermark/WatermarkUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/util/watermark/WatermarkUtils.java
Apache-2.0
/**
 * Extracts the user-defined {@link WatermarkDeclaration}s from the given operator's
 * {@link ProcessFunction}, if it has one. Source-operator declarations are obtained
 * elsewhere via {@code SourceOperatorFactory#getSourceWatermarkDeclarations()}.
 *
 * @return the declared watermarks, or an empty set if the operator does not wrap a
 *     ProcessFunction
 */
private static Collection<? extends WatermarkDeclaration> getWatermarkDeclarations(
        StreamOperator<?> streamOperator) {
    if (!(streamOperator instanceof AbstractAsyncStateUdfStreamOperator)) {
        return Collections.emptySet();
    }
    Function userFunction =
            ((AbstractAsyncStateUdfStreamOperator<?, ?>) streamOperator).getUserFunction();
    if (userFunction instanceof ProcessFunction) {
        return ((ProcessFunction) userFunction).declareWatermarks();
    }
    return Collections.emptySet();
}
Retrieve the user-defined {@link WatermarkDeclaration}s of {@link ProcessFunction}. The {@link WatermarkDeclaration} defined by the source operator can be retrieved from {@link SourceOperatorFactory#getSourceWatermarkDeclarations()}.
getWatermarkDeclarations
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/util/watermark/WatermarkUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/util/watermark/WatermarkUtils.java
Apache-2.0
/**
 * Converts user-facing {@link WatermarkDeclaration} instances into their internal
 * {@link AbstractInternalWatermarkDeclaration} counterparts.
 */
public static Set<AbstractInternalWatermarkDeclaration<?>> convertToInternalWatermarkDeclarations(
        Set<WatermarkDeclaration> watermarkDeclarations) {
    // Each declaration is individually adapted; duplicates collapse in the result set.
    return watermarkDeclarations.stream()
            .map(declaration -> AbstractInternalWatermarkDeclaration.from(declaration))
            .collect(Collectors.toSet());
}
Convert user-oriented {@link WatermarkDeclaration} instance to internal-oriented {@link AbstractInternalWatermarkDeclaration} instance.
convertToInternalWatermarkDeclarations
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/util/watermark/WatermarkUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/util/watermark/WatermarkUtils.java
Apache-2.0
public static void addEventTimeWatermarkCombinerIfNeeded( Set<AbstractInternalWatermarkDeclaration<?>> watermarkDeclarationSet, Map<String, WatermarkCombiner> watermarkCombiners, int numberOfInputChannels) { if (watermarkDeclarationSet.stream() .anyMatch( declaration -> EventTimeExtension.isEventTimeWatermark( declaration.getIdentifier()))) { // create event time watermark combiner EventTimeWatermarkCombiner eventTimeWatermarkCombiner = new EventTimeWatermarkCombiner(numberOfInputChannels); watermarkCombiners.put( EventTimeExtension.EVENT_TIME_WATERMARK_DECLARATION.getIdentifier(), eventTimeWatermarkCombiner); watermarkCombiners.put( EventTimeExtension.IDLE_STATUS_WATERMARK_DECLARATION.getIdentifier(), eventTimeWatermarkCombiner); } }
Create watermark combiners if there are event time watermark declarations.
addEventTimeWatermarkCombinerIfNeeded
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/util/watermark/WatermarkUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/util/watermark/WatermarkUtils.java
Apache-2.0
// Factory for the async keyed two-input test harness; hides the verbose generics of
// AsyncKeyedTwoInputStreamOperatorTestHarness.create behind the TestHarness subtype.
public static TestHarness createOne( TwoInputStreamOperator<TestElem, TestElem, Tuple2<TestElem, TestElem>> operator, KeySelector<TestElem, String> keySelector1, KeySelector<TestElem, String> keySelector2, TypeInformation<String> keyType) throws Exception { return AsyncKeyedTwoInputStreamOperatorTestHarness.create( (executor) -> new TestHarness( executor, operator, keySelector1, keySelector2, keyType)); }
Custom test harness to avoid endless generics in all of the test code.
createOne
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/asyncprocessing/operators/AsyncIntervalJoinOperatorTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/asyncprocessing/operators/AsyncIntervalJoinOperatorTest.java
Apache-2.0
// Registers a job twice on the PermanentBlobCache, downloads its two blobs, then
// releases twice and verifies deferred cleanup removes the cached files (with a 1s
// cleanup interval) while the BlobServer's copies remain; closing both finally
// removes everything on the server side as well.
@Test void testPermanentBlobCleanup() throws IOException, InterruptedException { JobID jobId = new JobID(); List<PermanentBlobKey> keys = new ArrayList<>(); BlobServer server = null; PermanentBlobCache cache = null; final byte[] buf = new byte[128]; try { Configuration config = new Configuration(); config.set(BlobServerOptions.CLEANUP_INTERVAL, 1L); server = TestingBlobUtils.createServer(tempDir, config); server.start(); cache = TestingBlobUtils.createPermanentCache(tempDir, config, server); // upload blobs keys.add(server.putPermanent(jobId, buf)); buf[0] += 1; keys.add(server.putPermanent(jobId, buf)); checkFileCountForJob(2, jobId, server); checkFileCountForJob(0, jobId, cache); // register once cache.registerJob(jobId); checkFileCountForJob(2, jobId, server); checkFileCountForJob(0, jobId, cache); for (PermanentBlobKey key : keys) { cache.getFile(jobId, key); } // register again (let's say, from another thread or so) cache.registerJob(jobId); for (PermanentBlobKey key : keys) { cache.getFile(jobId, key); } assertThat(checkFilesExist(jobId, keys, cache, true)).isEqualTo(2); checkFileCountForJob(2, jobId, server); checkFileCountForJob(2, jobId, cache); // after releasing once, nothing should change cache.releaseJob(jobId); assertThat(checkFilesExist(jobId, keys, cache, true)).isEqualTo(2); checkFileCountForJob(2, jobId, server); checkFileCountForJob(2, jobId, cache); // after releasing the second time, the job is up for deferred cleanup cache.releaseJob(jobId); verifyJobCleanup(cache, jobId, keys); // server should be unaffected checkFileCountForJob(2, jobId, server); } finally { if (cache != null) { cache.close(); } if (server != null) { server.close(); } // now everything should be cleaned up checkFileCountForJob(0, jobId, server); } }
Tests that {@link PermanentBlobCache} cleans up after calling {@link PermanentBlobCache#releaseJob(JobID)}.
testPermanentBlobCleanup
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheCleanupTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheCleanupTest.java
Apache-2.0
// Exercises the PermanentBlobCache reference counting: register/release cycles must
// move the per-job reference count between 0, 1 and 2, with keepUntil = -1 while
// referenced and a cleanup deadline (now + CLEANUP_INTERVAL) once it drops to zero.
// The 1h cleanup interval effectively prevents the background cleaner from racing.
@Test void testPermanentJobReferences() throws IOException { JobID jobId = new JobID(); Configuration config = new Configuration(); config.set( BlobServerOptions.CLEANUP_INTERVAL, 3_600_000L); // 1 hour should effectively prevent races // NOTE: use fake address - we will not connect to it here InetSocketAddress serverAddress = new InetSocketAddress("localhost", 12345); try (PermanentBlobCache cache = TestingBlobUtils.createPermanentCache(tempDir, config, serverAddress)) { // register once cache.registerJob(jobId); assertThat(cache.getJobRefCounters().get(jobId).references).isOne(); assertThat(cache.getJobRefCounters().get(jobId).keepUntil).isEqualTo(-1); // register a second time cache.registerJob(jobId); assertThat(cache.getJobRefCounters().get(jobId).references).isEqualTo(2); assertThat(cache.getJobRefCounters().get(jobId).keepUntil).isEqualTo(-1); // release once cache.releaseJob(jobId); assertThat(cache.getJobRefCounters().get(jobId).references).isOne(); assertThat(cache.getJobRefCounters().get(jobId).keepUntil).isEqualTo(-1); // release a second time long cleanupLowerBound = System.currentTimeMillis() + config.get(BlobServerOptions.CLEANUP_INTERVAL); cache.releaseJob(jobId); assertThat(cache.getJobRefCounters().get(jobId).references).isZero(); assertThat(cache.getJobRefCounters().get(jobId).keepUntil) .isGreaterThanOrEqualTo(cleanupLowerBound); // register again cache.registerJob(jobId); assertThat(cache.getJobRefCounters().get(jobId).references).isOne(); assertThat(cache.getJobRefCounters().get(jobId).keepUntil).isEqualTo(-1); // finally release the job cleanupLowerBound = System.currentTimeMillis() + config.get(BlobServerOptions.CLEANUP_INTERVAL); cache.releaseJob(jobId); assertThat(cache.getJobRefCounters().get(jobId).references).isZero(); assertThat(cache.getJobRefCounters().get(jobId).keepUntil) .isGreaterThanOrEqualTo(cleanupLowerBound); } }
Tests that {@link PermanentBlobCache} sets the expected reference counts and cleanup timeouts when registering, releasing, and re-registering jobs.
testPermanentJobReferences
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheCleanupTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheCleanupTest.java
Apache-2.0
// Parameterized driver: sets up a ZooKeeper-HA configuration with a temp HA storage
// path, creates the blob store service, and delegates the actual corrupt-file GET
// check to TestingBlobHelpers. Always cleans up and closes the HA blob store.
// @param jobId job ID or null if job-unrelated
// @param blobType permanent or transient BLOB
// @param corruptOnHAStore whether the corruption is in the HA store (true) or the
//     BlobServer's local store (false)
private void testGetFailsFromCorruptFile( final JobID jobId, BlobKey.BlobType blobType, boolean corruptOnHAStore) throws IOException { final Configuration config = new Configuration(); config.set(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER"); config.set( HighAvailabilityOptions.HA_STORAGE_PATH, TempDirUtils.newFolder(tempDir).getPath()); BlobStoreService blobStoreService = null; try { blobStoreService = BlobUtils.createBlobStoreFromConfig(config); TestingBlobHelpers.testGetFailsFromCorruptFile( jobId, blobType, corruptOnHAStore, config, blobStoreService, TempDirUtils.newFolder(tempDir)); } finally { if (blobStoreService != null) { blobStoreService.cleanupAllData(); blobStoreService.close(); } } }
Checks the GET operation fails when the downloaded file (from {@link BlobServer} or HA store) is corrupt, i.e. its content's hash does not match the {@link BlobKey}'s hash. @param jobId job ID or <tt>null</tt> if job-unrelated @param blobType whether the BLOB should become permanent or transient @param corruptOnHAStore whether the file should be corrupt in the HA store (<tt>true</tt>, required <tt>highAvailability</tt> to be set) or on the {@link BlobServer}'s local store (<tt>false</tt>)
testGetFailsFromCorruptFile
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheCorruptionTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheCorruptionTest.java
Apache-2.0
// Uploads a 2MB blob, then makes the cache's job storage directory read-only so the
// cache cannot create the final file: the GET must fail with AccessDeniedException,
// leave no incoming/partial files behind, and — for transient blobs — the server-side
// copy is still deleted eventually (the download itself succeeded), while permanent
// blobs remain on the server. Skipped on Windows (setWritable is a no-op there).
private void testGetFailsStore(@Nullable final JobID jobId, BlobKey.BlobType blobType) throws IOException, InterruptedException { assumeThat(OperatingSystem.isWindows()).as("setWritable doesn't work on Windows").isFalse(); Tuple2<BlobServer, BlobCacheService> serverAndCache = TestingBlobUtils.createServerAndCache(tempDir); File jobStoreDir = null; try (BlobServer server = serverAndCache.f0; BlobCacheService cache = serverAndCache.f1) { server.start(); // store the data on the server byte[] data = new byte[2000000]; rnd.nextBytes(data); BlobKey blobKey = put(server, jobId, data, blobType); verifyType(blobType, blobKey); // make sure the blob cache cannot create any files in its storage dir if (blobType == PERMANENT_BLOB) { jobStoreDir = cache.getPermanentBlobService() .getStorageLocation(jobId, new PermanentBlobKey()) .getParentFile(); } else { jobStoreDir = cache.getTransientBlobService() .getStorageLocation(jobId, new TransientBlobKey()) .getParentFile(); } assertThat(jobStoreDir.setExecutable(true, false)).isTrue(); assertThat(jobStoreDir.setReadable(true, false)).isTrue(); assertThat(jobStoreDir.setWritable(false, false)).isTrue(); try { assertThatThrownBy(() -> get(cache, jobId, blobKey)) .isInstanceOf(AccessDeniedException.class); } finally { // there should be no remaining incoming files File incomingFileDir = new File(jobStoreDir.getParent(), "incoming"); assertThat(incomingFileDir.list()).isEmpty(); // there should be no files in the job directory assertThat(jobStoreDir.list()).isEmpty(); // if transient, the get will fail but since the download was successful, the file // will not be on the server anymore if (blobType == TRANSIENT_BLOB) { verifyDeletedEventually(server, jobId, blobKey); } else { assertThat(server.getStorageLocation(jobId, blobKey)).exists(); } } } finally { // set writable again to make sure we can remove the directory if (jobStoreDir != null) { //noinspection ResultOfMethodCallIgnored jobStoreDir.setWritable(true, false); } } }
Retrieves a BLOB via a {@link BlobCacheService} which cannot create the final storage file. File transfers should fail. @param jobId job id @param blobType whether the BLOB should become permanent or transient
testGetFailsStore
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheGetTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheGetTest.java
Apache-2.0
// Verifies that fetching a transient blob via the cache still succeeds when the
// BlobServer cannot delete its local copy (file and parent dir made read-only): the
// cache read works, a local delete succeeds, and the server-side copy — which could
// not be removed — remains readable and re-fetchable. Permissions are restored in the
// finally block so the temp directory can be cleaned up. Skipped on Windows.
private void testGetTransientRemoteDeleteFails(@Nullable final JobID jobId) throws IOException { assumeThat(OperatingSystem.isWindows()).as("setWritable doesn't work on Windows").isFalse(); Tuple2<BlobServer, BlobCacheService> serverAndCache = TestingBlobUtils.createServerAndCache(tempDir); File blobFile = null; File directory = null; try (BlobServer server = serverAndCache.f0; BlobCacheService cache = serverAndCache.f1) { server.start(); try { byte[] data = new byte[2000000]; rnd.nextBytes(data); // put BLOB TransientBlobKey key = (TransientBlobKey) put(server, jobId, data, TRANSIENT_BLOB); assertThat(key).isNotNull(); blobFile = server.getStorageLocation(jobId, key); directory = blobFile.getParentFile(); assertThat(blobFile.setWritable(false, false)).isTrue(); assertThat(directory.setWritable(false, false)).isTrue(); // access from cache once which also deletes the file on the server verifyContents(cache, jobId, key, data); // delete locally (should not be affected by the server) assertThat(delete(cache, jobId, key)).isTrue(); File blobFileAtCache = cache.getTransientBlobService().getStorageLocation(jobId, key); assertThat(blobFileAtCache).doesNotExist(); // the file should still be there on the server verifyContents(server, jobId, key, data); // ... and may be retrieved by the cache verifyContents(cache, jobId, key, data); } finally { if (blobFile != null && directory != null) { //noinspection ResultOfMethodCallIgnored blobFile.setWritable(true, false); //noinspection ResultOfMethodCallIgnored directory.setWritable(true, false); } } } }
Uploads a byte array for the given job and verifies that a get operation of a transient BLOB (via the {@link BlobCacheService}; also deletes the file on the {@link BlobServer}) does not fail even if the file is not deletable on the {@link BlobServer}, e.g. via restricting the permissions. @param jobId job id
testGetTransientRemoteDeleteFails
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheGetTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheGetTest.java
Apache-2.0
// FLINK-6020 regression test: uploads one blob, then issues 3 concurrent GETs through
// the cache. For permanent blobs all requests must succeed; for transient blobs the
// first successful access deletes the blob, so later requests may fail with a
// FileNotFoundException root cause — at least one must succeed, any other exception
// is rethrown. The executor is always shut down.
private void testConcurrentGetOperations( final JobID jobId, final BlobKey.BlobType blobType, final boolean cacheAccessesHAStore) throws IOException, InterruptedException, ExecutionException { final BlobStore blobStoreServer = new VoidBlobStore(); final BlobStore blobStoreCache = new VoidBlobStore(); final int numberConcurrentGetOperations = 3; final List<CompletableFuture<File>> getOperations = new ArrayList<>(numberConcurrentGetOperations); final byte[] data = {1, 2, 3, 4, 99, 42}; final ExecutorService executor = Executors.newFixedThreadPool(numberConcurrentGetOperations); Tuple2<BlobServer, BlobCacheService> serverAndCache = TestingBlobUtils.createServerAndCache( tempDir, blobStoreServer, cacheAccessesHAStore ? blobStoreServer : blobStoreCache); try (BlobServer server = serverAndCache.f0; BlobCacheService cache = serverAndCache.f1) { server.start(); // upload data first final BlobKey blobKey = put(server, jobId, data, blobType); // now try accessing it concurrently (only HA mode will be able to retrieve it from HA // store!) 
for (int i = 0; i < numberConcurrentGetOperations; i++) { CompletableFuture<File> getOperation = CompletableFuture.supplyAsync( () -> { try { File file = get(cache, jobId, blobKey); // check that we have read the right data validateGetAndClose( Files.newInputStream(file.toPath()), data); return file; } catch (IOException e) { throw new CompletionException( new FlinkException( "Could not read blob for key " + blobKey + '.', e)); } }, executor); getOperations.add(getOperation); } FutureUtils.ConjunctFuture<Collection<File>> filesFuture = FutureUtils.combineAll(getOperations); if (blobType == PERMANENT_BLOB) { // wait until all operations have completed and check that no exception was thrown filesFuture.get(); } else { // wait for all futures to complete (do not abort on expected exceptions) and check // that at least one succeeded int completedSuccessfully = 0; for (CompletableFuture<File> op : getOperations) { try { op.get(); ++completedSuccessfully; } catch (Throwable t) { // transient BLOBs get deleted upon first access and only one request will // be successful while all others will have an IOException caused by a // FileNotFoundException if (!(ExceptionUtils.getRootCause(t) instanceof FileNotFoundException)) { // ignore org.apache.flink.util.ExceptionUtils.rethrowIOException(t); } } } // multiple clients may have accessed the BLOB successfully before it was // deleted, but always at least one: assertThat(completedSuccessfully).isGreaterThanOrEqualTo(1); } } finally { executor.shutdownNow(); } }
[FLINK-6020] Tests that concurrent get operations don't concurrently access the BlobStore to download a blob. @param jobId job ID to use (or <tt>null</tt> if job-unrelated) @param blobType whether the BLOB should become permanent or transient @param cacheAccessesHAStore whether the cache has access to the {@link BlobServer}'s HA store or not
testConcurrentGetOperations
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheGetTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheGetTest.java
Apache-2.0
@Override
public void go() throws Exception {
    // Resolve the local storage path for the given key; the surrounding test runs
    // several of these threads concurrently to probe for races in path resolution /
    // directory creation.
    cache.getStorageLocation(jobId, key);
}
Checked thread that calls {@link TransientBlobCache#getStorageLocation(JobID, BlobKey)}.
go
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCachePutTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCachePutTest.java
Apache-2.0
/**
 * Verifies that concurrent calls to {@link PermanentBlobCache#getStorageLocation(JobID,
 * BlobKey)} for the same job and key do not interfere with each other.
 */
@Test
void testPermanentBlobCacheGetStorageLocationConcurrentForJob() throws Exception {
    final JobID jobId = new JobID();

    try (BlobServer server = TestingBlobUtils.createServer(tempDir);
            PermanentBlobCache cache = TestingBlobUtils.createPermanentCache(tempDir, server)) {
        server.start();

        final BlobKey key = new PermanentBlobKey();

        // three identical racers all resolving the same storage location
        final CheckedThread[] racers = new CheckedThread[3];
        for (int i = 0; i < racers.length; i++) {
            racers[i] = new PermanentBlobCacheGetStorageLocation(cache, jobId, key);
        }
        checkedThreadSimpleTest(racers);
    }
}
Tests concurrent calls to {@link PermanentBlobCache#getStorageLocation(JobID, BlobKey)}.
testPermanentBlobCacheGetStorageLocationConcurrentForJob
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCachePutTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCachePutTest.java
Apache-2.0
/**
 * Waits (at most 30 seconds) for the given BLOBs to disappear from the {@link BlobServer},
 * then verifies that each one is indeed gone.
 *
 * @param server BLOB server to poll
 * @param jobId job ID or {@code null} if job-unrelated
 * @param keys key(s) identifying the BLOBs expected to be deleted
 */
static void verifyDeletedEventually(BlobServer server, @Nullable JobID jobId, BlobKey... keys)
        throws IOException, InterruptedException {

    final long deadline = System.currentTimeMillis() + 30_000L;
    while (true) {
        // poll every 10ms until all files are gone or the deadline passes
        Thread.sleep(10);
        final boolean allGone =
                checkFilesExist(jobId, Arrays.asList(keys), server, false) == 0;
        if (allGone || System.currentTimeMillis() >= deadline) {
            break;
        }
    }

    for (BlobKey key : keys) {
        verifyDeleted(server, jobId, key);
    }
}
Checks that the given blob will be deleted at the {@link BlobServer} eventually (waits at most 30s). @param server BLOB server @param jobId job ID or <tt>null</tt> if job-unrelated @param keys key(s) identifying the BLOB to request
verifyDeletedEventually
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCachePutTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCachePutTest.java
Apache-2.0
/**
 * A test where the connection fails twice and then the get operation succeeds
 * (job-unrelated, transient blob).
 */
@Test
void testBlobFetchRetries() throws IOException {
    testBlobFetchRetries(new VoidBlobStore(), null, TRANSIENT_BLOB);
}
A test where the connection fails twice and then the get operation succeeds (job-unrelated blob).
testBlobFetchRetries
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
Apache-2.0
/**
 * A test where the connection fails twice and then the get operation succeeds
 * (job-related, transient blob).
 */
@Test
void testBlobForJobFetchRetries() throws IOException {
    testBlobFetchRetries(new VoidBlobStore(), new JobID(), TRANSIENT_BLOB);
}
A test where the connection fails twice and then the get operation succeeds (job-related blob).
testBlobForJobFetchRetries
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
Apache-2.0
/**
 * A test where the connection fails twice and then the get operation succeeds (with high
 * availability set, job-related blob).
 */
@Test
void testBlobFetchRetriesHa() throws IOException {
    final Configuration config = new Configuration();
    config.set(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
    config.set(
            HighAvailabilityOptions.HA_STORAGE_PATH,
            TempDirUtils.newFolder(tempDir).getPath());

    BlobStoreService blobStoreService = null;
    try {
        blobStoreService = BlobUtils.createBlobStoreFromConfig(config);

        // permanent BLOBs are the ones that go through the HA store
        testBlobFetchRetries(blobStoreService, new JobID(), PERMANENT_BLOB);
    } finally {
        // always remove the HA store's data, even if the test failed
        if (blobStoreService != null) {
            blobStoreService.cleanupAllData();
            blobStoreService.close();
        }
    }
}
A test where the connection fails twice and then the get operation succeeds (with high availability set, job-related job).
testBlobFetchRetriesHa
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
Apache-2.0
/**
 * A test where the BlobCache must use the BlobServer and the connection fails twice and then
 * the get operation succeeds.
 *
 * @param blobStore BLOB store backing the server
 * @param jobId job ID or {@code null} if job-unrelated
 * @param blobType whether the BLOB should become permanent or transient
 */
private void testBlobFetchRetries(
        final BlobStore blobStore, @Nullable final JobID jobId, BlobKey.BlobType blobType)
        throws IOException {
    final byte[] data = new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 0};

    // server/cache pair configured to fail early requests
    // (see TestingBlobUtils.createFailingServerAndCache(..., 1, 2))
    Tuple2<BlobServer, BlobCacheService> serverAndCache =
            TestingBlobUtils.createFailingServerAndCache(tempDir, blobStore, 1, 2);

    try (BlobServer server = serverAndCache.f0;
            BlobCacheService cache = serverAndCache.f1) {
        server.start();

        // upload some blob
        final BlobKey key = put(server, jobId, data, blobType);

        // trigger a download - it should fail the first two times, but retry, and succeed
        // eventually
        verifyContents(cache, jobId, key, data);
    }
}
A test where the BlobCache must use the BlobServer and the connection fails twice and then the get operation succeeds. @param blobType whether the BLOB should become permanent or transient
testBlobFetchRetries
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
Apache-2.0
/**
 * A test where the connection fails too often and eventually fails the GET request
 * (job-unrelated, transient blob).
 */
@Test
void testBlobNoJobFetchWithTooManyFailures() throws IOException {
    testBlobFetchWithTooManyFailures(new VoidBlobStore(), null, TRANSIENT_BLOB);
}
A test where the connection fails too often and eventually fails the GET request (job-unrelated blob).
testBlobNoJobFetchWithTooManyFailures
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
Apache-2.0
/**
 * A test where the connection fails too often and eventually fails the GET request
 * (job-related, transient blob).
 */
@Test
void testBlobForJobFetchWithTooManyFailures() throws IOException {
    testBlobFetchWithTooManyFailures(new VoidBlobStore(), new JobID(), TRANSIENT_BLOB);
}
A test where the connection fails too often and eventually fails the GET request (job-related blob).
testBlobForJobFetchWithTooManyFailures
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
Apache-2.0
/**
 * A test where the connection fails too often and eventually fails the GET request (with high
 * availability set, job-related blob).
 */
@Test
void testBlobForJobFetchWithTooManyFailuresHa() throws IOException {
    final Configuration config = new Configuration();
    config.set(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
    config.set(HighAvailabilityOptions.HA_STORAGE_PATH, tempDir.toString());

    BlobStoreService blobStoreService = null;
    try {
        blobStoreService = BlobUtils.createBlobStoreFromConfig(config);

        testBlobFetchWithTooManyFailures(blobStoreService, new JobID(), PERMANENT_BLOB);
    } finally {
        // always remove the HA store's data, even if the test failed
        if (blobStoreService != null) {
            blobStoreService.cleanupAllData();
            blobStoreService.close();
        }
    }
}
A test where the connection fails too often and eventually fails the GET request (with high availability set, job-related blob).
testBlobForJobFetchWithTooManyFailuresHa
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
Apache-2.0
private void testBlobFetchWithTooManyFailures( final BlobStore blobStore, @Nullable final JobID jobId, BlobKey.BlobType blobType) throws IOException { final byte[] data = new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}; Tuple2<BlobServer, BlobCacheService> serverAndCache = TestingBlobUtils.createFailingServerAndCache(tempDir, blobStore, 0, 10); try (BlobServer server = serverAndCache.f0; BlobCacheService cache = serverAndCache.f1) { server.start(); // upload some blob final BlobKey key = put(server, jobId, data, blobType); // trigger a download - it should fail eventually assertThatThrownBy(() -> verifyContents(cache, jobId, key, data)) .isInstanceOf(IOException.class); } }
A test where the BlobCache must use the BlobServer and the connection fails too often which eventually fails the GET request. @param blobType whether the BLOB should become permanent or transient
testBlobFetchWithTooManyFailures
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheRetriesTest.java
Apache-2.0
/** Untracking a BLOB that was never tracked must be a silent no-op. */
@Test
void testUntrackNonExistingBlob() {
    final Tuple2<JobID, BlobKey> unknownEntry =
            Tuple2.of(jobId, BlobKey.createKey(BlobType.PERMANENT_BLOB));
    tracker.untrack(unknownEntry);

    // the previously tracked entry must be untouched
    assertThat(tracker.getBlobKeysByJobId(jobId)).hasSize(1);
}
Untracking a non-existing BLOB shouldn't change anything or throw any exceptions.
testUntrackNonExistingBlob
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSizeTrackerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSizeTrackerTest.java
Apache-2.0
/** Untracking all BLOBs of a job that was never tracked must be a silent no-op. */
@Test
void testUntrackAllWithNonExistingJob() {
    tracker.track(jobId, BlobKey.createKey(BlobType.PERMANENT_BLOB), 1L);
    assertThat(tracker.getBlobKeysByJobId(jobId)).hasSize(2);

    final JobID unknownJob = new JobID();
    tracker.untrackAll(unknownJob);

    // entries of the tracked job must be untouched
    assertThat(tracker.getBlobKeysByJobId(jobId)).hasSize(2);
}
Untracking all BLOBs for a non-existing job shouldn't change anything or throw any exceptions.
testUntrackAllWithNonExistingJob
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSizeTrackerTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSizeTrackerTest.java
Apache-2.0
/**
 * BlobCache with no HA, job-unrelated BLOBs; BLOBs must be downloaded from a working
 * BlobServer.
 */
@Test
void testBlobNoJobCache() throws IOException {
    final Configuration serverConfig = new Configuration();
    serverConfig.set(BlobServerOptions.STORAGE_DIRECTORY, tempDir.toString());

    uploadFileGetTest(serverConfig, null, false, false, TRANSIENT_BLOB);
}
BlobCache with no HA, job-unrelated BLOBs. BLOBs need to be downloaded from a working BlobServer.
testBlobNoJobCache
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSuccessTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSuccessTest.java
Apache-2.0
/**
 * BlobCache with no HA, job-related BLOBs; BLOBs must be downloaded from a working
 * BlobServer.
 */
@Test
void testBlobForJobCache() throws IOException {
    final Configuration serverConfig = new Configuration();
    serverConfig.set(BlobServerOptions.STORAGE_DIRECTORY, tempDir.toString());

    uploadFileGetTest(serverConfig, new JobID(), false, false, TRANSIENT_BLOB);
}
BlobCache with no HA, job-related BLOBs. BLOBs need to be downloaded from a working BlobServer.
testBlobForJobCache
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSuccessTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSuccessTest.java
Apache-2.0
/**
 * BlobCache is configured in HA mode and the cache can download files from the file system
 * directly and does not need to download BLOBs from the BlobServer, which remains active after
 * the BLOB upload. Uses job-related BLOBs.
 */
@Test
void testBlobForJobCacheHa() throws IOException {
    Configuration config = new Configuration();
    config.set(
            BlobServerOptions.STORAGE_DIRECTORY,
            TempDirUtils.newFolder(tempDir).getAbsolutePath());
    config.set(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
    config.set(
            HighAvailabilityOptions.HA_STORAGE_PATH, TempDirUtils.newFolder(tempDir).getPath());

    // shutdownServerAfterUpload=true, cacheHasAccessToFs=true
    uploadFileGetTest(config, new JobID(), true, true, PERMANENT_BLOB);
}
BlobCache is configured in HA mode and the cache can download files from the file system directly and does not need to download BLOBs from the BlobServer which remains active after the BLOB upload. Using job-related BLOBs.
testBlobForJobCacheHa
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSuccessTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSuccessTest.java
Apache-2.0
/**
 * BlobCache is configured in HA mode and the cache can download files from the file system
 * directly and does not need to download BLOBs from the BlobServer, which is shut down after
 * the BLOB upload. Uses job-related BLOBs.
 */
@Test
void testBlobForJobCacheHa2() throws IOException {
    Configuration config = new Configuration();
    config.set(
            BlobServerOptions.STORAGE_DIRECTORY,
            TempDirUtils.newFolder(tempDir).getAbsolutePath());
    config.set(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
    config.set(
            HighAvailabilityOptions.HA_STORAGE_PATH, TempDirUtils.newFolder(tempDir).getPath());

    // shutdownServerAfterUpload=false, cacheHasAccessToFs=true
    uploadFileGetTest(config, new JobID(), false, true, PERMANENT_BLOB);
}
BlobCache is configured in HA mode and the cache can download files from the file system directly and does not need to download BLOBs from the BlobServer which is shut down after the BLOB upload. Using job-related BLOBs.
testBlobForJobCacheHa2
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSuccessTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSuccessTest.java
Apache-2.0
/**
 * BlobCache is configured in HA mode but the cache itself cannot access the file system and
 * thus needs to fall back to downloading BLOBs from the BlobServer. Uses job-related BLOBs.
 */
@Test
void testBlobForJobCacheHaFallback() throws IOException {
    Configuration config = new Configuration();
    config.set(
            BlobServerOptions.STORAGE_DIRECTORY,
            TempDirUtils.newFolder(tempDir).getAbsolutePath());
    config.set(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
    config.set(
            HighAvailabilityOptions.HA_STORAGE_PATH, TempDirUtils.newFolder(tempDir).getPath());

    // shutdownServerAfterUpload=false, cacheHasAccessToFs=false → forces server fallback
    uploadFileGetTest(config, new JobID(), false, false, PERMANENT_BLOB);
}
BlobCache is configured in HA mode but the cache itself cannot access the file system and thus needs to download BLOBs from the BlobServer. Using job-related BLOBs.
testBlobForJobCacheHaFallback
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSuccessTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSuccessTest.java
Apache-2.0
/**
 * Uploads two different BLOBs to the {@link BlobServer} via a {@link BlobClient} and verifies
 * that we can access the files from a {@link BlobCacheService}.
 *
 * @param config configuration to use for the server and cache (the final cache's configuration
 *     will actually get some modifications)
 * @param jobId job ID or {@code null} if job-unrelated
 * @param shutdownServerAfterUpload whether the server should be shut down after uploading the
 *     BLOBs (only useful with HA mode) - this implies that the cache has access to the shared
 *     <tt>HA_STORAGE_PATH</tt>
 * @param cacheHasAccessToFs whether the cache should have access to a shared
 *     <tt>HA_STORAGE_PATH</tt> (only useful with HA mode)
 * @param blobType whether the BLOB should become permanent or transient
 */
private void uploadFileGetTest(
        final Configuration config,
        @Nullable JobID jobId,
        boolean shutdownServerAfterUpload,
        boolean cacheHasAccessToFs,
        BlobKey.BlobType blobType)
        throws IOException {
    final Configuration cacheConfig = new Configuration(config);
    cacheConfig.set(
            BlobServerOptions.STORAGE_DIRECTORY,
            TempDirUtils.newFolder(tempDir).getAbsolutePath());
    if (!cacheHasAccessToFs) {
        // make sure the cache cannot access the HA store directly
        cacheConfig.set(
                HighAvailabilityOptions.HA_STORAGE_PATH,
                TempDirUtils.newFolder(tempDir).getPath() + "/does-not-exist");
    }

    // First create two BLOBs and upload them to BLOB server
    final byte[] data = new byte[128];
    byte[] data2 = Arrays.copyOf(data, data.length);
    data2[0] ^= 1; // data2 differs from data only in its first byte

    BlobStoreService blobStoreService = null;

    try {
        blobStoreService = BlobUtils.createBlobStoreFromConfig(cacheConfig);

        Tuple2<BlobServer, BlobCacheService> serverAndCache =
                TestingBlobUtils.createServerAndCache(
                        tempDir, config, cacheConfig, blobStoreService, blobStoreService);

        try (BlobServer server = serverAndCache.f0;
                BlobCacheService cache = serverAndCache.f1) {
            server.start();

            // Upload BLOBs
            BlobKey key1 = put(server, jobId, data, blobType);
            BlobKey key2 = put(server, jobId, data2, blobType);

            if (shutdownServerAfterUpload) {
                // Now, shut down the BLOB server, the BLOBs must still be accessible through
                // the cache.
                server.close();
            }

            verifyContents(cache, jobId, key1, data);
            verifyContents(cache, jobId, key2, data2);

            if (shutdownServerAfterUpload) {
                // NOTE(review): in this branch server.close() was already called above, so
                // this second close is redundant (closing twice appears intended to be a
                // no-op) and the contents are simply verified once more through the cache.
                server.close();

                verifyContents(cache, jobId, key1, data);
                verifyContents(cache, jobId, key2, data2);
            }
        }
    } finally {
        // always remove the HA store's data, even if the test failed
        if (blobStoreService != null) {
            blobStoreService.cleanupAllData();
            blobStoreService.close();
        }
    }
}
Uploads two different BLOBs to the {@link BlobServer} via a {@link BlobClient} and verifies we can access the files from a {@link BlobCacheService}. @param config configuration to use for the server and cache (the final cache's configuration will actually get some modifications) @param shutdownServerAfterUpload whether the server should be shut down after uploading the BLOBs (only useful with HA mode) - this implies that the cache has access to the shared <tt>HA_STORAGE_PATH</tt> @param cacheHasAccessToFs whether the cache should have access to a shared <tt>HA_STORAGE_PATH</tt> (only useful with HA mode) @param blobType whether the BLOB should become permanent or transient
uploadFileGetTest
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSuccessTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobCacheSuccessTest.java
Apache-2.0
/** Verify ssl client to ssl server upload. */
@Test
public void testUploadJarFilesHelper() throws Exception {
    uploadJarFile(blobSslServer, sslClientConfig);
}
Verify ssl client to ssl server upload.
testUploadJarFilesHelper
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobClientSslTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobClientSslTest.java
Apache-2.0
/**
 * Builds a buffer of {@code TEST_BUFFER_SIZE} bytes filled with the repeating byte pattern
 * 0, 1, ..., 127, 0, 1, ...
 *
 * @return a freshly allocated test buffer with the deterministic byte pattern
 */
private static byte[] createTestBuffer() {
    final byte[] pattern = new byte[TEST_BUFFER_SIZE];
    for (int pos = 0; pos < pattern.length; pos++) {
        pattern[pos] = (byte) (pos % 128);
    }
    return pattern;
}
Creates a test buffer and fills it with a specific byte pattern. @return a test buffer filled with a specific byte pattern
createTestBuffer
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobClientTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobClientTest.java
Apache-2.0
/**
 * Prepares a test file for the unit tests: fills it with 20 repetitions of the deterministic
 * 0..127 byte pattern and computes the digest of the written content (the basis of the file's
 * BLOB key).
 *
 * @param file the file to prepare for the unit tests
 * @return the message digest over the file's contents
 * @throws IOException thrown if an I/O error occurs while writing to the test file
 */
private static byte[] prepareTestFile(File file) throws IOException {
    MessageDigest md = BlobUtils.createMessageDigest();

    final byte[] buf = new byte[TEST_BUFFER_SIZE];
    for (int i = 0; i < buf.length; ++i) {
        buf[i] = (byte) (i % 128);
    }

    // try-with-resources guarantees the stream is closed even if a write fails,
    // replacing the manual null-checked close in a finally block
    try (FileOutputStream fos = new FileOutputStream(file)) {
        for (int i = 0; i < 20; ++i) {
            fos.write(buf);
            md.update(buf);
        }
    }

    return md.digest();
}
Prepares a test file for the unit tests, i.e. the methods fills the file with a particular byte patterns and computes the file's BLOB key. @param file the file to prepare for the unit tests @return the BLOB key of the prepared file @throws IOException thrown if an I/O error occurs while writing to the test file
prepareTestFile
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobClientTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobClientTest.java
Apache-2.0
/**
 * Validates the result of a GET operation by comparing the data from the retrieved input
 * stream to the content of the given buffer; the stream must contain exactly the expected
 * bytes and nothing more.
 *
 * @param actualInputStream the input stream returned from the GET operation (closed by this
 *     method)
 * @param expectedBuf the buffer to compare the input stream's data to
 * @throws IOException thrown if an I/O error occurs while reading the input stream
 */
static void validateGetAndClose(final InputStream actualInputStream, final byte[] expectedBuf)
        throws IOException {
    try {
        final byte[] received = new byte[expectedBuf.length];
        int filled = 0;
        while (filled < received.length) {
            final int count =
                    actualInputStream.read(received, filled, received.length - filled);
            if (count < 0) {
                // stream ended before the expected number of bytes arrived
                throw new EOFException();
            }
            filled += count;
        }
        // the stream must be exhausted and the payload must match exactly
        assertThat(actualInputStream.read()).isEqualTo(-1);
        assertThat(received).isEqualTo(expectedBuf);
    } finally {
        actualInputStream.close();
    }
}
Validates the result of a GET operation by comparing the data from the retrieved input stream to the content of the specified buffer. @param actualInputStream the input stream returned from the GET operation (will be closed by this method) @param expectedBuf the buffer to compare the input stream's data to @throws IOException thrown if an I/O error occurs while reading the input stream
validateGetAndClose
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobClientTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobClientTest.java
Apache-2.0
/**
 * Validates the result of a GET operation by comparing the retrieved input stream byte-by-byte
 * to the expected input stream; both streams are closed by this method.
 *
 * @param actualInputStream the input stream returned from the GET operation (closed by this
 *     method)
 * @param expectedInputStream the input stream to compare the data against (closed by this
 *     method)
 * @throws IOException thrown if an I/O error occurs while reading any input stream
 */
static void validateGetAndClose(InputStream actualInputStream, InputStream expectedInputStream)
        throws IOException {
    try {
        int actual;
        int expected;
        do {
            actual = actualInputStream.read();
            expected = expectedInputStream.read();
            // compares data and, via the -1 sentinel, stream lengths
            assertThat(actual).isEqualTo(expected);
        } while (actual >= 0);
    } finally {
        actualInputStream.close();
        expectedInputStream.close();
    }
}
Validates the result of a GET operation by comparing the data from the retrieved input stream to the content of the expected input stream. @param actualInputStream the input stream returned from the GET operation (will be closed by this method) @param expectedInputStream the input stream to compare the input stream's data to @throws IOException thrown if an I/O error occurs while reading any input stream
validateGetAndClose
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobClientTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobClientTest.java
Apache-2.0
/**
 * Checks the correct result if a GET operation fails while the file is being downloaded.
 *
 * @param jobId job ID or <tt>null</tt> if job-unrelated
 * @param blobType whether the BLOB should become permanent or transient
 */
private void testGetFailsDuringStreaming(@Nullable final JobID jobId, BlobKey.BlobType blobType)
        throws IOException {
    assumeThat(isSSLEnabled())
            .as("This test can deadlock when using SSL. See FLINK-19369.")
            .isFalse();

    try (BlobClient client =
            new BlobClient(
                    new InetSocketAddress("localhost", getBlobServer().getPort()),
                    getBlobClientConfig())) {

        byte[] data = new byte[5000000];
        Random rnd = new Random();
        rnd.nextBytes(data);

        // put content addressable (like libraries)
        BlobKey key = client.putBuffer(jobId, data, 0, data.length, blobType);
        assertThat(key).isNotNull();

        // issue a GET request that succeeds
        InputStream is = client.getInternal(jobId, key);

        // read only the first two chunks, leaving the rest of the download pending
        byte[] receiveBuffer = new byte[data.length];
        int firstChunkLen = 50000;
        BlobUtils.readFully(is, receiveBuffer, 0, firstChunkLen, null);
        BlobUtils.readFully(is, receiveBuffer, firstChunkLen, firstChunkLen, null);

        // shut down the server's active connections mid-download
        for (BlobServerConnection conn : getBlobServer().getCurrentActiveConnections()) {
            conn.close();
        }

        try {
            BlobUtils.readFully(
                    is, receiveBuffer, 2 * firstChunkLen, data.length - 2 * firstChunkLen, null);

            // we tolerate that this succeeds, as the receiver socket may have buffered
            // everything already, but in this case, also verify the contents
            assertThat(receiveBuffer).isEqualTo(data);
        } catch (IOException e) {
            // expected: the broken connection interrupts the remaining read
        }
    }
}
Checks the correct result if a GET operation fails during the file download. @param jobId job ID or <tt>null</tt> if job-unrelated @param blobType whether the BLOB should become permanent or transient
testGetFailsDuringStreaming
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobClientTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobClientTest.java
Apache-2.0
/**
 * Tests the static {@link BlobClient#uploadFiles(InetSocketAddress, Configuration, JobID,
 * List)} helper.
 */
@Test
void testUploadJarFilesHelper() throws Exception {
    uploadJarFile(getBlobServer(), getBlobClientConfig());
}
Tests the static {@link BlobClient#uploadFiles(InetSocketAddress, Configuration, JobID, List)} helper.
testUploadJarFilesHelper
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobClientTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobClientTest.java
Apache-2.0
private void testEquals(BlobKey.BlobType blobType) { final BlobKey k1 = BlobKey.createKey(blobType, KEY_ARRAY_1, RANDOM_ARRAY_1); final BlobKey k2 = BlobKey.createKey(blobType, KEY_ARRAY_1, RANDOM_ARRAY_1); final BlobKey k3 = BlobKey.createKey(blobType, KEY_ARRAY_2, RANDOM_ARRAY_1); final BlobKey k4 = BlobKey.createKey(blobType, KEY_ARRAY_1, RANDOM_ARRAY_2); assertThat(k1).isEqualTo(k2); assertThat(k2).isEqualTo(k1); assertThat(k2).hasSameHashCodeAs(k1); assertThat(k1).isNotEqualTo(k3); assertThat(k3).isNotEqualTo(k1); assertThat(k1).isNotEqualTo(k4); assertThat(k4).isNotEqualTo(k1); assertThat(k1).isNotEqualTo(null); //noinspection AssertBetweenInconvertibleTypes assertThat(k1).isNotEqualTo(this); }
Tests the {@link BlobKey#equals(Object)} and {@link BlobKey#hashCode()} methods.
testEquals
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobKeyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobKeyTest.java
Apache-2.0
/** Round-trips a key of the given type through output/input streams and checks equality. */
private void testStreams(BlobKey.BlobType blobType) throws IOException {
    final BlobKey original = BlobKey.createKey(blobType, KEY_ARRAY_1, RANDOM_ARRAY_1);

    final ByteArrayOutputStream sink = new ByteArrayOutputStream(20);
    original.writeToOutputStream(sink);
    sink.close();

    final BlobKey roundTripped =
            BlobKey.readFromInputStream(new ByteArrayInputStream(sink.toByteArray()));
    assertThat(roundTripped).isEqualTo(original);
}
Test the serialization/deserialization using input/output streams.
testStreams
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobKeyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobKeyTest.java
Apache-2.0
/**
 * Verifies that the two given keys are different in total but share the same hash.
 *
 * @param key1 first blob key
 * @param key2 second blob key
 */
static void verifyKeyDifferentHashEquals(BlobKey key1, BlobKey key2) {
    assertThat(key1).isNotEqualTo(key2);
    assertThat(key1.getHash()).isEqualTo(key2.getHash());
}
Verifies that the two given key's are different in total but share the same hash. @param key1 first blob key @param key2 second blob key
verifyKeyDifferentHashEquals
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobKeyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobKeyTest.java
Apache-2.0
/**
 * Verifies that the two given keys are different in total and also have different hashes.
 *
 * @param key1 first blob key
 * @param key2 second blob key
 */
static void verifyKeyDifferentHashDifferent(BlobKey key1, BlobKey key2) {
    assertThat(key1).isNotEqualTo(key2);
    assertThat(key1.getHash()).isNotEqualTo(key2.getHash());
}
Verifies that the two given key's are different in total and also have different hashes. @param key1 first blob key @param key2 second blob key
verifyKeyDifferentHashDifferent
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobKeyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobKeyTest.java
Apache-2.0
/**
 * Verifies that the given <tt>key</tt> is of the expected type.
 *
 * @param expected the type the key should have
 * @param key the key to verify
 */
static void verifyType(BlobKey.BlobType expected, BlobKey key) {
    final Class<?> expectedClass =
            expected == PERMANENT_BLOB ? PermanentBlobKey.class : TransientBlobKey.class;
    assertThat(key).isInstanceOf(expectedClass);
}
Verifies that the given <tt>key</tt> is of an expected type. @param expected the type the key should have @param key the key to verify
verifyType
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobKeyTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobKeyTest.java
Apache-2.0
/**
 * Checks that the GET operation fails when the downloaded file (from {@link BlobServer} or HA
 * store) is corrupt, i.e. its content's hash does not match the {@link BlobKey}'s hash.
 */
@Test
void testGetFailsFromCorruptFile() throws IOException {
    final Configuration config = new Configuration();
    config.set(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
    config.set(
            BlobServerOptions.STORAGE_DIRECTORY,
            TempDirUtils.newFolder(tempDir).getAbsolutePath());
    config.set(
            HighAvailabilityOptions.HA_STORAGE_PATH, TempDirUtils.newFolder(tempDir).getPath());

    BlobStoreService blobStoreService = null;

    try {
        blobStoreService = BlobUtils.createBlobStoreFromConfig(config);

        // the corruption scenario itself is implemented in the shared helper
        TestingBlobHelpers.testGetFailsFromCorruptFile(
                config, blobStoreService, TempDirUtils.newFolder(tempDir));
    } finally {
        // always remove the HA store's data, even if the test failed
        if (blobStoreService != null) {
            blobStoreService.cleanupAllData();
            blobStoreService.close();
        }
    }
}
Checks the GET operation fails when the downloaded file (from {@link BlobServer} or HA store) is corrupt, i.e. its content's hash does not match the {@link BlobKey}'s hash.
testGetFailsFromCorruptFile
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobServerCorruptionTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobServerCorruptionTest.java
Apache-2.0
/**
 * Retrieves a BLOB from the HA store to a {@link BlobServer} that cannot create incoming
 * files; the file transfer must fail and no partial files may be left behind.
 */
@Tag("org.apache.flink.testutils.junit.FailsInGHAContainerWithRootUser")
@Test
void testGetFailsIncomingForJobHa() throws IOException {
    assumeThat(OperatingSystem.isWindows()).as("setWritable doesn't work on Windows").isFalse();

    final JobID jobId = new JobID();

    final Configuration config = new Configuration();
    config.set(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
    config.set(
            HighAvailabilityOptions.HA_STORAGE_PATH, TempDirUtils.newFolder(tempDir).getPath());

    BlobStoreService blobStore = null;

    try {
        blobStore = BlobUtils.createBlobStoreFromConfig(config);

        File tempFileDir = null;
        try (BlobServer server = TestingBlobUtils.createServer(tempDir, config, blobStore)) {
            server.start();

            // store the data on the server (and blobStore), remove from local store
            byte[] data = new byte[2000000];
            rnd.nextBytes(data);
            BlobKey blobKey = put(server, jobId, data, PERMANENT_BLOB);
            assertThat(server.getStorageLocation(jobId, blobKey).delete()).isTrue();

            // make sure the blob server cannot create any files in its storage dir
            tempFileDir = server.createTemporaryFilename().getParentFile();
            assertThat(tempFileDir.setExecutable(true, false)).isTrue();
            assertThat(tempFileDir.setReadable(true, false)).isTrue();
            assertThat(tempFileDir.setWritable(false, false)).isTrue();

            // request the file from the BlobStore
            try {
                assertThatThrownBy(() -> get(server, jobId, blobKey))
                        .satisfies(
                                FlinkAssertions.anyCauseMatches(
                                        IOException.class, "Permission denied"));
            } finally {
                HashSet<String> expectedDirs = new HashSet<>();
                expectedDirs.add("incoming");
                expectedDirs.add(JOB_DIR_PREFIX + jobId);
                // only the "incoming" directory and the job directory should exist; the
                // job directory itself must remain empty (checked below)
                File storageDir = tempFileDir.getParentFile();
                String[] actualDirs = storageDir.list();
                assertThat(actualDirs).isNotNull();
                assertThat(actualDirs).isNotEmpty();
                assertThat(new HashSet<>(Arrays.asList(actualDirs))).isEqualTo(expectedDirs);

                // job directory should be empty
                File jobDir = new File(tempFileDir.getParentFile(), JOB_DIR_PREFIX + jobId);
                assertThat(jobDir.list()).isEmpty();
            }
        } finally {
            // set writable again to make sure we can remove the directory
            if (tempFileDir != null) {
                //noinspection ResultOfMethodCallIgnored
                tempFileDir.setWritable(true, false);
            }
        }
    } finally {
        // always remove the HA store's data, even if the test failed
        if (blobStore != null) {
            blobStore.cleanupAllData();
            blobStore.close();
        }
    }
}
Retrieves a BLOB from the HA store to a {@link BlobServer} which cannot create incoming files. File transfers should fail.
testGetFailsIncomingForJobHa
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobServerGetTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobServerGetTest.java
Apache-2.0
/**
 * Retrieves a BLOB from the HA store to a {@link BlobServer} whose job-specific storage
 * directory has been made non-writable, so the downloaded file cannot be moved to its final
 * location.
 *
 * <p>The transfer must fail with an {@link AccessDeniedException}, and neither the "incoming"
 * directory nor the job directory may contain leftover files.
 */
@Tag("org.apache.flink.testutils.junit.FailsInGHAContainerWithRootUser")
@Test
void testGetFailsStoreForJobHa() throws IOException {
    // File#setWritable has no effect on Windows, so the permission-based setup below cannot
    // work there
    assumeThat(OperatingSystem.isWindows()).as("setWritable doesn't work on Windows").isFalse();

    final JobID jobId = new JobID();

    // configure ZooKeeper HA with a file-system-backed HA storage path
    final Configuration config = new Configuration();
    config.set(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
    config.set(
            HighAvailabilityOptions.HA_STORAGE_PATH,
            TempDirUtils.newFolder(tempDir).getPath());

    BlobStoreService blobStore = null;

    try {
        blobStore = BlobUtils.createBlobStoreFromConfig(config);
        File jobStoreDir = null;
        try (BlobServer server = TestingBlobUtils.createServer(tempDir, config, blobStore)) {
            server.start();

            // store the data on the server (and blobStore), remove from local store
            byte[] data = new byte[2000000];
            rnd.nextBytes(data);
            BlobKey blobKey = put(server, jobId, data, PERMANENT_BLOB);
            // deleting the local copy forces the subsequent get() to download from the HA store
            assertThat(server.getStorageLocation(jobId, blobKey).delete()).isTrue();

            // make sure the blob server cannot create any files in the job's storage dir
            jobStoreDir = server.getStorageLocation(jobId, blobKey).getParentFile();
            assertThat(jobStoreDir.setExecutable(true, false)).isTrue();
            assertThat(jobStoreDir.setReadable(true, false)).isTrue();
            assertThat(jobStoreDir.setWritable(false, false)).isTrue();

            // request the file from the BlobStore; moving it into the job dir must fail
            try {
                assertThatThrownBy(() -> get(server, jobId, blobKey))
                        .isInstanceOf(AccessDeniedException.class);
            } finally {
                // there should be no remaining incoming files
                File incomingFileDir = new File(jobStoreDir.getParent(), "incoming");
                assertThat(incomingFileDir.list()).isEmpty();

                // there should be no files in the job directory
                assertThat(jobStoreDir.list()).isEmpty();
            }
        } finally {
            // set writable again to make sure we can remove the directory
            if (jobStoreDir != null) {
                //noinspection ResultOfMethodCallIgnored
                jobStoreDir.setWritable(true, false);
            }
        }
    } finally {
        if (blobStore != null) {
            blobStore.cleanupAllData();
            blobStore.close();
        }
    }
}
Retrieves a BLOB from the HA store to a {@link BlobServer} which cannot create the final storage file. File transfers should fail.
testGetFailsStoreForJobHa
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobServerGetTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobServerGetTest.java
Apache-2.0
/**
 * [FLINK-6020] Tests that concurrent get operations don't concurrently access the BlobStore to
 * download a blob.
 *
 * <p>Uses a testing {@link BlobStore} whose get function serves a fixed byte array, then issues
 * several concurrent {@code get} calls against the server and verifies that each of them reads
 * the correct data.
 *
 * @param jobId job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType whether the BLOB should become permanent or transient
 */
private void testConcurrentGetOperations(
        @Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final byte[] data = {1, 2, 3, 4, 99, 42};

    // HA-store stand-in that always writes the expected payload into the requested file
    final BlobStore blobStore =
            new TestingBlobStoreBuilder()
                    .setGetFunction(
                            (jobID, blobKey, file) -> {
                                FileUtils.writeByteArrayToFile(file, data);
                                return true;
                            })
                    .createTestingBlobStore();

    final int numberConcurrentGetOperations = 3;
    final List<CompletableFuture<File>> getOperations =
            new ArrayList<>(numberConcurrentGetOperations);

    // one thread per get operation so the requests genuinely overlap
    final ExecutorService executor = Executors.newFixedThreadPool(numberConcurrentGetOperations);

    try (final BlobServer server =
            TestingBlobUtils.createServer(tempDir, new Configuration(), blobStore)) {

        server.start();

        // upload data first
        final BlobKey blobKey = put(server, jobId, data, blobType);

        // now try accessing it concurrently (only HA mode will be able to retrieve it from HA
        // store!)
        if (blobType == PERMANENT_BLOB) {
            // remove local copy so that a transfer from HA store takes place
            assertThat(server.getStorageLocation(jobId, blobKey).delete()).isTrue();
        }
        for (int i = 0; i < numberConcurrentGetOperations; i++) {
            CompletableFuture<File> getOperation =
                    CompletableFuture.supplyAsync(
                            () -> {
                                try {
                                    File file = get(server, jobId, blobKey);
                                    // check that we have read the right data
                                    validateGetAndClose(
                                            Files.newInputStream(file.toPath()), data);
                                    return file;
                                } catch (IOException e) {
                                    // CompletableFuture requires unchecked exceptions
                                    throw new CompletionException(
                                            new FlinkException(
                                                    "Could not read blob for key "
                                                            + blobKey
                                                            + '.',
                                                    e));
                                }
                            },
                            executor);

            getOperations.add(getOperation);
        }

        // wait for all gets; any failure propagates as an ExecutionException
        CompletableFuture<Collection<File>> filesFuture = FutureUtils.combineAll(getOperations);
        filesFuture.get();
    } finally {
        executor.shutdownNow();
    }
}
[FLINK-6020] Tests that concurrent get operations don't concurrently access the BlobStore to download a blob. @param jobId job ID to use (or <tt>null</tt> if job-unrelated) @param blobType whether the BLOB should become permanent or transient
testConcurrentGetOperations
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobServerGetTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobServerGetTest.java
Apache-2.0
/**
 * Retrieves the given blob, dispatching to the permanent or transient BLOB service depending on
 * the type of the key.
 *
 * <p>Note that if a {@link BlobCacheService} is used, it may try to access the {@link
 * BlobServer} to retrieve the blob.
 *
 * @param service BLOB client to use for connecting to the BLOB service
 * @param jobId job ID or <tt>null</tt> if job-unrelated
 * @param key key identifying the BLOB to request
 */
static File get(BlobService service, @Nullable JobID jobId, BlobKey key) throws IOException {
    // permanent BLOBs are always job-related
    if (key instanceof PermanentBlobKey) {
        return service.getPermanentBlobService().getFile(jobId, (PermanentBlobKey) key);
    }
    // transient BLOBs may or may not be associated with a job
    final TransientBlobKey transientKey = (TransientBlobKey) key;
    return jobId == null
            ? service.getTransientBlobService().getFile(transientKey)
            : service.getTransientBlobService().getFile(jobId, transientKey);
}
Retrieves the given blob. <p>Note that if a {@link BlobCacheService} is used, it may try to access the {@link BlobServer} to retrieve the blob. @param service BLOB client to use for connecting to the BLOB service @param jobId job ID or <tt>null</tt> if job-unrelated @param key key identifying the BLOB to request
get
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobServerGetTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobServerGetTest.java
Apache-2.0
/**
 * Checks that the given blob no longer exists by asserting that an attempt to fetch it fails
 * with an {@link IOException}.
 *
 * <p>Note that if a {@link BlobCacheService} is used, it may try to access the {@link
 * BlobServer} to retrieve the blob.
 *
 * @param service BLOB client to use for connecting to the BLOB service
 * @param jobId job ID or <tt>null</tt> if job-unrelated
 * @param key key identifying the BLOB to request
 */
static void verifyDeleted(BlobService service, @Nullable JobID jobId, BlobKey key) {
    // a successful get() would mean the BLOB still exists somewhere
    assertThatThrownBy(
                    () -> {
                        get(service, jobId, key);
                    })
            .isInstanceOf(IOException.class);
}
Checks that the given blob does not exist anymore by trying to access it. <p>Note that if a {@link BlobCacheService} is used, it may try to access the {@link BlobServer} to retrieve the blob. @param service BLOB client to use for connecting to the BLOB service @param jobId job ID or <tt>null</tt> if job-unrelated @param key key identifying the BLOB to request
verifyDeleted
java
apache/flink
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobServerGetTest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobServerGetTest.java
Apache-2.0