Dataset columns: code (string, 25–201k chars), docstring (string, 19–96.2k chars), func_name (string, 0–235 chars), language (string, 1 class), repo (string, 8–51 chars), path (string, 11–314 chars), url (string, 62–377 chars), license (string, 7 classes). Each record below lists these eight fields in order.
@PublicEvolving public ResourceSpec getPreferredResources() { return transformation.getPreferredResources(); }
Gets the preferred resources for this operator. @return The preferred resources set for this operator.
getPreferredResources
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@SafeVarargs public final DataStream<T> union(DataStream<T>... streams) { List<Transformation<T>> unionedTransforms = new ArrayList<>(); unionedTransforms.add(this.transformation); for (DataStream<T> newStream : streams) { if (!getType().equals(newStream.getType())) { throw new IllegalArgumentException( "Cannot union streams of different types: " + getType() + " and " + newStream.getType()); } unionedTransforms.add(newStream.getTransformation()); } return new DataStream<>(this.environment, new UnionTransformation<>(unionedTransforms)); }
Creates a new {@link DataStream} by merging {@link DataStream} outputs of the same type with each other. The DataStreams merged using this operator will be transformed simultaneously. @param streams The DataStreams to union output with. @return The {@link DataStream}.
union
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
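A minimal usage sketch of union, assuming a StreamExecutionEnvironment named env and the fromData source available in recent Flink versions (stream names and values are illustrative):

DataStream<String> first = env.fromData("a", "b");
DataStream<String> second = env.fromData("c");
// All inputs must have the same type; otherwise union throws IllegalArgumentException.
DataStream<String> merged = first.union(second);
merged.print();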
public <R> ConnectedStreams<T, R> connect(DataStream<R> dataStream) { return new ConnectedStreams<>(environment, this, dataStream); }
Creates a new {@link ConnectedStreams} by connecting {@link DataStream} outputs of (possibly) different types with each other. The DataStreams connected using this operator can be used with CoFunctions to apply joint transformations. @param dataStream The DataStream with which this stream will be connected. @return The {@link ConnectedStreams}.
connect
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
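A hedged sketch of connect followed by a CoMapFunction, which applies a separate map function per input (env, the fromData source, and the element values are illustrative; the usual Flink imports are assumed):

DataStream<Integer> numbers = env.fromData(1, 2, 3);
DataStream<String> words = env.fromData("a", "b");
DataStream<String> out = numbers
        .connect(words)
        .map(new CoMapFunction<Integer, String, String>() {
            @Override
            public String map1(Integer value) { return "number: " + value; } // first input
            @Override
            public String map2(String value) { return "word: " + value; }    // second input
        });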
public <K> KeyedStream<T, K> keyBy(KeySelector<T, K> key) { Preconditions.checkNotNull(key); return new KeyedStream<>(this, clean(key)); }
Creates a new {@link KeyedStream} that uses the provided key for partitioning its operator states. @param key The KeySelector to be used for extracting the key for partitioning @return The {@link DataStream} with partitioned state (i.e. KeyedStream)
keyBy
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
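A minimal sketch of keyBy with a lambda KeySelector, assuming env and the fromData source (values are illustrative):

DataStream<Tuple2<String, Integer>> events =
        env.fromData(Tuple2.of("alice", 1), Tuple2.of("bob", 2));
// Partition the stream, and its keyed state, by the first tuple field.
KeyedStream<Tuple2<String, Integer>, String> byUser = events.keyBy(t -> t.f0);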
public <K> DataStream<T> partitionCustom( Partitioner<K> partitioner, KeySelector<T, K> keySelector) { return setConnectionType( new CustomPartitionerWrapper<>(clean(partitioner), clean(keySelector))); }
Partitions a DataStream on the key returned by the selector, using a custom partitioner. This method takes the key selector to get the key to partition on, and a partitioner that accepts the key type. <p>Note: This method works only on single field keys, i.e. the selector cannot return tuples of fields. @param partitioner The partitioner to assign partitions to keys. @param keySelector The KeySelector with which the DataStream is partitioned. @return The partitioned DataStream. @see KeySelector
partitionCustom
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
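A hedged sketch of partitionCustom with a modulo partitioner over a single-field string key (env and the values are illustrative):

DataStream<Tuple2<String, Integer>> events =
        env.fromData(Tuple2.of("a", 1), Tuple2.of("b", 2));
DataStream<Tuple2<String, Integer>> partitioned = events.partitionCustom(
        // Partitioner<String>: maps the key onto one of numPartitions downstream channels.
        (key, numPartitions) -> Math.abs(key.hashCode() % numPartitions),
        // KeySelector<Tuple2<String, Integer>, String>: must return a single field, not a tuple.
        tuple -> tuple.f0);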
public DataStream<T> broadcast() { return setConnectionType(new BroadcastPartitioner<T>()); }
Sets the partitioning of the {@link DataStream} so that the output elements are broadcast to every parallel instance of the next operation. @return The DataStream with broadcast partitioning set.
broadcast
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@PublicEvolving public DataStream<T> shuffle() { return setConnectionType(new ShufflePartitioner<T>()); }
Sets the partitioning of the {@link DataStream} so that the output elements are shuffled uniformly at random to the next operation. @return The DataStream with shuffle partitioning set.
shuffle
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
public DataStream<T> forward() { return setConnectionType(new ForwardPartitioner<T>()); }
Sets the partitioning of the {@link DataStream} so that the output elements are forwarded to the local subtask of the next operation. @return The DataStream with forward partitioning set.
forward
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
public DataStream<T> rebalance() { return setConnectionType(new RebalancePartitioner<T>()); }
Sets the partitioning of the {@link DataStream} so that the output elements are distributed evenly to instances of the next operation in a round-robin fashion. @return The DataStream with rebalance partitioning set.
rebalance
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@PublicEvolving public DataStream<T> rescale() { return setConnectionType(new RescalePartitioner<T>()); }
Sets the partitioning of the {@link DataStream} so that the output elements are distributed evenly to a subset of instances of the next operation in a round-robin fashion. <p>The subset of downstream operations to which the upstream operation sends elements depends on the degree of parallelism of both the upstream and downstream operation. For example, if the upstream operation has parallelism 2 and the downstream operation has parallelism 4, then one upstream operation would distribute elements to two downstream operations while the other upstream operation would distribute to the other two downstream operations. If, on the other hand, the downstream operation has parallelism 2 while the upstream operation has parallelism 4, then two upstream operations will distribute to one downstream operation while the other two upstream operations will distribute to the other downstream operation. <p>In cases where the different parallelisms are not multiples of each other, one or several downstream operations will have a differing number of inputs from upstream operations. @return The DataStream with rescale partitioning set.
rescale
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@PublicEvolving public DataStream<T> global() { return setConnectionType(new GlobalPartitioner<T>()); }
Sets the partitioning of the {@link DataStream} so that the output values all go to the first instance of the next processing operator. Use this setting with care since it might cause a serious performance bottleneck in the application. @return The DataStream with global partitioning set.
global
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@PublicEvolving public <R extends Tuple> SingleOutputStreamOperator<R> project(int... fieldIndexes) { return new StreamProjection<>(this, fieldIndexes).projectTupleX(); }
Initiates a Project transformation on a {@link Tuple} {@link DataStream}.<br> <b>Note: Only Tuple DataStreams can be projected.</b> <p>The transformation projects each Tuple of the DataStream onto a (sub)set of fields. @param fieldIndexes The field indexes of the input tuples that are retained. The order of fields in the output tuple corresponds to the order of field indexes. @return The projected DataStream @see Tuple @see DataStream
project
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
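A brief sketch of project on a Tuple3 stream; the output field order follows the given indexes (env and values are illustrative):

DataStream<Tuple3<String, Integer, Double>> input =
        env.fromData(Tuple3.of("a", 1, 2.0), Tuple3.of("b", 2, 3.5));
// Keep fields 2 and 0, in that order -> Tuple2<Double, String>.
DataStream<Tuple2<Double, String>> projected = input.project(2, 0);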
public <T2> CoGroupedStreams<T, T2> coGroup(DataStream<T2> otherStream) { return new CoGroupedStreams<>(this, otherStream); }
Creates a co-group operation. See {@link CoGroupedStreams} for an example of how the keys and window can be specified.
coGroup
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
public <T2> JoinedStreams<T, T2> join(DataStream<T2> otherStream) { return new JoinedStreams<>(this, otherStream); }
Creates a join operation. See {@link JoinedStreams} for an example of how the keys and window can be specified.
join
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
public AllWindowedStream<T, GlobalWindow> countWindowAll(long size) { return windowAll(GlobalWindows.create()).trigger(PurgingTrigger.of(CountTrigger.of(size))); }
Windows this {@code DataStream} into tumbling count windows. <p>Note: This operation is inherently non-parallel since all elements have to pass through the same operator instance. @param size The size of the windows in number of elements.
countWindowAll
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
public AllWindowedStream<T, GlobalWindow> countWindowAll(long size, long slide) { return windowAll(GlobalWindows.create()) .evictor(CountEvictor.of(size)) .trigger(CountTrigger.of(slide)); }
Windows this {@code DataStream} into sliding count windows. <p>Note: This operation is inherently non-parallel since all elements have to pass through the same operator instance. @param size The size of the windows in number of elements. @param slide The slide interval in number of elements.
countWindowAll
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
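A hedged sketch contrasting the two countWindowAll variants above (env and the values are illustrative; note the non-parallel caveat from the docstrings):

DataStream<Long> values = env.fromData(1L, 2L, 3L, 4L, 5L);
// Tumbling: fire once per 3 elements, then purge the window.
values.countWindowAll(3).reduce((a, b) -> a + b).print();
// Sliding: keep the last 4 elements, fire every 2 elements.
values.countWindowAll(4, 2).reduce((a, b) -> a + b).print();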
@PublicEvolving public DataStreamSink<T> print() { PrintSinkFunction<T> printFunction = new PrintSinkFunction<>(); return addSink(printFunction).name("Print to Std. Out"); }
Writes a DataStream to the standard output stream (stdout). <p>For each element of the DataStream the result of {@link Object#toString()} is written. <p>NOTE: This will print to stdout on the machine where the code is executed, i.e. the Flink worker. @return The closed DataStream.
print
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@PublicEvolving public DataStreamSink<T> printToErr() { PrintSinkFunction<T> printFunction = new PrintSinkFunction<>(true); return addSink(printFunction).name("Print to Std. Err"); }
Writes a DataStream to the standard error stream (stderr). <p>For each element of the DataStream the result of {@link Object#toString()} is written. <p>NOTE: This will print to stderr on the machine where the code is executed, i.e. the Flink worker. @return The closed DataStream.
printToErr
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@PublicEvolving public DataStreamSink<T> print(String sinkIdentifier) { PrintSinkFunction<T> printFunction = new PrintSinkFunction<>(sinkIdentifier, false); return addSink(printFunction).name("Print to Std. Out"); }
Writes a DataStream to the standard output stream (stdout). <p>For each element of the DataStream the result of {@link Object#toString()} is written. <p>NOTE: This will print to stdout on the machine where the code is executed, i.e. the Flink worker. @param sinkIdentifier The string to prefix the output with. @return The closed DataStream.
print
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@PublicEvolving public DataStreamSink<T> printToErr(String sinkIdentifier) { PrintSinkFunction<T> printFunction = new PrintSinkFunction<>(sinkIdentifier, true); return addSink(printFunction).name("Print to Std. Err"); }
Writes a DataStream to the standard error stream (stderr). <p>For each element of the DataStream the result of {@link Object#toString()} is written. <p>NOTE: This will print to stderr on the machine where the code is executed, i.e. the Flink worker. @param sinkIdentifier The string to prefix the output with. @return The closed DataStream.
printToErr
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@PublicEvolving public DataStreamSink<T> writeToSocket( String hostName, int port, SerializationSchema<T> schema) { DataStreamSink<T> returnStream = addSink(new SocketClientSink<>(hostName, port, schema, 0)); returnStream.setParallelism( 1); // It would not work if multiple instances would connect to the same port return returnStream; }
Writes the DataStream to a socket as a byte array. The format of the output is specified by a {@link SerializationSchema}. @param hostName host of the socket @param port port of the socket @param schema schema for serialization @return the closed DataStream
writeToSocket
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
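A short sketch of writeToSocket using SimpleStringSchema as the SerializationSchema (host and port are illustrative):

DataStream<String> lines = env.fromData("hello", "world");
// Parallelism is forced to 1 internally, since multiple writers cannot share one socket.
lines.writeToSocket("localhost", 9999, new SimpleStringSchema());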
@PublicEvolving public <R> SingleOutputStreamOperator<R> transform( String operatorName, TypeInformation<R> outTypeInfo, OneInputStreamOperator<T, R> operator) { return doTransform(operatorName, outTypeInfo, SimpleOperatorFactory.of(operator)); }
Method for passing user defined operators along with the type information that will transform the DataStream. @param operatorName name of the operator, for logging purposes @param outTypeInfo the output type of the operator @param operator the object containing the transformation logic @param <R> type of the return stream @return the data stream constructed @see #transform(String, TypeInformation, OneInputStreamOperatorFactory)
transform
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
protected DataStream<T> setConnectionType(StreamPartitioner<T> partitioner) { return new DataStream<>( this.getExecutionEnvironment(), new PartitionTransformation<>(this.getTransformation(), partitioner)); }
Internal function for setting the partitioner for the DataStream. @param partitioner Partitioner to set. @return The modified DataStream.
setConnectionType
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
public DataStreamSink<T> addSink(SinkFunction<T> sinkFunction) { // read the output type of the input Transform to coax out errors about MissingTypeInfo transformation.getOutputType(); // configure the type if needed if (sinkFunction instanceof InputTypeConfigurable) { ((InputTypeConfigurable) sinkFunction).setInputType(getType(), getExecutionConfig()); } return DataStreamSink.forSinkFunction(this, clean(sinkFunction)); }
Adds the given sink to this DataStream. Only streams with sinks added will be executed once the {@link StreamExecutionEnvironment#execute()} method is called. @param sinkFunction The object containing the sink's invoke function. @return The closed DataStream.
addSink
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@PublicEvolving public DataStreamSink<T> sinkTo(Sink<T> sink) { return this.sinkTo(sink, CustomSinkOperatorUidHashes.DEFAULT); }
Adds the given {@link Sink} to this DataStream. Only streams with sinks added will be executed once the {@link StreamExecutionEnvironment#execute()} method is called. @param sink The user defined sink. @return The closed DataStream.
sinkTo
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@PublicEvolving public DataStreamSink<T> sinkTo( Sink<T> sink, CustomSinkOperatorUidHashes customSinkOperatorUidHashes) { // read the output type of the input Transform to coax out errors about MissingTypeInfo transformation.getOutputType(); return DataStreamSink.forSink(this, sink, customSinkOperatorUidHashes); }
Adds the given {@link Sink} to this DataStream. Only streams with sinks added will be executed once the {@link StreamExecutionEnvironment#execute()} method is called. <p>This method is intended to be used only to recover a snapshot where no uids have been set before taking the snapshot. @param sink The user defined sink. @param customSinkOperatorUidHashes operator hashes to support state binding @return The closed DataStream.
sinkTo
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@Experimental public CloseableIterator<T> collectAsync() { final Collector<T> collector = new Collector<>(); collectAsync(collector); return collector.getOutput(); }
Sets up the collection of the elements in this {@link DataStream}, and returns an iterator over the collected elements that can be used to retrieve elements once the job execution has started. <p>Caution: When multiple streams are being collected it is recommended to consume all streams in parallel to not back-pressure the job. <p>Caution: Closing the returned iterator cancels the job! It is recommended to close all iterators once you are no longer interested in any of the collected streams. <p>This method is functionally equivalent to {@link #collectAsync(Collector)}. @return iterator over the contained elements
collectAsync
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
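A hedged usage sketch of collectAsync; the job must actually be launched (here via executeAsync) before the iterator yields elements, and closing the iterator cancels the job:

DataStream<String> stream = env.fromData("a", "b", "c");
try (CloseableIterator<String> it = stream.collectAsync()) {
    env.executeAsync("collect-example"); // start the job; collectAsync only sets up the sink
    while (it.hasNext()) {
        System.out.println(it.next());
    }
}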
@Experimental public void collectAsync(Collector<T> collector) { TypeSerializer<T> serializer = getType() .createSerializer( getExecutionEnvironment().getConfig().getSerializerConfig()); String accumulatorName = "dataStreamCollect_" + UUID.randomUUID().toString(); StreamExecutionEnvironment env = getExecutionEnvironment(); MemorySize maxBatchSize = env.getConfiguration().get(CollectSinkOperatorFactory.MAX_BATCH_SIZE); Duration socketTimeout = env.getConfiguration().get(CollectSinkOperatorFactory.SOCKET_TIMEOUT); CollectSinkOperatorFactory<T> factory = new CollectSinkOperatorFactory<>( serializer, accumulatorName, maxBatchSize, socketTimeout); CollectStreamSink<T> sink = new CollectStreamSink<>(this, factory); sink.name("Data stream collect sink"); String operatorUid = "dataStreamCollect_" + sink.getTransformation().getId(); sink.uid(operatorUid); long resultFetchTimeout = env.getConfiguration().get(RpcOptions.ASK_TIMEOUT_DURATION).toMillis(); CollectResultIterator<T> iterator = new CollectResultIterator<>( operatorUid, serializer, accumulatorName, env.getCheckpointConfig(), resultFetchTimeout); env.addOperator(sink.getTransformation()); env.registerCollectIterator(iterator); collector.setIterator(iterator); }
Sets up the collection of the elements in this {@link DataStream}, which can be retrieved later via the given {@link Collector}. <p>Caution: When multiple streams are being collected it is recommended to consume all streams in parallel to not back-pressure the job. <p>Caution: Closing the iterator from the collector cancels the job! It is recommended to close all iterators once you are no longer interested in any of the collected streams. <p>This method is functionally equivalent to {@link #collectAsync()}. <p>This method is meant to support use-cases where the application of a sink is done via a {@code Consumer<DataStream<T>>}, where it wouldn't be possible (or inconvenient) to return an iterator. @param collector a collector that can be used to retrieve the elements
collectAsync
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@PublicEvolving public PartitionWindowedStream<T> fullWindowPartition() { return new NonKeyedPartitionWindowedStream<>(environment, this); }
Collect records from each partition into a separate full window. The window emission will be triggered at the end of inputs. For this non-keyed data stream (each record has no key), a partition contains all records of a subtask. @return The full windowed data stream on partition.
fullWindowPartition
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@Internal void setIterator(CloseableIterator<T> iterator) { this.iterator = iterator; }
This class acts as an accessor to elements collected via {@link #collectAsync(Collector)}. @param <T> the element type
setIterator
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
@Internal public Transformation<T> getTransformation() { return transformation; }
Returns the {@link Transformation} that represents the operation that logically creates this {@link DataStream}. @return The Transformation
getTransformation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
Apache-2.0
public DataStreamSink<T> name(String name) { transformation.setName(name); return this; }
Sets the name of this sink. This name is used by the visualization and logging during runtime. @return The named sink.
name
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
Apache-2.0
public DataStreamSink<T> setParallelism(int parallelism) { transformation.setParallelism(parallelism); return this; }
Sets the parallelism for this sink. The degree must be higher than zero. @param parallelism The parallelism for this sink. @return The sink with set parallelism.
setParallelism
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
Apache-2.0
public DataStreamSink<T> setMaxParallelism(int maxParallelism) { OperatorValidationUtils.validateMaxParallelism(maxParallelism, true); transformation.setMaxParallelism(maxParallelism); return this; }
Sets the max parallelism for this sink. <p>The maximum parallelism specifies the upper bound for dynamic scaling. The degree must be higher than zero and less than the upper bound. @param maxParallelism The max parallelism for this sink. @return The sink with set max parallelism.
setMaxParallelism
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
Apache-2.0
@PublicEvolving public DataStreamSink<T> setDescription(String description) { transformation.setDescription(description); return this; }
Sets the description for this sink. <p>The description is used in the JSON plan and the web UI, but not in logging and metrics, where only the name is available. The description is expected to provide detailed information about the sink, while the name should be simpler and carry only summary information, so that logging messages and metric tags stay user-friendly without losing useful detail for debugging. @param description The description for this sink. @return The sink with new description.
setDescription
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
Apache-2.0
private DataStreamSink<T> setResources(ResourceSpec resources) { transformation.setResources(resources, resources); return this; }
Sets the resources for this sink, the minimum and preferred resources are the same by default. @param resources The resources for this sink. @return The sink with set minimum and preferred resources.
setResources
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
Apache-2.0
@PublicEvolving public DataStreamSink<T> slotSharingGroup(SlotSharingGroup slotSharingGroup) { transformation.setSlotSharingGroup(slotSharingGroup); return this; }
Sets the slot sharing group of this operation. Parallel instances of operations that are in the same slot sharing group will be co-located in the same TaskManager slot, if possible. <p>Operations inherit the slot sharing group of input operations if all input operations are in the same slot sharing group and no slot sharing group was explicitly specified. <p>Initially an operation is in the default slot sharing group. An operation can be put into the default group explicitly by setting the slot sharing group with name {@code "default"}. @param slotSharingGroup which contains name and its resource spec.
slotSharingGroup
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
Apache-2.0
private static <T> LegacySourceTransformation<T> createSourceTransformation( StreamExecutionEnvironment environment, TypeInformation<T> outTypeInfo, StreamSource<T, ?> operator, boolean isParallel, String sourceName, Boundedness boundedness) { LegacySourceTransformation<T> transformation = new LegacySourceTransformation<>( sourceName, operator, outTypeInfo, environment.getParallelism(), boundedness, false); transformation.setChainingStrategy(ChainingStrategy.HEAD); return transformation; }
Creates the {@link LegacySourceTransformation} used for legacy sources.
createSourceTransformation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSource.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSource.java
Apache-2.0
public <KEY> Where<KEY> where(KeySelector<T1, KEY> keySelector) { requireNonNull(keySelector); final TypeInformation<KEY> keyType = TypeExtractor.getKeySelectorTypes(keySelector, input1.getType()); return where(keySelector, keyType); }
Specifies a {@link KeySelector} for elements from the first input. @param keySelector The KeySelector to be used for extracting the key for partitioning.
where
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
Apache-2.0
public <KEY> Where<KEY> where(KeySelector<T1, KEY> keySelector, TypeInformation<KEY> keyType) { requireNonNull(keySelector); requireNonNull(keyType); return new Where<>(input1.clean(keySelector), keyType); }
Specifies a {@link KeySelector} for elements from the first input with explicit type information for the key type. @param keySelector The KeySelector to be used for extracting the first input's key for partitioning. @param keyType The type information describing the key type.
where
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
Apache-2.0
public EqualTo equalTo(KeySelector<T2, KEY> keySelector, TypeInformation<KEY> keyType) { requireNonNull(keySelector); requireNonNull(keyType); if (!keyType.equals(this.keyType)) { throw new IllegalArgumentException( "The keys for the two inputs are not equal: " + "first key = " + this.keyType + " , second key = " + keyType); } return new EqualTo(input2.clean(keySelector)); }
Specifies a {@link KeySelector} for elements from the second input with explicit type information for the key type. @param keySelector The KeySelector to be used for extracting the second input's key for partitioning. @param keyType The type information describing the key type.
equalTo
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
Apache-2.0
@Override public void coGroup(Iterable<T1> first, Iterable<T2> second, Collector<T> out) throws Exception { for (T1 val1 : first) { for (T2 val2 : second) { out.collect(wrappedFunction.join(val1, val2)); } } }
CoGroup function that does a nested-loop join to get the join result.
coGroup
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
Apache-2.0
@Override public void coGroup(Iterable<T1> first, Iterable<T2> second, Collector<T> out) throws Exception { for (T1 val1 : first) { for (T2 val2 : second) { wrappedFunction.join(val1, val2, out); } } }
CoGroup function that does a nested-loop join to get the join result. (FlatJoin version)
coGroup
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
Apache-2.0
@SuppressWarnings("rawtypes") private TypeInformation<KEY> validateKeyType(TypeInformation<KEY> keyType) { Stack<TypeInformation<?>> stack = new Stack<>(); stack.push(keyType); List<TypeInformation<?>> unsupportedTypes = new ArrayList<>(); while (!stack.isEmpty()) { TypeInformation<?> typeInfo = stack.pop(); if (!validateKeyTypeIsHashable(typeInfo)) { unsupportedTypes.add(typeInfo); } if (typeInfo instanceof TupleTypeInfoBase) { for (int i = 0; i < typeInfo.getArity(); i++) { stack.push(((TupleTypeInfoBase) typeInfo).getTypeAt(i)); } } } if (!unsupportedTypes.isEmpty()) { throw new InvalidProgramException( "Type " + keyType + " cannot be used as key. Contained " + "UNSUPPORTED key types: " + StringUtils.join(unsupportedTypes, ", ") + ". Look " + "at the keyBy() documentation for the conditions a type has to satisfy in order to be " + "eligible for a key."); } return keyType; }
Validates that a given type of element (as encoded by the provided {@link TypeInformation}) can be used as a key in the {@code DataStream.keyBy()} operation. This is done by searching depth-first the key type and checking if each of the composite types satisfies the required conditions (see {@link #validateKeyTypeIsHashable(TypeInformation)}). @param keyType The {@link TypeInformation} of the key.
validateKeyType
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
@Internal public KeySelector<T, KEY> getKeySelector() { return this.keySelector; }
Gets the key selector that can get the key by which the stream is partitioned from the elements. @return The key selector for the key.
getKeySelector
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
@PublicEvolving public <T1> IntervalJoin<T, T1, KEY> intervalJoin(KeyedStream<T1, KEY> otherStream) { return new IntervalJoin<>(this, otherStream); }
Join elements of this {@link KeyedStream} with elements of another {@link KeyedStream} over a time interval that can be specified with {@link IntervalJoin#between(Duration, Duration)}. @param otherStream The other keyed stream to join this keyed stream with @param <T1> Type parameter of elements in the other stream @return An instance of {@link IntervalJoin} with this keyed stream and the other keyed stream
intervalJoin
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
@PublicEvolving public IntervalJoined<T1, T2, KEY> between(Duration lowerBound, Duration upperBound) { if (timeBehaviour != TimeBehaviour.EventTime) { throw new UnsupportedTimeCharacteristicException( "Time-bounded stream joins are only supported in event time"); } checkNotNull(lowerBound, "A lower bound needs to be provided for a time-bounded join"); checkNotNull(upperBound, "An upper bound needs to be provided for a time-bounded join"); return new IntervalJoined<>( streamOne, streamTwo, lowerBound.toMillis(), upperBound.toMillis(), true, true); }
Specifies the time boundaries over which the join operation works, so that <pre> leftElement.timestamp + lowerBound <= rightElement.timestamp <= leftElement.timestamp + upperBound </pre> <p>By default both the lower and the upper bound are inclusive. This can be configured with {@link IntervalJoined#lowerBoundExclusive()} and {@link IntervalJoined#upperBoundExclusive()}. @param lowerBound The lower bound. Needs to be smaller than or equal to the upperBound @param upperBound The upper bound. Needs to be greater than or equal to the lowerBound
between
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
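A hedged sketch of an interval join over two keyed streams; Order and Payment (each with an id field) are hypothetical types used only for illustration:

orders.intervalJoin(payments)
        // match payments within 5 minutes before or after the order's timestamp
        .between(Duration.ofMinutes(-5), Duration.ofMinutes(5))
        .process(new ProcessJoinFunction<Order, Payment, String>() {
            @Override
            public void processElement(Order left, Payment right, Context ctx, Collector<String> out) {
                out.collect(left.id + " matched " + right.id);
            }
        });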
@PublicEvolving public IntervalJoined<IN1, IN2, KEY> upperBoundExclusive() { this.upperBoundInclusive = false; return this; }
Set the upper bound to be exclusive.
upperBoundExclusive
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
@PublicEvolving public IntervalJoined<IN1, IN2, KEY> lowerBoundExclusive() { this.lowerBoundInclusive = false; return this; }
Set the lower bound to be exclusive.
lowerBoundExclusive
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
@PublicEvolving public IntervalJoined<IN1, IN2, KEY> sideOutputLeftLateData(OutputTag<IN1> outputTag) { outputTag = left.getExecutionEnvironment().clean(outputTag); this.leftLateDataOutputTag = outputTag; return this; }
Send late-arriving left-side data to the side output identified by the given {@link OutputTag}. Data is considered late after the watermark has passed the element's timestamp.
sideOutputLeftLateData
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
@PublicEvolving public IntervalJoined<IN1, IN2, KEY> sideOutputRightLateData(OutputTag<IN2> outputTag) { outputTag = right.getExecutionEnvironment().clean(outputTag); this.rightLateDataOutputTag = outputTag; return this; }
Send late-arriving right-side data to the side output identified by the given {@link OutputTag}. Data is considered late after the watermark has passed the element's timestamp.
sideOutputRightLateData
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
@PublicEvolving public <OUT> SingleOutputStreamOperator<OUT> process( ProcessJoinFunction<IN1, IN2, OUT> processJoinFunction) { Preconditions.checkNotNull(processJoinFunction); final TypeInformation<OUT> outputType = TypeExtractor.getBinaryOperatorReturnType( processJoinFunction, ProcessJoinFunction.class, 0, 1, 2, TypeExtractor.NO_INDEX, left.getType(), right.getType(), Utils.getCallLocationName(), true); return process(processJoinFunction, outputType); }
Completes the join operation with the given user function that is executed for each joined pair of elements. @param processJoinFunction The user-defined process join function. @param <OUT> The output type. @return The transformed {@link DataStream}.
process
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
@PublicEvolving public <OUT> SingleOutputStreamOperator<OUT> process( ProcessJoinFunction<IN1, IN2, OUT> processJoinFunction, TypeInformation<OUT> outputType) { Preconditions.checkNotNull(processJoinFunction); Preconditions.checkNotNull(outputType); final ProcessJoinFunction<IN1, IN2, OUT> cleanedUdf = left.getExecutionEnvironment().clean(processJoinFunction); if (isEnableAsyncState) { final AsyncIntervalJoinOperator<KEY, IN1, IN2, OUT> operator = new AsyncIntervalJoinOperator<>( lowerBound, upperBound, lowerBoundInclusive, upperBoundInclusive, leftLateDataOutputTag, rightLateDataOutputTag, left.getType() .createSerializer( left.getExecutionConfig().getSerializerConfig()), right.getType() .createSerializer( right.getExecutionConfig().getSerializerConfig()), cleanedUdf); return left.connect(right) .keyBy(keySelector1, keySelector2) .transform("Interval Join [Async]", outputType, operator); } else { final IntervalJoinOperator<KEY, IN1, IN2, OUT> operator = new IntervalJoinOperator<>( lowerBound, upperBound, lowerBoundInclusive, upperBoundInclusive, leftLateDataOutputTag, rightLateDataOutputTag, left.getType() .createSerializer( left.getExecutionConfig().getSerializerConfig()), right.getType() .createSerializer( right.getExecutionConfig().getSerializerConfig()), cleanedUdf); return left.connect(right) .keyBy(keySelector1, keySelector2) .transform("Interval Join", outputType, operator); } }
Completes the join operation with the given user function that is executed for each joined pair of elements. This method allows for passing explicit type information for the output type. @param processJoinFunction The user-defined process join function. @param outputType The type information for the output type. @param <OUT> The output type. @return The transformed {@link DataStream}.
process
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
@Experimental public IntervalJoined<IN1, IN2, KEY> enableAsyncState() { isEnableAsyncState = true; return this; }
Enables async state processing for the following keyed processing function. This requires that only State V2 APIs are used in the function. @return the configured IntervalJoin itself.
enableAsyncState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
public WindowedStream<T, KEY, GlobalWindow> countWindow(long size) { return window(GlobalWindows.create()).trigger(PurgingTrigger.of(CountTrigger.of(size))); }
Windows this {@code KeyedStream} into tumbling count windows. @param size The size of the windows in number of elements.
countWindow
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
public WindowedStream<T, KEY, GlobalWindow> countWindow(long size, long slide) { return window(GlobalWindows.create()) .evictor(CountEvictor.of(size)) .trigger(CountTrigger.of(slide)); }
Windows this {@code KeyedStream} into sliding count windows. @param size The size of the windows in number of elements. @param slide The slide interval in number of elements.
countWindow
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
public SingleOutputStreamOperator<T> reduce(ReduceFunction<T> reducer) { ReduceTransformation<T, KEY> reduce = new ReduceTransformation<>( "Keyed Reduce", environment.getParallelism(), transformation, clean(reducer), keySelector, getKeyType(), false); if (isEnableAsyncState) { reduce.enableAsyncState(); } getExecutionEnvironment().addOperator(reduce); return new SingleOutputStreamOperator<>(getExecutionEnvironment(), reduce); }
Applies a reduce transformation on the data stream grouped by the given key. The {@link ReduceFunction} will receive input values based on the key value. Only input values with the same key will go to the same reducer. @param reducer The {@link ReduceFunction} that will be called for every element of the input values with the same key. @return The transformed DataStream.
reduce
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
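A minimal sketch of a keyed running sum built with reduce over Tuple2 word counts (env and values are illustrative):

DataStream<Tuple2<String, Long>> counts =
        env.fromData(Tuple2.of("flink", 1L), Tuple2.of("flink", 2L));
DataStream<Tuple2<String, Long>> running = counts
        .keyBy(t -> t.f0)
        // elements with the same key meet in the same reducer; emit a running sum
        .reduce((a, b) -> Tuple2.of(a.f0, a.f1 + b.f1));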
public SingleOutputStreamOperator<T> sum(String field) { return aggregate(new SumAggregator<>(field, getType(), getExecutionConfig())); }
Applies an aggregation that gives the current sum of the data stream at the given field by the given key. An independent aggregate is kept per key. @param field In case of a POJO, Scala case class, or Tuple type, the name of the (public) field on which to perform the aggregation. Additionally, a dot can be used to drill down into nested objects, as in {@code "field1.fieldxy" }. Furthermore "*" can be specified in case of a basic type (which is considered as having only one field). @return The transformed DataStream.
sum
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
public SingleOutputStreamOperator<T> min(String field) { return aggregate( new ComparableAggregator<>( field, getType(), AggregationFunction.AggregationType.MIN, false, getExecutionConfig())); }
Applies an aggregation that gives the current minimum of the data stream at the given field expression by the given key. An independent aggregate is kept per key. A field expression is either the name of a public field or a getter method with parentheses of the {@link DataStream}'s underlying type. A dot can be used to drill down into objects, as in {@code "field1.fieldxy" }. @param field In case of a POJO, Scala case class, or Tuple type, the name of the (public) field on which to perform the aggregation. Additionally, a dot can be used to drill down into nested objects, as in {@code "field1.fieldxy" }. Furthermore "*" can be specified in case of a basic type (which is considered as having only one field). @return The transformed DataStream.
min
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
public SingleOutputStreamOperator<T> max(String field) { return aggregate( new ComparableAggregator<>( field, getType(), AggregationFunction.AggregationType.MAX, false, getExecutionConfig())); }
Applies an aggregation that gives the current maximum of the data stream at the given field expression by the given key. An independent aggregate is kept per key. A field expression is either the name of a public field or a getter method with parentheses of the {@link DataStream}'s underlying type. A dot can be used to drill down into objects, as in {@code "field1.fieldxy" }. @param field In case of a POJO, Scala case class, or Tuple type, the name of the (public) field on which to perform the aggregation. Additionally, a dot can be used to drill down into nested objects, as in {@code "field1.fieldxy" }. Furthermore "*" can be specified in case of a basic type (which is considered as having only one field). @return The transformed DataStream.
max
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
@SuppressWarnings({"rawtypes", "unchecked"}) public SingleOutputStreamOperator<T> minBy(String field, boolean first) { return aggregate( new ComparableAggregator( field, getType(), AggregationFunction.AggregationType.MINBY, first, getExecutionConfig())); }
Applies an aggregation that gives the current minimum element of the data stream by the given field expression by the given key. An independent aggregate is kept per key. A field expression is either the name of a public field or a getter method with parentheses of the {@link DataStream}'s underlying type. A dot can be used to drill down into objects, as in {@code "field1.fieldxy" }. @param field In case of a POJO, Scala case class, or Tuple type, the name of the (public) field on which to perform the aggregation. Additionally, a dot can be used to drill down into nested objects, as in {@code "field1.fieldxy" }. Furthermore "*" can be specified in case of a basic type (which is considered as having only one field). @param first If true, then in case of field equality the first object will be returned @return The transformed DataStream.
minBy
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
public SingleOutputStreamOperator<T> maxBy(String field, boolean first) { return aggregate( new ComparableAggregator<>( field, getType(), AggregationFunction.AggregationType.MAXBY, first, getExecutionConfig())); }
Applies an aggregation that gives the current maximum element of the data stream by the given field expression by the given key. An independent aggregate is kept per key. A field expression is either the name of a public field or a getter method with parentheses of the {@link DataStream}'s underlying type. A dot can be used to drill down into objects, as in {@code "field1.fieldxy" }. @param field In case of a POJO, Scala case class, or Tuple type, the name of the (public) field on which to perform the aggregation. Additionally, a dot can be used to drill down into nested objects, as in {@code "field1.fieldxy" }. Furthermore "*" can be specified in case of a basic type (which is considered as having only one field). @param first If true, then in case of field equality the first object will be returned @return The transformed DataStream.
maxBy
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
public SingleOutputStreamOperator<T> minBy(String positionToMinBy) { return this.minBy(positionToMinBy, true); }
Applies an aggregation that gives the current element with the minimum value at the given position by the given key. An independent aggregate is kept per key. If multiple elements have the minimum value at the given position, the operator returns the first one by default. @param positionToMinBy In case of a POJO, Scala case class, or Tuple type, the name of the (public) field on which to perform the aggregation. Additionally, a dot can be used to drill down into nested objects, as in {@code "field1.fieldxy" }. Furthermore "*" can be specified in case of a basic type (which is considered as having only one field). @return The transformed DataStream.
minBy
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
public SingleOutputStreamOperator<T> maxBy(String positionToMaxBy) { return this.maxBy(positionToMaxBy, true); }
Applies an aggregation that gives the current element with the maximum value at the given position by the given key. An independent aggregate is kept per key. If multiple elements have the maximum value at the given position, the operator returns the first one by default. @param positionToMaxBy In case of a POJO, Scala case class, or Tuple type, the name of the (public) field on which to perform the aggregation. Additionally, a dot can be used to drill down into nested objects, as in {@code "field1.fieldxy" }. Furthermore "*" can be specified in case of a basic type (which is considered as having only one field). @return The transformed DataStream.
maxBy
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
@PublicEvolving @Override public PartitionWindowedStream<T> fullWindowPartition() { return new KeyedPartitionWindowedStream<>(environment, this); }
Collect records from each partition into a separate full window. The window emission will be triggered at the end of inputs. For this keyed data stream (each record has a key), a partition contains only records with the same key. @return The full windowed data stream on partition.
fullWindowPartition
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
@PublicEvolving @Deprecated public QueryableStateStream<KEY, T> asQueryableState(String queryableStateName) { ValueStateDescriptor<T> valueStateDescriptor = new ValueStateDescriptor<>(UUID.randomUUID().toString(), getType()); return asQueryableState(queryableStateName, valueStateDescriptor); }
Publishes the keyed stream as a queryable ValueState instance. @param queryableStateName Name under which to publish the queryable state instance @return Queryable state instance @deprecated The Queryable State feature is deprecated since Flink 1.18, and will be removed in a future Flink major version.
asQueryableState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
@PublicEvolving @Deprecated public QueryableStateStream<KEY, T> asQueryableState( String queryableStateName, ValueStateDescriptor<T> stateDescriptor) { transform( "Queryable state: " + queryableStateName, getType(), new QueryableValueStateOperator<>(queryableStateName, stateDescriptor)); stateDescriptor.initializeSerializerUnlessSet(getExecutionConfig()); return new QueryableStateStream<>( queryableStateName, stateDescriptor, getKeyType().createSerializer(getExecutionConfig().getSerializerConfig())); }
Publishes the keyed stream as a queryable ValueState instance. @param queryableStateName Name under which to publish the queryable state instance @param stateDescriptor State descriptor to create state instance from @return Queryable state instance @deprecated The Queryable State feature is deprecated since Flink 1.18, and will be removed in a future Flink major version.
asQueryableState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
@PublicEvolving @Deprecated public QueryableStateStream<KEY, T> asQueryableState( String queryableStateName, ReducingStateDescriptor<T> stateDescriptor) { transform( "Queryable state: " + queryableStateName, getType(), new QueryableAppendingStateOperator<>(queryableStateName, stateDescriptor)); stateDescriptor.initializeSerializerUnlessSet(getExecutionConfig()); return new QueryableStateStream<>( queryableStateName, stateDescriptor, getKeyType().createSerializer(getExecutionConfig().getSerializerConfig())); }
Publishes the keyed stream as a queryable ReducingState instance. @param queryableStateName Name under which to publish the queryable state instance @param stateDescriptor State descriptor to create state instance from @return Queryable state instance @deprecated The Queryable State feature is deprecated since Flink 1.18, and will be removed in a future Flink major version.
asQueryableState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
Apache-2.0
public String getQueryableStateName() { return queryableStateName; }
Returns the name under which the state can be queried. @return Name under which state can be queried.
getQueryableStateName
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/QueryableStateStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/QueryableStateStream.java
Apache-2.0
public StateDescriptor<?, V> getStateDescriptor() { return stateDescriptor; }
Returns the state descriptor for the queryable state instance. @return State descriptor for the state instance
getStateDescriptor
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/QueryableStateStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/QueryableStateStream.java
Apache-2.0
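The two getters above supply exactly what an external client needs to look the state up. A minimal client-side sketch, assuming the (equally deprecated) flink-queryable-state-client dependency and reusing the "readings" name and descriptor from the publishing sketch above; the host, port, and job id are hypothetical placeholders, and the surrounding method is assumed to declare throws Exception.

// assumed imports: org.apache.flink.queryablestate.client.QueryableStateClient,
// org.apache.flink.api.common.JobID, org.apache.flink.api.common.typeinfo.Types
QueryableStateClient client = new QueryableStateClient("jobmanager-host", 9069);
JobID jobId = JobID.fromHexString("<running-job-id>"); // hypothetical placeholder
CompletableFuture<ValueState<Tuple2<String, Integer>>> result =
        client.getKvState(
                jobId,
                "readings",   // getQueryableStateName() at publish time
                "sensor-1",   // the key to look up
                Types.STRING, // key type information
                descriptor);  // getStateDescriptor() / the published descriptor
result.thenAccept(state -> {
    try {
        System.out.println(state.value());
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
});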
@PublicEvolving public CachedDataStream<T> cache() { return new CachedDataStream<>(this.environment, this.transformation); }
Caches the intermediate result of the transformation. Only bounded streams are supported, and currently only block mode is supported. The cache is generated lazily the first time the intermediate result is computed. The cache is cleared when {@link CachedDataStream#invalidate()} is called or when the {@link StreamExecutionEnvironment} is closed. @return A CachedDataStream that can be used in a later job to reuse the cached intermediate result.
cache
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SideOutputDataStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SideOutputDataStream.java
Apache-2.0
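A minimal reuse sketch for cache(), assuming batch runtime mode, a bounded source, and an existing StreamExecutionEnvironment named env; the filter logic is illustrative, and the surrounding method is assumed to declare throws Exception because invalidate() may throw.

// assumed imports: org.apache.flink.api.common.RuntimeExecutionMode,
// org.apache.flink.api.common.typeinfo.Types
env.setRuntimeMode(RuntimeExecutionMode.BATCH); // caching needs bounded execution
CachedDataStream<Long> cached = env.fromSequence(1, 1_000)
        .map(x -> x * 2)
        .returns(Types.LONG)
        .cache();
cached.print();
env.execute("first-job");            // computes the map and fills the cache
cached.filter(x -> x > 100).print();
env.execute("second-job");           // reads the cached result, no recomputation
cached.invalidate();                 // releases the cached intermediate result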
public String getName() { return transformation.getName(); }
Gets the name of the current data stream. This name is used by the visualization and logging during runtime. @return Name of the stream.
getName
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
public SingleOutputStreamOperator<T> name(String name) { transformation.setName(name); return this; }
Sets the name of the current data stream. This name is used by the visualization and logging during runtime. @return The named operator.
name
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
public SingleOutputStreamOperator<T> setParallelism(int parallelism) { OperatorValidationUtils.validateParallelism(parallelism, canBeParallel()); transformation.setParallelism(parallelism); return this; }
Sets the parallelism for this operator. @param parallelism The parallelism for this operator. @return The operator with set parallelism.
setParallelism
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
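The name and parallelism setters above compose fluently. A minimal sketch with illustrative values, assuming env is an existing StreamExecutionEnvironment:

env.fromData(1, 2, 3)
        .map(x -> x + 1)
        .name("increment")      // appears in the web UI and in log messages
        .setParallelism(4)      // four parallel subtasks for this map
        .print();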
@PublicEvolving public SingleOutputStreamOperator<T> forceNonParallel() { transformation.setParallelism(1); transformation.setMaxParallelism(1); nonParallel = true; return this; }
Sets the parallelism and maximum parallelism of this operator to one, and marks this operator as non-parallel, so that a parallelism other than one cannot be set. @return The operator with a parallelism of one.
forceNonParallel
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
public SingleOutputStreamOperator<T> setBufferTimeout(long timeoutMillis) { checkArgument(timeoutMillis >= -1, "timeout must be >= -1"); transformation.setBufferTimeout(timeoutMillis); return this; }
Sets the buffering timeout for data produced by this operation. The timeout defines how long data may linger in a partially full buffer before being sent over the network. <p>Lower timeouts lead to lower tail latencies, but may affect throughput. Timeouts of 1 ms still sustain high throughput, even for jobs with high parallelism. <p>A value of '-1' means that the default buffer timeout should be used. A value of '0' indicates that no buffering should happen, and all records/events should be immediately sent through the network, without additional buffering. @param timeoutMillis The maximum time between two output flushes. @return The operator with buffer timeout set.
setBufferTimeout
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
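A minimal sketch of the latency/throughput trade-off described above; the 5 ms value is illustrative and env is an assumed StreamExecutionEnvironment.

env.fromData("a", "b", "c")
        .map(String::toUpperCase)
        .setBufferTimeout(5)    // flush partially full buffers after 5 ms
        .print();
// setBufferTimeout(0) would flush every record immediately;
// setBufferTimeout(-1) falls back to the job-wide default.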
@PublicEvolving private SingleOutputStreamOperator<T> setChainingStrategy(ChainingStrategy strategy) { if (transformation instanceof PhysicalTransformation) { ((PhysicalTransformation<T>) transformation).setChainingStrategy(strategy); } else { throw new UnsupportedOperationException( "Cannot set chaining strategy on " + transformation); } return this; }
Sets the {@link ChainingStrategy} for the given operator affecting the way operators will possibly be co-located on the same thread for increased performance. @param strategy The selected {@link ChainingStrategy} @return The operator with the modified chaining strategy
setChainingStrategy
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
@PublicEvolving public SingleOutputStreamOperator<T> disableChaining() { return setChainingStrategy(ChainingStrategy.NEVER); }
Turns off chaining for this operator so thread co-location will not be used as an optimization. <p>Chaining can be turned off for the whole job via {@link StreamExecutionEnvironment#disableOperatorChaining()}; however, this is not advised for performance reasons. @return The operator with chaining disabled
disableChaining
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
@PublicEvolving public SingleOutputStreamOperator<T> startNewChain() { return setChainingStrategy(ChainingStrategy.HEAD); }
Starts a new task chain beginning at this operator. This operator will not be chained (thread co-located for increased performance) to any previous tasks even if possible. @return The operator with chaining set.
startNewChain
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
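A minimal sketch contrasting the two chaining controls above; the functions are trivial placeholders and env is an assumed StreamExecutionEnvironment.

env.fromData(1, 2, 3)
        .map(x -> x + 1)
        .startNewChain()        // may chain to downstream operators, not to the source
        .filter(x -> x % 2 == 0)
        .disableChaining()      // this filter always runs as its own task
        .print();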
public SingleOutputStreamOperator<T> returns(Class<T> typeClass) { requireNonNull(typeClass, "type class must not be null."); try { return returns(TypeInformation.of(typeClass)); } catch (InvalidTypesException e) { throw new InvalidTypesException( "Cannot infer the type information from the class alone. " + "This is most likely because the class represents a generic type. In that case, " + "please use the 'returns(TypeHint)' method instead."); } }
Adds a type information hint about the return type of this operator. This method can be used in cases where Flink cannot determine automatically what the produced type of a function is. That can be the case if the function uses generic type variables in the return type that cannot be inferred from the input type. <p>Classes can be used as type hints for non-generic types (classes without generic parameters), but not for generic types such as Tuples. For those generic types, please use the {@link #returns(TypeHint)} method. @param typeClass The class of the returned data type. @return This operator with the type information corresponding to the given type class.
returns
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
public SingleOutputStreamOperator<T> returns(TypeHint<T> typeHint) { requireNonNull(typeHint, "TypeHint must not be null"); try { return returns(TypeInformation.of(typeHint)); } catch (InvalidTypesException e) { throw new InvalidTypesException( "Cannot infer the type information from the type hint. " + "Make sure that the TypeHint does not use any generic type variables."); } }
Adds a type information hint about the return type of this operator. This method can be used in cases where Flink cannot determine automatically what the produced type of a function is. That can be the case if the function uses generic type variables in the return type that cannot be inferred from the input type. <p>Use this method the following way: <pre>{@code DataStream<Tuple2<String, Double>> result = stream.flatMap(new FunctionWithNonInferrableReturnType()) .returns(new TypeHint<Tuple2<String, Double>>(){}); }</pre> @param typeHint The type hint for the returned data type. @return This operator with the type information corresponding to the given type hint.
returns
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
public SingleOutputStreamOperator<T> returns(TypeInformation<T> typeInfo) { requireNonNull(typeInfo, "TypeInformation must not be null"); transformation.setOutputType(typeInfo); return this; }
Adds a type information hint about the return type of this operator. This method can be used in cases where Flink cannot determine automatically what the produced type of a function is. That can be the case if the function uses generic type variables in the return type that cannot be inferred from the input type. <p>In most cases, the methods {@link #returns(Class)} and {@link #returns(TypeHint)} are preferable. @param typeInfo type information as a return type hint @return This operator with a given return type hint.
returns
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
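A minimal sketch of the explicit TypeInformation variant for a lambda whose Tuple2 output type is hidden by erasure; Types is org.apache.flink.api.common.typeinfo.Types and env is an assumed StreamExecutionEnvironment.

env.fromData("a", "bb")
        .map(s -> Tuple2.of(s, s.length()))
        .returns(Types.TUPLE(Types.STRING, Types.INT)) // f0: String, f1: Integer
        .print();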
@PublicEvolving public SingleOutputStreamOperator<T> slotSharingGroup(SlotSharingGroup slotSharingGroup) { transformation.setSlotSharingGroup(slotSharingGroup); return this; }
Sets the slot sharing group of this operation. Parallel instances of operations that are in the same slot sharing group will be co-located in the same TaskManager slot, if possible. <p>Operations inherit the slot sharing group of input operations if all input operations are in the same slot sharing group and no slot sharing group was explicitly specified. <p>Initially an operation is in the default slot sharing group. An operation can be put into the default group explicitly by setting the slot sharing group with name {@code "default"}. @param slotSharingGroup The slot sharing group, which contains its name and resource spec.
slotSharingGroup
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
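A minimal sketch of building a named group with a resource spec, assuming the SlotSharingGroup builder from org.apache.flink.api.common.operators; the group name and resource values are illustrative, and env is an assumed StreamExecutionEnvironment.

SlotSharingGroup heavyOps = SlotSharingGroup.newBuilder("heavy-ops")
        .setCpuCores(2.0)
        .setTaskHeapMemoryMB(512)
        .build();
env.fromData(1, 2, 3)
        .map(x -> x * x)
        .slotSharingGroup(heavyOps) // co-locate with other "heavy-ops" operators
        .print();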
@PublicEvolving public SingleOutputStreamOperator<T> setDescription(String description) { transformation.setDescription(description); return this; }
Sets the description for this operation. <p>The description is used in the json plan and the web ui, but not in logging and metrics, where only the name is available. The description is expected to provide detailed information about the operation, while the name is expected to be simpler, providing summary information only, so that we can have more user-friendly logging messages and metric tags without losing useful messages for debugging. @param description The description for this operation. @return The operation with the new description.
setDescription
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
@PublicEvolving public CachedDataStream<T> cache() { if (!(this.transformation instanceof PhysicalTransformation)) { throw new IllegalStateException( "Cache can only be called with physical transformation or side output transformation"); } return new CachedDataStream<>(this.environment, this.transformation); }
Caches the intermediate result of the transformation. Only bounded streams are supported, and currently only block mode is supported. The cache is generated lazily the first time the intermediate result is computed. The cache is cleared when {@link CachedDataStream#invalidate()} is called or when the {@link StreamExecutionEnvironment} is closed. @return A CachedDataStream that can be used in a later job to reuse the cached intermediate result.
cache
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
@Experimental public SingleOutputStreamOperator<T> enableAsyncState() { transformation.enableAsyncState(); return this; }
Enables async state processing for the previous transformation. This also requires that only State V2 APIs are used in the user function. @return The configured SingleOutputStreamOperator itself. @throws UnsupportedOperationException when the transformation does not support async state processing.
enableAsyncState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
Apache-2.0
public <R> SingleOutputStreamOperator<R> reduce( ReduceFunction<T> reduceFunction, WindowFunction<T, R, K, W> function, TypeInformation<R> resultType) { // clean the closures function = input.getExecutionEnvironment().clean(function); reduceFunction = input.getExecutionEnvironment().clean(reduceFunction); final String opName = builder.generateOperatorName(); final String opDescription = builder.generateOperatorDescription(reduceFunction, function); OneInputStreamOperator<T, R> operator = isEnableAsyncState ? builder.asyncReduce(reduceFunction, function) : builder.reduce(reduceFunction, function); return input.transform(opName, resultType, operator).setDescription(opDescription); }
Applies the given window function to each window. The window function is called for each evaluation of the window for each key individually. The output of the window function is interpreted as a regular non-windowed stream. <p>Arriving data is incrementally aggregated using the given reducer. @param reduceFunction The reduce function that is used for incremental aggregation. @param function The window function. @param resultType Type information for the result type of the window function. @return The data stream that is the result of applying the window function to the window.
reduce
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
Apache-2.0
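A minimal sketch of the incremental-aggregation pattern above: the reduce function keeps one running sum per key and window, so the window function sees only a single pre-aggregated element. The window type, the 5-second size, and the sample data are illustrative; env is an assumed StreamExecutionEnvironment.

// assumed imports: org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows,
// org.apache.flink.streaming.api.windowing.windows.TimeWindow, java.time.Duration
DataStream<Tuple2<String, Long>> counts =
        env.fromData(Tuple2.of("a", 1L), Tuple2.of("a", 2L), Tuple2.of("b", 3L));
counts.keyBy(t -> t.f0)
        .window(TumblingProcessingTimeWindows.of(Duration.ofSeconds(5)))
        .reduce(
                (a, b) -> Tuple2.of(a.f0, a.f1 + b.f1), // incremental sum
                new WindowFunction<Tuple2<String, Long>, String, String, TimeWindow>() {
                    @Override
                    public void apply(
                            String key,
                            TimeWindow window,
                            Iterable<Tuple2<String, Long>> input,
                            Collector<String> out) {
                        // exactly one pre-aggregated element per key and window
                        out.collect(key + "@" + window.getEnd() + " = "
                                + input.iterator().next().f1);
                    }
                })
        .print();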
public SingleOutputStreamOperator<T> minBy(String field) { return this.minBy(field, true); }
Applies an aggregation that gives the minimum element of every window of the data stream by the given field. If more elements have the same minimum value the operator returns the first element by default. @param field The field to minimize by @return The transformed DataStream.
minBy
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
Apache-2.0
public SingleOutputStreamOperator<T> maxBy(String field) { return this.maxBy(field, true); }
Applies an aggregation that gives the maximum element of every window of the data stream by the given field. If more elements have the same maximum value the operator returns the first by default. @param field The field to maximize by @return The transformed DataStream.
maxBy
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
Apache-2.0
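A minimal sketch of the field-expression aggregation above; "f1" addresses the Tuple2 value field, the window size and data are illustrative, and env is an assumed StreamExecutionEnvironment.

DataStream<Tuple2<String, Integer>> scores =
        env.fromData(Tuple2.of("a", 3), Tuple2.of("a", 7), Tuple2.of("b", 5));
scores.keyBy(t -> t.f0)
        .window(TumblingProcessingTimeWindows.of(Duration.ofSeconds(10)))
        .maxBy("f1") // whole element with the largest f1 per key and window
        .print();
// minBy("f1") is symmetric; both keep the first element on ties by default.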
public boolean isCheckpointingEnabled() { return getCheckpointInterval() > 0; }
Checks whether checkpointing is enabled. @return True if checkpointing is enabled, false otherwise.
isCheckpointingEnabled
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
Apache-2.0
@Deprecated public org.apache.flink.streaming.api.CheckpointingMode getCheckpointingMode() { return org.apache.flink.streaming.api.CheckpointingMode.convertFromCheckpointingMode( configuration.get(CheckpointingOptions.CHECKPOINTING_CONSISTENCY_MODE)); }
Gets the checkpointing mode (exactly-once vs. at-least-once). @return The checkpointing mode. @deprecated Use {@link #getCheckpointingConsistencyMode} instead.
getCheckpointingMode
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
Apache-2.0
@Deprecated public void setCheckpointingMode( org.apache.flink.streaming.api.CheckpointingMode checkpointingMode) { configuration.set( CheckpointingOptions.CHECKPOINTING_CONSISTENCY_MODE, org.apache.flink.streaming.api.CheckpointingMode.convertToCheckpointingMode( checkpointingMode)); }
Sets the checkpointing mode (exactly-once vs. at-least-once). @param checkpointingMode The checkpointing mode. @deprecated Use {@link #setCheckpointingConsistencyMode} instead.
setCheckpointingMode
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
Apache-2.0
public CheckpointingMode getCheckpointingConsistencyMode() { return configuration.get(CheckpointingOptions.CHECKPOINTING_CONSISTENCY_MODE); }
Gets the checkpointing consistency mode (exactly-once vs. at-least-once). @return The checkpointing mode.
getCheckpointingConsistencyMode
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
Apache-2.0
public void setCheckpointingConsistencyMode(CheckpointingMode checkpointingMode) { configuration.set(CheckpointingOptions.CHECKPOINTING_CONSISTENCY_MODE, checkpointingMode); }
Sets the checkpointing consistency mode (exactly-once vs. at-least-once). @param checkpointingMode The checkpointing mode.
setCheckpointingConsistencyMode
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
Apache-2.0
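A minimal sketch of the non-deprecated consistency-mode API above; CheckpointingMode here is assumed to be the org.apache.flink.core.execution variant, and env is an assumed StreamExecutionEnvironment.

CheckpointConfig config = env.getCheckpointConfig();
config.setCheckpointingConsistencyMode(CheckpointingMode.EXACTLY_ONCE);
// AT_LEAST_ONCE trades possible duplicates for lower checkpoint alignment cost:
// config.setCheckpointingConsistencyMode(CheckpointingMode.AT_LEAST_ONCE);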
public long getCheckpointInterval() { return configuration .getOptional(CheckpointingOptions.CHECKPOINTING_INTERVAL) .map(Duration::toMillis) .orElse(-1L); }
Gets the interval in which checkpoints are periodically scheduled. <p>This setting defines the base interval. Checkpoint triggering may be delayed by the settings {@link #getMaxConcurrentCheckpoints()} and {@link #getMinPauseBetweenCheckpoints()}. @return The checkpoint interval, in milliseconds.
getCheckpointInterval
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
Apache-2.0
public void setCheckpointInterval(long checkpointInterval) { if (checkpointInterval < MINIMAL_CHECKPOINT_TIME) { throw new IllegalArgumentException( String.format( "Checkpoint interval must be larger than or equal to %s ms", MINIMAL_CHECKPOINT_TIME)); } configuration.set( CheckpointingOptions.CHECKPOINTING_INTERVAL, Duration.ofMillis(checkpointInterval)); }
Sets the interval in which checkpoints are periodically scheduled. <p>This setting defines the base interval. Checkpoint triggering may be delayed by the settings {@link #setMaxConcurrentCheckpoints(int)} and {@link #setMinPauseBetweenCheckpoints(long)}. @param checkpointInterval The checkpoint interval, in milliseconds.
setCheckpointInterval
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
Apache-2.0
public long getCheckpointIntervalDuringBacklog() { long intervalDuringBacklog = configuration .getOptional(CheckpointingOptions.CHECKPOINTING_INTERVAL_DURING_BACKLOG) .map(Duration::toMillis) .orElseGet(this::getCheckpointInterval); if (intervalDuringBacklog < MINIMAL_CHECKPOINT_TIME) { intervalDuringBacklog = CheckpointCoordinatorConfiguration.DISABLED_CHECKPOINT_INTERVAL; } long checkpointInterval = getCheckpointInterval(); if (checkpointInterval < MINIMAL_CHECKPOINT_TIME) { checkpointInterval = CheckpointCoordinatorConfiguration.DISABLED_CHECKPOINT_INTERVAL; } if (intervalDuringBacklog < checkpointInterval) { throw new IllegalArgumentException( "Checkpoint interval during backlog must " + "be larger than or equal to that in normal situation."); } return intervalDuringBacklog; }
Gets the interval in which checkpoints are periodically scheduled during backlog. <p>This setting defines the base interval. Checkpoint triggering may be delayed by the settings {@link #getMaxConcurrentCheckpoints()} and {@link #getMinPauseBetweenCheckpoints()}. <p>If not explicitly configured, the checkpoint interval during backlog is the same as in the normal situation (see {@link #getCheckpointInterval()}). If the return value is zero, checkpoints are disabled during backlog. @return The checkpoint interval, in milliseconds.
getCheckpointIntervalDuringBacklog
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
Apache-2.0
public void setCheckpointIntervalDuringBacklog(long checkpointInterval) { if (checkpointInterval != 0 && checkpointInterval < MINIMAL_CHECKPOINT_TIME) { throw new IllegalArgumentException( String.format( "Checkpoint interval must be zero or larger than or equal to %s ms", MINIMAL_CHECKPOINT_TIME)); } configuration.set( CheckpointingOptions.CHECKPOINTING_INTERVAL_DURING_BACKLOG, Duration.ofMillis(checkpointInterval)); }
Sets the interval in which checkpoints are periodically scheduled during backlog. <p>This setting defines the base interval. Checkpoint triggering may be delayed by the settings {@link #setMaxConcurrentCheckpoints(int)} and {@link #setMinPauseBetweenCheckpoints(long)}. <p>If not explicitly configured, the checkpoint interval during backlog is the same as in the normal situation (see {@link #setCheckpointInterval(long)}). If configured to zero, checkpoints are disabled during backlog. @param checkpointInterval The checkpoint interval, in milliseconds.
setCheckpointIntervalDuringBacklog
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
Apache-2.0
public long getCheckpointTimeout() { return configuration.get(CheckpointingOptions.CHECKPOINTING_TIMEOUT).toMillis(); }
Gets the maximum time that a checkpoint may take before being discarded. @return The checkpoint timeout, in milliseconds.
getCheckpointTimeout
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
Apache-2.0
public void setCheckpointTimeout(long checkpointTimeout) { if (checkpointTimeout < MINIMAL_CHECKPOINT_TIME) { throw new IllegalArgumentException( String.format( "Checkpoint timeout must be larger than or equal to %s ms", MINIMAL_CHECKPOINT_TIME)); } configuration.set( CheckpointingOptions.CHECKPOINTING_TIMEOUT, Duration.ofMillis(checkpointTimeout)); }
Sets the maximum time that a checkpoint may take before being discarded. @param checkpointTimeout The checkpoint timeout, in milliseconds.
setCheckpointTimeout
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/CheckpointConfig.java
Apache-2.0
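A minimal sketch tying the interval, backlog-interval, and timeout setters above together; all durations are illustrative and env is an assumed StreamExecutionEnvironment. Note that the backlog interval must be at least as large as the normal interval.

CheckpointConfig config = env.getCheckpointConfig();
config.setCheckpointInterval(60_000);                // checkpoint every minute
config.setCheckpointIntervalDuringBacklog(300_000);  // relax to 5 min while catching up
config.setCheckpointTimeout(120_000);                // discard checkpoints after 2 min
// setting a positive interval is what enables checkpointing in the first place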