code
stringlengths 25
201k
| docstring
stringlengths 19
96.2k
| func_name
stringlengths 0
235
| language
stringclasses 1
value | repo
stringlengths 8
51
| path
stringlengths 11
314
| url
stringlengths 62
377
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
/**
 * Returns the default filter, which excludes files whose names start with "." or "_"
 * or contain the string "_COPYING_".
 *
 * @return The singleton instance of the default file path filter.
 */
public static FilePathFilter createDefaultFilter() {
// Shared stateless singleton; safe to hand out to all callers.
return DefaultFilter.INSTANCE;
}
|
Returns the default filter, which excludes the following files:
<ul>
<li>Files starting with "_"
<li>Files starting with "."
<li>Files containing the string "_COPYING_"
</ul>
@return The singleton instance of the default file path filter.
|
createDefaultFilter
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/io/FilePathFilter.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/FilePathFilter.java
|
Apache-2.0
|
/**
 * The default file path filtering method, used if no other such function is provided.
 * Filters out null paths and files whose names start with "." or "_", or contain the
 * in-flight-copy marker {@code HADOOP_COPYING}.
 *
 * @param filePath The path to check; may be null, in which case it is filtered out.
 * @return True if the path should be excluded, false otherwise.
 */
@Override
public boolean filterPath(Path filePath) {
    // A null path can never be read, so treat it as filtered out.
    if (filePath == null) {
        return true;
    }
    final String fileName = filePath.getName();
    return fileName.startsWith(".")
            || fileName.startsWith("_")
            || fileName.contains(HADOOP_COPYING);
}
|
The default file path filtering method and is used if no other such function is provided.
This filter leaves out files starting with ".", "_", and "_COPYING_".
|
filterPath
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/io/FilePathFilter.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/FilePathFilter.java
|
Apache-2.0
|
/**
 * Adds a single input split, maintaining the invariant that all splits with the current
 * minimum local count sit at the head of the deque (their number is tracked in
 * elementCycleCount) while splits with higher counts are appended at the tail.
 *
 * @param split The input split to add.
 */
public void addInputSplit(LocatableInputSplitWithCount split) {
int localCount = split.getLocalCount();
if (minLocalCount == -1) {
// first split to add; it defines the initial minimum local count
this.minLocalCount = localCount;
this.elementCycleCount = 1;
this.splits.offerFirst(split);
} else if (localCount < minLocalCount) {
// split with a new minimum local count; the previous minimum becomes the next-best
this.nextMinLocalCount = this.minLocalCount;
this.minLocalCount = localCount;
// all other splits have a higher local count than this one
this.elementCycleCount = 1;
splits.offerFirst(split);
} else if (localCount == minLocalCount) {
// another split with the current minimum count; it joins the head group
this.elementCycleCount++;
this.splits.offerFirst(split);
} else {
// split with a higher local count: remember the smallest such count and append it
if (localCount < nextMinLocalCount) {
nextMinLocalCount = localCount;
}
splits.offerLast(split);
}
}
|
Adds a single input split
@param split The input split to add
|
addInputSplit
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/io/LocatableInputSplitAssigner.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/LocatableInputSplitAssigner.java
|
Apache-2.0
|
/**
 * Asynchronously writes a record while honoring the configured limit on concurrent
 * requests. How a record is actually written is defined by the send(Object) override.
 *
 * @param record The record to write asynchronously.
 * @throws IOException if a previous asynchronous write failed.
 */
@Override
public final void writeRecord(OUT record) throws IOException {
// Surface any error reported by an earlier asynchronous write before doing more work.
checkAsyncErrors();
// Acquire a permit to cap the number of in-flight requests
// (presumably backed by 'semaphore' -- confirm in the enclosing class).
tryAcquire(1);
final CompletionStage<V> completionStage;
try {
completionStage = send(record);
} catch (Throwable e) {
// The request never became in-flight, so hand the permit back before rethrowing.
semaphore.release();
throw e;
}
// Route the asynchronous outcome to the callback; the callback is expected to
// release the permit and record failures (not visible here -- confirm in the class).
completionStage.whenComplete(
(result, throwable) -> {
if (throwable == null) {
callback.onSuccess(result);
} else {
callback.onFailure(throwable);
}
});
}
|
Asynchronously write a record and deal with {@link OutputFormatBase#maxConcurrentRequests}.
To specify how a record is written, please override the {@link OutputFormatBase#send(Object)}
method.
|
writeRecord
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/io/OutputFormatBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/OutputFormatBase.java
|
Apache-2.0
|
/**
 * Checks whether a pending asynchronous write reported an error and, if so, rethrows it
 * wrapped in an {@link IOException}.
 *
 * @throws IOException if an asynchronous write failed.
 */
private void checkAsyncErrors() throws IOException {
    // Atomically take ownership of any pending error so it is reported exactly once.
    final Throwable pendingError = throwable.getAndSet(null);
    if (pendingError == null) {
        return;
    }
    throw new IOException("Write record failed", pendingError);
}
|
Checks whether a pending asynchronous write reported an error and, if so, rethrows it
wrapped in an IOException.
|
checkAsyncErrors
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/io/OutputFormatBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/OutputFormatBase.java
|
Apache-2.0
|
/**
 * Closes the format: reports pending asynchronous errors, waits for outstanding writes
 * via flush(), re-checks for errors raised during the flush, and then runs subclass
 * cleanup.
 *
 * @throws IOException if an asynchronous write failed.
 */
@Override
public final void close() throws IOException {
checkAsyncErrors();
flush();
// Errors may also surface while flushing, so check again before the final cleanup.
checkAsyncErrors();
postClose();
}
|
Close the format waiting for pending writes and reports errors.
|
close
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/io/OutputFormatBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/OutputFormatBase.java
|
Apache-2.0
|
/**
 * Opens this InputFormat instance. Called once per parallel instance; subclasses should
 * allocate resources here (e.g. database connections, caches).
 *
 * @throws IOException in case allocating the resources failed.
 */
@PublicEvolving
public void openInputFormat() throws IOException {
// do nothing here, just a hook for subclasses to override
}
|
Opens this InputFormat instance. This method is called once per parallel instance. Resources
should be allocated in this method. (e.g. database connections, cache, etc.)
@see InputFormat
@throws IOException in case allocating the resources failed.
|
openInputFormat
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/io/RichInputFormat.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/RichInputFormat.java
|
Apache-2.0
|
/**
 * Closes this InputFormat instance. Called once per parallel instance; subclasses should
 * release here the resources allocated in {@link #openInputFormat()}.
 *
 * @throws IOException in case closing the resources failed.
 */
@PublicEvolving
public void closeInputFormat() throws IOException {
// do nothing here, just a hook for subclasses to override
}
|
Closes this InputFormat instance. This method is called once per parallel instance. Resources
allocated during {@link #openInputFormat()} should be closed in this method.
@see InputFormat
@throws IOException in case closing the resources failed
|
closeInputFormat
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/io/RichInputFormat.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/RichInputFormat.java
|
Apache-2.0
|
/**
 * Reads an element by delegating to the record's own deserialization logic, reusing the
 * given instance to avoid allocation.
 *
 * @param reuse The instance to read the data into.
 * @param dataInput The input view to read from.
 * @return The reused instance, populated with the read data.
 * @throws IOException if reading from the input view fails.
 */
@Override
protected T deserialize(T reuse, DataInputView dataInput) throws IOException {
reuse.read(dataInput);
return reuse;
}
|
Reads elements by deserializing them with their regular serialization/deserialization
functionality.
@see SerializedOutputFormat
|
deserialize
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/io/SerializedInputFormat.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/SerializedInputFormat.java
|
Apache-2.0
|
/**
 * Stores an element by delegating to the record's own serialization logic.
 *
 * @param record The record to write.
 * @param dataOutputView The output view to write to.
 * @throws IOException if writing to the output view fails.
 */
@Override
protected void serialize(T record, DataOutputView dataOutputView) throws IOException {
record.write(dataOutputView);
}
|
Stores elements by serializing them with their regular serialization/deserialization
functionality.
@see SerializedInputFormat
|
serialize
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/io/SerializedOutputFormat.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/SerializedOutputFormat.java
|
Apache-2.0
|
/**
 * Returns the map of broadcast inputs, keyed by the broadcast variable name.
 *
 * @return The map from broadcast variable names to their root operators.
 */
public Map<String, Operator<?>> getBroadcastInputs() {
return this.broadcastInputs;
}
|
Returns the map of broadcast inputs, mapping broadcast variable names to their root operators.
@return The map of broadcast inputs.
|
getBroadcastInputs
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/AbstractUdfOperator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/AbstractUdfOperator.java
|
Apache-2.0
|
/**
 * Binds the result produced by a plan rooted at {@code root} to the broadcast variable
 * with the given name, for use by the UDF wrapped in this operator.
 *
 * @param name The name of the broadcast variable; must not be null.
 * @param root The root of the plan producing this input; must not be null.
 * @throws IllegalArgumentException if name or root is null.
 */
public void setBroadcastVariable(String name, Operator<?> root) {
if (name == null) {
throw new IllegalArgumentException("The broadcast input name may not be null.");
}
if (root == null) {
throw new IllegalArgumentException(
"The broadcast input root operator may not be null.");
}
this.broadcastInputs.put(name, root);
}
|
Binds the result produced by a plan rooted at {@code root} to a variable used by the UDF
wrapped in this operator.
@param root The root of the plan producing this input.
|
setBroadcastVariable
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/AbstractUdfOperator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/AbstractUdfOperator.java
|
Apache-2.0
|
/**
 * Clears all previous broadcast inputs and binds the given inputs as the broadcast
 * variables of this operator.
 *
 * @param <T> The type produced by the input operators.
 * @param inputs The name-to-root-operator pairs to set as broadcast inputs.
 */
public <T> void setBroadcastVariables(Map<String, Operator<T>> inputs) {
this.broadcastInputs.clear();
this.broadcastInputs.putAll(inputs);
}
|
Clears all previous broadcast inputs and binds the given inputs as broadcast variables of
this operator.
@param inputs The {@code<name, root>} pairs to be set as broadcast inputs.
|
setBroadcastVariables
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/AbstractUdfOperator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/AbstractUdfOperator.java
|
Apache-2.0
|
/**
 * Generic utility that wraps a single class object into a one-element array of that
 * class type.
 *
 * @param <U> The type of the class.
 * @param clazz The class object to be wrapped.
 * @return An array containing only the given class object.
 */
protected static <U> Class<U>[] asArray(Class<U> clazz) {
    // Generic arrays cannot be created directly; the cast is safe because the array
    // only ever holds the given Class<U> instance.
    @SuppressWarnings("unchecked")
    final Class<U>[] singleton = (Class<U>[]) new Class<?>[] {clazz};
    return singleton;
}
|
Generic utility function that wraps a single class object into an array of that class type.
@param <U> The type of the classes.
@param clazz The class object to be wrapped.
@return An array wrapping the class object.
|
asArray
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/AbstractUdfOperator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/AbstractUdfOperator.java
|
Apache-2.0
|
/**
 * Generic utility that returns an empty class array.
 *
 * @param <U> The type of the classes.
 * @return An empty array of type {@code Class<U>}.
 */
protected static <U> Class<U>[] emptyClassArray() {
    // The cast is safe: an empty array can never expose an element of the wrong type.
    @SuppressWarnings("unchecked")
    final Class<U>[] empty = (Class<U>[]) new Class<?>[0];
    return empty;
}
|
Generic utility function that returns an empty class array.
@param <U> The type of the classes.
@return An empty array of type <tt>Class<U></tt>.
|
emptyClassArray
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/AbstractUdfOperator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/AbstractUdfOperator.java
|
Apache-2.0
|
/**
 * Gets the FieldSets that are known to contain only unique values.
 *
 * @return The set of unique FieldSets, or null if none have been registered.
 */
public Set<FieldSet> getUniqueFields() {
return this.uniqueFields;
}
|
Gets the FieldSets that are unique
@return List of FieldSet that are unique
|
getUniqueFields
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/CompilerHints.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/CompilerHints.java
|
Apache-2.0
|
/**
 * Registers a FieldSet as containing only unique values.
 *
 * @param uniqueFieldSet The unique FieldSet to register.
 */
public void addUniqueField(FieldSet uniqueFieldSet) {
// Lazily create the backing set on first registration.
if (this.uniqueFields == null) {
this.uniqueFields = new HashSet<FieldSet>();
}
this.uniqueFields.add(uniqueFieldSet);
}
|
Adds a FieldSet to be unique
@param uniqueFieldSet The unique FieldSet
|
addUniqueField
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/CompilerHints.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/CompilerHints.java
|
Apache-2.0
|
/**
 * Registers a single field as having only unique values.
 *
 * @param field The index of the field with unique values.
 */
public void addUniqueField(int field) {
// Lazily create the backing set on first registration.
if (this.uniqueFields == null) {
this.uniqueFields = new HashSet<FieldSet>();
}
this.uniqueFields.add(new FieldSet(field));
}
|
Adds a field as having only unique values.
@param field The field with unique values.
|
addUniqueField
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/CompilerHints.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/CompilerHints.java
|
Apache-2.0
|
/**
 * Returns the first input operator, or null if none has been set.
 *
 * @return The contract's first input.
 */
public Operator<IN1> getFirstInput() {
return this.input1;
}
|
Returns the first input, or null, if none is set.
@return The contract's first input.
|
getFirstInput
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/DualInputOperator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/DualInputOperator.java
|
Apache-2.0
|
/**
 * Returns the second input operator, or null if none has been set.
 *
 * @return The contract's second input.
 */
public Operator<IN2> getSecondInput() {
return this.input2;
}
|
Returns the second input, or null, if none is set.
@return The contract's second input.
|
getSecondInput
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/DualInputOperator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/DualInputOperator.java
|
Apache-2.0
|
/**
 * Connects the given operator as the first input, replacing any previous connection.
 *
 * @param input The operator to connect as the first input.
 */
public void setFirstInput(Operator<IN1> input) {
this.input1 = input;
}
|
Clears all previous connections and connects the first input to the task wrapped in this
contract
@param input The contract that is connected as the first input.
|
setFirstInput
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/DualInputOperator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/DualInputOperator.java
|
Apache-2.0
|
/**
 * Returns this operator's input operator.
 *
 * @return This operator's input.
 */
public Operator<IN> getInput() {
return this.input;
}
|
Returns this operator's input operator.
@return This operator's input.
|
getInput
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSinkBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSinkBase.java
|
Apache-2.0
|
/**
 * Gets the order in which the data sink writes its data locally, i.e. within each
 * fragment of the file in the distributed file system (not across fragments).
 *
 * @return The configured local ordering of the sink's output.
 */
public Ordering getLocalOrder() {
return this.localOrdering;
}
|
Gets the order, in which the data sink writes its data locally. Local order means that within
each fragment of the file inside the distributed file system, the data is ordered, but not
across file fragments.
@return NONE, if the sink writes data in any order, or ASCENDING (resp. DESCENDING), if the
sink writes it data with a local ascending (resp. descending) order.
|
getLocalOrder
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSinkBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSinkBase.java
|
Apache-2.0
|
/**
 * Sets the order in which the sink must write its data within each fragment in the
 * distributed file system.
 *
 * @param localOrder The local order to write the data in.
 */
public void setLocalOrder(Ordering localOrder) {
this.localOrdering = localOrder;
}
|
Sets the order in which the sink must write its data within each fragment in the distributed
file system. For any value other than <tt>NONE</tt>, this will cause the system to perform a
local sort, or try to reuse an order from a previous operation.
@param localOrder The local order to write the data in.
|
setLocalOrder
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSinkBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSinkBase.java
|
Apache-2.0
|
/**
 * Gets the user-code wrapper holding this sink's output format.
 *
 * @return The wrapper around the output format.
 */
public UserCodeWrapper<? extends OutputFormat<IN>> getFormatWrapper() {
return this.formatWrapper;
}
|
Gets the class describing this sink's output format.
@return The output format class.
|
getFormatWrapper
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSinkBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSinkBase.java
|
Apache-2.0
|
/**
 * Gets the user-code wrapper describing the input format.
 *
 * @return The wrapper around the input format.
 */
public UserCodeWrapper<? extends T> getFormatWrapper() {
return this.formatWrapper;
}
|
Gets the class describing the input format.
@return The class describing the input format.
|
getFormatWrapper
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSourceBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSourceBase.java
|
Apache-2.0
|
/**
 * Gets the key under which statistics about this data source may be obtained from the
 * statistics cache.
 *
 * @return The statistics cache key.
 */
public String getStatisticsKey() {
return this.statisticsKey;
}
|
Gets the key under which statistics about this data source may be obtained from the
statistics cache.
@return The statistics cache key.
|
getStatisticsKey
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSourceBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSourceBase.java
|
Apache-2.0
|
/**
 * Sets the key under which statistics about this data source may be obtained from the
 * statistics cache. Useful for testing, when providing mock statistics.
 *
 * @param statisticsKey The key for the statistics object.
 */
public void setStatisticsKey(String statisticsKey) {
this.statisticsKey = statisticsKey;
}
|
Sets the key under which statistics about this data source may be obtained from the
statistics cache. Useful for testing purposes, when providing mock statistics.
@param statisticsKey The key for the statistics object.
|
setStatisticsKey
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSourceBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSourceBase.java
|
Apache-2.0
|
/**
 * Sets the properties of this data source's input splits. Split properties can help to
 * generate more efficient execution plans.
 * <b>IMPORTANT: Providing wrong split data properties can cause wrong results!</b>
 *
 * @param splitDataProperties The data properties of this data source's splits.
 */
public void setSplitDataProperties(SplitDataProperties<OUT> splitDataProperties) {
this.splitProperties = splitDataProperties;
}
|
Sets properties of input splits for this data source. Split properties can help to generate
more efficient execution plans. <br>
<b> IMPORTANT: Providing wrong split data properties can cause wrong results! </b>
@param splitDataProperties The data properties of this data source's splits.
|
setSplitDataProperties
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSourceBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSourceBase.java
|
Apache-2.0
|
/**
 * Returns the data properties of this data source's splits.
 *
 * @return The split data properties, or null if none have been set.
 */
public SplitDataProperties<OUT> getSplitDataProperties() {
return this.splitProperties;
}
|
Returns the data properties of this data source's splits.
@return The data properties of this data source's splits or null if no properties have been
set.
|
getSplitDataProperties
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSourceBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/GenericDataSourceBase.java
|
Apache-2.0
|
/**
 * Returns the default options used to configure enqueued mails.
 *
 * @return The shared default {@code MailOptions} instance.
 */
static MailOptions options() {
return MailOptionsImpl.DEFAULT;
}
|
Extra options to configure enqueued mails.
|
options
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/MailboxExecutor.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/MailboxExecutor.java
|
Apache-2.0
|
/**
 * Returns options marking a mail as deferrable. The runtime may postpone execution of
 * deferrable mails, e.g. skipping them during yield()/tryYield() to unblock the subtask
 * thread quickly.
 *
 * @return The shared deferrable {@code MailOptions} instance.
 */
static MailOptions deferrable() {
return MailOptionsImpl.DEFERRABLE;
}
|
Mark this mail as deferrable.
<p>Runtime can decide to defer execution of deferrable mails. For example, to unblock
subtask thread as quickly as possible, deferrable mails are not executed during {@link
#yield()} or {@link #tryYield()}. This is done to speed up checkpointing, by skipping
execution of potentially long-running mails.
|
deferrable
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/MailboxExecutor.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/MailboxExecutor.java
|
Apache-2.0
|
/**
 * Executes the given command in the mailbox thread at some future time, with no
 * description arguments.
 *
 * @param command the runnable task to add to the mailbox for execution.
 * @param description the description used for debugging and error-reporting.
 */
default void execute(ThrowingRunnable<? extends Exception> command, String description) {
execute(command, description, EMPTY_ARGS);
}
|
Executes the given command at some time in the future in the mailbox thread.
<p>An optional description can (and should) be added to ease debugging and error-reporting.
The description may contain placeholders that refer to the provided description arguments
using {@link java.util.Formatter} syntax. The actual description is only formatted on demand.
@param command the runnable task to add to the mailbox for execution.
@param description the optional description for the command that is used for debugging and
error-reporting.
@throws RejectedExecutionException if this task cannot be accepted for execution, e.g.
because the mailbox is quiesced or closed.
|
execute
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/MailboxExecutor.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/MailboxExecutor.java
|
Apache-2.0
|
/**
 * Executes the given command in the mailbox thread at some future time, with the given
 * mail options and no description arguments.
 *
 * @param mailOptions additional options to configure behaviour of the command.
 * @param command the runnable task to add to the mailbox for execution.
 * @param description the description used for debugging and error-reporting.
 */
default void execute(
MailOptions mailOptions,
ThrowingRunnable<? extends Exception> command,
String description) {
execute(mailOptions, command, description, EMPTY_ARGS);
}
|
Executes the given command at some time in the future in the mailbox thread.
<p>An optional description can (and should) be added to ease debugging and error-reporting.
The description may contain placeholders that refer to the provided description arguments
using {@link java.util.Formatter} syntax. The actual description is only formatted on demand.
@param mailOptions additional options to configure behaviour of the {@code command}
@param command the runnable task to add to the mailbox for execution.
@param description the optional description for the command that is used for debugging and
error-reporting.
@throws RejectedExecutionException if this task cannot be accepted for execution, e.g.
because the mailbox is quiesced or closed.
|
execute
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/MailboxExecutor.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/MailboxExecutor.java
|
Apache-2.0
|
/**
 * Executes the given command in the mailbox thread at some future time, using the
 * default mail options. The description may contain {@link java.util.Formatter}
 * placeholders filled from the given arguments; formatting happens only on demand.
 *
 * @param command the runnable task to add to the mailbox for execution.
 * @param descriptionFormat the description format used for debugging and error-reporting.
 * @param descriptionArgs the parameters used to format the final description string.
 */
default void execute(
ThrowingRunnable<? extends Exception> command,
String descriptionFormat,
Object... descriptionArgs) {
execute(MailOptions.options(), command, descriptionFormat, descriptionArgs);
}
|
Executes the given command at some time in the future in the mailbox thread.
<p>An optional description can (and should) be added to ease debugging and error-reporting.
The description may contain placeholders that refer to the provided description arguments
using {@link java.util.Formatter} syntax. The actual description is only formatted on demand.
@param command the runnable task to add to the mailbox for execution.
@param descriptionFormat the optional description for the command that is used for debugging
and error-reporting.
@param descriptionArgs the parameters used to format the final description string.
@throws RejectedExecutionException if this task cannot be accepted for execution, e.g.
because the mailbox is quiesced or closed.
|
execute
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/MailboxExecutor.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/MailboxExecutor.java
|
Apache-2.0
|
/**
 * Returns whether the runtime may defer execution of this mail.
 *
 * @return True if the mail is deferrable, false otherwise.
 */
public boolean isDeferrable() {
return deferrable;
}
|
Returns whether this mail is deferrable, i.e. whether the runtime may postpone its execution.
|
isDeferrable
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/MailOptionsImpl.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/MailOptionsImpl.java
|
Apache-2.0
|
/**
 * Gets the name of the contract instance. The name is only used to describe the
 * instance in logging output and graphical representations.
 *
 * @return The contract instance's name.
 */
public String getName() {
return this.name;
}
|
Gets the name of the contract instance. The name is only used to describe the contract
instance in logging output and graphical representations.
@return The contract instance's name.
|
getName
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
Apache-2.0
|
/**
 * Gets the compiler hints for this contract instance, which the compiler evaluates when
 * generating plan alternatives.
 *
 * @return The compiler hints object.
 */
public CompilerHints getCompilerHints() {
return this.compilerHints;
}
|
Gets the compiler hints for this contract instance. In the compiler hints, different fields
may be set (for example the selectivity) that will be evaluated by the pact compiler when
generating plan alternatives.
@return The compiler hints object.
|
getCompilerHints
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
Apache-2.0
|
/**
 * Gets the stub parameters of this contract, a configuration that is accessible by the
 * user code at runtime.
 *
 * @return The configuration containing the stub parameters.
 */
public Configuration getParameters() {
return this.parameters;
}
|
Gets the stub parameters of this contract. The stub parameters are a map that maps string
keys to string or integer values. The map is accessible by the user code at runtime.
Parameters that the user code needs to access at runtime to configure its behavior are
typically stored in that configuration object.
@return The configuration containing the stub parameters.
|
getParameters
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
Apache-2.0
|
/**
 * Sets a stub parameter in the configuration of this contract, making it accessible to
 * the user code at runtime.
 *
 * @see #getParameters()
 * @param key The parameter key.
 * @param value The parameter value.
 */
public void setParameter(String key, String value) {
this.parameters.setString(key, value);
}
|
Sets a stub parameters in the configuration of this contract. The stub parameters are
accessible by the user code at runtime. Parameters that the user code needs to access at
runtime to configure its behavior are typically stored as stub parameters.
@see #getParameters()
@param key The parameter key.
@param value The parameter value.
|
setParameter
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
Apache-2.0
|
/**
 * Gets the parallelism for this contract instance, i.e. how many parallel instances of
 * the user function will be spawned during execution.
 *
 * @return The parallelism.
 */
public int getParallelism() {
return this.parallelism;
}
|
Gets the parallelism for this contract instance. The parallelism denotes how many parallel
instances of the user function will be spawned during the execution. If this value is {@link
ExecutionConfig#PARALLELISM_DEFAULT}, then the system will decide the number of parallel
instances by itself.
@return The parallelism.
|
getParallelism
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
Apache-2.0
|
/**
 * Sets the parallelism for this contract instance, i.e. how many parallel instances of
 * the user function will be spawned during execution.
 *
 * @param parallelism The number of parallel instances to spawn.
 */
public void setParallelism(int parallelism) {
this.parallelism = parallelism;
}
|
Sets the parallelism for this contract instance. The parallelism denotes how many parallel
instances of the user function will be spawned during the execution.
@param parallelism The number of parallel instances to spawn. Set this value to {@link
ExecutionConfig#PARALLELISM_DEFAULT} to let the system decide on its own.
|
setParallelism
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
Apache-2.0
|
/**
 * Gets the minimum resources for this operator, i.e. the resources needed at minimum
 * during execution.
 *
 * @return The minimum resources of this operator.
 */
@PublicEvolving
public ResourceSpec getMinResources() {
return this.minResources;
}
|
Gets the minimum resources for this operator. The minimum resources denotes how many
resources will be needed at least minimum for the operator or user function during the
execution.
@return The minimum resources of this operator.
|
getMinResources
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
Apache-2.0
|
/**
 * Gets the preferred resources for this contract instance, i.e. the resources needed at
 * maximum by the user function during execution.
 *
 * @return The preferred resource of this operator.
 */
@PublicEvolving
public ResourceSpec getPreferredResources() {
return this.preferredResources;
}
|
Gets the preferred resources for this contract instance. The preferred resources denote how
many resources will be needed in the maximum for the user function during the execution.
@return The preferred resource of this operator.
|
getPreferredResources
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
Apache-2.0
|
/**
 * Sets the minimum and preferred resources for this contract instance.
 *
 * @param minResources The minimum resource of this operator.
 * @param preferredResources The preferred resource of this operator.
 */
@PublicEvolving
public void setResources(ResourceSpec minResources, ResourceSpec preferredResources) {
this.minResources = minResources;
this.preferredResources = preferredResources;
}
|
Sets the minimum and preferred resources for this contract instance. The resource denotes how
many memories and cpu cores of the user function will be consumed during the execution.
@param minResources The minimum resource of this operator.
@param preferredResources The preferred resource of this operator.
|
setResources
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
Apache-2.0
|
/**
 * Gets the user code wrapper. This base implementation carries no user code and returns
 * null; presumably overridden by subclasses wrapping a stub or format -- confirm there.
 *
 * @return The wrapper with the user code, or null if there is none.
 */
public UserCodeWrapper<?> getUserCodeWrapper() {
return null;
}
|
Gets the user code wrapper. In the case of a pact, that object will be the stub with the user
function, in the case of an input or output format, it will be the format object.
@return The class with the user code.
|
getUserCodeWrapper
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/Operator.java
|
Apache-2.0
|
/**
 * Merges another resource spec into this one; used by the system internally when
 * chaining operators during job-graph generation.
 *
 * <p>If either side is {@code UNKNOWN}, the merged result is {@code UNKNOWN}. Otherwise
 * numeric resources are added up and extended resources with the same name are merged
 * pairwise.
 *
 * @param other Reference to the resource to merge in; must not be null.
 * @return A new resource spec holding the merged values.
 */
public ResourceSpec merge(final ResourceSpec other) {
    checkNotNull(other, "Cannot merge with null resources");
    // UNKNOWN is absorbing: merging anything with it yields UNKNOWN.
    if (this.equals(UNKNOWN) || other.equals(UNKNOWN)) {
        return UNKNOWN;
    }

    // Combine extended resources by name; same-named entries are merged pairwise.
    final Map<String, ExternalResource> mergedExtendedResources =
            new HashMap<>(extendedResources);
    for (Map.Entry<String, ExternalResource> entry : other.extendedResources.entrySet()) {
        mergedExtendedResources.merge(entry.getKey(), entry.getValue(), ExternalResource::merge);
    }

    return new ResourceSpec(
            this.cpuCores.merge(other.cpuCores),
            this.taskHeapMemory.add(other.taskHeapMemory),
            this.taskOffHeapMemory.add(other.taskOffHeapMemory),
            this.managedMemory.add(other.managedMemory),
            mergedExtendedResources);
}
|
Used by system internally to merge the other resources of chained operators when generating
the job graph.
@param other Reference to resource to merge in.
@return The new resource with merged values.
|
merge
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/ResourceSpec.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/ResourceSpec.java
|
Apache-2.0
|
/**
 * Gets the type information of this operator's input.
 *
 * @return The input type.
 */
public TypeInformation<IN> getInputType() {
return inputType;
}
|
Gets the type information of the operator's input.
@return The input type of the first input.
|
getInputType
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/UnaryOperatorInformation.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/UnaryOperatorInformation.java
|
Apache-2.0
|
/**
 * Returns the operator representing the partial solution of the iteration.
 *
 * @return The partial-solution placeholder operator.
 */
public Operator<T> getPartialSolution() {
return this.inputPlaceHolder;
}
|
@return The operator representing the partial solution.
|
getPartialSolution
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/BulkIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/BulkIterationBase.java
|
Apache-2.0
|
/**
 * Returns the operator representing the next partial solution of the iteration.
 *
 * @return The iteration-result operator.
 */
public Operator<T> getNextPartialSolution() {
return this.iterationResult;
}
|
@return The operator representing the next partial solution.
|
getNextPartialSolution
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/BulkIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/BulkIterationBase.java
|
Apache-2.0
|
/**
 * Returns the operator representing the termination criterion of the iteration.
 *
 * @return The termination-criterion operator.
 */
public Operator<?> getTerminationCriterion() {
return this.terminationCriterion;
}
|
@return The operator representing the termination criterion.
|
getTerminationCriterion
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/BulkIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/BulkIterationBase.java
|
Apache-2.0
|
/**
 * The BulkIteration meta operator cannot have broadcast inputs.
 *
 * @return An empty (immutable) map.
 */
public Map<String, Operator<?>> getBroadcastInputs() {
return Collections.emptyMap();
}
|
The BulkIteration meta operator cannot have broadcast inputs.
@return An empty map.
|
getBroadcastInputs
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/BulkIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/BulkIterationBase.java
|
Apache-2.0
|
public void setBroadcastVariable(String name, Operator<?> root) {
throw new UnsupportedOperationException(
"The BulkIteration meta operator cannot have broadcast inputs.");
}
|
The BulkIteration meta operator cannot have broadcast inputs. This method always throws an
exception.
@param name Ignored.
@param root Ignored.
|
setBroadcastVariable
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/BulkIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/BulkIterationBase.java
|
Apache-2.0
|
public <X> void setBroadcastVariables(Map<String, Operator<X>> inputs) {
throw new UnsupportedOperationException(
"The BulkIteration meta operator cannot have broadcast inputs.");
}
|
The BulkIteration meta operator cannot have broadcast inputs. This method always throws an
exception.
@param inputs Ignored
|
setBroadcastVariables
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/BulkIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/BulkIterationBase.java
|
Apache-2.0
|
public Operator<ST> getSolutionSet() {
return this.solutionSetPlaceholder;
}
|
Gets the contract that represents the solution set for the step function.
@return The solution set for the step function.
|
getSolutionSet
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
Apache-2.0
|
public Operator<WT> getWorkset() {
return this.worksetPlaceholder;
}
|
Gets the contract that represents the workset for the step function.
@return The workset for the step function.
|
getWorkset
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
Apache-2.0
|
public void setNextWorkset(Operator<WT> result) {
this.nextWorkset = result;
}
|
Sets the contract of the step function that represents the next workset. This contract is
considered one of the two sinks of the step function (the other one being the solution set
delta).
@param result The contract representing the next workset.
|
setNextWorkset
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
Apache-2.0
|
public Operator<WT> getNextWorkset() {
return this.nextWorkset;
}
|
Gets the contract that has been set as the next workset.
@return The contract that has been set as the next workset.
|
getNextWorkset
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
Apache-2.0
|
public void setSolutionSetDelta(Operator<ST> delta) {
this.solutionSetDelta = delta;
}
|
Sets the contract of the step function that represents the solution set delta. This contract
is considered one of the two sinks of the step function (the other one being the next
workset).
@param delta The contract representing the solution set delta.
|
setSolutionSetDelta
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
Apache-2.0
|
public Operator<ST> getSolutionSetDelta() {
return this.solutionSetDelta;
}
|
Gets the contract that has been set as the solution set delta.
@return The contract that has been set as the solution set delta.
|
getSolutionSetDelta
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
Apache-2.0
|
public Operator<ST> getInitialSolutionSet() {
return getFirstInput();
}
|
Returns the initial solution set input, or null, if none is set.
@return The iteration's initial solution set input.
|
getInitialSolutionSet
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
Apache-2.0
|
public Operator<WT> getInitialWorkset() {
return getSecondInput();
}
|
Returns the initial workset input, or null, if none is set.
@return The iteration's workset input.
|
getInitialWorkset
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
Apache-2.0
|
public void setBroadcastVariable(String name, Operator<?> root) {
throw new UnsupportedOperationException(
"The DeltaIteration meta operator cannot have broadcast inputs.");
}
|
The DeltaIteration meta operator cannot have broadcast inputs. This method always throws an
exception.
@param name Ignored.
@param root Ignored.
|
setBroadcastVariable
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
Apache-2.0
|
public <X> void setBroadcastVariables(Map<String, Operator<X>> inputs) {
throw new UnsupportedOperationException(
"The DeltaIteration meta operator cannot have broadcast inputs.");
}
|
The DeltaIteration meta operator cannot have broadcast inputs. This method always throws an
exception.
@param inputs Ignored
|
setBroadcastVariables
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
Apache-2.0
|
public void setSolutionSetUnManaged(boolean solutionSetUnManaged) {
this.solutionSetUnManaged = solutionSetUnManaged;
}
|
Sets whether to keep the solution set in managed memory (safe against heap exhaustion) or
unmanaged memory (objects on heap).
@param solutionSetUnManaged True to keep the solution set in unmanaged memory, false to keep
it in managed memory.
@see #isSolutionSetUnManaged()
|
setSolutionSetUnManaged
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
Apache-2.0
|
public boolean isSolutionSetUnManaged() {
return solutionSetUnManaged;
}
|
gets whether the solution set is in managed or unmanaged memory.
@return True, if the solution set is in unmanaged memory (object heap), false if in managed
memory.
@see #setSolutionSetUnManaged(boolean)
|
isSolutionSetUnManaged
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/DeltaIterationBase.java
|
Apache-2.0
|
public void setCombinable(boolean combinable) {
// sanity check
if (combinable
&& !GroupCombineFunction.class.isAssignableFrom(
this.userFunction.getUserCodeClass())) {
throw new IllegalArgumentException(
"Cannot set a UDF as combinable if it does not implement the interface "
+ GroupCombineFunction.class.getName());
} else {
this.combinable = combinable;
}
}
|
Marks the group reduce operation as combinable. Combinable operations may pre-reduce the data
before the actual group reduce operations. Combinable user-defined functions must implement
the interface {@link GroupCombineFunction}.
@param combinable Flag to mark the group reduce operation as combinable.
|
setCombinable
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/base/GroupReduceOperatorBase.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/base/GroupReduceOperatorBase.java
|
Apache-2.0
|
public FieldList toFieldList() {
int[] pos = toArray();
Arrays.sort(pos);
return new FieldList(pos);
}
|
Turns the FieldSet into an ordered FieldList.
@return An ordered FieldList.
|
toFieldList
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/util/FieldSet.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/util/FieldSet.java
|
Apache-2.0
|
public int[] toArray() {
int[] a = new int[this.collection.size()];
int i = 0;
for (int col : this.collection) {
a[i++] = col;
}
return a;
}
|
Transforms the field set into an array of field IDs. Whether the IDs are ordered or unordered
depends on the specific subclass of the field set.
@return An array of all contained field IDs.
|
toArray
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/util/FieldSet.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/operators/util/FieldSet.java
|
Apache-2.0
|
@Override
public TypeInformation<T> getProducedType() {
return type;
}
|
Gets the type produced by this deserializer. This is the type that was passed to the
constructor, or reflectively inferred (if the default constructor was called).
|
getProducedType
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/serialization/AbstractDeserializationSchema.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/serialization/AbstractDeserializationSchema.java
|
Apache-2.0
|
@PublicEvolving
default void open(InitializationContext context) throws Exception {}
|
Initialization method for the schema. It is called before the actual working methods {@link
#deserialize} and thus suitable for one time setup work.
<p>The provided {@link InitializationContext} can be used to access additional features such
as e.g. registering user metrics.
@param context Contextual information that can be used during initialization.
|
open
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/serialization/DeserializationSchema.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/serialization/DeserializationSchema.java
|
Apache-2.0
|
public <T extends Serializer<?> & Serializable> void addDefaultKryoSerializer(
Class<?> type, T serializer) {
if (type == null || serializer == null) {
throw new NullPointerException("Cannot register null class or serializer.");
}
defaultKryoSerializers.put(type, new SerializableSerializer<>(serializer));
}
|
Adds a new Kryo default serializer to the Runtime.
<p>Note that the serializer instance must be serializable (as defined by
java.io.Serializable), because it may be distributed to the worker nodes by java
serialization.
@param type The class of the types serialized with the given serializer.
@param serializer The serializer to use.
|
addDefaultKryoSerializer
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
Apache-2.0
|
public void addDefaultKryoSerializer(
Class<?> type, Class<? extends Serializer<?>> serializerClass) {
if (type == null || serializerClass == null) {
throw new NullPointerException("Cannot register null class or serializer.");
}
defaultKryoSerializerClasses.put(type, serializerClass);
}
|
Adds a new Kryo default serializer to the Runtime.
@param type The class of the types serialized with the given serializer.
@param serializerClass The class of the serializer to use.
|
addDefaultKryoSerializer
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
Apache-2.0
|
public <T extends Serializer<?> & Serializable> void registerTypeWithKryoSerializer(
Class<?> type, T serializer) {
if (type == null || serializer == null) {
throw new NullPointerException("Cannot register null class or serializer.");
}
registeredTypesWithKryoSerializers.put(type, new SerializableSerializer<>(serializer));
}
|
Registers the given type with a Kryo Serializer.
<p>Note that the serializer instance must be serializable (as defined by
java.io.Serializable), because it may be distributed to the worker nodes by java
serialization.
@param type The class of the types serialized with the given serializer.
@param serializer The serializer to use.
|
registerTypeWithKryoSerializer
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
Apache-2.0
|
@SuppressWarnings("rawtypes")
public void registerTypeWithKryoSerializer(
Class<?> type, Class<? extends Serializer> serializerClass) {
if (type == null || serializerClass == null) {
throw new NullPointerException("Cannot register null class or serializer.");
}
@SuppressWarnings("unchecked")
Class<? extends Serializer<?>> castedSerializerClass =
(Class<? extends Serializer<?>>) serializerClass;
registeredTypesWithKryoSerializerClasses.put(type, castedSerializerClass);
}
|
Registers the given Serializer via its class as a serializer for the given type at the
KryoSerializer.
@param type The class of the types serialized with the given serializer.
@param serializerClass The class of the serializer to use.
|
registerTypeWithKryoSerializer
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
Apache-2.0
|
public LinkedHashMap<Class<?>, SerializableSerializer<?>>
getRegisteredTypesWithKryoSerializers() {
return registeredTypesWithKryoSerializers;
}
|
Returns the registered types with Kryo Serializers.
|
getRegisteredTypesWithKryoSerializers
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
Apache-2.0
|
public LinkedHashMap<Class<?>, Class<? extends Serializer<?>>>
getRegisteredTypesWithKryoSerializerClasses() {
return registeredTypesWithKryoSerializerClasses;
}
|
Returns the registered types with their Kryo Serializer classes.
|
getRegisteredTypesWithKryoSerializerClasses
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
Apache-2.0
|
public LinkedHashMap<Class<?>, SerializableSerializer<?>> getDefaultKryoSerializers() {
return defaultKryoSerializers;
}
|
Returns the registered default Kryo Serializers.
|
getDefaultKryoSerializers
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
Apache-2.0
|
public LinkedHashMap<Class<?>, Class<? extends Serializer<?>>>
getDefaultKryoSerializerClasses() {
return defaultKryoSerializerClasses;
}
|
Returns the registered default Kryo Serializer classes.
|
getDefaultKryoSerializerClasses
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
Apache-2.0
|
public Map<Class<?>, Class<? extends TypeInfoFactory<?>>> getRegisteredTypeInfoFactories() {
return registeredTypeInfoFactories;
}
|
Returns the registered type info factories.
|
getRegisteredTypeInfoFactories
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java
|
Apache-2.0
|
@Override
public void encode(IN element, OutputStream stream) throws IOException {
if (charset == null) {
charset = Charset.forName(charsetName);
}
stream.write(element.toString().getBytes(charset));
stream.write('\n');
}
|
Creates a new {@code StringWriter} that uses the given charset to convert strings to bytes.
@param charsetName Name of the charset to be used, must be valid input for {@code
Charset.forName(charsetName)}
|
encode
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/serialization/SimpleStringEncoder.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/serialization/SimpleStringEncoder.java
|
Apache-2.0
|
@Override
public T deserialize(byte[] message) {
if (dis != null) {
dis.setBuffer(message);
} else {
dis = new DataInputDeserializer(message);
}
try {
return serializer.deserialize(dis);
} catch (IOException e) {
throw new RuntimeException("Unable to deserialize message", e);
}
}
|
Creates a new de-/serialization schema for the given type.
@param typeInfo The type information for the type de-/serialized by this schema.
@param serializer The serializer to use for de-/serialization.
|
deserialize
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/serialization/TypeInformationSerializationSchema.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/serialization/TypeInformationSerializationSchema.java
|
Apache-2.0
|
public TypeSerializer<UK> getKeySerializer() {
final TypeSerializer<Map<UK, UV>> rawSerializer = getSerializer();
if (!(rawSerializer instanceof MapSerializer)) {
throw new IllegalStateException("Unexpected serializer type.");
}
return ((MapSerializer<UK, UV>) rawSerializer).getKeySerializer();
}
|
Gets the serializer for the keys in the state.
@return The serializer for the keys in the state.
|
getKeySerializer
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java
|
Apache-2.0
|
public TypeSerializer<UV> getValueSerializer() {
final TypeSerializer<Map<UK, UV>> rawSerializer = getSerializer();
if (!(rawSerializer instanceof MapSerializer)) {
throw new IllegalStateException("Unexpected serializer type.");
}
return ((MapSerializer<UK, UV>) rawSerializer).getValueSerializer();
}
|
Gets the serializer for the values in the state.
@return The serializer for the values in the state.
|
getValueSerializer
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java
|
Apache-2.0
|
@Deprecated
public void setQueryable(String queryableStateName) {
Preconditions.checkArgument(
ttlConfig.getUpdateType() == StateTtlConfig.UpdateType.Disabled,
"Queryable state is currently not supported with TTL");
if (this.queryableStateName == null) {
this.queryableStateName =
Preconditions.checkNotNull(queryableStateName, "Registration name");
} else {
throw new IllegalStateException("Queryable state name already set");
}
}
|
Sets the name for queries of state created from this descriptor.
<p>If a name is set, the created state will be published for queries during runtime. The name
needs to be unique per job. If there is another state instance published under the same name,
the job will fail during runtime.
@param queryableStateName State name for queries (unique name per job)
@throws IllegalStateException If queryable state name already set
@deprecated The Queryable State feature is deprecated since Flink 1.18, and will be removed
in a future Flink major version.
|
setQueryable
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/state/StateDescriptor.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/state/StateDescriptor.java
|
Apache-2.0
|
@Nullable
@Deprecated
public String getQueryableStateName() {
return queryableStateName;
}
|
Returns the queryable state name.
@return Queryable state name or <code>null</code> if not set.
@deprecated The Queryable State feature is deprecated since Flink 1.18, and will be removed
in a future Flink major version.
|
getQueryableStateName
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/state/StateDescriptor.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/state/StateDescriptor.java
|
Apache-2.0
|
@Deprecated
public boolean isQueryable() {
return queryableStateName != null;
}
|
Returns whether the state created from this descriptor is queryable.
@return <code>true</code> if state is queryable, <code>false</code> otherwise.
@deprecated The Queryable State feature is deprecated since Flink 1.18, and will be removed
in a future Flink major version.
|
isQueryable
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/state/StateDescriptor.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/state/StateDescriptor.java
|
Apache-2.0
|
@Nonnull
public Builder cleanupFullSnapshot() {
strategies.put(CleanupStrategies.Strategies.FULL_STATE_SCAN_SNAPSHOT, EMPTY_STRATEGY);
return this;
}
|
Cleanup expired state in full snapshot on checkpoint.
|
cleanupFullSnapshot
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfig.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfig.java
|
Apache-2.0
|
@Nonnull
public Builder cleanupIncrementally(
@Nonnegative int cleanupSize, boolean runCleanupForEveryRecord) {
strategies.put(
CleanupStrategies.Strategies.INCREMENTAL_CLEANUP,
new IncrementalCleanupStrategy(cleanupSize, runCleanupForEveryRecord));
return this;
}
|
Cleanup expired state incrementally cleanup local state.
<p>Upon every state access this cleanup strategy checks a bunch of state keys for
expiration and cleans up expired ones. It keeps a lazy iterator through all keys with
relaxed consistency if backend supports it. This way all keys should be regularly checked
and cleaned eventually over time if any state is constantly being accessed.
<p>Additionally to the incremental cleanup upon state access, it can also run per every
record. Caution: if there are a lot of registered states using this option, they all will
be iterated for every record to check if there is something to cleanup.
<p>Note: if no access happens to this state or no records are processed in case of {@code
runCleanupForEveryRecord}, expired state will persist.
<p>Note: Time spent for the incremental cleanup increases record processing latency.
<p>Note: At the moment incremental cleanup is implemented only for Heap state backend.
Setting it for RocksDB will have no effect.
<p>Note: If heap state backend is used with synchronous snapshotting, the global iterator
keeps a copy of all keys while iterating because of its specific implementation which
does not support concurrent modifications. Enabling of this feature will increase memory
consumption then. Asynchronous snapshotting does not have this problem.
@param cleanupSize max number of keys pulled from queue for clean up upon state touch for
any key
@param runCleanupForEveryRecord run incremental cleanup per each processed record
|
cleanupIncrementally
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfig.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfig.java
|
Apache-2.0
|
@Nonnull
public Builder cleanupInRocksdbCompactFilter(long queryTimeAfterNumEntries) {
strategies.put(
CleanupStrategies.Strategies.ROCKSDB_COMPACTION_FILTER,
new RocksdbCompactFilterCleanupStrategy(queryTimeAfterNumEntries));
return this;
}
|
Cleanup expired state while Rocksdb compaction is running.
<p>RocksDB compaction filter will query current timestamp, used to check expiration, from
Flink every time after processing {@code queryTimeAfterNumEntries} number of state
entries. Updating the timestamp more often can improve cleanup speed but it decreases
compaction performance because it uses JNI call from native code.
@param queryTimeAfterNumEntries number of state entries to process by compaction filter
before updating current timestamp
|
cleanupInRocksdbCompactFilter
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfig.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfig.java
|
Apache-2.0
|
@Nonnull
public Builder cleanupInRocksdbCompactFilter(
long queryTimeAfterNumEntries, Duration periodicCompactionTime) {
strategies.put(
CleanupStrategies.Strategies.ROCKSDB_COMPACTION_FILTER,
new RocksdbCompactFilterCleanupStrategy(
queryTimeAfterNumEntries, periodicCompactionTime));
return this;
}
|
Cleanup expired state while Rocksdb compaction is running.
<p>RocksDB compaction filter will query current timestamp, used to check expiration, from
Flink every time after processing {@code queryTimeAfterNumEntries} number of state
entries. Updating the timestamp more often can improve cleanup speed but it decreases
compaction performance because it uses JNI call from native code.
<p>Periodic compaction could speed up expired state entries cleanup, especially for state
entries rarely accessed. Files older than this value will be picked up for compaction,
and re-written to the same level as they were before. It makes sure a file goes through
compaction filters periodically.
@param queryTimeAfterNumEntries number of state entries to process by compaction filter
before updating current timestamp
@param periodicCompactionTime periodic compaction which could speed up expired state
cleanup. 0 means turning off periodic compaction.
|
cleanupInRocksdbCompactFilter
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfig.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfig.java
|
Apache-2.0
|
@Override
public Type getType() {
return Type.VALUE;
}
|
Creates a new {@code ValueStateDescriptor} with the given name and the specific serializer.
@param name The (unique) name for the state.
@param typeSerializer The type serializer of the values in the state.
|
getType
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/state/ValueStateDescriptor.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/state/ValueStateDescriptor.java
|
Apache-2.0
|
@Override
public Type getType() {
return Type.LIST;
}
|
Creates a new {@code ListStateDescriptor} with the given name and list element type.
<p>If this constructor fails (because it is not possible to describe the type via a class),
consider using the {@link #ListStateDescriptor(String, TypeInformation)} constructor.
@param name The (unique) name for the state.
@param elementTypeClass The type of the elements in the state.
|
getType
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/state/v2/ListStateDescriptor.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/state/v2/ListStateDescriptor.java
|
Apache-2.0
|
public TypeInformation<T> getTypeInformation() {
return typeInfo;
}
|
The type information describing the value type. Only used to if the serializer is created
lazily.
|
getTypeInformation
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/state/v2/StateSerializerReference.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/state/v2/StateSerializerReference.java
|
Apache-2.0
|
public boolean isInitialized() {
return get() != null;
}
|
Checks whether the serializer has been initialized. Serializer initialization is lazy, to
allow parametrization of serializers with an {@link ExecutionConfig} via {@link
#initializeUnlessSet(ExecutionConfig)}.
@return True if the serializers have been initialized, false otherwise.
|
isInitialized
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/state/v2/StateSerializerReference.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/state/v2/StateSerializerReference.java
|
Apache-2.0
|
public Deadline plus(Duration other) {
return new Deadline(addHandlingOverflow(timeNanos, other.toNanos()), this.clock);
}
|
Clock providing the time for this deadline.
|
plus
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/time/Deadline.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/time/Deadline.java
|
Apache-2.0
|
public Duration timeLeft() {
return Duration.ofNanos(Math.subtractExact(timeNanos, clock.relativeTimeNanos()));
}
|
Returns the time left between the deadline and now. The result is negative if the deadline
has passed.
|
timeLeft
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/time/Deadline.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/time/Deadline.java
|
Apache-2.0
|
public Duration timeLeftIfAny() throws TimeoutException {
long nanos = Math.subtractExact(timeNanos, clock.relativeTimeNanos());
if (nanos <= 0) {
throw new TimeoutException();
}
return Duration.ofNanos(nanos);
}
|
Returns the time left between the deadline and now. If no time is left, a {@link
TimeoutException} will be thrown.
@throws TimeoutException if no time is left
|
timeLeftIfAny
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/time/Deadline.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/time/Deadline.java
|
Apache-2.0
|
public boolean hasTimeLeft() {
return !isOverdue();
}
|
Returns whether there is any time left between the deadline and now.
|
hasTimeLeft
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/time/Deadline.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/time/Deadline.java
|
Apache-2.0
|
public boolean isOverdue() {
return timeNanos <= clock.relativeTimeNanos();
}
|
Determines whether the deadline is in the past, i.e. whether the time left is zero or
negative.
|
isOverdue
|
java
|
apache/flink
|
flink-core/src/main/java/org/apache/flink/api/common/time/Deadline.java
|
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/time/Deadline.java
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.