Dataset schema (per-column types and value ranges):
  code        string, lengths 25 – 201k
  docstring   string, lengths 19 – 96.2k
  func_name   string, lengths 0 – 235
  language    string, 1 class
  repo        string, lengths 8 – 51
  path        string, lengths 11 – 314
  url         string, lengths 62 – 377
  license     string, 7 classes
public long getInactivityInterval() { return inactivityInterval; }
Returns the time duration of allowed inactivity after which a part file will have to roll. @return Time duration in milliseconds
getInactivityInterval
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.java
Apache-2.0
public static DefaultRollingPolicy.PolicyBuilder builder() { return new DefaultRollingPolicy.PolicyBuilder( DEFAULT_MAX_PART_SIZE, DEFAULT_ROLLOVER_INTERVAL, DEFAULT_INACTIVITY_INTERVAL); }
Creates a new {@link PolicyBuilder} that is used to configure and build an instance of {@code DefaultRollingPolicy}.
builder
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.java
Apache-2.0
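A minimal usage sketch of the builder shown above (hedged: the 128 MiB value is purely illustrative, and MemorySize.ofMebiBytes is assumed available in this Flink version):

import org.apache.flink.configuration.MemorySize;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;

public class RollingPolicyExample {
    public static void main(String[] args) {
        // Configure a policy that rolls part files once they exceed 128 MiB;
        // all other thresholds keep their defaults.
        DefaultRollingPolicy<String, String> policy =
                DefaultRollingPolicy.builder()
                        .withMaxPartSize(MemorySize.ofMebiBytes(128))
                        .build();
        System.out.println(policy.getInactivityInterval()); // default inactivity interval in ms
    }
}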
@Deprecated public static DefaultRollingPolicy.PolicyBuilder create() { return builder(); }
This method is {@link Deprecated}, use {@link DefaultRollingPolicy#builder()} instead.
create
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.java
Apache-2.0
public DefaultRollingPolicy.PolicyBuilder withMaxPartSize(final MemorySize size) { Preconditions.checkNotNull(size, "Rolling policy memory size cannot be null"); return new PolicyBuilder(size.getBytes(), rolloverInterval, inactivityInterval); }
Sets the part size above which a part file will have to roll. @param size the allowed part size.
withMaxPartSize
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.java
Apache-2.0
@Deprecated public DefaultRollingPolicy.PolicyBuilder withMaxPartSize(final long size) { Preconditions.checkState(size > 0L); return new PolicyBuilder(size, rolloverInterval, inactivityInterval); }
Sets the part size above which a part file will have to roll. @param size the allowed part size. @deprecated Use {@link #withMaxPartSize(MemorySize)} instead.
withMaxPartSize
java
apache/flink
flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.java
Apache-2.0
public void read(ObjectInputStream in) throws IOException { this.credentials = new Credentials(); credentials.readFields(in); }
A common base for both "mapred" and "mapreduce" Hadoop input formats.
read
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/common/HadoopInputFormatCommonBase.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/common/HadoopInputFormatCommonBase.java
Apache-2.0
public static Credentials getCredentialsFromUGI(UserGroupInformation ugi) { return ugi.getCredentials(); }
@param ugi The user information @return A new credentials object from the user information.
getCredentialsFromUGI
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/common/HadoopInputFormatCommonBase.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/common/HadoopInputFormatCommonBase.java
Apache-2.0
@Override public void writeRecord(Tuple2<K, V> record) throws IOException { this.recordWriter.write(record.f0, record.f1); }
Wrapper for using HadoopOutputFormats (mapred-variant) with Flink. <p>The OutputFormat consumes records of type {@code Tuple2<K,V>}. @param <K> Type of the key @param <V> Type of the value.
writeRecord
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/HadoopOutputFormat.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/HadoopOutputFormat.java
Apache-2.0
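A hedged sketch of wiring a Hadoop (mapred) TextOutputFormat into this wrapper; the output path is a placeholder and the Text/IntWritable types are illustrative:

import org.apache.flink.api.java.hadoop.mapred.HadoopOutputFormat;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextOutputFormat;

public class HadoopOutputFormatExample {
    public static HadoopOutputFormat<Text, IntWritable> create() {
        JobConf jobConf = new JobConf();
        FileOutputFormat.setOutputPath(jobConf, new Path("/tmp/flink-hadoop-out"));
        // writeRecord(Tuple2<K, V>) forwards f0 as key and f1 as value to the Hadoop RecordWriter.
        return new HadoopOutputFormat<>(new TextOutputFormat<Text, IntWritable>(), jobConf);
    }
}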
@Override public void open(InitializationContext context) throws IOException { int taskNumber = context.getTaskNumber(); // enforce sequential open() calls synchronized (OPEN_MUTEX) { if (Integer.toString(taskNumber + 1).length() > 6) { throw new IOException("Task id too large."); } TaskAttemptID taskAttemptID = TaskAttemptID.forName( "attempt__0000_r_" + String.format( "%" + (6 - Integer.toString( taskNumber + 1) .length()) + "s", " ") .replace(" ", "0") + Integer.toString(taskNumber + 1) + "_0"); this.jobConf.set("mapred.task.id", taskAttemptID.toString()); this.jobConf.setInt("mapred.task.partition", taskNumber + 1); // for hadoop 2.2 this.jobConf.set("mapreduce.task.attempt.id", taskAttemptID.toString()); this.jobConf.setInt("mapreduce.task.partition", taskNumber + 1); this.context = new TaskAttemptContextImpl(this.jobConf, taskAttemptID); this.outputCommitter = this.jobConf.getOutputCommitter(); JobContext jobContext = new JobContextImpl(this.jobConf, new JobID()); this.outputCommitter.setupJob(jobContext); this.recordWriter = this.mapredOutputFormat.getRecordWriter( null, this.jobConf, Integer.toString(taskNumber + 1), new HadoopDummyProgressable()); } }
Creates the temporary output file for the Hadoop RecordWriter. @param context The initialization context. @throws java.io.IOException
open
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/HadoopOutputFormatBase.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/HadoopOutputFormatBase.java
Apache-2.0
public static void mergeHadoopConf(JobConf jobConf) { // we have to load the global configuration here, because the HadoopInputFormatBase does not // have access to a Flink configuration object org.apache.flink.configuration.Configuration flinkConfiguration = GlobalConfiguration.loadConfiguration(); Configuration hadoopConf = getHadoopConfiguration(flinkConfiguration); for (Map.Entry<String, String> e : hadoopConf) { if (jobConf.get(e.getKey()) == null) { jobConf.set(e.getKey(), e.getValue()); } } }
Merges the Hadoop configuration into the JobConf. This is necessary for the HDFS configuration.
mergeHadoopConf
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/utils/HadoopUtils.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/utils/HadoopUtils.java
Apache-2.0
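A short, hedged sketch of the merge: only keys the JobConf does not already contain are copied over from the discovered Hadoop configuration (e.g. a core-site.xml found via HADOOP_CONF_DIR):

import org.apache.flink.api.java.hadoop.mapred.utils.HadoopUtils;
import org.apache.hadoop.mapred.JobConf;

public class MergeConfExample {
    public static void main(String[] args) {
        JobConf jobConf = new JobConf();
        HadoopUtils.mergeHadoopConf(jobConf); // fills in missing keys only
        System.out.println(jobConf.get("fs.defaultFS")); // null unless a core-site.xml was found
    }
}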
public static Configuration getHadoopConfiguration( org.apache.flink.configuration.Configuration flinkConfiguration) { Configuration retConf = new Configuration(); // We need to load both core-site.xml and hdfs-site.xml to determine the default fs path and // the hdfs configuration // Try to load HDFS configuration from Hadoop's own configuration files // Approach environment variables for (String possibleHadoopConfPath : possibleHadoopConfPaths(flinkConfiguration)) { if (new File(possibleHadoopConfPath).exists()) { if (new File(possibleHadoopConfPath + "/core-site.xml").exists()) { retConf.addResource( new org.apache.hadoop.fs.Path( possibleHadoopConfPath + "/core-site.xml")); if (LOG.isDebugEnabled()) { LOG.debug( "Adding " + possibleHadoopConfPath + "/core-site.xml to hadoop configuration"); } } if (new File(possibleHadoopConfPath + "/hdfs-site.xml").exists()) { retConf.addResource( new org.apache.hadoop.fs.Path( possibleHadoopConfPath + "/hdfs-site.xml")); if (LOG.isDebugEnabled()) { LOG.debug( "Adding " + possibleHadoopConfPath + "/hdfs-site.xml to hadoop configuration"); } } } } return retConf; }
Returns a new Hadoop Configuration object using the path to the Hadoop conf configured in the main configuration (config.yaml). This method is public because it is being used in the HadoopDataSource. @param flinkConfiguration Flink configuration object @return A Hadoop configuration instance
getHadoopConfiguration
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/utils/HadoopUtils.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/utils/HadoopUtils.java
Apache-2.0
public static String[] possibleHadoopConfPaths( org.apache.flink.configuration.Configuration flinkConfiguration) { String[] possiblePaths = new String[4]; possiblePaths[0] = System.getenv("HADOOP_CONF_DIR"); if (System.getenv("HADOOP_HOME") != null) { possiblePaths[1] = System.getenv("HADOOP_HOME") + "/conf"; possiblePaths[2] = System.getenv("HADOOP_HOME") + "/etc/hadoop"; // hadoop 2.2 } return Arrays.stream(possiblePaths).filter(Objects::nonNull).toArray(String[]::new); }
Gets possible Hadoop conf dir paths, based on environment variables and the Flink configuration. @param flinkConfiguration The Flink configuration that may contain the path to the Hadoop conf dir. @return an array of possible paths
possibleHadoopConfPaths
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/utils/HadoopUtils.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/utils/HadoopUtils.java
Apache-2.0
@Override public void writeRecord(Tuple2<K, V> record) throws IOException { try { this.recordWriter.write(record.f0, record.f1); } catch (InterruptedException e) { throw new IOException("Could not write Record.", e); } }
OutputFormat implementation allowing to use Hadoop (mapreduce) OutputFormats with Flink. @param <K> Key Type @param <V> Value Type
writeRecord
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapreduce/HadoopOutputFormat.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapreduce/HadoopOutputFormat.java
Apache-2.0
@Override public void open(InitializationContext context) throws IOException { int taskNumber = context.getTaskNumber(); // enforce sequential open() calls synchronized (OPEN_MUTEX) { if (Integer.toString(taskNumber + 1).length() > 6) { throw new IOException("Task id too large."); } this.taskNumber = taskNumber + 1; // for hadoop 2.2 this.configuration.set("mapreduce.output.basename", "tmp"); TaskAttemptID taskAttemptID = TaskAttemptID.forName( "attempt__0000_r_" + String.format( "%" + (6 - Integer.toString( taskNumber + 1) .length()) + "s", " ") .replace(" ", "0") + Integer.toString(taskNumber + 1) + "_0"); this.configuration.set("mapred.task.id", taskAttemptID.toString()); this.configuration.setInt("mapred.task.partition", taskNumber + 1); // for hadoop 2.2 this.configuration.set("mapreduce.task.attempt.id", taskAttemptID.toString()); this.configuration.setInt("mapreduce.task.partition", taskNumber + 1); try { this.context = new TaskAttemptContextImpl(this.configuration, taskAttemptID); this.outputCommitter = this.mapreduceOutputFormat.getOutputCommitter(this.context); this.outputCommitter.setupJob(new JobContextImpl(this.configuration, new JobID())); } catch (Exception e) { throw new RuntimeException(e); } this.context.getCredentials().addAll(this.credentials); Credentials currentUserCreds = getCredentialsFromUGI(UserGroupInformation.getCurrentUser()); if (currentUserCreds != null) { this.context.getCredentials().addAll(currentUserCreds); } // compatible for hadoop 2.2.0, the temporary output directory is different from hadoop // 1.2.1 if (outputCommitter instanceof FileOutputCommitter) { this.configuration.set( "mapreduce.task.output.dir", ((FileOutputCommitter) this.outputCommitter).getWorkPath().toString()); } try { this.recordWriter = this.mapreduceOutputFormat.getRecordWriter(this.context); } catch (InterruptedException e) { throw new IOException("Could not create RecordWriter.", e); } } }
Creates the temporary output file for the Hadoop RecordWriter. @throws java.io.IOException
open
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapreduce/HadoopOutputFormatBase.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapreduce/HadoopOutputFormatBase.java
Apache-2.0
public static void mergeHadoopConf(Configuration hadoopConfig) { // we have to load the global configuration here, because the HadoopInputFormatBase does not // have access to a Flink configuration object org.apache.flink.configuration.Configuration flinkConfiguration = GlobalConfiguration.loadConfiguration(); Configuration hadoopConf = org.apache.flink.api.java.hadoop.mapred.utils.HadoopUtils.getHadoopConfiguration( flinkConfiguration); for (Map.Entry<String, String> e : hadoopConf) { if (hadoopConfig.get(e.getKey()) == null) { hadoopConfig.set(e.getKey(), e.getValue()); } } }
Merges the Hadoop configuration into the given Configuration. This is necessary for the HDFS configuration.
mergeHadoopConf
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapreduce/utils/HadoopUtils.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapreduce/utils/HadoopUtils.java
Apache-2.0
public static <K, V> HadoopInputFormat<K, V> readSequenceFile( Class<K> key, Class<V> value, String inputPath) throws IOException { return readHadoopFile( new org.apache.hadoop.mapred.SequenceFileInputFormat<K, V>(), key, value, inputPath); }
Creates a Flink {@link InputFormat} to read a Hadoop sequence file for the given key and value classes. @return A Flink InputFormat that wraps a Hadoop SequenceFileInputFormat.
readSequenceFile
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/HadoopInputs.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/HadoopInputs.java
Apache-2.0
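A hedged usage sketch; the sequence-file path and the LongWritable/Text key-value types are placeholders:

import org.apache.flink.api.java.hadoop.mapred.HadoopInputFormat;
import org.apache.flink.hadoopcompatibility.HadoopInputs;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;

public class SequenceFileExample {
    public static HadoopInputFormat<LongWritable, Text> create() throws java.io.IOException {
        // Wraps a Hadoop SequenceFileInputFormat as a Flink InputFormat.
        return HadoopInputs.readSequenceFile(
                LongWritable.class, Text.class, "hdfs:///data/events.seq");
    }
}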
public static ParameterTool paramsFromGenericOptionsParser(String[] args) throws IOException { Option[] options = new GenericOptionsParser(args).getCommandLine().getOptions(); Map<String, String> map = new HashMap<String, String>(); for (Option option : options) { String[] split = option.getValue().split("="); map.put(split[0], split[1]); } return ParameterTool.fromMap(map); }
Returns a {@link ParameterTool} for the arguments parsed by {@link GenericOptionsParser}. @param args Input array of arguments. It should be parsable by {@link GenericOptionsParser} @return A {@link ParameterTool} @throws IOException If arguments cannot be parsed by {@link GenericOptionsParser} @see GenericOptionsParser
paramsFromGenericOptionsParser
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/HadoopUtils.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/HadoopUtils.java
Apache-2.0
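A hedged sketch of parsing Hadoop-style -D options; note that the underlying code splits each option value on "=", so only key=value pairs survive (the ParameterTool import path may differ across Flink versions):

import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.hadoopcompatibility.HadoopUtils;

public class GenericOptionsExample {
    public static void main(String[] args) throws java.io.IOException {
        String[] hadoopArgs = {"-D", "fs.defaultFS=hdfs://namenode:8020"};
        ParameterTool params = HadoopUtils.paramsFromGenericOptionsParser(hadoopArgs);
        System.out.println(params.get("fs.defaultFS")); // hdfs://namenode:8020
    }
}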
public void setFlinkCollector(Collector<Tuple2<KEY, VALUE>> flinkCollector) { this.flinkCollector = flinkCollector; }
Set the wrapped Flink collector. @param flinkCollector The wrapped Flink OutputCollector.
setFlinkCollector
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/wrapper/HadoopOutputCollector.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/wrapper/HadoopOutputCollector.java
Apache-2.0
@Override public void collect(final KEY key, final VALUE val) throws IOException { this.outTuple.f0 = key; this.outTuple.f1 = val; this.flinkCollector.collect(outTuple); }
Uses the wrapped Flink collector to collect a key-value pair for Flink. @param key the key to collect @param val the value to collect @throws IOException if an unexpected key or value occurs in the key-value pair.
collect
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/wrapper/HadoopOutputCollector.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/hadoopcompatibility/mapred/wrapper/HadoopOutputCollector.java
Apache-2.0
@Override public void flatMap( Tuple2<LongWritable, Text> value, Collector<Tuple2<String, Integer>> out) { // normalize and split the line String line = value.f1.toString(); String[] tokens = line.toLowerCase().split("\\W+"); // emit the pairs for (String token : tokens) { if (token.length() > 0) { out.collect(new Tuple2<String, Integer>(token, 1)); } } }
Splits a line into words and converts Hadoop Writables into normal Java data types.
flatMap
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapreduce/example/WordCount.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapreduce/example/WordCount.java
Apache-2.0
@Override public Tuple2<Text, IntWritable> map(Tuple2<String, Integer> value) throws Exception { return new Tuple2<Text, IntWritable>(new Text(value.f0), new IntWritable(value.f1)); }
Converts Java data types to Hadoop Writables.
map
java
apache/flink
flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapreduce/example/WordCount.java
https://github.com/apache/flink/blob/master/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapreduce/example/WordCount.java
Apache-2.0
@Override public InlineElement getDescription() { return description; }
Downstream can start running anytime, as long as the upstream has started. <p>This adapts the resource usage to whatever is available. <p>This type will selectively spill data to reduce disk writes as much as possible.
getDescription
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/BatchShuffleMode.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/BatchShuffleMode.java
Apache-2.0
@Internal public SerializerConfig getSerializerConfig() { return serializerConfig; }
In the long run, this field should be somehow merged with the {@link Configuration} from StreamExecutionEnvironment.
getSerializerConfig
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
Apache-2.0
public ExecutionConfig enableClosureCleaner() { return setClosureCleanerLevel(ClosureCleanerLevel.RECURSIVE); }
Enables the ClosureCleaner. This analyzes user code functions and sets fields to null that are not used. This will in most cases make closures or anonymous inner classes serializable that were not serializable due to some Scala or Java implementation artifact. User code must be serializable because it needs to be sent to worker nodes.
enableClosureCleaner
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
Apache-2.0
public boolean isClosureCleanerEnabled() { return !(getClosureCleanerLevel() == ClosureCleanerLevel.NONE); }
Returns whether the ClosureCleaner is enabled. @see #enableClosureCleaner()
isClosureCleanerEnabled
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
Apache-2.0
public ExecutionConfig setClosureCleanerLevel(ClosureCleanerLevel level) { configuration.set(PipelineOptions.CLOSURE_CLEANER_LEVEL, level); return this; }
Configures the closure cleaner. Please see {@link ClosureCleanerLevel} for details on the different settings.
setClosureCleanerLevel
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
Apache-2.0
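A small, hedged sketch tying the closure-cleaner accessors above together; TOP_LEVEL is assumed to be one of the {@code ClosureCleanerLevel} values alongside NONE and RECURSIVE:

import org.apache.flink.api.common.ExecutionConfig;

public class ClosureCleanerExample {
    public static void main(String[] args) {
        ExecutionConfig config = new ExecutionConfig();
        // Clean only the top-level class, not transitively referenced fields.
        config.setClosureCleanerLevel(ExecutionConfig.ClosureCleanerLevel.TOP_LEVEL);
        System.out.println(config.isClosureCleanerEnabled()); // true: level != NONE
    }
}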
@PublicEvolving public long getAutoWatermarkInterval() { return configuration.get(PipelineOptions.AUTO_WATERMARK_INTERVAL).toMillis(); }
Returns the interval of the automatic watermark emission. @see #setAutoWatermarkInterval(long)
getAutoWatermarkInterval
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
Apache-2.0
@PublicEvolving public ExecutionConfig setLatencyTrackingInterval(long interval) { configuration.set(MetricOptions.LATENCY_INTERVAL, Duration.ofMillis(interval)); return this; }
Interval for sending latency tracking marks from the sources to the sinks. Flink will send latency tracking marks from the sources at the specified interval. <p>Setting a tracking interval <= 0 disables the latency tracking. @param interval Interval in milliseconds.
setLatencyTrackingInterval
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
Apache-2.0
public int getParallelism() { return configuration.get(CoreOptions.DEFAULT_PARALLELISM); }
Gets the parallelism with which operations are executed by default. Operations can individually override this value to use a specific parallelism. <p>Other operations may need to run with a different parallelism - for example calling a reduce operation over the entire data set will involve an operation that runs with a parallelism of one (the final reduce to the single result value). @return The parallelism used by operations, unless they override that value. This method returns {@link #PARALLELISM_DEFAULT} if the environment's default parallelism should be used.
getParallelism
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
Apache-2.0
public long getTaskCancellationInterval() { return configuration.get(TaskManagerOptions.TASK_CANCELLATION_INTERVAL).toMillis(); }
Gets the interval (in milliseconds) between consecutive attempts to cancel a running task.
getTaskCancellationInterval
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
Apache-2.0
public ExecutionConfig setTaskCancellationInterval(long interval) { configuration.set( TaskManagerOptions.TASK_CANCELLATION_INTERVAL, Duration.ofMillis(interval)); return this; }
Sets the configuration parameter specifying the interval (in milliseconds) between consecutive attempts to cancel a running task. @param interval the interval (in milliseconds).
setTaskCancellationInterval
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
Apache-2.0
@PublicEvolving public long getTaskCancellationTimeout() { return configuration.get(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT).toMillis(); }
Returns the timeout (in milliseconds) after which an ongoing task cancellation leads to a fatal TaskManager error. <p>The value <code>0</code> means that the timeout is disabled. In this case a stuck cancellation will not lead to a fatal error.
getTaskCancellationTimeout
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
Apache-2.0
private void setAutoGeneratedUids(boolean autoGeneratedUids) { configuration.set(PipelineOptions.AUTO_GENERATE_UIDS, autoGeneratedUids); }
Disables auto-generated UIDs. Forces users to manually specify UIDs on DataStream applications. <p>It is highly recommended that users specify UIDs before deploying to production since they are used to match state in savepoints to operators in a job. Because auto-generated IDs are likely to change when modifying a job, specifying custom IDs allows an application to evolve over time without discarding state.
setAutoGeneratedUids
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
Apache-2.0
public boolean hasAutoGeneratedUIDsEnabled() { return configuration.get(PipelineOptions.AUTO_GENERATE_UIDS); }
Checks whether auto generated UIDs are supported. <p>Auto generated UIDs are enabled by default. @see #enableAutoGeneratedUIDs() @see #disableAutoGeneratedUIDs()
hasAutoGeneratedUIDsEnabled
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
Apache-2.0
public ExecutionConfig enableObjectReuse() { return setObjectReuse(true); }
Enables reusing objects that Flink internally uses for deserialization and passing data to user-code functions. Keep in mind that this can lead to bugs when the user-code function of an operation is not aware of this behaviour.
enableObjectReuse
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
Apache-2.0
@Override public InlineElement getDescription() { return description; }
Configuration settings for the closure cleaner.
getDescription
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
Apache-2.0
public long getNetRuntime() { return this.netRuntime; }
Gets the net execution time of the job, i.e., the execution time in the parallel system, without the pre-flight steps like the optimizer. @return The net execution time in milliseconds.
getNetRuntime
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/JobExecutionResult.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/JobExecutionResult.java
Apache-2.0
@SuppressWarnings("unchecked") public <T> T getAccumulatorResult(String accumulatorName) { OptionalFailure<Object> result = this.accumulatorResults.get(accumulatorName); if (result != null) { return (T) result.getUnchecked(); } else { return null; } }
Gets the accumulator with the given name. Returns {@code null}, if no accumulator with that name was produced. @param accumulatorName The name of the accumulator. @param <T> The generic type of the accumulator value. @return The value of the accumulator with the given name.
getAccumulatorResult
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/JobExecutionResult.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/JobExecutionResult.java
Apache-2.0
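A hedged sketch of the accessors above. In practice a JobExecutionResult comes back from executing a job; here one is constructed directly (assuming the public (JobID, long, Map) constructor) purely to show the reads:

import java.util.Collections;
import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.common.JobID;
import org.apache.flink.util.OptionalFailure;

public class ResultExample {
    public static void main(String[] args) {
        JobExecutionResult result =
                new JobExecutionResult(
                        JobID.generate(),
                        2500L, // net runtime in milliseconds
                        Collections.singletonMap("num-records", OptionalFailure.of((Object) 42)));
        long runtimeMs = result.getNetRuntime();                         // 2500
        Integer numRecords = result.getAccumulatorResult("num-records"); // 42
        System.out.println(runtimeMs + " ms, " + numRecords + " records");
    }
}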
public static JobID generate() { return new JobID(); }
Creates a new (statistically) random JobID. @return A new random JobID.
generate
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/JobID.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/JobID.java
Apache-2.0
public static JobID fromHexString(String hexString) { try { return new JobID(StringUtils.hexStringToByte(hexString)); } catch (Exception e) { throw new IllegalArgumentException( "Cannot parse JobID from \"" + hexString + "\". The expected format is " + "[0-9a-fA-F]{32}, e.g. fd72014d4c864993a2e5a9287b4a9c5d.", e); } }
Parses a JobID from the given string. @param hexString string representation of a JobID @return Parsed JobID @throws IllegalArgumentException if the JobID could not be parsed from the given string
fromHexString
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/JobID.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/JobID.java
Apache-2.0
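A round-trip sketch for the two JobID helpers above (toString() on a JobID yields the 32-character hex form that fromHexString parses):

import org.apache.flink.api.common.JobID;

public class JobIdExample {
    public static void main(String[] args) {
        JobID id = JobID.generate();             // statistically random ID
        String hex = id.toString();              // 32 hex characters
        JobID parsed = JobID.fromHexString(hex); // parse it back
        System.out.println(id.equals(parsed));   // true
    }
}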
public boolean isGloballyTerminalState() { return terminalState == TerminalState.GLOBALLY; }
Checks whether this state is <i>globally terminal</i>. A globally terminal job is complete and cannot fail any more and will not be restarted or recovered by another standby master node. <p>When a globally terminal state has been reached, all recovery data for the job is dropped from the high-availability services. @return True, if this job status is globally terminal, false otherwise.
isGloballyTerminalState
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/JobStatus.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/JobStatus.java
Apache-2.0
public boolean isTerminalState() { return terminalState != TerminalState.NON_TERMINAL; }
Checks whether this state is <i>locally terminal</i>. Locally terminal refers to the state of a job's execution graph within an executing JobManager. If the execution graph is locally terminal, the JobManager will not continue executing or recovering the job. <p>The only state that is locally terminal, but not globally terminal is {@link #SUSPENDED}, which is typically entered when the executing JobManager loses its leader status. @return True, if this job status is terminal, false otherwise.
isTerminalState
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/JobStatus.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/JobStatus.java
Apache-2.0
public JobID getJobID() { return jobID; }
Returns the JobID assigned to the job by the Flink runtime. @return jobID, or null if the job has been executed on a runtime without JobIDs or if the execution failed.
getJobID
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/JobSubmissionResult.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/JobSubmissionResult.java
Apache-2.0
public boolean isJobExecutionResult() { return false; }
Checks if this JobSubmissionResult is also a JobExecutionResult. See {@code getJobExecutionResult} to retrieve the JobExecutionResult. @return True if this is a JobExecutionResult, false otherwise
isJobExecutionResult
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/JobSubmissionResult.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/JobSubmissionResult.java
Apache-2.0
public JobExecutionResult getJobExecutionResult() { throw new ClassCastException("This JobSubmissionResult is not a JobExecutionResult."); }
Returns the JobExecutionResult if available. @return The JobExecutionResult @throws ClassCastException if this is not a JobExecutionResult
getJobExecutionResult
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/JobSubmissionResult.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/JobSubmissionResult.java
Apache-2.0
public void addDataSink(GenericDataSinkBase<?> sink) { checkNotNull(sink, "The data sink must not be null."); if (!this.sinks.contains(sink)) { this.sinks.add(sink); } }
Adds a data sink to the set of sinks in this program. @param sink The data sink to add.
addDataSink
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/Plan.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
Apache-2.0
public Collection<? extends GenericDataSinkBase<?>> getDataSinks() { return this.sinks; }
Gets all the data sinks of this job. @return All sinks of the program.
getDataSinks
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/Plan.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
Apache-2.0
public String getJobName() { return this.jobName; }
Gets the name of this job. @return The name of the job.
getJobName
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/Plan.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
Apache-2.0
public void setJobName(String jobName) { checkNotNull(jobName, "The job name must not be null."); this.jobName = jobName; }
Sets the jobName for this Plan. @param jobName The jobName to set.
setJobName
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/Plan.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
Apache-2.0
public JobID getJobId() { return jobId; }
Gets the ID of the job that the dataflow plan belongs to. If this ID is not set, then the dataflow represents its own independent job. @return The ID of the job that the dataflow plan belongs to.
getJobId
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/Plan.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
Apache-2.0
public void setJobId(JobID jobId) { this.jobId = jobId; }
Sets the ID of the job that the dataflow plan belongs to. If this ID is set to {@code null}, then the dataflow represents its own independent job. @param jobId The ID of the job that the dataflow plan belongs to.
setJobId
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/Plan.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
Apache-2.0
public int getDefaultParallelism() { return this.defaultParallelism; }
Gets the default parallelism for this job. That degree is always used when an operator is not explicitly given a parallelism. @return The default parallelism for the plan.
getDefaultParallelism
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/Plan.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
Apache-2.0
public void setDefaultParallelism(int defaultParallelism) { checkArgument( defaultParallelism >= 1 || defaultParallelism == ExecutionConfig.PARALLELISM_DEFAULT, "The default parallelism must be positive, or ExecutionConfig.PARALLELISM_DEFAULT if the system should use the globally configured default."); this.defaultParallelism = defaultParallelism; }
Sets the default parallelism for this plan. That degree is always used when an operator is not explicitly given a parallelism. @param defaultParallelism The default parallelism for the plan.
setDefaultParallelism
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/Plan.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
Apache-2.0
public String getPostPassClassName() { return "org.apache.flink.optimizer.postpass.JavaApiPostPass"; }
Gets the optimizer post-pass class for this job. The post-pass typically creates utility classes for data types and is specific to a particular data model (record, tuple, Scala, ...) @return The name of the class implementing the optimizer post-pass.
getPostPassClassName
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/Plan.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
Apache-2.0
public ExecutionConfig getExecutionConfig() { if (executionConfig == null) { throw new RuntimeException("Execution config has not been set properly for this plan"); } return executionConfig; }
Gets the execution config object. @return The execution config object.
getExecutionConfig
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/Plan.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
Apache-2.0
public void setExecutionConfig(ExecutionConfig executionConfig) { this.executionConfig = executionConfig; }
Sets the runtime config object defining execution parameters. @param executionConfig The execution config to use.
setExecutionConfig
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/Plan.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
Apache-2.0
@Override public void accept(Visitor<Operator<?>> visitor) { for (GenericDataSinkBase<?> sink : this.sinks) { sink.accept(visitor); } }
Traverses the job depth first from all data sinks on towards the sources. @see Visitable#accept(Visitor)
accept
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/Plan.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
Apache-2.0
public void registerCachedFile(String name, DistributedCacheEntry entry) throws IOException { if (!this.cacheFile.containsKey(name)) { this.cacheFile.put(name, entry); } else { throw new IOException("cache file " + name + " already exists!"); } }
Registers cache files at program level. @param name user-defined name of that file @param entry contains all relevant information @throws java.io.IOException
registerCachedFile
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/Plan.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
Apache-2.0
public T getSerializer() { return serializer; }
The wrapper to make the serializer serializable. <p>This can be removed once {@link KryoSerializer} only allows serializer classes.
getSerializer
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/SerializableSerializer.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/SerializableSerializer.java
Apache-2.0
public static void mergeInto( Map<String, OptionalFailure<Accumulator<?, ?>>> target, Map<String, Accumulator<?, ?>> toMerge) { for (Map.Entry<String, Accumulator<?, ?>> otherEntry : toMerge.entrySet()) { OptionalFailure<Accumulator<?, ?>> ownAccumulator = target.get(otherEntry.getKey()); if (ownAccumulator == null) { // Create initial counter (copy!) target.put( otherEntry.getKey(), wrapUnchecked(otherEntry.getKey(), () -> otherEntry.getValue().clone())); } else if (ownAccumulator.isFailure()) { continue; } else { Accumulator<?, ?> accumulator = ownAccumulator.getUnchecked(); // Both should have the same type compareAccumulatorTypes( otherEntry.getKey(), accumulator.getClass(), otherEntry.getValue().getClass()); // Merge target counter with other counter target.put( otherEntry.getKey(), wrapUnchecked( otherEntry.getKey(), () -> mergeSingle(accumulator, otherEntry.getValue().clone()))); } } }
Merge two collections of accumulators. The second will be merged into the first. @param target The collection of accumulators that will be updated @param toMerge The collection of accumulators that will be merged into the other
mergeInto
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/accumulators/AccumulatorHelper.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/accumulators/AccumulatorHelper.java
Apache-2.0
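A hedged sketch of merging a plain accumulator map into the OptionalFailure-wrapped target map, as done when combining results from multiple tasks; the accumulator name is arbitrary:

import java.util.HashMap;
import java.util.Map;
import org.apache.flink.api.common.accumulators.Accumulator;
import org.apache.flink.api.common.accumulators.AccumulatorHelper;
import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.util.OptionalFailure;

public class MergeIntoExample {
    public static void main(String[] args) {
        Map<String, OptionalFailure<Accumulator<?, ?>>> target = new HashMap<>();
        Map<String, Accumulator<?, ?>> toMerge = new HashMap<>();
        IntCounter counter = new IntCounter();
        counter.add(5);
        toMerge.put("records", counter);
        AccumulatorHelper.mergeInto(target, toMerge); // target receives a clone of the counter
        System.out.println(target.get("records").getUnchecked().getLocalValue()); // 5
    }
}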
@SuppressWarnings("rawtypes") public static void compareAccumulatorTypes( Object name, Class<? extends Accumulator> first, Class<? extends Accumulator> second) throws UnsupportedOperationException { if (first == null || second == null) { throw new NullPointerException(); } if (first != second) { if (!first.getName().equals(second.getName())) { throw new UnsupportedOperationException( "The accumulator object '" + name + "' was created with two different types: " + first.getName() + " and " + second.getName()); } else { // damn, name is the same, but different classloaders throw new UnsupportedOperationException( "The accumulator object '" + name + "' was created with two different classes: " + first + " and " + second + " Both have the same type (" + first.getName() + ") but different classloaders: " + first.getClassLoader() + " and " + second.getClassLoader()); } } }
Compare both classes and throw {@link UnsupportedOperationException} if they differ.
compareAccumulatorTypes
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/accumulators/AccumulatorHelper.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/accumulators/AccumulatorHelper.java
Apache-2.0
public static Map<String, OptionalFailure<Object>> deserializeAccumulators( Map<String, SerializedValue<OptionalFailure<Object>>> serializedAccumulators, ClassLoader loader) throws IOException, ClassNotFoundException { if (serializedAccumulators == null || serializedAccumulators.isEmpty()) { return Collections.emptyMap(); } Map<String, OptionalFailure<Object>> accumulators = CollectionUtil.newHashMapWithExpectedSize(serializedAccumulators.size()); for (Map.Entry<String, SerializedValue<OptionalFailure<Object>>> entry : serializedAccumulators.entrySet()) { OptionalFailure<Object> value = null; if (entry.getValue() != null) { value = entry.getValue().deserializeValue(loader); } accumulators.put(entry.getKey(), value); } return accumulators; }
Takes the serialized accumulator results and tries to deserialize them using the provided class loader. @param serializedAccumulators The serialized accumulator results. @param loader The class loader to use. @return The deserialized accumulator results.
deserializeAccumulators
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/accumulators/AccumulatorHelper.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/accumulators/AccumulatorHelper.java
Apache-2.0
public static Map<String, Object> deserializeAndUnwrapAccumulators( Map<String, SerializedValue<OptionalFailure<Object>>> serializedAccumulators, ClassLoader loader) throws IOException, ClassNotFoundException { Map<String, OptionalFailure<Object>> deserializedAccumulators = deserializeAccumulators(serializedAccumulators, loader); if (deserializedAccumulators.isEmpty()) { return Collections.emptyMap(); } Map<String, Object> accumulators = CollectionUtil.newHashMapWithExpectedSize(serializedAccumulators.size()); for (Map.Entry<String, OptionalFailure<Object>> entry : deserializedAccumulators.entrySet()) { accumulators.put(entry.getKey(), entry.getValue().getUnchecked()); } return accumulators; }
Takes the serialized accumulator results and tries to deserialize them using the provided class loader, and then tries to unwrap the values unchecked. @param serializedAccumulators The serialized accumulator results. @param loader The class loader to use. @return The deserialized and unwrapped accumulator results.
deserializeAndUnwrapAccumulators
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/accumulators/AccumulatorHelper.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/accumulators/AccumulatorHelper.java
Apache-2.0
public Aggregator<T> getAggregator() { return aggregator; }
Gets the aggregator. @return The aggregator.
getAggregator
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/aggregators/AggregatorWithName.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/aggregators/AggregatorWithName.java
Apache-2.0
public boolean updateCombinedWatermark() { long minimumOverAllOutputs = Long.MAX_VALUE; // if we don't have any outputs minimumOverAllOutputs is not valid, it's still // at its initial Long.MAX_VALUE state and we must not emit that if (partialWatermarks.isEmpty()) { return false; } boolean allIdle = true; for (PartialWatermark partialWatermark : partialWatermarks) { if (!partialWatermark.isIdle()) { minimumOverAllOutputs = Math.min(minimumOverAllOutputs, partialWatermark.getWatermark()); allIdle = false; } } this.idle = allIdle; if (!allIdle && minimumOverAllOutputs > combinedWatermark) { combinedWatermark = minimumOverAllOutputs; return true; } return false; }
Checks whether we need to update the combined watermark. <p><b>NOTE:</b> It can update the {@link #isIdle()} status. @return true, if the combined watermark changed
updateCombinedWatermark
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/CombinedWatermarkStatus.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/CombinedWatermarkStatus.java
Apache-2.0
private long getWatermark() { checkState(!idle, "Output is idle."); return watermark; }
Returns the current watermark timestamp. This will throw {@link IllegalStateException} if the output is currently idle.
getWatermark
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/CombinedWatermarkStatus.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/CombinedWatermarkStatus.java
Apache-2.0
public static IndexedCombinedWatermarkStatus forInputsCount(int inputsCount) { CombinedWatermarkStatus.PartialWatermark[] partialWatermarks = IntStream.range(0, inputsCount) .mapToObj( i -> new CombinedWatermarkStatus.PartialWatermark( new NoOpWatermarkUpdateListener())) .toArray(CombinedWatermarkStatus.PartialWatermark[]::new); CombinedWatermarkStatus combinedWatermarkStatus = new CombinedWatermarkStatus(); for (CombinedWatermarkStatus.PartialWatermark partialWatermark : partialWatermarks) { combinedWatermarkStatus.add(partialWatermark); } return new IndexedCombinedWatermarkStatus(combinedWatermarkStatus, partialWatermarks); }
Represents combined value and status of a watermark for a set number of input partial watermarks.
forInputsCount
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/IndexedCombinedWatermarkStatus.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/IndexedCombinedWatermarkStatus.java
Apache-2.0
@Override public long extractTimestamp(T element, long recordTimestamp) { // make sure timestamps are monotonously increasing, even when the system clock re-syncs final long now = Math.max(System.currentTimeMillis(), maxTimestamp); maxTimestamp = now; return now; }
A timestamp assigner that assigns timestamps based on the machine's wall clock. If this assigner is used after a stream source, it realizes "ingestion time" semantics. @param <T> The type of the elements that get timestamps assigned.
extractTimestamp
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/IngestionTimeAssigner.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/IngestionTimeAssigner.java
Apache-2.0
@Override public long extractTimestamp(E element, long recordTimestamp) { return recordTimestamp; }
A {@link TimestampAssigner} that forwards the already-assigned timestamp. This is for use when records come out of a source with valid timestamps, for example from the Kafka Metadata.
extractTimestamp
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/RecordTimestampAssigner.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/RecordTimestampAssigner.java
Apache-2.0
@Override public TimestampAssigner<T> createTimestampAssigner(Context context) { return assigner; }
We need an actual class. Implementing this as a lambda in {@link #of(SerializableTimestampAssigner)} would not allow the {@link ClosureCleaner} to "reach" into the {@link SerializableTimestampAssigner}.
createTimestampAssigner
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/TimestampAssignerSupplier.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/TimestampAssignerSupplier.java
Apache-2.0
public long getTimestamp() { return timestamp; }
Returns the timestamp associated with this Watermark.
getTimestamp
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/Watermark.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/Watermark.java
Apache-2.0
public void registerNewOutput(String id, WatermarkUpdateListener onWatermarkUpdate) { final PartialWatermark outputState = new PartialWatermark(onWatermarkUpdate); final PartialWatermark previouslyRegistered = watermarkPerOutputId.putIfAbsent(id, outputState); checkState(previouslyRegistered == null, "Already contains an output for ID %s", id); combinedWatermarkStatus.add(outputState); }
Registers a new multiplexed output under the given ID, which creates internal states for that output. The ID can then be used to get a deferred or immediate {@link WatermarkOutput} for that output.
registerNewOutput
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkOutputMultiplexer.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkOutputMultiplexer.java
Apache-2.0
public WatermarkOutput getImmediateOutput(String outputId) { final PartialWatermark outputState = watermarkPerOutputId.get(outputId); Preconditions.checkArgument( outputState != null, "no output registered under id %s", outputId); return new ImmediateOutput(outputState); }
Returns an immediate {@link WatermarkOutput} for the given output ID. <p>See {@link WatermarkOutputMultiplexer} for a description of immediate and deferred outputs.
getImmediateOutput
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkOutputMultiplexer.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkOutputMultiplexer.java
Apache-2.0
public WatermarkOutput getDeferredOutput(String outputId) { final PartialWatermark outputState = watermarkPerOutputId.get(outputId); Preconditions.checkArgument( outputState != null, "no output registered under id %s", outputId); return new DeferredOutput(outputState); }
Returns a deferred {@link WatermarkOutput} for the given output ID. <p>See {@link WatermarkOutputMultiplexer} for a description of immediate and deferred outputs.
getDeferredOutput
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkOutputMultiplexer.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkOutputMultiplexer.java
Apache-2.0
@Override default TimestampAssigner<T> createTimestampAssigner( TimestampAssignerSupplier.Context context) { // By default, this is {@link RecordTimestampAssigner}, // for cases where records come out of a source with valid timestamps, for example from // Kafka. return new RecordTimestampAssigner<>(); }
Instantiates a {@link TimestampAssigner} for assigning timestamps according to this strategy.
createTimestampAssigner
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
Apache-2.0
default WatermarkStrategy<T> withTimestampAssigner( TimestampAssignerSupplier<T> timestampAssigner) { checkNotNull(timestampAssigner, "timestampAssigner"); return new WatermarkStrategyWithTimestampAssigner<>(this, timestampAssigner); }
Creates a new {@code WatermarkStrategy} that wraps this strategy but instead uses the given {@link TimestampAssigner} (via a {@link TimestampAssignerSupplier}). <p>You can use this when a {@link TimestampAssigner} needs additional context, for example access to the metrics system. <pre> {@code WatermarkStrategy<Object> wmStrategy = WatermarkStrategy .forMonotonousTimestamps() .withTimestampAssigner((ctx) -> new MetricsReportingAssigner(ctx)); }</pre>
withTimestampAssigner
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
Apache-2.0
default WatermarkStrategy<T> withTimestampAssigner( SerializableTimestampAssigner<T> timestampAssigner) { checkNotNull(timestampAssigner, "timestampAssigner"); return new WatermarkStrategyWithTimestampAssigner<>( this, TimestampAssignerSupplier.of(timestampAssigner)); }
Creates a new {@code WatermarkStrategy} that wraps this strategy but instead uses the given {@link SerializableTimestampAssigner}. <p>You can use this in case you want to specify a {@link TimestampAssigner} via a lambda function. <pre> {@code WatermarkStrategy<CustomObject> wmStrategy = WatermarkStrategy .<CustomObject>forMonotonousTimestamps() .withTimestampAssigner((event, timestamp) -> event.getTimestamp()); }</pre>
withTimestampAssigner
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
Apache-2.0
default WatermarkStrategy<T> withIdleness(Duration idleTimeout) { checkNotNull(idleTimeout, "idleTimeout"); checkArgument( !(idleTimeout.isZero() || idleTimeout.isNegative()), "idleTimeout must be greater than zero"); return new WatermarkStrategyWithIdleness<>(this, idleTimeout); }
Creates a new enriched {@link WatermarkStrategy} that also does idleness detection in the created {@link WatermarkGenerator}. <p>Add an idle timeout to the watermark strategy. If no records flow in a partition of a stream for that amount of time, then that partition is considered "idle" and will not hold back the progress of watermarks in downstream operators. <p>Idleness can be important if some partitions have little data and might not have events during some periods. Without idleness, these streams can stall the overall event time progress of the application.
withIdleness
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
Apache-2.0
static <T> WatermarkStrategy<T> forMonotonousTimestamps() { return (ctx) -> new AscendingTimestampsWatermarks<>(); }
Creates a watermark strategy for situations with monotonously ascending timestamps. <p>The watermarks are generated periodically and tightly follow the latest timestamp in the data. The delay introduced by this strategy is mainly the periodic interval in which the watermarks are generated. @see AscendingTimestampsWatermarks
forMonotonousTimestamps
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
Apache-2.0
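A minimal usage sketch, assuming a hypothetical {@code MyEvent} type whose timestamps never decrease within a partition:
<pre>{@code
WatermarkStrategy<MyEvent> wmStrategy =
        WatermarkStrategy.<MyEvent>forMonotonousTimestamps()
                .withTimestampAssigner((event, timestamp) -> event.getTimestamp());
}</pre>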
static <T> WatermarkStrategy<T> forBoundedOutOfOrderness(Duration maxOutOfOrderness) { return (ctx) -> new BoundedOutOfOrdernessWatermarks<>(maxOutOfOrderness); }
Creates a watermark strategy for situations where records are out of order, but you can place an upper bound on how far the events are out of order. An out-of-order bound B means that once an event with timestamp T has been encountered, no events older than {@code T - B} will follow. <p>The watermarks are generated periodically. The delay introduced by this watermark strategy is the periodic interval length, plus the out-of-orderness bound. @see BoundedOutOfOrdernessWatermarks
forBoundedOutOfOrderness
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
Apache-2.0
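As a sketch: with a bound of 5 seconds, once an event with timestamp T is seen, the watermark trails at roughly {@code T - 5s}, so events up to 5 seconds late are still considered on time. The {@code MyEvent} type and its accessor are assumptions:
<pre>{@code
WatermarkStrategy<MyEvent> wmStrategy =
        WatermarkStrategy.<MyEvent>forBoundedOutOfOrderness(Duration.ofSeconds(5))
                .withTimestampAssigner((event, timestamp) -> event.getTimestamp());
}</pre>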
static <T> WatermarkStrategy<T> forGenerator(WatermarkGeneratorSupplier<T> generatorSupplier) { return generatorSupplier::createWatermarkGenerator; }
Creates a watermark strategy based on an existing {@link WatermarkGeneratorSupplier}.
forGenerator
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
Apache-2.0
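A sketch of plugging in a hand-written generator; the {@code MyEvent} type is an assumption, and the generator simply tracks the maximum timestamp seen so far (the built-in generators additionally guard against overflow, which is omitted here):
<pre>{@code
WatermarkStrategy<MyEvent> wmStrategy = WatermarkStrategy.forGenerator(
        ctx -> new WatermarkGenerator<MyEvent>() {
            private long maxTimestamp = Long.MIN_VALUE;

            @Override
            public void onEvent(MyEvent event, long eventTimestamp, WatermarkOutput output) {
                maxTimestamp = Math.max(maxTimestamp, eventTimestamp); // track event-time progress
            }

            @Override
            public void onPeriodicEmit(WatermarkOutput output) {
                output.emitWatermark(new Watermark(maxTimestamp - 1)); // called on the periodic interval
            }
        });
}</pre>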
static <T> WatermarkStrategy<T> noWatermarks() { return (ctx) -> new NoWatermarksGenerator<>(); }
Creates a watermark strategy that generates no watermarks at all. This may be useful in scenarios that do pure processing-time based stream processing.
noWatermarks
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkStrategy.java
Apache-2.0
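A sketch of attaching the strategy to a source; {@code env} and {@code source} are assumed to be a {@code StreamExecutionEnvironment} and a {@code Source} from the surrounding program:
<pre>{@code
DataStream<MyEvent> stream =
        env.fromSource(source, WatermarkStrategy.noWatermarks(), "events");
}</pre>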
public boolean checkIfIdle() { if (counter != lastCounter) { // activity since the last check. we reset the timer lastCounter = counter; startOfInactivityNanos = 0L; return false; } else // timer started but has not yet reached idle timeout if (startOfInactivityNanos == 0L) { // first time that we see no activity since the last periodic probe // begin the timer startOfInactivityNanos = clock.relativeTimeNanos(); return false; } else { return clock.relativeTimeNanos() - startOfInactivityNanos > maxIdleTimeNanos; } }
Checks whether the output has seen no activity for longer than the configured maximum idle time. @return True, if the output should be marked as idle; false, if there was activity since the last check or the idle timeout has not yet been reached.
checkIfIdle
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarksWithIdleness.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarksWithIdleness.java
Apache-2.0
public Configuration getConfiguration() { return configuration; }
A special {@link OpenContext} implementation for passing the configuration to a user-defined function (UDF).
getConfiguration
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/functions/WithConfigurationOpenContext.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/functions/WithConfigurationOpenContext.java
Apache-2.0
public int getInfoSize() { return 8 + 8 + 8; }
A block of 24 bytes written at the <i>end</i> of a block in a binary file, and containing i) the number of records in the block, ii) the accumulated number of records, and iii) the offset of the first record in the block.
getInfoSize
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/BlockInfo.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/BlockInfo.java
Apache-2.0
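A hypothetical sketch of the trailer this describes: three 8-byte longs, hence {@code getInfoSize()} returning 24. The field order shown follows the enumeration in the description and is an assumption here:
<pre>{@code
DataOutputStream out = new DataOutputStream(blockStream); // blockStream: assumed sink for the block
out.writeLong(recordCount);            // i) number of records in this block
out.writeLong(accumulatedRecordCount); // ii) records in all blocks up to and including this one
out.writeLong(firstRecordStart);       // iii) offset of the first record in this block
}</pre>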
public long getFirstRecordStart() { return this.firstRecordStart; }
Returns the firstRecordStart. @return the firstRecordStart
getFirstRecordStart
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/BlockInfo.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/BlockInfo.java
Apache-2.0
public long getRecordCount() { return this.recordCount; }
Returns the recordCount. @return the recordCount
getRecordCount
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/BlockInfo.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/BlockInfo.java
Apache-2.0
public long getAccumulatedRecordCount() { return this.accumulatedRecordCount; }
Returns the accumulated record count. @return the accumulated record count
getAccumulatedRecordCount
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/BlockInfo.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/BlockInfo.java
Apache-2.0
public void setAccumulatedRecordCount(long accumulatedRecordCount) { this.accumulatedRecordCount = accumulatedRecordCount; }
Sets the accumulatedRecordCount to the specified value. @param accumulatedRecordCount the accumulatedRecordCount to set
setAccumulatedRecordCount
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/BlockInfo.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/BlockInfo.java
Apache-2.0
public void setRecordCount(long recordCount) { this.recordCount = recordCount; }
Sets the recordCount to the specified value. @param recordCount the recordCount to set
setRecordCount
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/BlockInfo.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/BlockInfo.java
Apache-2.0
private boolean fillBuffer(int offset) throws IOException { int maxReadLength = this.readBuffer.length - offset; // special case for reading the whole split. if (this.splitLength == FileInputFormat.READ_WHOLE_SPLIT_FLAG) { int read = this.stream.read(this.readBuffer, offset, maxReadLength); if (read == -1) { this.stream.close(); this.stream = null; return false; } else { this.readPos = offset; this.limit = read + offset; return true; } } // else .. int toRead; if (this.splitLength > 0) { // if we have more data, read that toRead = this.splitLength > maxReadLength ? maxReadLength : (int) this.splitLength; } else { // if we have exhausted our split, we need to complete the current record, or read one // more across the next split. // the reason is that the next split will skip over the beginning until it finds the // first // delimiter, discarding it as an incomplete chunk of data that belongs to the last // record in the // previous split. toRead = maxReadLength; this.overLimit = true; } int read = this.stream.read(this.readBuffer, offset, toRead); if (read == -1) { this.stream.close(); this.stream = null; return false; } else { this.splitLength -= read; this.readPos = offset; // position from where to start reading this.limit = read + offset; // number of valid bytes in the read buffer return true; } }
Fills the read buffer with bytes read from the underlying stream, starting at the given offset. @return True, if bytes could be read; false, if the stream was exhausted (in which case the stream is also closed).
fillBuffer
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/DelimitedInputFormat.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/DelimitedInputFormat.java
Apache-2.0
protected static String extractFileExtension(String fileName) { checkNotNull(fileName); int lastPeriodIndex = fileName.lastIndexOf('.'); if (lastPeriodIndex < 0) { return null; } else { return fileName.substring(lastPeriodIndex + 1); } }
Returns the extension of a file name (not of a path). @param fileName The name of the file. @return The extension of the file name, or {@code null} if there is no extension.
extractFileExtension
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
Apache-2.0
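For example, following the {@code lastIndexOf} logic above:
<pre>{@code
extractFileExtension("data.csv");       // "csv"
extractFileExtension("archive.tar.gz"); // "gz" -- only the last extension is returned
extractFileExtension("README");         // null -- no '.' in the name
}</pre>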
public void setFilePath(Path filePath) { if (filePath == null) { throw new IllegalArgumentException("File path must not be null."); } setFilePaths(filePath); }
Sets a single path of a file to be read. @param filePath The path of the file to read.
setFilePath
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
Apache-2.0
public long getSplitStart() { return splitStart; }
Gets the start of the current split. @return The start of the split.
getSplitStart
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
Apache-2.0
public boolean acceptFile(FileStatus fileStatus) { final String name = fileStatus.getPath().getName(); return !name.startsWith("_") && !name.startsWith(".") && !filesFilter.filterPath(fileStatus.getPath()); }
A simple hook to filter files and directories from the input. The method may be overridden. Hadoop's FileInputFormat has a similar mechanism and applies the same filters by default. @param fileStatus The file status to check. @return true, if the given file or directory is accepted
acceptFile
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
Apache-2.0
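A sketch of overriding the hook in a hypothetical subclass to exclude additional files; the abstract members of {@code FileInputFormat} are omitted:
<pre>{@code
public class MyInputFormat extends FileInputFormat<String> {
    @Override
    public boolean acceptFile(FileStatus fileStatus) {
        // keep the default filtering of hidden files, additionally skip temp files
        return super.acceptFile(fileStatus)
                && !fileStatus.getPath().getName().endsWith(".tmp");
    }
    // nextRecord(...), reachedEnd(), etc. omitted for brevity
}
}</pre>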
private int getBlockIndexForPosition( BlockLocation[] blocks, long offset, long halfSplitSize, int startIndex) { // go over all indexes after the startIndex for (int i = startIndex; i < blocks.length; i++) { long blockStart = blocks[i].getOffset(); long blockEnd = blockStart + blocks[i].getLength(); if (offset >= blockStart && offset < blockEnd) { // got the block where the split starts // if less than half of the split fits into this block, assign the split to the next block if (i < blocks.length - 1 && blockEnd - offset < halfSplitSize) { return i + 1; } else { return i; } } } throw new IllegalArgumentException("The given offset is not contained in any block."); }
Retrieves the index of the <tt>BlockLocation</tt> that contains the part of the file described by the given offset. @param blocks The different blocks of the file. Must be ordered by their offset. @param offset The offset of the position in the file. @param halfSplitSize Half of the split size. If less than this amount of the split lies in the block containing the offset, the split is assigned to the next block. @param startIndex The earliest index to look at. @return The index of the block containing the given position.
getBlockIndexForPosition
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
Apache-2.0
@Override public void open(FileInputSplit fileSplit) throws IOException { this.currentSplit = fileSplit; this.splitStart = fileSplit.getStart(); final Path path = fileSplit.getPath(); this.splitLength = testForUnsplittable(path.getFileSystem().getFileStatus(path)) ? READ_WHOLE_SPLIT_FLAG : fileSplit.getLength(); if (LOG.isDebugEnabled()) { LOG.debug( "Opening input split " + fileSplit.getPath() + " [" + this.splitStart + "," + this.splitLength + "]"); } // open the split in an asynchronous thread final InputSplitOpenThread isot = new InputSplitOpenThread(fileSplit, this.openTimeout); isot.start(); try { this.stream = isot.waitForCompletion(); this.stream = decorateInputStream(this.stream, fileSplit); } catch (Throwable t) { throw new IOException( "Error opening the Input Split " + fileSplit.getPath() + " [" + splitStart + "," + splitLength + "]: " + t.getMessage(), t); } // seek to the start of the split, if the split does not begin at the start of the file if (this.splitStart != 0) { this.stream.seek(this.splitStart); } }
Opens an input stream to the file defined in the input format. The stream is positioned at the beginning of the given split. <p>The stream is actually opened in an asynchronous thread to make sure any interruptions to the thread working on the input format do not reach the file system.
open
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
Apache-2.0
public long getLastModificationTime() { return fileModTime; }
Gets the timestamp of the last modification. @return The timestamp of the last modification.
getLastModificationTime
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
Apache-2.0
private void abortWait() { this.aborted = true; final FSDataInputStream inStream = this.fdis; this.fdis = null; if (inStream != null) { try { inStream.close(); } catch (Throwable t) { // ignore exceptions on close; the wait is being aborted anyway } } }
Double-checked procedure that sets the abort flag and closes the stream.
abortWait
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
Apache-2.0
public static void initDefaultsFromConfiguration(Configuration configuration) { final boolean overwrite = configuration.get(CoreOptions.FILESYTEM_DEFAULT_OVERRIDE); DEFAULT_WRITE_MODE = overwrite ? WriteMode.OVERWRITE : WriteMode.NO_OVERWRITE; final boolean alwaysCreateDirectory = configuration.get(CoreOptions.FILESYSTEM_OUTPUT_ALWAYS_CREATE_DIRECTORY); DEFAULT_OUTPUT_DIRECTORY_MODE = alwaysCreateDirectory ? OutputDirectoryMode.ALWAYS : OutputDirectoryMode.PARONLY; }
Initialize defaults for output format. Needs to be a static method because it is configured for local cluster execution. @param configuration The configuration to load defaults from
initDefaultsFromConfiguration
java
apache/flink
flink-core/src/main/java/org/apache/flink/api/common/io/FileOutputFormat.java
https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/FileOutputFormat.java
Apache-2.0