code stringlengths 25 201k | docstring stringlengths 19 96.2k | func_name stringlengths 0 235 | language stringclasses 1 value | repo stringlengths 8 51 | path stringlengths 11 314 | url stringlengths 62 377 | license stringclasses 7 values |
|---|---|---|---|---|---|---|---|
public static void main(String[] args) throws Exception {
// Parse command-line arguments; --numRecords is required.
final ParameterTool parameterTool = ParameterTool.fromArgs(args);
if (parameterTool.getNumberOfParameters() < 1) {
System.out.println("Missing parameters!\nUsage: --numRecords <numRecords>");
return;
}
int numRecordsToEmit = parameterTool.getInt("numRecords");
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Checkpoint every 5s so the quickstart job also exercises Flink's checkpointing path.
env.enableCheckpointing(5000);
// Emit the sequence 0..numRecords-1 and prefix each value via Utils::prefix, which lives
// in the separately packaged dummy dependency (this verifies quickstart packaging).
DataStream<String> source =
env.fromSequence(0, numRecordsToEmit - 1)
.map((MapFunction<Long, String>) Utils::prefix);
// collectAsync() must be registered before execute(); the iterator then streams the
// job's results back to the client and is closed by try-with-resources.
try (CloseableIterator<String> data = source.collectAsync()) {
env.execute("Quickstart example");
// Count every record delivered to the client; a mismatch fails the end-to-end test.
int count = 0;
while (data.hasNext()) {
data.next();
count++;
}
if (count != numRecordsToEmit) {
throw new RuntimeException(
String.format(
"Unexpected number of records; expected :%s actual: %s",
numRecordsToEmit, count));
}
}
} | End to end test for quickstarts. | main | java | apache/flink | flink-end-to-end-tests/flink-quickstart-test/src/main/java/org/apache/flink/quickstarts/test/QuickstartExample.java | https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-quickstart-test/src/main/java/org/apache/flink/quickstarts/test/QuickstartExample.java | Apache-2.0 |
public static String prefix(Long message) {
// Render the value as a "message #<n>" label using an explicit builder.
StringBuilder label = new StringBuilder("message #");
label.append(message);
return label.toString();
} | Dummy util to test packaging of Flink dependencies in quickstarts. | prefix | java | apache/flink | flink-end-to-end-tests/flink-quickstart-test-dummy-dependency/src/main/java/org/apache/flink/quickstarts/test/utils/Utils.java | https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-quickstart-test-dummy-dependency/src/main/java/org/apache/flink/quickstarts/test/utils/Utils.java | Apache-2.0 |
/**
 * Replaces all substrings of {@code input} matching the regular expression {@code regex}
 * with {@code replacement}.
 *
 * @param input the string to transform; may be null
 * @param regex the regular expression to match; may be null
 * @param replacement the replacement text (supports capture-group references); may be null
 * @return the replaced string, or null if any argument is null (SQL NULL semantics)
 */
public String eval(String input, String regex, String replacement) {
// SQL scalar functions conventionally map NULL input to NULL output; the previous
// implementation threw a NullPointerException on any null argument.
if (input == null || regex == null || replacement == null) {
return null;
}
return input.replaceAll(regex, replacement);
} | Scalar function for replacing all occurrences of a regular expression with a replacement string. | eval | java | apache/flink | flink-end-to-end-tests/flink-sql-client-test/src/main/java/org/apache/flink/table/toolbox/StringRegexReplaceFunction.java | https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-sql-client-test/src/main/java/org/apache/flink/table/toolbox/StringRegexReplaceFunction.java | Apache-2.0 |
public static void main(String[] args) throws Exception {
// --outputPath is mandatory: the StreamingFileSink writes the final result there.
ParameterTool params = ParameterTool.fromArgs(args);
String outputPath = params.getRequired("outputPath");
final StreamExecutionEnvironment sEnv =
StreamExecutionEnvironment.getExecutionEnvironment();
// Fixed-delay restart (3 attempts, 10s apart) so the job survives the deliberate
// failure injected by KillMapper on the first attempt.
Configuration configuration = new Configuration();
configuration.set(RestartStrategyOptions.RESTART_STRATEGY, "fixed-delay");
configuration.set(RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_ATTEMPTS, 3);
configuration.set(
RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_DELAY, Duration.ofSeconds(10L));
sEnv.configure(configuration);
sEnv.enableCheckpointing(4000);
sEnv.getConfig().setAutoWatermarkInterval(1000);
final StreamTableEnvironment tEnv = StreamTableEnvironment.create(sEnv);
// Both input tables share this schema; event time comes from "rowtime" with a
// one-second out-of-orderness bound.
final Schema tableSchema =
Schema.newBuilder()
.column("key", DataTypes.INT())
.column("rowtime", DataTypes.TIMESTAMP(3).bridgedTo(Timestamp.class))
.column("payload", DataTypes.STRING())
.watermark("rowtime", "rowtime - interval '1' second")
.build();
RowTypeInfo sourceType =
new RowTypeInfo(
new TypeInformation[] {Types.INT, Types.SQL_TIMESTAMP, Types.STRING},
new String[] {"key", "rowtime", "payload"});
// Two generator sources with different rates/offsets feed the two SQL tables.
DataStream<Row> source1 = sEnv.addSource(new Generator(10, 100, 60, 0), sourceType);
tEnv.createTemporaryView("table1", source1, tableSchema);
DataStream<Row> source2 = sEnv.addSource(new Generator(5, 0.2f, 60, 5), sourceType);
tEnv.createTemporaryView("table2", source2, tableSchema);
int overWindowSizeSeconds = 1;
int tumbleWindowSizeSeconds = 10;
// OVER window aggregation: per-key running count over a 1-second range.
String overQuery =
String.format(
"SELECT "
+ "  key, "
+ "  rowtime, "
+ "  COUNT(*) OVER (PARTITION BY key ORDER BY rowtime RANGE BETWEEN INTERVAL '%d' SECOND PRECEDING AND CURRENT ROW) AS cnt "
+ "FROM table1",
overWindowSizeSeconds);
// Tumbling-window aggregation over the OVER-query result; encodes correctness as
// 1 (expected average 101) vs 99 (anything else).
String tumbleQuery =
String.format(
"SELECT "
+ "  key, "
+ "  CASE SUM(cnt) / COUNT(*) WHEN 101 THEN 1 ELSE 99 END AS correct, "
+ "  TUMBLE_START(rowtime, INTERVAL '%d' SECOND) AS wStart, "
+ "  TUMBLE_ROWTIME(rowtime, INTERVAL '%d' SECOND) AS rowtime "
+ "FROM (%s) "
+ "WHERE rowtime > TIMESTAMP '1970-01-01 00:00:01' "
+ "GROUP BY key, TUMBLE(rowtime, INTERVAL '%d' SECOND)",
tumbleWindowSizeSeconds,
tumbleWindowSizeSeconds,
overQuery,
tumbleWindowSizeSeconds);
// Windowed (time-bounded) inner join of table2 against the tumble-query result.
String joinQuery =
String.format(
"SELECT "
+ "  t1.key, "
+ "  t2.rowtime AS rowtime, "
+ "  t2.correct,"
+ "  t2.wStart "
+ "FROM table2 t1, (%s) t2 "
+ "WHERE "
+ "  t1.key = t2.key AND "
+ "  t1.rowtime BETWEEN t2.rowtime AND t2.rowtime + INTERVAL '%d' SECOND",
tumbleQuery, tumbleWindowSizeSeconds);
// Final non-keyed tumbling aggregation producing the constant expected sums.
String finalAgg =
String.format(
"SELECT "
+ "  SUM(correct) AS correct, "
+ "  TUMBLE_START(rowtime, INTERVAL '20' SECOND) AS rowtime "
+ "FROM (%s) "
+ "GROUP BY TUMBLE(rowtime, INTERVAL '20' SECOND)",
joinQuery);
// get Table for SQL query
Table result = tEnv.sqlQuery(finalAgg);
// convert Table into append-only DataStream
DataStream<Row> resultStream =
tEnv.toDataStream(
result,
DataTypes.ROW(
DataTypes.INT(), DataTypes.TIMESTAMP().bridgedTo(Timestamp.class)));
// Roll files on checkpoint so the output is consistent across the induced restart.
final StreamingFileSink<Row> sink =
StreamingFileSink.forRowFormat(
new Path(outputPath),
(Encoder<Row>)
(element, stream) -> {
PrintStream out = new PrintStream(stream);
out.println(element.toString());
})
.withBucketAssigner(new KeyBucketAssigner())
.withRollingPolicy(OnCheckpointRollingPolicy.build())
.build();
resultStream
// inject a KillMapper that forwards all records but terminates the first execution
// attempt
.map(new KillMapper())
.setParallelism(1)
// add sink function
.addSink(sink)
.setParallelism(1);
sEnv.execute();
} | End-to-end test for Stream SQL queries.
<p>Includes the following SQL features: - OVER window aggregation - keyed and non-keyed GROUP BY
TUMBLE aggregation - windowed INNER JOIN - TableSource with event-time attribute
<p>The stream is bounded and will complete after about a minute. The result is always constant.
The job is killed on the first attempt and restarted.
<p>Parameters: -outputPath Sets the path to where the result data is written. | main | java | apache/flink | flink-end-to-end-tests/flink-stream-sql-test/src/main/java/org/apache/flink/sql/tests/StreamSQLTestProgram.java | https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-stream-sql-test/src/main/java/org/apache/flink/sql/tests/StreamSQLTestProgram.java | Apache-2.0 |
/**
 * Registers both the table-level and column-level statistics for {@code table} in the
 * current catalog/database of {@code tEnv}.
 *
 * <p>Previously the catalog was looked up twice and the {@link ObjectPath} constructed
 * twice for the two alter calls; a single lookup performs both registrations.
 *
 * @param tEnv the table environment whose current catalog receives the statistics
 * @param table the table name within the current database
 */
public void register2Catalog(TableEnvironment tEnv, String table) {
tEnv.getCatalog(tEnv.getCurrentCatalog())
.ifPresent(
catalog -> {
ObjectPath tablePath = new ObjectPath(tEnv.getCurrentDatabase(), table);
try {
// ignoreIfNotExists=false: a missing table is surfaced as a failure.
catalog.alterTableStatistics(tablePath, catalogTableStatistics, false);
catalog.alterTableColumnStatistics(tablePath, catalogColumnStatistics, false);
} catch (Exception e) {
throw new RuntimeException(e);
}
});
} | Class to describe catalog table statistics. Consists of {@link CatalogTableStatistics} and {@link
CatalogColumnStatistics}. | register2Catalog | java | apache/flink | flink-end-to-end-tests/flink-tpcds-test/src/main/java/org/apache/flink/table/tpcds/stats/CatalogTableStats.java | https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-tpcds-test/src/main/java/org/apache/flink/table/tpcds/stats/CatalogTableStats.java | Apache-2.0 |
public static void main(String[] args) throws IOException {
// args[0] = expected result file, args[1] = directory holding the actual result.
if (args.length != 2) {
System.out.println(
"Exactly 2 paths must be provided, the expected result path and the actual result path");
System.exit(1);
}
String expectedPath = args[0];
String actualPath = args[1];
// The actual result path is a directory; a single partition file is required so the
// line-by-line comparison below is deterministic.
File[] partitions = new File(actualPath).listFiles();
if (partitions == null) {
throw new IllegalArgumentException(
String.format(
"The specified actual result path: %s doesn't exists.", actualPath));
}
if (partitions.length > 1) {
throw new UnsupportedOperationException(
"Please set the sink.parallelism 1 to keep the partition number is 1.");
}
// NOTE(review): FileReader uses the platform default charset — presumably the TPC-H
// result files are ASCII; confirm, or switch to an explicit UTF-8 reader.
try (BufferedReader expectedReader = new BufferedReader(new FileReader(expectedPath));
BufferedReader actualReader = new BufferedReader(new FileReader(partitions[0]))) {
int expectedLineNum = 0;
int actualLineNum = 0;
String expectedLine, actualLine;
// Compare pairwise until either file is exhausted; columns are pipe-separated.
while ((expectedLine = expectedReader.readLine()) != null
&& (actualLine = actualReader.readLine()) != null) {
String[] expected = expectedLine.split("\\|");
expectedLineNum++;
String[] actual = actualLine.split("\\|");
actualLineNum++;
if (expected.length != actual.length) {
System.out.println(
"Incorrect number of columns on line "
+ actualLineNum
+ "! Expecting "
+ expected.length
+ " columns, but found "
+ actual.length
+ " columns.");
System.exit(1);
}
// Per column: try exact integer comparison first, then decimal comparison with
// the 1% tolerance, and finally trimmed string equality.
for (int i = 0; i < expected.length; i++) {
boolean failed;
try {
long e = Long.valueOf(expected[i]);
long a = Long.valueOf(actual[i]);
failed = (e != a);
} catch (NumberFormatException nfe) {
try {
double e = Double.valueOf(expected[i]);
double a = Double.valueOf(actual[i]);
// Opposite signs can never fall within the 1% tolerance band.
if (e < 0 && a > 0 || e > 0 && a < 0) {
failed = true;
} else {
// Normalize to non-negative values before applying the tolerance.
if (e < 0) {
e = -e;
a = -a;
}
double t = round(a, 2);
// defined in TPC-H standard specification v2.18.0 section 2.1.3.5
failed = (e * 0.99 > t || e * 1.01 < t);
}
} catch (NumberFormatException nfe2) {
// String column: quotes in the actual output are stripped first.
failed =
!expected[i]
.trim()
.equals(actual[i].replaceAll("\"", "").trim());
}
}
if (failed) {
System.out.println(
"Incorrect result on line "
+ actualLineNum
+ " column "
+ (i + 1)
+ "! Expecting "
+ expected[i]
+ ", but found "
+ actual[i]
+ ".");
System.exit(1);
}
}
}
// Drain any remaining lines of either file to detect row-count mismatches.
while (expectedReader.readLine() != null) {
expectedLineNum++;
}
while (actualReader.readLine() != null) {
actualLineNum++;
}
if (expectedLineNum != actualLineNum) {
System.out.println(
"Incorrect number of lines! Expecting "
+ expectedLineNum
+ " lines, but found "
+ actualLineNum
+ " lines.");
System.exit(1);
}
}
} | Result comparator for TPC-H test, according to the TPC-H standard specification v2.18.0. | main | java | apache/flink | flink-end-to-end-tests/flink-tpch-test/src/main/java/org/apache/flink/table/tpch/TpchResultComparator.java | https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-tpch-test/src/main/java/org/apache/flink/table/tpch/TpchResultComparator.java | Apache-2.0 |
private static double round(double x, int m) {
// Half-up rounding of x to m decimal places, per TPC-H spec v2.18.0 chapter 10.
if (x < 0) {
throw new IllegalArgumentException("x must be non-negative");
}
// Adding half of the last kept decimal place turns floor() into half-up rounding.
final double biased = x + 5 * Math.pow(10, -m - 1);
final double scale = Math.pow(10, m);
final double truncated = Math.floor(biased * scale);
return truncated / scale;
} | Rounding function defined in TPC-H standard specification v2.18.0 chapter 10. | round | java | apache/flink | flink-end-to-end-tests/flink-tpch-test/src/main/java/org/apache/flink/table/tpch/TpchResultComparator.java | https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-tpch-test/src/main/java/org/apache/flink/table/tpch/TpchResultComparator.java | Apache-2.0 |
public CompletableFuture<String> query(int key) {
// Simulate a flaky remote lookup: random latency plus an occasional failure.
return CompletableFuture.supplyAsync(
() -> {
// Random delay in [0, 100) ms to mimic network latency.
long delayMillis = (long) (ThreadLocalRandom.current().nextFloat() * 100);
try {
Thread.sleep(delayMillis);
} catch (InterruptedException e) {
throw new RuntimeException("AsyncClient was interrupted", e);
}
// Fail roughly one call in a thousand to exercise async error handling.
if (ThreadLocalRandom.current().nextFloat() < 0.001f) {
throw new RuntimeException("wahahahaha...");
}
return "key" + (key % 10);
});
} | A simple asynchronous client that simulates interacting with an unreliable external service. | query | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/async/AsyncClient.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/async/AsyncClient.java | Apache-2.0 |
public static void main(String[] args) throws Exception {
// Build a streaming job that prints 100 generated strings per second.
final StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
environment.setParallelism(4);
// Each generated record is simply the sequence index rendered as text.
GeneratorFunction<Long, String> recordBuilder = index -> "Number: " + index;
DataGeneratorSource<String> generatorSource =
new DataGeneratorSource<>(
recordBuilder,
Long.MAX_VALUE,
RateLimiterStrategy.perSecond(100),
Types.STRING);
DataStreamSource<String> generatedStream =
environment.fromSource(generatorSource, WatermarkStrategy.noWatermarks(), "Data Generator");
generatedStream.print();
environment.execute("Data Generator Source Example");
} | An example for generating data with a {@link DataGeneratorSource}. | main | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/datagen/DataGenerator.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/datagen/DataGenerator.java | Apache-2.0 |
public static void main(String[] args) throws Exception {
// Checkpoint every 3s; the rate limiter below emits one full cycle of the
// element list per checkpoint interval.
final StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
environment.enableCheckpointing(3000);
environment.setParallelism(1);
final String[] elements = new String[] {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"};
final int cycleLength = elements.length;
// Cycle through the fixed element list by index.
final GeneratorFunction<Long, String> cyclicGenerator =
index -> elements[(int) (index % cycleLength)];
final DataGeneratorSource<String> generatorSource =
new DataGeneratorSource<>(
cyclicGenerator,
Long.MAX_VALUE,
RateLimiterStrategy.perCheckpoint(cycleLength),
Types.STRING);
final DataStreamSource<String> generatedStream =
environment.fromSource(generatorSource, WatermarkStrategy.noWatermarks(), "Data Generator");
generatedStream.print();
environment.execute("Data Generator Source Example");
} | An example for generating specific data per checkpoint with a {@link DataGeneratorSource} . | main | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/datagen/DataGeneratorPerCheckpoint.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/datagen/DataGeneratorPerCheckpoint.java | Apache-2.0 |
@Override
public void processRecord(
GradePojo leftRecord,
SalaryPojo rightRecord,
Collector<GradeAndSalaryPojo> output,
RuntimeContext ctx)
throws Exception {
// Combine the matched (name, grade) and (name, salary) records into one output row.
GradeAndSalaryPojo joined =
new GradeAndSalaryPojo(leftRecord.name, leftRecord.grade, rightRecord.salary);
output.collect(joined);
} | Join the function of grade (name, grade) and salary (name, salary) to produce an output of
(name, grade, salary). | processRecord | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/dsv2/join/Join.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/dsv2/join/Join.java | Apache-2.0 |
@Override
public String toString() {
// CSV rendering: productId,timestamp,sales — sales fixed to two decimals.
return this.productId + "," + this.timestamp + "," + String.format("%.2f", this.sales);
} | {@link CumulativeSales} represents the cumulative sales at a certain moment of a product. | toString | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/dsv2/watermark/CountSales.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/dsv2/watermark/CountSales.java | Apache-2.0 |
public static void main(String[] args) throws Exception {
// parse the parameters (source parallelism defaults to 5)
final ParameterTool parameters = ParameterTool.fromArgs(args);
final int sourceParallelism = parameters.getInt("parallelism", 5);
// obtain execution environment
ExecutionEnvironment environment = ExecutionEnvironment.getInstance();
// The Order source declares and generates event-time watermarks itself.
NonKeyedPartitionStream<Order> orders =
environment
.fromSource(new WrappedSource<>(new OrderSource()), "order source")
.withParallelism(sourceParallelism);
orders
// partition by product so each product's sales are counted together
.keyBy(order -> order.productId)
// downstream function reacts to the event-time watermarks
.process(new CountSalesProcessFunction())
.toSink(new WrappedSink<>(new PrintSink<>()));
// execute program
environment.execute("Count Sales");
} | Firstly, we define an event time watermark, which represents the time of currently processing
event. Since the watermark needs to convey the timestamp, its data type is long. To determine
the minimum event time across all watermarks, we utilize the combineFunctionMin() method to
combine the watermarks. The default handling strategy is forward, meaning that the watermark
will typically be advanced to downstream operators in most scenarios. Thus, we create a
WatermarkDeclaration instance that can be used to declare and generate the watermark. | main | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/dsv2/watermark/CountSales.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/dsv2/watermark/CountSales.java | Apache-2.0 |
@Override
public Set<? extends WatermarkDeclaration> declareWatermarks() {
// This source emits exactly one watermark type: the event-time watermark.
final Set<? extends WatermarkDeclaration> declarations =
Set.of(EVENT_TIME_WATERMARK_DECLARATION);
return declarations;
} | Source of Orders. We will declare and generate the event time watermark in this source. | declareWatermarks | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/dsv2/watermark/CountSales.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/dsv2/watermark/CountSales.java | Apache-2.0 |
@Override
public void onTrigger(
Collector<ProductSales> output,
PartitionedContext<ProductSales> ctx,
OneInputWindowContext<Order> windowContext)
throws Exception {
// The partition key is the product whose window just fired.
long productId = ctx.getStateManager().getCurrentKey();
// Every buffered order contributes one unit to the sales quantity.
long totalQuantity = 0;
for (Order unused : windowContext.getAllRecords()) {
totalQuantity += 1;
}
// Emit (product, window start, quantity) for this window.
output.collect(new ProductSales(productId, windowContext.getStartTime(), totalQuantity));
} | Count sales quantity per product.
<p>We will obtain all orders and calculate the total sales quantity for each product when
window trigger. | onTrigger | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/dsv2/windowing/CountProductSalesWindowing.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/dsv2/windowing/CountProductSalesWindowing.java | Apache-2.0 |
@Override
public void processRecord(
String record,
Collector<Tuple2<String, Integer>> output,
PartitionedContext<Tuple2<String, Integer>> ctx)
throws Exception {
// Lower-case the line and break it on any run of non-word characters,
// emitting one (word, 1) pair per token.
for (String word : record.toLowerCase().split("\\W+")) {
// Splitting can yield an empty leading token; skip it.
if (!word.isEmpty()) {
output.collect(new Tuple2<>(word, 1));
}
}
} | Implements the string tokenizer that splits sentences into words as a user-defined
ProcessFunction. The process function takes a line (String) and splits it into multiple pairs
in the form of "(word,1)" ({@code Tuple2<String, Integer>}). | processRecord | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/dsv2/wordcount/WordCount.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/dsv2/wordcount/WordCount.java | Apache-2.0 |
@Override
public void processElement(
String value, Context ctx, Collector<Tuple2<String, Integer>> out)
throws Exception {
// Lower-case the line and split it on non-word characters.
for (String word : value.toLowerCase().split("\\W+")) {
if (word.length() > 5) {
// Words longer than five characters are routed to the side output.
ctx.output(rejectedWordsTag, word);
} else if (!word.isEmpty()) {
out.collect(new Tuple2<>(word, 1));
}
}
} | Implements the string tokenizer that splits sentences into words as a user-defined
FlatMapFunction. The function takes a line (String) and splits it into multiple pairs in the
form of "(word,1)" ({@code Tuple2<String, Integer>}).
<p>This rejects words that are longer than 5 characters long. | processElement | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/sideoutput/SideOutputExample.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/sideoutput/SideOutputExample.java | Apache-2.0 |
public static void main(String[] args) throws Exception {
// the host and the port to connect to
final String hostname;
final int port;
final boolean asyncState;
try {
final ParameterTool params = ParameterTool.fromArgs(args);
hostname = params.has("hostname") ? params.get("hostname") : "localhost";
port = params.getInt("port");
asyncState = params.has("async-state");
} catch (Exception e) {
System.err.println(
"No port specified. Please run 'SocketWindowWordCount "
+ "--hostname <hostname> --port <port> [--asyncState]', where hostname (localhost by default) "
+ "and port is the address of the text server");
System.err.println(
"To start a simple text server, run 'netcat -l <port>' and "
+ "type the input text into the command line");
return;
}
// get the execution environment
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// get input data by connecting to the socket
DataStream<String> text = env.socketTextStream(hostname, port, "\n");
// parse the data, group it, window it, and aggregate the counts;
// the explicit Types.POJO hint is required because the lambda erases type info
KeyedStream<WordWithCount, String> keyedStream =
text.flatMap(
(FlatMapFunction<String, WordWithCount>)
(value, out) -> {
for (String word : value.split("\\s")) {
out.collect(new WordWithCount(word, 1L));
}
},
Types.POJO(WordWithCount.class))
.keyBy(value -> value.word);
// optionally switch the keyed state access to the asynchronous state API
if (asyncState) {
keyedStream = keyedStream.enableAsyncState();
}
// 5-second processing-time tumbling windows, summing counts per word
DataStream<WordWithCount> windowCounts =
keyedStream
.window(TumblingProcessingTimeWindows.of(Duration.ofSeconds(5)))
.reduce((a, b) -> new WordWithCount(a.word, a.count + b.count))
.returns(WordWithCount.class);
// print the results with a single thread, rather than in parallel
windowCounts.print().setParallelism(1);
env.execute("Socket Window WordCount");
} | Implements a streaming windowed version of the "WordCount" program.
<p>This program connects to a server socket and reads strings from the socket. The easiest way to
try this out is to open a text server (at port 12345) using the <i>netcat</i> tool via
<pre>
nc -l 12345 on Linux or nc -l -p 12345 on Windows
</pre>
<p>and run this example with the hostname and the port as arguments. | main | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/socket/SocketWindowWordCount.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/socket/SocketWindowWordCount.java | Apache-2.0 |
@Override
public String toString() {
// Render as "<word> : <count>".
StringBuilder rendered = new StringBuilder();
rendered.append(word).append(" : ").append(count);
return rendered.toString();
} | Data type for words with count. | toString | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/socket/SocketWindowWordCount.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/socket/SocketWindowWordCount.java | Apache-2.0 |
public static void main(String[] args) throws Exception {
// ---- print some usage help ----
System.out.println(
"Usage with built-in data generator: StateMachineExample [--error-rate <probability-of-invalid-transition>] [--sleep <sleep-per-record-in-ms> | --rps <records-per-second>]");
System.out.println(
"Usage with Kafka: StateMachineExample --kafka-topic <topic> [--brokers <brokers>]");
System.out.println("Options for both the above setups: ");
System.out.println("\t[--backend <hashmap|rocksdb|forst>]");
System.out.println("\t[--checkpoint-dir <filepath>]");
System.out.println("\t[--incremental-checkpoints <true|false>]");
System.out.println("\t[--output <filepath> OR null for stdout]");
System.out.println();
// ---- determine whether to use the built-in source, or read from Kafka ----
final DataStream<Event> events;
final ParameterTool params = ParameterTool.fromArgs(args);
// create the environment to create streams and configure execution
Configuration configuration = new Configuration();
// NOTE(review): the default "memory" matches neither branch below, so no backend
// options are applied unless --backend is hashmap/rocksdb/forst — confirm intended.
final String stateBackend = params.get("backend", "memory");
if ("hashmap".equals(stateBackend)) {
final String checkpointDir = params.get("checkpoint-dir");
configuration.set(StateBackendOptions.STATE_BACKEND, "hashmap");
configuration.set(CheckpointingOptions.CHECKPOINT_STORAGE, "filesystem");
configuration.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, checkpointDir);
} else if ("rocksdb".equals(stateBackend) || "forst".equals(stateBackend)) {
final String checkpointDir = params.get("checkpoint-dir");
boolean incrementalCheckpoints = params.getBoolean("incremental-checkpoints", false);
configuration.set(StateBackendOptions.STATE_BACKEND, stateBackend);
configuration.set(CheckpointingOptions.INCREMENTAL_CHECKPOINTS, incrementalCheckpoints);
configuration.set(CheckpointingOptions.CHECKPOINT_STORAGE, "filesystem");
configuration.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, checkpointDir);
}
final StreamExecutionEnvironment env =
StreamExecutionEnvironment.getExecutionEnvironment(configuration);
env.enableCheckpointing(2000L);
if (params.has("kafka-topic")) {
// set up the Kafka reader
String kafkaTopic = params.get("kafka-topic");
String brokers = params.get("brokers", "localhost:9092");
System.out.printf("Reading from kafka topic %s @ %s\n", kafkaTopic, brokers);
System.out.println();
KafkaSource<Event> source =
KafkaSource.<Event>builder()
.setBootstrapServers(brokers)
.setGroupId("stateMachineExample")
.setTopics(kafkaTopic)
.setDeserializer(
KafkaRecordDeserializationSchema.valueOnly(
new EventDeSerializationSchema()))
.setStartingOffsets(OffsetsInitializer.latest())
.build();
events =
env.fromSource(
source, WatermarkStrategy.noWatermarks(), "StateMachineExampleSource");
} else {
// built-in generator: rate is taken from --rps, falling back to a rate derived
// from the legacy --sleep option and the environment parallelism
final double errorRate = params.getDouble("error-rate", 0.0);
final int sleep = params.getInt("sleep", 1);
final double recordsPerSecond =
params.getDouble("rps", rpsFromSleep(sleep, env.getParallelism()));
System.out.printf(
"Using standalone source with error rate %f and %.1f records per second\n",
errorRate, recordsPerSecond);
System.out.println();
GeneratorFunction<Long, Event> generatorFunction =
new EventsGeneratorFunction(errorRate);
DataGeneratorSource<Event> eventGeneratorSource =
new DataGeneratorSource<>(
generatorFunction,
Long.MAX_VALUE,
RateLimiterStrategy.perSecond(recordsPerSecond),
TypeInformation.of(Event.class));
events =
env.fromSource(
eventGeneratorSource,
WatermarkStrategy.noWatermarks(),
"Events Generator Source");
}
// ---- main program ----
final String outputFile = params.get("output");
// make parameters available in the web interface
env.getConfig().setGlobalJobParameters(params);
DataStream<Alert> alerts =
events
// partition on the address to make sure equal addresses
// end up in the same state machine flatMap function
.keyBy(Event::sourceAddress)
.enableAsyncState()
// the function that evaluates the state machine over the sequence of events
.flatMap(new StateMachineMapper());
// output the alerts to std-out
if (outputFile == null) {
alerts.print();
} else {
// roll output files at 1 MiB or every 10 seconds, whichever comes first
alerts.sinkTo(
FileSink.<Alert>forRowFormat(
new Path(outputFile), new SimpleStringEncoder<>())
.withRollingPolicy(
DefaultRollingPolicy.builder()
.withMaxPartSize(MemorySize.ofMebiBytes(1))
.withRolloverInterval(Duration.ofSeconds(10))
.build())
.build())
.setParallelism(1)
.name("output");
}
// trigger program execution
env.execute("State machine job");
} | Main entry point for the program.
@param args The command line arguments. | main | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/StateMachineExample.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/StateMachineExample.java | Apache-2.0 |
public boolean isTerminal() {
// A state with no outgoing transitions can never be left again.
final int outgoing = transitions.length;
return outgoing == 0;
} | Checks if this state is a terminal state. A terminal state has no outgoing transitions. | isTerminal | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/dfa/State.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/dfa/State.java | Apache-2.0 |
public State transition(EventType evt) {
// Scan the outgoing transitions for one triggered by this event type.
for (int i = 0; i < transitions.length; i++) {
if (transitions[i].eventType() == evt) {
return transitions[i].targetState();
}
}
// No outgoing transition accepts this event: the transition is illegal.
return InvalidTransition;
} | Gets the state after transitioning from this state based on the given event. If the
transition is valid, this returns the new state, and if this transition is illegal, it
returns [[InvalidTransition]].
@param evt The event that defined the transition.
@return The new state, or [[InvalidTransition]]. | transition | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/dfa/State.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/dfa/State.java | Apache-2.0 |
public EventTypeAndState randomTransition(Random rnd) {
// Terminal states have no outgoing transitions to sample from.
if (isTerminal()) {
throw new RuntimeException("Cannot transition from state " + name());
}
// Sample proportionally to the transition probabilities: draw u in [0,1)
// and walk the cumulative distribution until it covers u.
final float u = rnd.nextFloat();
float cumulative = 0.0f;
Transition chosen = null;
for (Transition candidate : transitions) {
cumulative += candidate.prob();
if (u <= cumulative) {
chosen = candidate;
break;
}
}
// Probabilities are expected to sum to 1, so a transition is always found.
assert chosen != null;
return new EventTypeAndState(chosen.eventType(), chosen.targetState());
} | Picks a random transition, based on the probabilities of the outgoing transitions of this
state.
@param rnd The random number generator to use.
@return A pair of (transition event , new state). | randomTransition | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/dfa/State.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/dfa/State.java | Apache-2.0 |
public EventType randomInvalidTransition(Random rnd) {
// Rejection sampling: keep drawing random event types until one is found
// that this state does not accept.
final EventType[] allTypes = EventType.values();
EventType candidate;
do {
candidate = allTypes[rnd.nextInt(allTypes.length)];
} while (transition(candidate) != InvalidTransition);
return candidate;
} | Returns an event type that, if applied as a transition on this state, will result in an
illegal state transition.
@param rnd The random number generator to use.
@return And event type for an illegal state transition. | randomInvalidTransition | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/dfa/State.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/dfa/State.java | Apache-2.0 |
public static String formatAddress(int address) {
// Decompose the 32-bit value into four unsigned bytes, most significant first.
final int[] octets = {
(address >>> 24) & 0xff,
(address >>> 16) & 0xff,
(address >>> 8) & 0xff,
address & 0xff
};
return octets[0] + "." + octets[1] + "." + octets[2] + "." + octets[3];
} | Util method to create a string representation of a 32 bit integer representing an IPv4
address.
@param address The address, MSB first.
@return The IP address string. | formatAddress | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/event/Event.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/event/Event.java | Apache-2.0 |
/**
 * Creates the next random event.
 *
 * <p>With probability {@code max(0, 1 - states.size() / 1000)} a new state machine is
 * started for a random IP address in {@code [minIp, maxIp)}; otherwise one of the
 * currently tracked machines is advanced by a random transition. With probability
 * {@code errorProb} the picked machine emits an invalid-transition event instead.
 *
 * @param minIp lower bound (inclusive) for newly picked IP addresses
 * @param maxIp upper bound (exclusive) for newly picked IP addresses
 * @return the next random event
 */
public Event next(int minIp, int maxIp) {
    final double p = rnd.nextDouble();
    if (p * 1000 >= states.size()) {
        // create a new state machine
        final int nextIP = rnd.nextInt(maxIp - minIp) + minIp;
        if (!states.containsKey(nextIP)) {
            // start from the Initial state and remember where the machine ended up
            EventTypeAndState eventAndState = State.Initial.randomTransition(rnd);
            states.put(nextIP, eventAndState.state);
            return new Event(eventAndState.eventType, nextIP);
        } else {
            // collision on IP address, try again
            return next(minIp, maxIp);
        }
    } else {
        // pick an existing state machine
        // skip over some elements in the linked map, then take the next
        // update it, and insert it at the end
        // NOTE(review): this relies on 'states' preserving insertion order
        // (a LinkedHashMap, per the comment above) — verify at the field declaration
        int numToSkip = Math.min(20, rnd.nextInt(states.size()));
        Iterator<Entry<Integer, State>> iter = states.entrySet().iterator();
        for (int i = numToSkip; i > 0; --i) {
            iter.next();
        }
        Entry<Integer, State> entry = iter.next();
        State currentState = entry.getValue();
        int address = entry.getKey();
        // remove now; re-inserted below only if the machine has not terminated
        iter.remove();
        if (p < errorProb) {
            // emit an event that is NOT a legal transition of the current state
            EventType event = currentState.randomInvalidTransition(rnd);
            return new Event(event, address);
        } else {
            EventTypeAndState eventAndState = currentState.randomTransition(rnd);
            if (!eventAndState.state.isTerminal()) {
                // reinsert (moves the machine to the end of the iteration order)
                states.put(address, eventAndState.state);
            }
            return new Event(eventAndState.eventType, address);
        }
    }
} | Creates a new random event. This method randomly pick either one of its currently running
state machines, or start a new state machine for a random IP address.
<p>With {@link #errorProb} probability, the generated event will be from an illegal state
transition of one of the currently running state machines.
@param minIp The lower bound for the range from which a new IP address may be picked.
@param maxIp The upper bound for the range from which a new IP address may be picked.
@return A next random event. | next | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/generator/EventsGenerator.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/generator/EventsGenerator.java | Apache-2.0 |
/**
 * Starts one generator thread per collector (each covering a disjoint IP range), plus a
 * throughput logger, then runs an interactive stdin command loop until 'q' is entered.
 *
 * <p>Commands: 'q' quit, '+' double the per-event delay, '-' halve it,
 * 'e' inject an invalid state transition (round-robin over the threads).
 *
 * @param collectors one collector per generator thread to push events into
 * @throws IOException if reading from stdin fails
 */
public static void runGenerator(Collector<Event>[] collectors) throws IOException {
    final GeneratorThread[] threads = new GeneratorThread[collectors.length];
    // partition the positive int space so each thread owns a disjoint IP range
    final int range = Integer.MAX_VALUE / collectors.length;
    // create the generator threads
    for (int i = 0; i < threads.length; i++) {
        int min = range * i;
        int max = min + range;
        GeneratorThread thread = new GeneratorThread(collectors[i], min, max);
        threads[i] = thread;
        thread.setName("Generator " + i);
    }
    long delay = 2L;
    int nextErroneous = 0;
    boolean running = true;
    for (GeneratorThread t : threads) {
        t.setDelay(delay);
        t.start();
    }
    final ThroughputLogger throughputLogger = new ThroughputLogger(threads);
    throughputLogger.start();
    System.out.println("Commands:");
    System.out.println(" -> q : Quit");
    System.out.println(" -> + : increase latency");
    System.out.println(" -> - : decrease latency");
    System.out.println(" -> e : inject invalid state transition");
    // input loop
    while (running) {
        final int next = System.in.read();
        switch (next) {
            case 'q':
                System.out.println("Quitting...");
                running = false;
                break;
            case 'e':
                System.out.println("Injecting erroneous transition ...");
                threads[nextErroneous].sendInvalidStateTransition();
                nextErroneous = (nextErroneous + 1) % threads.length;
                break;
            case '+':
                // the max(...) guard keeps the delay at least 1 once it has been
                // halved down to 0 by the '-' command
                delay = Math.max(delay * 2, 1);
                System.out.println("Delay is " + delay);
                for (GeneratorThread t : threads) {
                    t.setDelay(delay);
                }
                break;
            case '-':
                delay /= 2;
                System.out.println("Delay is " + delay);
                for (GeneratorThread t : threads) {
                    t.setDelay(delay);
                }
                break;
            default:
                // do nothing
        }
    }
    // shutdown
    throughputLogger.shutdown();
    for (GeneratorThread t : threads) {
        t.shutdown();
        try {
            t.join();
        } catch (InterruptedException e) {
            // restore interrupted status
            Thread.currentThread().interrupt();
        }
    }
} | Base for standalone generators that use the state machine to create event sequences and push them
for example into Kafka. | runGenerator | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/generator/StandaloneThreadedGenerator.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/generator/StandaloneThreadedGenerator.java | Apache-2.0 |
/**
 * Session-window example: keys a small generated event stream by id and sums the counts
 * per event-time session with a 3 ms gap.
 *
 * <p>Flags: {@code --output <path>} writes results to files instead of stdout;
 * {@code --async-state} enables async state on the keyed stream.
 *
 * @param args command line flags as described above
 * @throws Exception if the job fails
 */
public static void main(String[] args) throws Exception {
    final ParameterTool params = ParameterTool.fromArgs(args);
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setGlobalJobParameters(params);
    // parallelism 1 keeps the generated element order deterministic
    env.setParallelism(1);
    final boolean fileOutput = params.has("output");
    final boolean asyncState = params.has("async-state");
    // elements: (id, event timestamp, count)
    final List<Tuple3<String, Long, Integer>> input = new ArrayList<>();
    input.add(new Tuple3<>("a", 1L, 1));
    input.add(new Tuple3<>("b", 1L, 1));
    input.add(new Tuple3<>("b", 3L, 1));
    input.add(new Tuple3<>("b", 5L, 1));
    input.add(new Tuple3<>("c", 6L, 1));
    // We expect to detect the session "a" earlier than this point (the old
    // functionality can only detect here when the next starts)
    input.add(new Tuple3<>("a", 10L, 1));
    // We expect to detect session "b" and "c" at this point as well
    input.add(new Tuple3<>("c", 11L, 1));
    GeneratorFunction<Long, Tuple3<String, Long, Integer>> dataGenerator =
            index -> input.get(index.intValue());
    DataGeneratorSource<Tuple3<String, Long, Integer>> generatorSource =
            new DataGeneratorSource<>(
                    dataGenerator,
                    input.size(),
                    TypeInformation.of(new TypeHint<Tuple3<String, Long, Integer>>() {}));
    // event time is taken from field f1 of each tuple
    DataStream<Tuple3<String, Long, Integer>> source =
            env.fromSource(
                    generatorSource,
                    WatermarkStrategy.<Tuple3<String, Long, Integer>>forMonotonousTimestamps()
                            .withTimestampAssigner((event, timestamp) -> event.f1),
                    "Generated data source");
    KeyedStream<Tuple3<String, Long, Integer>, String> keyedStream =
            source.keyBy(value -> value.f0);
    if (asyncState) {
        keyedStream = keyedStream.enableAsyncState();
    }
    // We create sessions for each id with max timeout of 3 time units
    DataStream<Tuple3<String, Long, Integer>> aggregated =
            keyedStream.window(EventTimeSessionWindows.withGap(Duration.ofMillis(3L))).sum(2);
    if (fileOutput) {
        aggregated
                .sinkTo(
                        FileSink.<Tuple3<String, Long, Integer>>forRowFormat(
                                        new Path(params.get("output")),
                                        new SimpleStringEncoder<>())
                                .withRollingPolicy(
                                        DefaultRollingPolicy.builder()
                                                .withMaxPartSize(MemorySize.ofMebiBytes(1))
                                                .withRolloverInterval(Duration.ofSeconds(10))
                                                .build())
                                .build())
                .name("output");
    } else {
        System.out.println("Printing result to stdout. Use --output to specify output path.");
        aggregated.print();
    }
    env.execute();
} | An example of session windowing that keys events by ID and groups and counts them in session with
gaps of 3 milliseconds. | main | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/windowing/SessionWindowing.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/windowing/SessionWindowing.java | Apache-2.0 |
/**
 * Generates the next record of the round-robin car simulation: randomly accelerates or
 * brakes the current car by 5 (clamped to [0, 100]), advances its distance by the time
 * elapsed since its last update, and emits (carId, speed, distance, timestamp).
 *
 * <p>Bug fix: the original advanced {@code nextCar} BEFORE building the tuple, so each
 * emitted record carried a DIFFERENT car's stale speed and distance together with the
 * fresh timestamp. We now emit the car that was updated in this call and advance the
 * round-robin index afterwards.
 *
 * @param ignoredIndex sequence index from the generator, unused
 * @return a tuple of (car id, current speed, total distance, update timestamp)
 */
@Override
public Tuple4<Integer, Integer, Double, Long> map(Long ignoredIndex) throws Exception {
    // the car updated and reported by this invocation
    final int car = nextCar;
    // randomly accelerate or brake by 5, clamped to [0, 100]
    if (rand.nextBoolean()) {
        speeds[car] = Math.min(100, speeds[car] + 5);
    } else {
        speeds[car] = Math.max(0, speeds[car] - 5);
    }
    long now = System.currentTimeMillis();
    // first update of a car has no previous timestamp, so no distance is accrued
    long timeDiffMillis = lastUpdate[car] == 0 ? 0 : now - lastUpdate[car];
    lastUpdate[car] = now;
    distances[car] +=
            speeds[car]
                    * (timeDiffMillis * HOURS_IN_MILLI)
                    * METERS_IN_KILOMETER
                    * COMPAT_FACTOR;
    // advance the round-robin index for the next invocation
    nextCar = (car + 1) % speeds.length;
    return new Tuple4<>(car, speeds[car], distances[car], now);
} | A generator function for simulating car data.
<p>This generator function generates a stream of car data in a form of a four-element tuple. The
data includes the car's ID, its speed in kilometers per hour, the distance it has traveled in
meters, and the timestamp of the data generation. The speed and distance of each car are randomly
updated in each invocation of the {@link #map(Long)} method. | map | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/windowing/util/CarGeneratorFunction.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/windowing/util/CarGeneratorFunction.java | Apache-2.0 |
/**
 * Tokenizes a line into words and emits a {@code (word, 1)} pair per word.
 *
 * @param value the input line
 * @param out collector receiving one {@code Tuple2<String, Integer>} per token
 */
@Override
public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
    // lower-case the line and split on runs of non-word characters
    final String[] words = value.toLowerCase().split("\\W+");
    // emit a pair for every non-empty token
    for (String word : words) {
        if (!word.isEmpty()) {
            out.collect(new Tuple2<>(word, 1));
        }
    }
} | Implements the string tokenizer that splits sentences into words as a user-defined
FlatMapFunction. The function takes a line (String) and splits it into multiple pairs in the
form of "(word,1)" ({@code Tuple2<String, Integer>}). | flatMap | java | apache/flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/wordcount/WordCount.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/wordcount/WordCount.java | Apache-2.0 |
/**
 * Loads the content of a classpath resource for testing purposes, joining all lines with
 * a trailing {@code "\n"}.
 *
 * <p>Fixes over the original: the reader is now closed via try-with-resources (it leaked
 * on every call and especially on exceptions), the resource is read through
 * {@code url.openStream()} instead of {@code new FileReader(url.getPath())} (which broke
 * for resources inside jars and for paths containing URL-encoded characters), and the
 * charset is pinned to UTF-8 instead of the platform default.
 *
 * @param relativeFilePathInResources resource path relative to the classpath root
 * @return the file content
 * @throws IOException if the resource cannot be read
 */
private static String loadFileContent(String relativeFilePathInResources) throws IOException {
    URL url =
            DSv2ExamplesITCase.class.getClassLoader().getResource(relativeFilePathInResources);
    Objects.requireNonNull(url);
    StringBuilder contentBuilder = new StringBuilder();
    try (BufferedReader br =
            new BufferedReader(
                    new java.io.InputStreamReader(
                            url.openStream(), java.nio.charset.StandardCharsets.UTF_8))) {
        String line;
        while ((line = br.readLine()) != null) {
            contentBuilder.append(line).append("\n");
        }
    }
    return contentBuilder.toString();
} | Load the content from the specified file path for testing purposes.
@return the file content | loadFileContent | java | apache/flink | flink-examples/flink-examples-streaming/src/test/java/org/apache/flink/streaming/test/examples/DSv2ExamplesITCase.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-streaming/src/test/java/org/apache/flink/streaming/test/examples/DSv2ExamplesITCase.java | Apache-2.0 |
/**
 * Getting-started tour of the Table & SQL API.
 *
 * <p>Sets up a batch {@link TableEnvironment}, builds an in-memory customer table,
 * registers it as the temporary view "customers", runs a SQL aggregation over it, then
 * filters and projects the view with the fluent Table API (including a call to the
 * user-defined {@code AddressNormalizer} function), collects the result locally and
 * compares it against the expected rows, printing SUCCESS! or FAILURE!.
 *
 * <p>Executes two Flink jobs, one per {@code execute()} call.
 *
 * @param args ignored
 * @throws Exception if job submission or result collection fails
 */
public static void main(String[] args) throws Exception {
    // setup the unified API
    // in this case: declare that the table programs should be executed in batch mode
    final EnvironmentSettings settings =
            EnvironmentSettings.newInstance().inBatchMode().build();
    final TableEnvironment env = TableEnvironment.create(settings);
    // create a table with example data without a connector required;
    // the three trailing numeric columns are dropped by the projection below
    final Table rawCustomers =
            env.fromValues(
                    Row.of(
                            "Guillermo Smith",
                            LocalDate.parse("1992-12-12"),
                            "4081 Valley Road",
                            "08540",
                            "New Jersey",
                            "m",
                            true,
                            0,
                            78,
                            3),
                    Row.of(
                            "Valeria Mendoza",
                            LocalDate.parse("1970-03-28"),
                            "1239 Rainbow Road",
                            "90017",
                            "Los Angeles",
                            "f",
                            true,
                            9,
                            39,
                            0),
                    Row.of(
                            "Leann Holloway",
                            LocalDate.parse("1989-05-21"),
                            "2359 New Street",
                            "97401",
                            "Eugene",
                            null,
                            true,
                            null,
                            null,
                            null),
                    Row.of(
                            "Brandy Sanders",
                            LocalDate.parse("1956-05-26"),
                            "4891 Walkers-Ridge-Way",
                            "73119",
                            "Oklahoma City",
                            "m",
                            false,
                            9,
                            39,
                            0),
                    Row.of(
                            "John Turner",
                            LocalDate.parse("1982-10-02"),
                            "2359 New Street",
                            "60605",
                            "Chicago",
                            "m",
                            true,
                            12,
                            39,
                            0),
                    Row.of(
                            "Ellen Ortega",
                            LocalDate.parse("1985-06-18"),
                            "2448 Rodney STreet",
                            "85023",
                            "Phoenix",
                            "f",
                            true,
                            0,
                            78,
                            3));
    // handle ranges of columns easily: keep columns 1..7 only
    final Table truncatedCustomers = rawCustomers.select(withColumns(range(1, 7)));
    // name columns
    final Table namedCustomers =
            truncatedCustomers.as(
                    "name",
                    "date_of_birth",
                    "street",
                    "zip_code",
                    "city",
                    "gender",
                    "has_newsletter");
    // register a view temporarily
    env.createTemporaryView("customers", namedCustomers);
    // use SQL whenever you like
    // call execute() and print() to get insights
    env.sqlQuery(
                    "SELECT "
                            + " COUNT(*) AS `number of customers`, "
                            + " AVG(YEAR(date_of_birth)) AS `average birth year` "
                            + "FROM `customers`")
            .execute()
            .print();
    // or further transform the data using the fluent Table API
    // e.g. filter, project fields, or call a user-defined function
    final Table youngCustomers =
            env.from("customers")
                    .filter($("gender").isNotNull())
                    .filter($("has_newsletter").isEqual(true))
                    .filter($("date_of_birth").isGreaterOrEqual(LocalDate.parse("1980-01-01")))
                    .select(
                            $("name").upperCase(),
                            $("date_of_birth"),
                            call(AddressNormalizer.class, $("street"), $("zip_code"), $("city"))
                                    .as("address"));
    // use execute() and collect() to retrieve your results from the cluster
    // this can be useful for testing before storing it in an external system
    try (CloseableIterator<Row> iterator = youngCustomers.execute().collect()) {
        final Set<Row> expectedOutput = new HashSet<>();
        expectedOutput.add(
                Row.of(
                        "GUILLERMO SMITH",
                        LocalDate.parse("1992-12-12"),
                        "4081 VALLEY ROAD, 08540, NEW JERSEY"));
        expectedOutput.add(
                Row.of(
                        "JOHN TURNER",
                        LocalDate.parse("1982-10-02"),
                        "2359 NEW STREET, 60605, CHICAGO"));
        expectedOutput.add(
                Row.of(
                        "ELLEN ORTEGA",
                        LocalDate.parse("1985-06-18"),
                        "2448 RODNEY STREET, 85023, PHOENIX"));
        final Set<Row> actualOutput = new HashSet<>();
        iterator.forEachRemaining(actualOutput::add);
        if (actualOutput.equals(expectedOutput)) {
            System.out.println("SUCCESS!");
        } else {
            System.out.println("FAILURE!");
        }
    }
} | Example for getting started with the Table & SQL API.
<p>The example shows how to create, transform, and query a table. It should give a first
impression about the look-and-feel of the API without going too much into details. See the other
examples for using connectors or more complex operations.
<p>In particular, the example shows how to
<ul>
<li>setup a {@link TableEnvironment},
<li>use the environment for creating example tables, registering views, and executing SQL
queries,
<li>transform tables with filters and projections,
<li>declare user-defined functions,
<li>and print/collect results locally.
</ul>
<p>The example executes two Flink jobs. The results are written to stdout. | main | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/GettingStartedExample.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/GettingStartedExample.java | Apache-2.0 |
/**
 * Mixes the DataStream API with SQL: converts two bounded order streams to tables (one
 * inline, one registered as the view "TableB"), unions filtered selections of both via
 * SQL, and prints the result as an insert-only DataStream.
 *
 * @param args ignored
 * @throws Exception if the job fails
 */
public static void main(String[] args) throws Exception {
    // set up the Java DataStream API
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // set up the Java Table API
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final DataStream<Order> orderA =
            env.fromData(
                    Arrays.asList(
                            new Order(1L, "beer", 3),
                            new Order(1L, "diaper", 4),
                            new Order(3L, "rubber", 2)));
    final DataStream<Order> orderB =
            env.fromData(
                    Arrays.asList(
                            new Order(2L, "pen", 3),
                            new Order(2L, "rubber", 3),
                            new Order(4L, "beer", 1)));
    // convert the first DataStream to a Table object
    // it will be used "inline" and is not registered in a catalog
    final Table tableA = tableEnv.fromDataStream(orderA);
    // convert the second DataStream and register it as a view
    // it will be accessible under a name
    tableEnv.createTemporaryView("TableB", orderB);
    // union the two tables
    final Table result =
            tableEnv.sqlQuery(
                    "SELECT * FROM "
                            + tableA
                            + " WHERE amount > 2 UNION ALL "
                            + "SELECT * FROM TableB WHERE amount < 2");
    // convert the Table back to an insert-only DataStream of type `Order`
    tableEnv.toDataStream(result, Order.class).print();
    // after the table program is converted to a DataStream program,
    // we must use `env.execute()` to submit the job
    env.execute();
} | Simple example for demonstrating the use of SQL on a table backed by a {@link DataStream} in Java
DataStream API.
<p>In particular, the example shows how to
<ul>
<li>convert two bounded data streams to tables,
<li>register a table as a view under a name,
<li>run a stream SQL query on registered and unregistered tables,
<li>and convert the table back to a data stream.
</ul>
<p>The example executes a single Flink job. The results are written to stdout. | main | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/StreamSQLExample.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/StreamSQLExample.java | Apache-2.0 |
/**
 * Runs a tumbling-window SQL aggregation: writes sample orders to a temporary CSV file,
 * registers it via DDL with a 3-second watermark (the events are out of order), and
 * prints per-5-second-window counts, sums and distinct product counts.
 *
 * @param args ignored
 * @throws Exception if the job fails
 */
public static void main(String[] args) throws Exception {
    // set up execution environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
    // write source data into temporary file and get the absolute path
    String contents =
            "1,beer,3,2019-12-12 00:00:01\n"
                    + "1,diaper,4,2019-12-12 00:00:02\n"
                    + "2,pen,3,2019-12-12 00:00:04\n"
                    + "2,rubber,3,2019-12-12 00:00:06\n"
                    + "3,rubber,2,2019-12-12 00:00:05\n"
                    + "4,beer,1,2019-12-12 00:00:08";
    String path = createTempFile(contents);
    // register table via DDL with watermark,
    // the events are out of order, hence, we use 3 seconds to wait the late events
    String ddl =
            "CREATE TABLE orders (\n"
                    + " user_id INT,\n"
                    + " product STRING,\n"
                    + " amount INT,\n"
                    + " ts TIMESTAMP(3),\n"
                    + " WATERMARK FOR ts AS ts - INTERVAL '3' SECOND\n"
                    + ") WITH (\n"
                    + " 'connector.type' = 'filesystem',\n"
                    + " 'connector.path' = '"
                    + path
                    + "',\n"
                    + " 'format.type' = 'csv'\n"
                    + ")";
    tEnv.executeSql(ddl);
    // run a SQL query on the table and retrieve the result as a new Table
    String query =
            "SELECT\n"
                    + " CAST(TUMBLE_START(ts, INTERVAL '5' SECOND) AS STRING) window_start,\n"
                    + " COUNT(*) order_num,\n"
                    + " SUM(amount) total_amount,\n"
                    + " COUNT(DISTINCT product) unique_products\n"
                    + "FROM orders\n"
                    + "GROUP BY TUMBLE(ts, INTERVAL '5' SECOND)";
    tEnv.executeSql(query).print();
    // should output:
    // +----+--------------------------------+--------------+--------------+-----------------+
    // | op | window_start | order_num | total_amount | unique_products |
    // +----+--------------------------------+--------------+--------------+-----------------+
    // | +I | 2019-12-12 00:00:00.000 | 3 | 10 | 3 |
    // | +I | 2019-12-12 00:00:05.000 | 3 | 6 | 2 |
    // +----+--------------------------------+--------------+--------------+-----------------+
} | Simple example for demonstrating the use of SQL in Java.
<p>Usage: {@code ./bin/flink run ./examples/table/StreamWindowSQLExample.jar}
<p>This example shows how to: - Register a table via DDL - Declare an event time attribute in the
DDL - Run a streaming window aggregate on the registered table | main | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/StreamWindowSQLExample.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/StreamWindowSQLExample.java | Apache-2.0 |
/**
 * Writes the given contents to a fresh temporary CSV file (deleted on JVM exit).
 *
 * <p>NOTE(review): despite the original doc wording, this returns the file's URI string
 * ({@code file:/...}), which is the form the 'connector.path' option expects.
 *
 * @param contents UTF-8 text to write
 * @return the URI string of the created file
 * @throws IOException if the file cannot be created or written
 */
private static String createTempFile(String contents) throws IOException {
    final File orders = File.createTempFile("orders", ".csv");
    orders.deleteOnExit();
    FileUtils.writeFileUtf8(orders, contents);
    return orders.toURI().toString();
} | Creates a temporary file with the contents and returns the absolute path. | createTempFile | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/StreamWindowSQLExample.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/StreamWindowSQLExample.java | Apache-2.0 |
/**
 * Demonstrates a temporal join between a transaction stream and a versioned currency
 * rate table built from an upsert changelog stream: each transaction is enriched with
 * the euro rate valid at its transaction time, and the result is printed.
 *
 * @param args ignored
 * @throws Exception if the job fails
 */
public static void main(String[] args) throws Exception {
    // set up the Java DataStream API
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // set up the Java Table API
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    // Create a changelog stream of currency rate
    final DataStream<Row> currencyRate =
            env.fromData(
                    Row.ofKind(RowKind.INSERT, Instant.ofEpochMilli(1000), "USD", 0.8),
                    Row.ofKind(RowKind.UPDATE_AFTER, Instant.ofEpochMilli(4000), "USD", 0.9),
                    Row.ofKind(RowKind.UPDATE_AFTER, Instant.ofEpochMilli(3000), "USD", 1.0),
                    Row.ofKind(RowKind.UPDATE_AFTER, Instant.ofEpochMilli(6000), "USD", 1.1));
    // Create a table from change log stream; the primary key on the currency code plus
    // the watermark make it usable as a versioned table in the temporal join below
    Table rateTable =
            tableEnv.fromChangelogStream(
                            currencyRate,
                            Schema.newBuilder()
                                    .column("f0", DataTypes.TIMESTAMP_LTZ(3))
                                    .column("f1", DataTypes.STRING().notNull())
                                    .column("f2", DataTypes.DOUBLE())
                                    .watermark("f0", "f0 - INTERVAL '2' SECONDS")
                                    .primaryKey("f1")
                                    .build(),
                            ChangelogMode.upsert())
                    .as("rate_time", "currency_code", "euro_rate");
    // Register the table as a view, it will be accessible under a name
    tableEnv.createTemporaryView("currency_rate", rateTable);
    // Create a data stream of transaction
    final DataStream<Transaction> transaction =
            env.fromData(
                    new Transaction("trx1", Instant.ofEpochMilli(1000), "USD", 1),
                    new Transaction("trx2", Instant.ofEpochMilli(2000), "USD", 1),
                    new Transaction("trx3", Instant.ofEpochMilli(3000), "USD", 1),
                    new Transaction("trx4", Instant.ofEpochMilli(4000), "USD", 1));
    // convert the Transaction DataStream and register it as a view,
    // it will be accessible under a name
    Table trxTable =
            tableEnv.fromDataStream(
                            transaction,
                            Schema.newBuilder()
                                    .column("id", DataTypes.STRING())
                                    .column("trxTime", DataTypes.TIMESTAMP_LTZ(3))
                                    .column("currencyCode", DataTypes.STRING())
                                    .column("amount", DataTypes.DOUBLE())
                                    .watermark("trxTime", "trxTime - INTERVAL '2' SECONDS")
                                    .build())
                    .as("id", "trx_time", "currency_code", "amount");
    // Register the table as a view, it will be accessible under a name
    tableEnv.createTemporaryView("transaction", trxTable);
    // temporal join the two tables
    final Table result =
            tableEnv.sqlQuery(
                    " SELECT\n"
                            + " t.id,\n"
                            + " t.trx_time,\n"
                            + " c.currency_code,\n"
                            + " t.amount,\n"
                            + " t.amount * c.euro_rate AS total_euro\n"
                            + " FROM transaction t\n"
                            + " JOIN currency_rate FOR SYSTEM_TIME AS OF t.trx_time AS c\n"
                            + " ON t.currency_code = c.currency_code; ");
    // convert the Table back to an insert-only DataStream of type `Order`
    tableEnv.toDataStream(result, EnrichedTransaction.class).print();
    // after the table program is converted to a DataStream program,
    // we must use `env.execute()` to submit the job
    env.execute();
} | Example for demonstrating the use of temporal join between a table backed by a {@link DataStream}
and a table backed by a change log stream.
<p>In particular, the example shows how to
<ul>
<li>create a change log stream from elements
<li>rename the table columns
<li>register a table as a view under a name,
<li>run a stream temporal join query on registered tables,
<li>and convert the table back to a data stream.
</ul>
<p>The example executes a single Flink job. The results are written to stdout. | main | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/TemporalJoinSQLExample.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/TemporalJoinSQLExample.java | Apache-2.0 |
/**
 * Returns a human-readable representation of this enriched transaction, listing all
 * fields in declaration order.
 */
@Override
public String toString() {
    final StringBuilder repr = new StringBuilder("EnrichedTransaction{");
    repr.append("id=").append(id);
    repr.append(", trxTime=").append(trxTime);
    repr.append(", currencyCode='").append(currencyCode).append('\'');
    repr.append(", amount=").append(amount);
    repr.append(", totalEuro=").append(totalEuro);
    repr.append('}');
    return repr.toString();
} | Enriched transaction by joining with the currency rate table. | toString | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/TemporalJoinSQLExample.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/TemporalJoinSQLExample.java | Apache-2.0 |
/**
 * Creates a fresh, initially empty temporary directory for CSV files.
 *
 * @return the absolute path of the created directory
 * @throws IOException if the directory cannot be created
 */
private static String createTemporaryDirectory() throws IOException {
    // Files.createTempDirectory already yields a unique absolute path
    return Files.createTempDirectory("population").toString();
} | Creates an empty temporary directory for CSV files and returns the absolute path. | createTemporaryDirectory | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/UpdatingTopCityExample.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/UpdatingTopCityExample.java | Apache-2.0 |
/**
 * Minimal Flink SQL word count in batch mode: aggregates word frequencies over an inline
 * VALUES table and prints the result.
 *
 * @param args ignored
 * @throws Exception if the job fails
 */
public static void main(String[] args) throws Exception {
    // declare that the table program should run in batch mode
    final EnvironmentSettings settings =
            EnvironmentSettings.newInstance().inBatchMode().build();
    final TableEnvironment tableEnv = TableEnvironment.create(settings);
    // assemble the query first so it is easy to inspect:
    // sum the frequencies of an artificial fixed-size table, grouped by word
    final String wordCountQuery =
            "SELECT word, SUM(frequency) AS `count`\n"
                    + "FROM (\n"
                    + " VALUES ('Hello', 1), ('Ciao', 1), ('Hello', 2)\n"
                    + ")\n"
                    + "AS WordTable(word, frequency)\n"
                    + "GROUP BY word";
    // execute the Flink SQL job and print the result locally
    tableEnv.executeSql(wordCountQuery).print();
} | The famous word count example that shows a minimal Flink SQL job in batch execution mode. | main | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/WordCountSQLExample.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/basics/WordCountSQLExample.java | Apache-2.0 |
/**
 * Wires up the custom socket connector with the changelog-csv format: registers the
 * {@code UserScores} table via DDL, runs a continuous aggregation over it, and prints
 * the changelog result to stdout.
 *
 * @param args supports {@code --hostname} (default localhost) and {@code --port}
 *     (default 9999)
 * @throws Exception if the job fails
 */
public static void main(String[] args) throws Exception {
    final ParameterTool params = ParameterTool.fromArgs(args);
    final String hostname = params.get("hostname", "localhost");
    final String port = params.get("port", "9999");
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1); // source only supports parallelism of 1
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    // assemble the DDL first so the statement is easy to inspect
    final String ddl =
            "CREATE TABLE UserScores (name STRING, score INT)\n"
                    + "WITH (\n"
                    + " 'connector' = 'socket',\n"
                    + " 'hostname' = '"
                    + hostname
                    + "',\n"
                    + " 'port' = '"
                    + port
                    + "',\n"
                    + " 'byte-delimiter' = '10',\n"
                    + " 'format' = 'changelog-csv',\n"
                    + " 'changelog-csv.column-delimiter' = '|'\n"
                    + ")";
    // register a table in the catalog
    tableEnv.executeSql(ddl);
    // define a dynamic aggregating query
    final Table scores =
            tableEnv.sqlQuery("SELECT name, SUM(score) FROM UserScores GROUP BY name");
    // print the result to the console
    tableEnv.toChangelogStream(scores).print();
    env.execute();
} | Example for implementing a custom {@link DynamicTableSource} and a {@link DecodingFormat}.
<p>The example implements a table source with a decoding format that supports changelog
semantics.
<p>The {@link SocketDynamicTableFactory} illustrates how connector components play together. It
can serve as a reference implementation for implementing own connectors and/or formats.
<p>The {@link SocketDynamicTableSource} uses a simple single-threaded {@link Source} to open a
socket that listens for incoming bytes. The raw bytes are decoded into rows by a pluggable
format. The format expects a changelog flag as the first column.
<p>In particular, the example shows how to
<ul>
<li>create factories that parse and validate options,
<li>implement table connectors,
<li>implement and discover custom formats,
<li>and use provided utilities such as data structure converters and the {@link FactoryUtil}.
</ul>
<p>Usage: <code>ChangelogSocketExample --hostname <localhost> --port <9999></code>
<p>Use the following command to ingest data in a terminal:
<pre>
nc -lk 9999
INSERT|Alice|12
INSERT|Bob|5
DELETE|Alice|12
INSERT|Alice|18
</pre>
<p>The result is written to stdout. | main | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/connectors/ChangelogSocketExample.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/connectors/ChangelogSocketExample.java | Apache-2.0 |
@Override
public String splitId() {
    // The socket itself is the only (implicit) split, so a fixed id is sufficient.
    return "dummy";
} | Placeholder because the socket itself implicitly represents the only split and does not
require an actual split object. | splitId | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/connectors/SocketSource.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/connectors/SocketSource.java | Apache-2.0 |
/**
 * Entry point: sets up a batch table environment and runs the advanced user-defined
 * function examples; each call submits its own Flink job and prints to stdout.
 *
 * @param args ignored
 * @throws Exception if a job fails
 */
public static void main(String[] args) throws Exception {
    // declare that the table programs should be executed in batch mode
    final EnvironmentSettings batchSettings =
            EnvironmentSettings.newInstance().inBatchMode().build();
    final TableEnvironment tableEnv = TableEnvironment.create(batchSettings);
    // run the different kinds of function examples
    executeLastDatedValueFunction(tableEnv);
    executeInternalRowMergerFunction(tableEnv);
} | Example for implementing more complex {@link UserDefinedFunction}s.
<p>In many use cases, function signatures can be reflectively extracted from a UDF class. The
annotations {@link DataTypeHint} and {@link FunctionHint} help if reflective information is not
enough and needs to be enriched with further logical details. Check the website documentation as
well as the docs of {@link ScalarFunction}, {@link TableFunction}, and {@link AggregateFunction}
for more information.
<p>Both reflective extraction and annotations are suitable for function signatures with fixed
input and output types. However, for advanced use cases it might be required to derive an output
type from one of the argument types or perform stricter validation.
<p>This example demonstrates various UDF implementations. We are executing multiple Flink jobs
where the result is written to stdout. | main | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/functions/AdvancedFunctionsExample.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/functions/AdvancedFunctionsExample.java | Apache-2.0 |
/**
 * Demonstrates {@code LastDatedValueFunction}: builds an example customer table,
 * registers the aggregate function, groups by name and prints the result, then drops
 * the temporary view again.
 *
 * @param env the table environment to run the example in
 */
private static void executeLastDatedValueFunction(TableEnvironment env) {
    // create a table with example data
    final Table customers =
            env.fromValues(
                    DataTypes.of("ROW<name STRING, order_date DATE, item_count INT>"),
                    Row.of("Guillermo Smith", LocalDate.parse("2020-12-01"), 3),
                    Row.of("Guillermo Smith", LocalDate.parse("2020-12-05"), 5),
                    Row.of("Valeria Mendoza", LocalDate.parse("2020-03-23"), 4),
                    Row.of("Valeria Mendoza", LocalDate.parse("2020-06-02"), 10),
                    Row.of("Leann Holloway", LocalDate.parse("2020-05-26"), 9),
                    Row.of("Leann Holloway", LocalDate.parse("2020-05-27"), null),
                    Row.of("Brandy Sanders", LocalDate.parse("2020-10-14"), 1),
                    Row.of("John Turner", LocalDate.parse("2020-10-02"), 12),
                    Row.of("Ellen Ortega", LocalDate.parse("2020-06-18"), 100));
    env.createTemporaryView("customers", customers);
    // register and execute the function
    env.createTemporarySystemFunction("LastDatedValueFunction", LastDatedValueFunction.class);
    env.executeSql(
                    "SELECT name, LastDatedValueFunction(item_count, order_date) "
                            + "FROM customers GROUP BY name")
            .print();
    // clean up
    env.dropTemporaryView("customers");
} | Aggregates data by name and returns the latest non-null {@code item_count} value with its
corresponding {@code order_date}. | executeLastDatedValueFunction | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/functions/AdvancedFunctionsExample.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/functions/AdvancedFunctionsExample.java | Apache-2.0 |
// Runs the InternalRowMergerFunction example end-to-end: registers example rows with
// two nested row columns, registers the function, and prints the merged result.
private static void executeInternalRowMergerFunction(TableEnvironment env) {
// create a table with example data
final Table customers =
env.fromValues(
// each customer carries two nested rows (data1, data2) that get merged
DataTypes.of(
"ROW<name STRING, data1 ROW<birth_date DATE>, data2 ROW<city STRING, phone STRING>>"),
Row.of(
"Guillermo Smith",
Row.of(LocalDate.parse("1992-12-12")),
Row.of("New Jersey", "816-443-8010")),
Row.of(
"Valeria Mendoza",
Row.of(LocalDate.parse("1970-03-28")),
Row.of("Los Angeles", "928-264-9662")),
Row.of(
"Leann Holloway",
Row.of(LocalDate.parse("1989-05-21")),
Row.of("Eugene", "614-889-6038")));
env.createTemporaryView("customers", customers);
// register and execute the function
env.createTemporarySystemFunction(
"InternalRowMergerFunction", InternalRowMergerFunction.class);
env.executeSql("SELECT name, InternalRowMergerFunction(data1, data2) FROM customers")
.print();
// clean up
env.dropTemporaryView("customers");
} | Merges two rows as efficient as possible using internal data structures. | executeInternalRowMergerFunction | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/functions/AdvancedFunctionsExample.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/functions/AdvancedFunctionsExample.java | Apache-2.0 |
// Custom type inference: accepts (ANY, DATE), derives both the accumulator type and the
// output type from the first argument's data type.
@Override
public TypeInference getTypeInference(DataTypeFactory typeFactory) {
return TypeInference.newBuilder()
// accept a signature (ANY, DATE) both with default conversion classes,
// the input type strategy is mostly used to produce nicer validation exceptions
// during planning, implementers can decide to skip it if they are fine with failing
// at a later stage during code generation when the runtime method is checked
.inputTypeStrategy(
InputTypeStrategies.sequence(
InputTypeStrategies.ANY,
InputTypeStrategies.explicit(DataTypes.DATE())))
// let the accumulator data type depend on the first input argument
.accumulatorTypeStrategy(
callContext -> {
final DataType argDataType = callContext.getArgumentDataTypes().get(0);
// structured type backed by the Accumulator class, with a "value"
// field typed after the first argument and a DATE field
final DataType accDataType =
DataTypes.STRUCTURED(
Accumulator.class,
DataTypes.FIELD("value", argDataType),
DataTypes.FIELD("date", DataTypes.DATE()));
return Optional.of(accDataType);
})
// let the output data type depend on the first input argument
.outputTypeStrategy(
callContext -> {
final DataType argDataType = callContext.getArgumentDataTypes().get(0);
// output row mirrors the accumulator fields: (value, date)
final DataType outputDataType =
DataTypes.ROW(
DataTypes.FIELD("value", argDataType),
DataTypes.FIELD("date", DataTypes.DATE()));
return Optional.of(outputDataType);
})
.build();
} | Declares the {@link TypeInference} of this function. It specifies:
<ul>
<li>which argument types are supported when calling this function,
<li>which {@link DataType#getConversionClass()} should be used when calling the JVM method
{@link #accumulate(Accumulator, Object, LocalDate)} during runtime,
<li>a similar strategy how to derive an accumulator type,
<li>and a similar strategy how to derive the output type.
</ul> | getTypeInference | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/functions/LastDatedValueFunction.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/functions/LastDatedValueFunction.java | Apache-2.0 |
@Override
public Accumulator<T> createAccumulator() {
    // Each aggregation group starts from a fresh, empty accumulator instance.
    final Accumulator<T> accumulator = new Accumulator<>();
    return accumulator;
} | Generic accumulator for representing state. It will contain different kind of instances for
{@code value} depending on actual call in the query. | createAccumulator | java | apache/flink | flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/functions/LastDatedValueFunction.java | https://github.com/apache/flink/blob/master/flink-examples/flink-examples-table/src/main/java/org/apache/flink/table/examples/java/functions/LastDatedValueFunction.java | Apache-2.0 |
// Creates a recoverable (persist/commit style) writer for Azure Blob Storage, backed
// by the wrapped Hadoop file system (expected to be initialized already).
@Override
public RecoverableWriter createRecoverableWriter() throws IOException {
return new AzureBlobRecoverableWriter(getHadoopFileSystem());
} | Wraps the given Hadoop File System object as a Flink File System object. The given Hadoop
file system object is expected to be initialized already.
@param hadoopFileSystem The Azure Blob FileSystem that will be used under the hood. | createRecoverableWriter | java | apache/flink | flink-filesystems/flink-azure-fs-hadoop/src/main/java/org/apache/flink/fs/azurefs/AzureBlobFileSystem.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-azure-fs-hadoop/src/main/java/org/apache/flink/fs/azurefs/AzureBlobFileSystem.java | Apache-2.0 |
@Override
public RecoverableWriter createRecoverableWriter() {
LOGGER.info("Creating GSRecoverableWriter with file-system options {}", fileSystemOptions);
// create the GS blob storage wrapper around the shared Storage client
GSBlobStorageImpl blobStorage = new GSBlobStorageImpl(storage);
// construct the recoverable writer with the blob storage wrapper and the options
return new GSRecoverableWriter(blobStorage, fileSystemOptions);
} | FileSystem implementation that wraps GoogleHadoopFileSystem and supports RecoverableWriter. | createRecoverableWriter | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/GSFileSystem.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/GSFileSystem.java | Apache-2.0 |
public Optional<String> getWriterTemporaryBucketName() {
    // Optional override: recoverable writes may stage their data in a separate bucket.
    final Optional<String> temporaryBucket =
            flinkConfig.getOptional(WRITER_TEMPORARY_BUCKET_NAME);
    return temporaryBucket;
} | The temporary bucket name to use for recoverable writes, if different from the final bucket
name. | getWriterTemporaryBucketName | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/GSFileSystemOptions.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/GSFileSystemOptions.java | Apache-2.0 |
public Optional<Integer> getHTTPConnectionTimeout() {
    // Connection-establishment timeout in milliseconds, if configured.
    final Optional<Integer> connectTimeoutMs =
            flinkConfig.getOptional(GCS_HTTP_CONNECT_TIMEOUT);
    return connectTimeoutMs;
} | Timeout in millisecond to establish the connection. | getHTTPConnectionTimeout | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/GSFileSystemOptions.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/GSFileSystemOptions.java | Apache-2.0 |
public Optional<MemorySize> getWriterChunkSize() {
    // Chunk size for writes on the underlying Google WriteChannel, if configured.
    final Optional<MemorySize> chunkSize = flinkConfig.getOptional(WRITER_CHUNK_SIZE);
    return chunkSize;
} | The chunk size to use for writes on the underlying Google WriteChannel. | getWriterChunkSize | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/GSFileSystemOptions.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/GSFileSystemOptions.java | Apache-2.0 |
public static GSBlobIdentifier fromBlobId(BlobId blobId) {
    // Carry over bucket and object name from the Google SDK identifier.
    final String bucket = blobId.getBucket();
    final String objectName = blobId.getName();
    return new GSBlobIdentifier(bucket, objectName);
} | Construct an abstract blob identifier from a Google BlobId.
@param blobId The Google BlobId
@return The abstract blob identifier | fromBlobId | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/storage/GSBlobIdentifier.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/storage/GSBlobIdentifier.java | Apache-2.0 |
@Override
public String getChecksum() {
    LOGGER.trace("Getting checksum for blob {}", blobIdentifier);
    // the CRC32C value as reported by the wrapped Blob
    final String crc32c = blob.getCrc32c();
    LOGGER.trace("Found checksum for blob {}: {}", blobIdentifier, crc32c);
    return crc32c;
} | Blob metadata, wraps Google storage Blob. | getChecksum | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/storage/GSBlobStorageImpl.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/storage/GSBlobStorageImpl.java | Apache-2.0 |
public static GSBlobIdentifier parseUri(URI uri) {
    // Only gs:// URIs are supported.
    Preconditions.checkArgument(
            uri.getScheme().equals(GSFileSystemFactory.SCHEME),
            String.format("URI scheme for %s must be %s", uri, GSFileSystemFactory.SCHEME));

    // The authority component carries the bucket name.
    final String bucket = uri.getAuthority();
    if (StringUtils.isNullOrWhitespaceOnly(bucket)) {
        throw new IllegalArgumentException(String.format("Bucket name in %s is invalid", uri));
    }

    // The path component (minus its leading slash) carries the object name.
    final String rawPath = uri.getPath();
    if (StringUtils.isNullOrWhitespaceOnly(rawPath)) {
        throw new IllegalArgumentException(String.format("Object name in %s is invalid", uri));
    }
    final String objectName = rawPath.substring(1); // drop the leading slash
    if (StringUtils.isNullOrWhitespaceOnly(objectName)) {
        throw new IllegalArgumentException(String.format("Object name in %s is invalid", uri));
    }

    return new GSBlobIdentifier(bucket, objectName);
} | Parses a blob id from a Google storage uri, i.e. gs://bucket/foo/bar yields a blob with
bucket name "bucket" and object name "foo/bar".
@param uri The gs uri
@return The blob id | parseUri | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/BlobUtils.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/BlobUtils.java | Apache-2.0 |
public static String getTemporaryBucketName(
        GSBlobIdentifier finalBlobIdentifier, GSFileSystemOptions options) {
    // Prefer the configured temporary bucket; otherwise stage next to the final blob.
    return options.getWriterTemporaryBucketName()
            .orElseGet(() -> finalBlobIdentifier.bucketName);
} | Returns the temporary bucket name. If options specifies a temporary bucket name, we use that
one; otherwise, we use the bucket name of the final blob.
@param finalBlobIdentifier The final blob identifier
@param options The file system options
@return The temporary bucket name | getTemporaryBucketName | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/BlobUtils.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/BlobUtils.java | Apache-2.0 |
public static String getTemporaryObjectPartialName(GSBlobIdentifier finalBlobIdentifier) {
    // The trailing slash is deliberate: any object whose name starts with this prefix is
    // guaranteed to be a temporary file belonging to exactly this final blob.
    return TEMPORARY_OBJECT_PREFIX
            + "/"
            + finalBlobIdentifier.bucketName
            + "/"
            + finalBlobIdentifier.objectName
            + "/";
} | Returns a temporary object partial name, i.e. .inprogress/foo/bar/ for the final blob with
object name "foo/bar". The included trailing slash is deliberate, so that we can be sure that
object names that start with this partial name are, in fact, temporary files associated with
the upload of the associated final blob.
@param finalBlobIdentifier The final blob identifier
@return The temporary object partial name | getTemporaryObjectPartialName | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/BlobUtils.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/BlobUtils.java | Apache-2.0 |
public static String getTemporaryObjectName(
        GSBlobIdentifier finalBlobIdentifier, UUID temporaryObjectId) {
    // Temporary object name = partial name (ends with a slash) + unique object id.
    final String partialName = getTemporaryObjectPartialName(finalBlobIdentifier);
    return partialName + temporaryObjectId.toString();
} | Returns a temporary object name, formed by appending the temporary object id to the temporary
object partial name, i.e. .inprogress/foo/bar/abc for the final blob with object name
"foo/bar" and temporary object id "abc".
@param finalBlobIdentifier The final blob identifier
@param temporaryObjectId The temporary object id
@return The temporary object name | getTemporaryObjectName | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/BlobUtils.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/BlobUtils.java | Apache-2.0 |
public static String getTemporaryObjectNameWithEntropy(
        GSBlobIdentifier finalBlobIdentifier, UUID temporaryObjectId) {
    // Convert the id once; the original converted it twice for no benefit. The id is
    // used both as the leading entropy component and as the trailing unique suffix,
    // i.e. abc.inprogress/foo/bar/abc for object "foo/bar" and id "abc".
    final String objectId = temporaryObjectId.toString();
    return objectId + getTemporaryObjectPartialName(finalBlobIdentifier) + objectId;
} | Returns a temporary object name with entropy, formed by adding the temporary object id to the
temporary object partial name in both start and end of path, i.e. abc.inprogress/foo/bar/abc
for the final blob with object name "foo/bar" and temporary object id "abc".
@param finalBlobIdentifier The final blob identifier
@param temporaryObjectId The temporary object id
@return The temporary object name with entropy | getTemporaryObjectNameWithEntropy | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/BlobUtils.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/BlobUtils.java | Apache-2.0 |
public static GSBlobIdentifier getTemporaryBlobIdentifier(
        GSBlobIdentifier finalBlobIdentifier,
        UUID temporaryObjectId,
        GSFileSystemOptions options) {
    // The temporary blob lives in the (possibly overridden) temporary bucket.
    final String bucketName = BlobUtils.getTemporaryBucketName(finalBlobIdentifier, options);

    // When entropy injection is enabled, the object id is additionally placed at the
    // front of the temporary object name.
    final String objectName;
    if (options.isFileSinkEntropyEnabled()) {
        objectName =
                BlobUtils.getTemporaryObjectNameWithEntropy(
                        finalBlobIdentifier, temporaryObjectId);
    } else {
        objectName = BlobUtils.getTemporaryObjectName(finalBlobIdentifier, temporaryObjectId);
    }
    return new GSBlobIdentifier(bucketName, objectName);
} | Resolves a temporary blob identifier for a provided temporary object id and the provided
options.
@param finalBlobIdentifier The final blob identifier
@param temporaryObjectId The temporary object id
@param options The file system options
@return The blob identifier | getTemporaryBlobIdentifier | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/BlobUtils.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/BlobUtils.java | Apache-2.0 |
public static String convertChecksumToString(int checksum) {
    // Google storage represents the CRC32C value as base64 of its four big-endian bytes.
    final byte[] bigEndianBytes =
            ByteBuffer.allocate(Integer.BYTES)
                    .order(ByteOrder.BIG_ENDIAN)
                    .putInt(checksum)
                    .array();
    return BASE64_ENCODER.encodeToString(bigEndianBytes);
} | Converts an int crc32 checksum to the string format used by Google storage, which is the
base64 string for the int in big-endian format.
@param checksum The int checksum
@return The string checksum | convertChecksumToString | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/ChecksumUtils.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/ChecksumUtils.java | Apache-2.0 |
// Builds the effective Hadoop configuration in two layers: first any conf-dir resources
// (Flink option takes precedence over the HADOOP_CONF_DIR env var), then properties
// derived from the Flink config.
public static org.apache.hadoop.conf.Configuration getHadoopConfiguration(
Configuration flinkConfig, ConfigContext configContext) {
// create a starting hadoop configuration
org.apache.hadoop.conf.Configuration hadoopConfig =
new org.apache.hadoop.conf.Configuration();
// look for a hadoop configuration directory and load configuration from it if found
Optional<String> hadoopConfigDir =
Optional.ofNullable(flinkConfig.get(CoreOptions.FLINK_HADOOP_CONF_DIR));
if (!hadoopConfigDir.isPresent()) {
// fall back to the environment variable when no explicit Flink option is set
hadoopConfigDir = configContext.getenv("HADOOP_CONF_DIR");
}
hadoopConfigDir.ifPresent(
configDir -> {
LOGGER.info("Loading Hadoop config resources from {}", configDir);
hadoopConfig.addResource(configContext.loadHadoopConfigFromDir(configDir));
});
// now, load hadoop config from flink and add to base hadoop config
// NOTE(review): added after the conf-dir resources — presumably so Flink-derived
// properties take precedence; confirm against Hadoop addResource ordering semantics
HadoopConfigLoader hadoopConfigLoader =
new HadoopConfigLoader(
FLINK_CONFIG_PREFIXES,
MIRRORED_CONFIG_KEYS,
HADOOP_CONFIG_PREFIX,
Collections.emptySet(),
Collections.emptySet(),
FLINK_SHADING_PREFIX);
hadoopConfigLoader.setFlinkConfig(flinkConfig);
org.apache.hadoop.conf.Configuration flinkHadoopConfig =
hadoopConfigLoader.getOrLoadHadoopConfig();
hadoopConfig.addResource(flinkHadoopConfig);
// reload the config resources and return it
hadoopConfig.reloadConfiguration();
return hadoopConfig;
} | Loads the Hadoop configuration, by loading from a Hadoop conf dir (if one exists) and then
overlaying properties derived from the Flink config.
@param flinkConfig The Flink config
@param configContext The config context.
@return The Hadoop config. | getHadoopConfiguration | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/ConfigUtils.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/ConfigUtils.java | Apache-2.0 |
public static String stringifyHadoopConfig(org.apache.hadoop.conf.Configuration hadoopConfig)
        throws RuntimeException {
    // Dump every configuration property into an in-memory writer and return the text.
    try (StringWriter stringWriter = new StringWriter()) {
        org.apache.hadoop.conf.Configuration.dumpConfiguration(hadoopConfig, stringWriter);
        return stringWriter.toString();
    } catch (IOException ex) {
        // surface unexpected I/O problems as unchecked
        throw new RuntimeException(ex);
    }
} | Helper to serialize a Hadoop config to a string, for logging.
@param hadoopConfig The Hadoop config.
@return A string with the Hadoop properties.
@throws RuntimeException On underlying IO failure | stringifyHadoopConfig | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/ConfigUtils.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/utils/ConfigUtils.java | Apache-2.0 |
// Forwards the byte range to the underlying channel while folding the very same range
// into the running hasher, so close() can compare checksums afterwards.
public int write(byte[] content, int start, int length) throws IOException {
LOGGER.trace("Writing {} bytes to blob {}", length, blobIdentifier);
Preconditions.checkNotNull(content);
Preconditions.checkArgument(start >= 0);
Preconditions.checkArgument(length >= 0);
// hash exactly the bytes that are handed to the channel below
hasher.putBytes(content, start, length);
return writeChannel.write(content, start, length);
} | Writes bytes to the underlying channel and updates checksum.
@param content The content to write
@param start The start position
@param length The number of bytes to write
@return The number of bytes written
@throws IOException On underlying failure | write | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/writer/GSChecksumWriteChannel.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/writer/GSChecksumWriteChannel.java | Apache-2.0 |
// Closes the channel and then verifies the upload by comparing the locally computed
// checksum against the checksum stored in the blob's metadata.
public void close() throws IOException {
LOGGER.trace("Closing write channel to blob {}", blobIdentifier);
// close channel and get blob metadata
writeChannel.close();
Optional<GSBlobStorage.BlobMetadata> blobMetadata = storage.getMetadata(blobIdentifier);
if (!blobMetadata.isPresent()) {
// no metadata after closing the channel means we cannot validate the write
throw new IOException(
String.format("Failed to read metadata for blob %s", blobIdentifier));
}
// make sure checksums match
String writeChecksum = ChecksumUtils.convertChecksumToString(hasher.hash().asInt());
String blobChecksum = blobMetadata.get().getChecksum();
if (!writeChecksum.equals(blobChecksum)) {
throw new IOException(
String.format(
"Checksum mismatch writing blob %s: expected %s but found %s",
blobIdentifier, writeChecksum, blobChecksum));
}
} | Closes the channel and validates the checksum against the storage. Manually verifying
checksums for streaming uploads is recommended by Google, see here:
https://cloud.google.com/storage/docs/streaming
@throws IOException On underlying failure or non-matching checksums | close | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/writer/GSChecksumWriteChannel.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/writer/GSChecksumWriteChannel.java | Apache-2.0 |
// Version of this serializer's format. Per the class documentation, a change here also
// requires changing GSResumeRecoverableSerializer, which embeds this format.
@Override
public int getVersion() {
return SERIALIZER_VERSION;
} | The serializer version. Note that, if this changes, then the version of {@link
GSResumeRecoverableSerializer} must also change, because it uses this class to serialize
itself, in part.
@return The serializer version. | getVersion | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/writer/GSCommitRecoverableSerializer.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/writer/GSCommitRecoverableSerializer.java | Apache-2.0 |
// Materializes the final blob: an empty blob when there are no components, otherwise
// the composition of all component blobs (copied across buckets when the temporary
// bucket differs from the final one).
private void writeFinalBlob() {
// do we have any component blobs?
List<GSBlobIdentifier> blobIdentifiers = recoverable.getComponentBlobIds(options);
if (blobIdentifiers.isEmpty()) {
// we have no blob identifiers, so just create an empty target blob
storage.createBlob(recoverable.finalBlobIdentifier);
} else {
// yes, we have component blobs. compose them into the final blob id. if the component
// blob ids are in the same bucket as the final blob id, this can be done directly.
// otherwise, we must compose to a new temporary blob id in the same bucket as the
// component blob ids and then copy that blob to the final blob location
String temporaryBucketName =
BlobUtils.getTemporaryBucketName(recoverable.finalBlobIdentifier, options);
if (recoverable.finalBlobIdentifier.bucketName.equals(temporaryBucketName)) {
// compose directly to final blob
composeBlobs(
recoverable.getComponentBlobIds(options), recoverable.finalBlobIdentifier);
} else {
// compose to the intermediate blob, then copy
UUID temporaryObjectId = UUID.randomUUID();
GSBlobIdentifier intermediateBlobIdentifier =
BlobUtils.getTemporaryBlobIdentifier(
recoverable.finalBlobIdentifier, temporaryObjectId, options);
composeBlobs(recoverable.getComponentBlobIds(options), intermediateBlobIdentifier);
// remember the intermediate blob so it can be cleaned up later
composedTempBlobIdentifiers.add(intermediateBlobIdentifier);
storage.copy(intermediateBlobIdentifier, recoverable.finalBlobIdentifier);
}
}
} | Writes the final blob by composing the temporary blobs and copying, if necessary. | writeFinalBlob | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/writer/GSRecoverableWriterCommitter.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/writer/GSRecoverableWriterCommitter.java | Apache-2.0 |
@Override
public String toString() {
    // Human-readable dump of the recoverable's state for logs and debugging.
    final StringBuilder sb = new StringBuilder("GSResumeRecoverable{");
    sb.append("finalBlobIdentifier=").append(finalBlobIdentifier);
    sb.append(", componentObjectIds=").append(componentObjectIds);
    sb.append(", position=").append(position);
    sb.append(", closed=").append(closed);
    sb.append('}');
    return sb.toString();
} | Indicates if the write has been closed. | toString | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/writer/GSResumeRecoverable.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/main/java/org/apache/flink/fs/gs/writer/GSResumeRecoverable.java | Apache-2.0 |
public static org.apache.hadoop.conf.Configuration hadoopConfigFromMap(
        Map<String, String> values) {
    // Copy every map entry into a fresh Hadoop configuration.
    org.apache.hadoop.conf.Configuration hadoopConfig =
            new org.apache.hadoop.conf.Configuration();
    values.forEach(hadoopConfig::set);
    return hadoopConfig;
} | Helper to create a hadoop configuration from a map.
@param values Map of values
@return Hadoop config | hadoopConfigFromMap | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/TestUtils.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/TestUtils.java | Apache-2.0 |
public static Map<String, String> hadoopConfigToMap(
        org.apache.hadoop.conf.Configuration hadoopConfig) {
    // A Hadoop configuration iterates as Map.Entry<String, String> pairs.
    HashMap<String, String> properties = new HashMap<>();
    hadoopConfig.forEach(entry -> properties.put(entry.getKey(), entry.getValue()));
    return properties;
} | Helper to translate Hadoop config to a map.
@param hadoopConfig The Hadoop config
@return The map of keys/values | hadoopConfigToMap | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/TestUtils.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/TestUtils.java | Apache-2.0 |
// Happy path: each partial write must report the requested length, close() must pass
// its checksum validation, and the stored bytes must equal the expected concatenation.
@TestTemplate
void shouldWriteProperly() throws IOException {
MockBlobStorage blobStorage = new MockBlobStorage();
GSBlobStorage.WriteChannel writeChannel = blobStorage.writeBlob(blobIdentifier);
GSChecksumWriteChannel checksumWriteChannel =
new GSChecksumWriteChannel(blobStorage, writeChannel, blobIdentifier);
// write each partial buffer and validate the written count
for (int i = 0; i < byteBuffers.length; i++) {
int writtenCount =
checksumWriteChannel.write(byteBuffers[i], writeStarts[i], writeLengths[i]);
assertThat(writtenCount).isEqualTo(writeLengths[i]);
}
// close the write, this also validates the checksum
checksumWriteChannel.close();
// read the value out of storage, the bytes should match
MockBlobStorage.BlobValue blobValue = blobStorage.blobs.get(blobIdentifier);
assertThat(blobValue.content).isEqualTo(expectedWrittenBytes);
} | Write each of the partial byte buffers and confirm we get the expected results, including a
valid checksum and the expected data in the storage.
@throws IOException On storage failure. | shouldWriteProperly | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/writer/GSChecksumWriteChannelTest.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/writer/GSChecksumWriteChannelTest.java | Apache-2.0 |
// Failure path: forcing the mock storage to report an empty checksum makes it differ
// from the locally computed one, so close() must throw an IOException.
@TestTemplate
void shouldThrowOnChecksumMismatch() throws IOException {
MockBlobStorage blobStorage = new MockBlobStorage();
// force the storage-reported checksum to a value that can never match
blobStorage.forcedChecksum = "";
GSBlobStorage.WriteChannel writeChannel = blobStorage.writeBlob(blobIdentifier);
GSChecksumWriteChannel checksumWriteChannel =
new GSChecksumWriteChannel(blobStorage, writeChannel, blobIdentifier);
// write each partial buffer and validate the written count
for (int i = 0; i < byteBuffers.length; i++) {
int writtenCount =
checksumWriteChannel.write(byteBuffers[i], writeStarts[i], writeLengths[i]);
assertThat(writtenCount).isEqualTo(writeLengths[i]);
}
// close the write, this also validates the checksum
assertThatThrownBy(() -> checksumWriteChannel.close()).isInstanceOf(IOException.class);
} | Simulate a checksum failure and confirm an exception is thrown.
@throws IOException On checksum failure. | shouldThrowOnChecksumMismatch | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/writer/GSChecksumWriteChannelTest.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/writer/GSChecksumWriteChannelTest.java | Apache-2.0 |
// After commit(), only the final blob may remain in storage (all temporary component
// blobs cleaned up) and its content must match the expected bytes.
@TestTemplate
void commitTest() throws IOException {
GSRecoverableWriterCommitter committer = commitTestInternal();
committer.commit();
// there should be exactly one blob left, the final blob identifier. validate its contents.
assertThat(blobStorage.blobs).hasSize(1);
MockBlobStorage.BlobValue blobValue = blobStorage.blobs.get(blobIdentifier);
assertThat(blobValue).isNotNull();
assertThat(blobValue.content).isEqualTo(expectedBytes.toByteArray());
} | Test writing a blob.
@throws IOException On underlying failure | commitTest | java | apache/flink | flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/writer/GSRecoverableWriterCommitterTest.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/writer/GSRecoverableWriterCommitterTest.java | Apache-2.0 |
private static String stripHostname(final String originalHostname) {
    // Find the first domain separator; nothing to strip when there is none.
    final int separatorPos = originalHostname.indexOf(DOMAIN_SEPARATOR);
    if (separatorPos < 0) {
        return originalHostname;
    }

    // IPv4 addresses contain dots but must never be truncated.
    if (IPV4_PATTERN.matcher(originalHostname).matches()) {
        return originalHostname;
    }

    // A hostname may not begin with the separator.
    if (separatorPos == 0) {
        throw new IllegalStateException(
                "Hostname " + originalHostname + " starts with a " + DOMAIN_SEPARATOR);
    }

    // Keep only the part before the first separator.
    return originalHostname.substring(0, separatorPos);
} | Looks for a domain suffix in a FQDN and strips it if present.
@param originalHostname the original hostname, possibly an FQDN
@return the stripped hostname without the domain suffix | stripHostname | java | apache/flink | flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopBlockLocation.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopBlockLocation.java | Apache-2.0 |
// Exposes the wrapped Hadoop FSDataInputStream for callers needing direct access.
public org.apache.hadoop.fs.FSDataInputStream getHadoopInputStream() {
return fsDataInputStream;
} | Gets the wrapped Hadoop input stream.
@return The wrapped Hadoop input stream. | getHadoopInputStream | java | apache/flink | flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataInputStream.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataInputStream.java | Apache-2.0 |
// Exposes the wrapped Hadoop FSDataOutputStream for callers needing direct access.
public org.apache.hadoop.fs.FSDataOutputStream getHadoopOutputStream() {
return fdos;
} | Gets the wrapped Hadoop output stream.
@return The wrapped Hadoop output stream. | getHadoopOutputStream | java | apache/flink | flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataOutputStream.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataOutputStream.java | Apache-2.0 |
static FileSystemKind getKindForScheme(String scheme) {
    final String lowerScheme = scheme.toLowerCase(Locale.US);

    // the Amazon S3 storage or Aliyun OSS storage or Azure Blob Storage
    // or Google Cloud Storage
    for (String objectStorePrefix : new String[] {"s3", "emr", "oss", "wasb", "gs"}) {
        if (lowerScheme.startsWith(objectStorePrefix)) {
            return FileSystemKind.OBJECT_STORE;
        }
    }

    // file servers instead of file systems; they might actually be consistent,
    // but we currently have no hard guarantees to rely on that
    if (lowerScheme.startsWith("http") || lowerScheme.startsWith("ftp")) {
        return FileSystemKind.OBJECT_STORE;
    }

    // the remainder should include hdfs, kosmos, ceph, ...
    // this also includes federated HDFS (viewfs)
    return FileSystemKind.FILE_SYSTEM;
} | Gets the kind of the file system from its scheme.
<p>Implementation note: Initially, especially within the Flink 1.3.x line (in order to not
break backwards compatibility), we must only label file systems as 'inconsistent' or as 'not
proper filesystems' if we are sure about it. Otherwise, we cause regression for example in
the performance and cleanup handling of checkpoints. For that reason, we initially mark some
filesystems as 'eventually consistent' or as 'object stores', and leave the others as
'consistent file systems'. | getKindForScheme | java | apache/flink | flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileSystem.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileSystem.java | Apache-2.0 |
private static boolean revokeLeaseByFileSystem(final FileSystem fs, final Path path)
        throws IOException {
    // For a plain file system, revoke the lease directly.
    if (!(fs instanceof ViewFileSystem)) {
        return waitUntilLeaseIsRevoked(fs, path);
    }
    // A ViewFileSystem mounts other file systems; resolve to the backing file
    // system and path first and revoke the lease there.
    final Path resolvedPath = ((ViewFileSystem) fs).resolvePath(path);
    final FileSystem backingFs = resolvedPath.getFileSystem(fs.getConf());
    return waitUntilLeaseIsRevoked(backingFs, resolvedPath);
} | Resolve the real path of FileSystem if it is {@link ViewFileSystem} and revoke the lease of
the file we are resuming with different FileSystem.
@param path The path to the file we want to resume writing to. | revokeLeaseByFileSystem | java | apache/flink | flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopRecoverableFsDataOutputStream.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopRecoverableFsDataOutputStream.java | Apache-2.0 |
@Override
public BlockLocation[] getBlockLocations() {
final org.apache.hadoop.fs.BlockLocation[] hadoopLocations =
((org.apache.hadoop.fs.LocatedFileStatus) getInternalFileStatus())
.getBlockLocations();
final HadoopBlockLocation[] locations = new HadoopBlockLocation[hadoopLocations.length];
for (int i = 0; i < locations.length; i++) {
locations[i] = new HadoopBlockLocation(hadoopLocations[i]);
}
return locations;
} | Creates a new located file status from an HDFS file status. | getBlockLocations | java | apache/flink | flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/LocatedHadoopFileStatus.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/LocatedHadoopFileStatus.java | Apache-2.0 |
public void setFlinkConfig(Configuration config) {
flinkConfig = config;
hadoopConfig = null;
} | Hadoop's configuration for the file systems, lazily initialized. | setFlinkConfig | java | apache/flink | flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/util/HadoopConfigLoader.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/util/HadoopConfigLoader.java | Apache-2.0 |
public static boolean hasHDFSDelegationToken(UserGroupInformation ugi) {
Collection<Token<? extends TokenIdentifier>> usrTok = ugi.getTokens();
for (Token<? extends TokenIdentifier> token : usrTok) {
if (token.getKind().equals(HDFS_DELEGATION_TOKEN_KIND)) {
return true;
}
}
return false;
} | Indicates whether the user has an HDFS delegation token. | hasHDFSDelegationToken | java | apache/flink | flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/util/HadoopUtils.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/util/HadoopUtils.java | Apache-2.0 |
public static boolean isMinHadoopVersion(int major, int minor) throws FlinkRuntimeException {
final Tuple2<Integer, Integer> hadoopVersion = getMajorMinorBundledHadoopVersion();
int maj = hadoopVersion.f0;
int min = hadoopVersion.f1;
return maj > major || (maj == major && min >= minor);
} | Checks if the Hadoop dependency is at least the given version. | isMinHadoopVersion | java | apache/flink | flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/util/HadoopUtils.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/util/HadoopUtils.java | Apache-2.0 |
private static boolean addHadoopConfIfFound(
Configuration configuration, String possibleHadoopConfPath) {
boolean foundHadoopConfiguration = false;
if (new File(possibleHadoopConfPath).exists()) {
if (new File(possibleHadoopConfPath + "/core-site.xml").exists()) {
configuration.addResource(
new org.apache.hadoop.fs.Path(possibleHadoopConfPath + "/core-site.xml"));
LOG.debug(
"Adding "
+ possibleHadoopConfPath
+ "/core-site.xml to hadoop configuration");
foundHadoopConfiguration = true;
}
if (new File(possibleHadoopConfPath + "/hdfs-site.xml").exists()) {
configuration.addResource(
new org.apache.hadoop.fs.Path(possibleHadoopConfPath + "/hdfs-site.xml"));
LOG.debug(
"Adding "
+ possibleHadoopConfPath
+ "/hdfs-site.xml to hadoop configuration");
foundHadoopConfiguration = true;
}
}
return foundHadoopConfiguration;
} | Search Hadoop configuration files in the given path, and add them to the configuration if
found. | addHadoopConfIfFound | java | apache/flink | flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/util/HadoopUtils.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/util/HadoopUtils.java | Apache-2.0 |
public static void test() throws Exception {
// make sure no Hadoop FS classes are in the classpath
assertThatThrownBy(() -> Class.forName("org.apache.hadoop.fs.FileSystem"))
.describedAs("Cannot run test when Hadoop classes are in the classpath")
.isInstanceOf(ClassNotFoundException.class);
assertThatThrownBy(() -> Class.forName("org.apache.hadoop.conf.Configuration"))
.describedAs("Cannot run test when Hadoop classes are in the classpath")
.isInstanceOf(ClassNotFoundException.class);
// this method should complete without a linkage error
final HadoopFsFactory factory = new HadoopFsFactory();
// this method should also complete without a linkage error
factory.configure(new Configuration());
assertThatThrownBy(() -> factory.create(new URI("hdfs://somehost:9000/root/dir")))
.isInstanceOf(UnsupportedFileSystemSchemeException.class);
} | A class with tests that require to be run in a Hadoop-free environment, to test proper error
handling when no Hadoop classes are available.
<p>This class must be dynamically loaded in a Hadoop-free class loader. | test | java | apache/flink | flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopFreeTests.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopFreeTests.java | Apache-2.0 |
@Override
protected void testMkdirsFailsForExistingFile() throws Exception {
final String versionString = VersionInfo.getVersion();
final String prefix = versionString.substring(0, 3);
final float version = Float.parseFloat(prefix);
assumeThat(version)
.describedAs("Cannot execute this test on Hadoop prior to 2.8")
.isGreaterThanOrEqualTo(2.8f);
super.testMkdirsFailsForExistingFile();
} | This test needs to be skipped for earlier Hadoop versions because those have a bug. | testMkdirsFailsForExistingFile | java | apache/flink | flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopLocalFileSystemBehaviorTest.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopLocalFileSystemBehaviorTest.java | Apache-2.0 |
private void disableExternalResourceFetching(XMLReader reader)
throws SAXNotRecognizedException, SAXNotSupportedException {
reader.setFeature("http://xml.org/sax/features/external-general-entities", false);
reader.setFeature("http://xml.org/sax/features/external-parameter-entities", false);
reader.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false);
} | Disables certain dangerous features that attempt to automatically fetch DTDs
<p>See <a
href="https://www.owasp.org/index.php/XML_External_Entity_(XXE)_Prevention_Cheat_Sheet#XMLReader">OWASP
XXE Cheat Sheet</a>
@param reader the reader to disable the features on
@throws SAXNotRecognizedException
@throws SAXNotSupportedException | disableExternalResourceFetching | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
private static String checkForEmptyString(String s) {
if (s == null) return null;
if (s.length() == 0) return null;
return s;
} | Checks if the specified string is empty or null and if so, returns null. Otherwise simply
returns the string.
@param s The string to check.
@return Null if the specified string was null, or empty, otherwise returns the string the
caller passed in. | checkForEmptyString | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
private static int parseInt(String s) {
try {
return Integer.parseInt(s);
} catch (NumberFormatException nfe) {
log.error("Unable to parse integer value '" + s + "'", nfe);
}
return -1;
} | Safely parses the specified string as an integer and returns the value. If a
NumberFormatException occurs while parsing the integer, an error is logged and -1 is
returned.
@param s The string to parse and return as an integer.
@return The integer value of the specified string, otherwise -1 if there were any problems
parsing the string as an integer. | parseInt | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
private static long parseLong(String s) {
try {
return Long.parseLong(s);
} catch (NumberFormatException nfe) {
log.error("Unable to parse long value '" + s + "'", nfe);
}
return -1;
} | Safely parses the specified string as a long and returns the value. If a
NumberFormatException occurs while parsing the long, an error is logged and -1 is returned.
@param s The string to parse and return as a long.
@return The long value of the specified string, otherwise -1 if there were any problems
parsing the string as a long. | parseLong | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
private static String decodeIfSpecified(String value, boolean decode) {
return decode ? SdkHttpUtils.urlDecode(value) : value;
} | Perform a url decode on the given value if specified. Return value by default; | decodeIfSpecified | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
public ListBucketHandler parseListBucketObjectsResponse(
InputStream inputStream, final boolean shouldSDKDecodeResponse) throws IOException {
ListBucketHandler handler = new ListBucketHandler(shouldSDKDecodeResponse);
parseXmlInputStream(handler, sanitizeXmlDocument(handler, inputStream));
return handler;
} | Parses a ListBucket response XML document from an input stream.
@param inputStream XML data input stream.
@return the XML handler object populated with data parsed from the XML stream.
@throws SdkClientException | parseListBucketObjectsResponse | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
public ListObjectsV2Handler parseListObjectsV2Response(
InputStream inputStream, final boolean shouldSDKDecodeResponse) throws IOException {
ListObjectsV2Handler handler = new ListObjectsV2Handler(shouldSDKDecodeResponse);
parseXmlInputStream(handler, sanitizeXmlDocument(handler, inputStream));
return handler;
} | Parses a ListBucketV2 response XML document from an input stream.
@param inputStream XML data input stream.
@return the XML handler object populated with data parsed from the XML stream.
@throws SdkClientException | parseListObjectsV2Response | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
public ListVersionsHandler parseListVersionsResponse(
InputStream inputStream, final boolean shouldSDKDecodeResponse) throws IOException {
ListVersionsHandler handler = new ListVersionsHandler(shouldSDKDecodeResponse);
parseXmlInputStream(handler, sanitizeXmlDocument(handler, inputStream));
return handler;
} | Parses a ListVersions response XML document from an input stream.
@param inputStream XML data input stream.
@return the XML handler object populated with data parsed from the XML stream.
@throws SdkClientException | parseListVersionsResponse | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
public ListAllMyBucketsHandler parseListMyBucketsResponse(InputStream inputStream)
throws IOException {
ListAllMyBucketsHandler handler = new ListAllMyBucketsHandler();
parseXmlInputStream(handler, sanitizeXmlDocument(handler, inputStream));
return handler;
} | Parses a ListAllMyBuckets response XML document from an input stream.
@param inputStream XML data input stream.
@return the XML handler object populated with data parsed from the XML stream.
@throws SdkClientException | parseListMyBucketsResponse | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
public AccessControlListHandler parseAccessControlListResponse(InputStream inputStream)
throws IOException {
AccessControlListHandler handler = new AccessControlListHandler();
parseXmlInputStream(handler, inputStream);
return handler;
} | Parses an AccessControlListHandler response XML document from an input stream.
@param inputStream XML data input stream.
@return the XML handler object populated with data parsed from the XML stream.
@throws SdkClientException | parseAccessControlListResponse | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
public RequestPaymentConfigurationHandler parseRequestPaymentConfigurationResponse(
InputStream inputStream) throws IOException {
RequestPaymentConfigurationHandler handler = new RequestPaymentConfigurationHandler();
parseXmlInputStream(handler, inputStream);
return handler;
} | @param inputStream
@return true if the bucket's is configured as Requester Pays, false if it is configured as
Owner pays.
@throws SdkClientException | parseRequestPaymentConfigurationResponse | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
public List<Bucket> getBuckets() {
return buckets;
} | @return the buckets listed in the document. | getBuckets | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
@Override
public Date getExpirationTime() {
return result == null ? null : result.getExpirationTime();
} | @see com.amazonaws.services.s3.model.CompleteMultipartUploadResult#getExpirationTime() | getExpirationTime | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
@Override
public void setExpirationTime(Date expirationTime) {
if (result != null) {
result.setExpirationTime(expirationTime);
}
} | @see
com.amazonaws.services.s3.model.CompleteMultipartUploadResult#setExpirationTime(java.util.Date) | setExpirationTime | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
@Override
public String getExpirationTimeRuleId() {
return result == null ? null : result.getExpirationTimeRuleId();
} | @see
com.amazonaws.services.s3.model.CompleteMultipartUploadResult#getExpirationTimeRuleId() | getExpirationTimeRuleId | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
public boolean isRequesterCharged() {
return result == null ? false : result.isRequesterCharged();
} | @see com.amazonaws.services.s3.model.CompleteMultipartUploadResult#isRequesterCharged() | isRequesterCharged | java | apache/flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | Apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.