code
stringlengths 25
201k
| docstring
stringlengths 19
96.2k
| func_name
stringlengths 0
235
| language
stringclasses 1
value | repo
stringlengths 8
51
| path
stringlengths 11
314
| url
stringlengths 62
377
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
/**
 * Deletes the cleanup timer previously set for the contents of the provided window.
 *
 * @param window the window whose cleanup timer should be removed
 * @param windowAssigner the window assigner, used to decide event vs. processing time
 * @param triggerContext the trigger context through which the timer is deleted
 * @param allowedLateness the allowed lateness of the window
 */
public static <W extends Window> void deleteCleanupTimer(
        W window,
        WindowAssigner<?, ?> windowAssigner,
        WindowTriggerContext<?, ?, W> triggerContext,
        long allowedLateness) {
    final long timerTimestamp = cleanupTime(window, windowAssigner, allowedLateness);
    // Long.MAX_VALUE means no cleanup timer was ever registered, so nothing to delete.
    if (timerTimestamp != Long.MAX_VALUE) {
        if (windowAssigner.isEventTime()) {
            triggerContext.deleteEventTimeTimer(timerTimestamp);
        } else {
            triggerContext.deleteProcessingTimeTimer(timerTimestamp);
        }
    }
}
|
Deletes the cleanup timer set for the contents of the provided window.
@param window the window whose state to discard
@param windowAssigner The window assigner
@param triggerContext The trigger context
@param allowedLateness The allowed lateness of the window
|
deleteCleanupTimer
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/extension/window/utils/WindowUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/extension/window/utils/WindowUtils.java
|
Apache-2.0
|
/**
 * Registers a timer that will clean up the contents of the provided window.
 *
 * @param window the window whose state should eventually be discarded
 * @param windowAssigner the window assigner, used to decide event vs. processing time
 * @param triggerContext the trigger context through which the timer is registered
 * @param allowedLateness the allowed lateness of the window
 */
public static <W extends Window> void registerCleanupTimer(
        W window,
        WindowAssigner<?, ?> windowAssigner,
        WindowTriggerContext<?, ?, W> triggerContext,
        long allowedLateness) {
    final long timerTimestamp = cleanupTime(window, windowAssigner, allowedLateness);
    // A cleanup time of Long.MAX_VALUE stands for "end of time"; no GC timer is set.
    if (timerTimestamp != Long.MAX_VALUE) {
        if (windowAssigner.isEventTime()) {
            triggerContext.registerEventTimeTimer(timerTimestamp);
        } else {
            triggerContext.registerProcessingTimeTimer(timerTimestamp);
        }
    }
}
|
Registers a timer to cleanup the content of the window.
@param window the window whose state to discard
@param windowAssigner The window assigner
@param triggerContext The trigger context
@param allowedLateness The allowed lateness of the window
|
registerCleanupTimer
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/extension/window/utils/WindowUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/extension/window/utils/WindowUtils.java
|
Apache-2.0
|
/**
 * Computes the cleanup time for a window: {@code window.maxTimestamp() + allowedLateness} in
 * event time, or simply {@code window.maxTimestamp()} in processing time. If the event-time
 * sum overflows {@link Long#MAX_VALUE}, {@link Long#MAX_VALUE} is returned instead.
 *
 * @param window the window whose cleanup time is being computed
 * @param windowAssigner the window assigner, used to decide event vs. processing time
 * @param allowedLateness the allowed lateness of the window
 * @return the timestamp at which the window's state may be cleaned up
 */
private static <W extends Window> long cleanupTime(
        W window, WindowAssigner<?, ?> windowAssigner, long allowedLateness) {
    // Processing time: clean up right at the window's max timestamp.
    if (!windowAssigner.isEventTime()) {
        return window.maxTimestamp();
    }
    // Event time: add lateness; a wrapped-around (overflowed) sum maps to "end of time".
    final long candidate = window.maxTimestamp() + allowedLateness;
    return candidate >= window.maxTimestamp() ? candidate : Long.MAX_VALUE;
}
|
Returns the cleanup time for a window, which is {@code window.maxTimestamp +
allowedLateness}. In case this leads to a value greater than {@link Long#MAX_VALUE} then a
cleanup time of {@link Long#MAX_VALUE} is returned.
@param window the window whose cleanup time we are computing.
@param windowAssigner The window assigner
@param allowedLateness The allowed lateness of the window
|
cleanupTime
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/extension/window/utils/WindowUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/extension/window/utils/WindowUtils.java
|
Apache-2.0
|
/** Returns the output type information of this stream's underlying transformation. */
public TypeInformation<T> getType() {
    final TypeInformation<T> outputType = transformation.getOutputType();
    return outputType;
}
|
We keep track of the side outputs that were already requested and their types. With this, we
can catch the case when a side output with a matching id is requested for a different type
because this would lead to problems at runtime.
|
getType
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/stream/AbstractDataStream.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/stream/AbstractDataStream.java
|
Apache-2.0
|
/**
 * Get the output type information for a {@link OneInputStreamProcessFunction} from the input
 * type information.
 *
 * <p>Fix: removed the local variable {@code outType}, which was declared but never used.
 *
 * @param processFunction the process function whose output type is to be determined
 * @param inTypeInformation type information of the function's input
 * @return the extracted output type information
 */
public static <IN, OUT> TypeInformation<OUT> getOutputTypeForOneInputProcessFunction(
        OneInputStreamProcessFunction<IN, OUT> processFunction,
        TypeInformation<IN> inTypeInformation) {
    // Window wrapper: extract the output type from the inner window process function.
    // The input type is passed as null and extraction is lenient (last arg false) —
    // presumably the window function's input type is validated elsewhere; TODO confirm.
    if (processFunction instanceof InternalOneInputWindowStreamProcessFunction) {
        return TypeExtractor.getUnaryOperatorReturnType(
                ((InternalOneInputWindowStreamProcessFunction<IN, OUT, ?>) processFunction)
                        .getWindowProcessFunction(),
                OneInputWindowStreamProcessFunction.class,
                0,
                1,
                new int[] {1, 0},
                null,
                Utils.getCallLocationName(),
                false);
    }
    // Event-time wrapper: unwrap and run extraction against the user's original function.
    if (processFunction instanceof EventTimeWrappedOneInputStreamProcessFunction) {
        processFunction =
                ((EventTimeWrappedOneInputStreamProcessFunction) processFunction)
                        .getWrappedUserFunction();
    }
    return TypeExtractor.getUnaryOperatorReturnType(
            processFunction,
            OneInputStreamProcessFunction.class,
            0,
            1,
            new int[] {1, 0},
            inTypeInformation,
            Utils.getCallLocationName(),
            true);
}
|
Get the output type information for {@link OneInputStreamProcessFunction} from input type
information.
|
getOutputTypeForOneInputProcessFunction
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
Apache-2.0
|
/**
 * Get the output type information for a {@link TwoInputNonBroadcastStreamProcessFunction}
 * from the two input type information.
 *
 * @param processFunction the process function whose output type is to be determined
 * @param in1TypeInformation type information of the first input
 * @param in2TypeInformation type information of the second input
 * @return the extracted output type information
 */
public static <IN1, IN2, OUT>
        TypeInformation<OUT> getOutputTypeForTwoInputNonBroadcastProcessFunction(
                TwoInputNonBroadcastStreamProcessFunction<IN1, IN2, OUT> processFunction,
                TypeInformation<IN1> in1TypeInformation,
                TypeInformation<IN2> in2TypeInformation) {
    // Built-in join wrapper: extract the output type from the wrapped JoinFunction.
    if (processFunction instanceof TwoInputNonBroadcastJoinProcessFunction) {
        return TypeExtractor.getBinaryOperatorReturnType(
                ((TwoInputNonBroadcastJoinProcessFunction<IN1, IN2, OUT>) processFunction)
                        .getJoinFunction(),
                JoinFunction.class,
                0,
                1,
                2,
                TypeExtractor.NO_INDEX,
                in1TypeInformation,
                in2TypeInformation,
                Utils.getCallLocationName(),
                true);
    }
    // Window wrapper: extract from the inner window process function. Input types are
    // passed as null and extraction is lenient (last arg false) — presumably the window
    // function's input types are validated elsewhere; TODO confirm.
    if (processFunction instanceof InternalTwoInputWindowStreamProcessFunction) {
        return TypeExtractor.getBinaryOperatorReturnType(
                ((InternalTwoInputWindowStreamProcessFunction<IN1, IN2, OUT, ?>)
                                processFunction)
                        .getWindowProcessFunction(),
                TwoInputNonBroadcastWindowStreamProcessFunction.class,
                0,
                1,
                2,
                new int[] {2, 0},
                null,
                null,
                Utils.getCallLocationName(),
                false);
    }
    // Event-time wrapper: unwrap and run extraction against the user's original function.
    if (processFunction instanceof EventTimeWrappedTwoInputNonBroadcastStreamProcessFunction) {
        processFunction =
                ((EventTimeWrappedTwoInputNonBroadcastStreamProcessFunction) processFunction)
                        .getWrappedUserFunction();
    }
    return TypeExtractor.getBinaryOperatorReturnType(
            processFunction,
            TwoInputNonBroadcastStreamProcessFunction.class,
            0,
            1,
            2,
            TypeExtractor.NO_INDEX,
            in1TypeInformation,
            in2TypeInformation,
            Utils.getCallLocationName(),
            true);
}
|
Get the output type information for {@link TwoInputNonBroadcastStreamProcessFunction} from
two input type information.
|
getOutputTypeForTwoInputNonBroadcastProcessFunction
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
Apache-2.0
|
/**
 * Get the output type information for a {@link TwoInputBroadcastStreamProcessFunction} from
 * the two input type information.
 *
 * @param processFunction the process function whose output type is to be determined
 * @param in1TypeInformation type information of the first (non-broadcast) input
 * @param in2TypeInformation type information of the second (broadcast) input
 * @return the extracted output type information
 */
public static <IN1, IN2, OUT>
        TypeInformation<OUT> getOutputTypeForTwoInputBroadcastProcessFunction(
                TwoInputBroadcastStreamProcessFunction<IN1, IN2, OUT> processFunction,
                TypeInformation<IN1> in1TypeInformation,
                TypeInformation<IN2> in2TypeInformation) {
    // Use a local instead of reassigning the parameter; event-time wrappers are
    // unwrapped so extraction runs against the user's original function.
    TwoInputBroadcastStreamProcessFunction<IN1, IN2, OUT> targetFunction = processFunction;
    if (targetFunction instanceof EventTimeWrappedTwoInputBroadcastStreamProcessFunction) {
        targetFunction =
                ((EventTimeWrappedTwoInputBroadcastStreamProcessFunction) targetFunction)
                        .getWrappedUserFunction();
    }
    return TypeExtractor.getBinaryOperatorReturnType(
            targetFunction,
            TwoInputBroadcastStreamProcessFunction.class,
            0,
            1,
            2,
            TypeExtractor.NO_INDEX,
            in1TypeInformation,
            in2TypeInformation,
            Utils.getCallLocationName(),
            true);
}
|
Get the output type information for {@link TwoInputBroadcastStreamProcessFunction} from two
input type information.
|
getOutputTypeForTwoInputBroadcastProcessFunction
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
Apache-2.0
|
/**
 * Get the output types information for a {@link TwoOutputStreamProcessFunction} from the
 * input type information.
 *
 * @param twoOutputStreamProcessFunction the process function whose two output types are to
 *     be determined
 * @param inTypeInformation type information of the function's input
 * @return a {@link Tuple2} of (first output type, second output type)
 */
public static <IN, OUT1, OUT2>
        Tuple2<TypeInformation<OUT1>, TypeInformation<OUT2>>
                getOutputTypesForTwoOutputProcessFunction(
                        TwoOutputStreamProcessFunction<IN, OUT1, OUT2>
                                twoOutputStreamProcessFunction,
                        TypeInformation<IN> inTypeInformation) {
    // Window wrapper: extract both output types from the inner window process function.
    // Output type parameter indices 1 and 2 select OUT1 and OUT2 respectively.
    if (twoOutputStreamProcessFunction
            instanceof InternalTwoOutputWindowStreamProcessFunction) {
        TypeInformation<OUT1> firstOutputType =
                TypeExtractor.getUnaryOperatorReturnType(
                        ((InternalTwoOutputWindowStreamProcessFunction<IN, OUT1, OUT2, ?>)
                                        twoOutputStreamProcessFunction)
                                .getWindowProcessFunction(),
                        TwoOutputWindowStreamProcessFunction.class,
                        0,
                        1,
                        new int[] {1, 0},
                        inTypeInformation,
                        Utils.getCallLocationName(),
                        true);
        TypeInformation<OUT2> secondOutputType =
                TypeExtractor.getUnaryOperatorReturnType(
                        ((InternalTwoOutputWindowStreamProcessFunction<IN, OUT1, OUT2, ?>)
                                        twoOutputStreamProcessFunction)
                                .getWindowProcessFunction(),
                        TwoOutputWindowStreamProcessFunction.class,
                        0,
                        2,
                        new int[] {2, 0},
                        inTypeInformation,
                        Utils.getCallLocationName(),
                        true);
        return Tuple2.of(firstOutputType, secondOutputType);
    }
    // Event-time wrapper: unwrap and run extraction against the user's original function.
    if (twoOutputStreamProcessFunction
            instanceof EventTimeWrappedTwoOutputStreamProcessFunction) {
        twoOutputStreamProcessFunction =
                ((EventTimeWrappedTwoOutputStreamProcessFunction)
                                twoOutputStreamProcessFunction)
                        .getWrappedUserFunction();
    }
    TypeInformation<OUT1> firstOutputType =
            TypeExtractor.getUnaryOperatorReturnType(
                    twoOutputStreamProcessFunction,
                    TwoOutputStreamProcessFunction.class,
                    0,
                    1,
                    new int[] {1, 0},
                    inTypeInformation,
                    Utils.getCallLocationName(),
                    true);
    TypeInformation<OUT2> secondOutputType =
            TypeExtractor.getUnaryOperatorReturnType(
                    twoOutputStreamProcessFunction,
                    TwoOutputStreamProcessFunction.class,
                    0,
                    2,
                    new int[] {2, 0},
                    inTypeInformation,
                    Utils.getCallLocationName(),
                    true);
    return Tuple2.of(firstOutputType, secondOutputType);
}
|
Get output types information for {@link TwoOutputStreamProcessFunction} from the input type
information.
|
getOutputTypesForTwoOutputProcessFunction
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
Apache-2.0
|
/**
 * Constructs and returns a {@link OneInputTransformation} from a non-keyed input stream.
 *
 * @param operatorName the name of the operator
 * @param inputStream the upstream data stream
 * @param outTypeInformation type information of the operator's output
 * @param operator the one-input stream operator to wrap
 * @return the resulting transformation (not yet registered with the environment)
 */
public static <T, R> OneInputTransformation<T, R> getOneInputTransformation(
        String operatorName,
        AbstractDataStream<T> inputStream,
        TypeInformation<R> outTypeInformation,
        OneInputStreamOperator<T, R> operator) {
    // Touch the input's output type so a MissingTypeInfo error surfaces here, not later.
    inputStream.getTransformation().getOutputType();
    return new OneInputTransformation<>(
            inputStream.getTransformation(),
            operatorName,
            SimpleUdfStreamOperatorFactory.of(operator),
            outTypeInformation,
            inputStream.getEnvironment().getParallelism(),
            false);
}
|
Construct and return a {@link OneInputTransformation} from non-keyed input streams.
|
getOneInputTransformation
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
Apache-2.0
|
/**
 * Constructs and returns a {@link OneInputTransformation} from a keyed input stream.
 *
 * @param operatorName the name of the operator
 * @param inputStream the upstream data stream
 * @param outTypeInformation type information of the operator's output
 * @param operator the one-input stream operator to wrap
 * @param keySelector selector extracting the state key from input records
 * @param keyType type information of the state key
 * @return the resulting keyed transformation
 */
public static <T, R, K> OneInputTransformation<T, R> getOneInputKeyedTransformation(
        String operatorName,
        AbstractDataStream<T> inputStream,
        TypeInformation<R> outTypeInformation,
        OneInputStreamOperator<T, R> operator,
        KeySelector<T, K> keySelector,
        TypeInformation<K> keyType) {
    final OneInputTransformation<T, R> keyedTransform =
            getOneInputTransformation(operatorName, inputStream, outTypeInformation, operator);
    // Attach keying information so operator state is partitioned by the given key.
    keyedTransform.setStateKeySelector(keySelector);
    keyedTransform.setStateKeyType(keyType);
    return keyedTransform;
}
|
Construct and return a {@link OneInputTransformation} from keyed input streams.
|
getOneInputKeyedTransformation
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
Apache-2.0
|
/**
 * Constructs and returns a new data stream with a one-input operator appended.
 *
 * @param operatorName the name of the operator
 * @param inputStream the upstream data stream
 * @param outTypeInfo type information of the operator's output
 * @param operatorFactory factory creating the stream operator
 * @return the resulting (non-keyed) downstream data stream
 */
public static <T, R> AbstractDataStream<R> transformOneInputOperator(
        String operatorName,
        AbstractDataStream<T> inputStream,
        TypeInformation<R> outTypeInfo,
        StreamOperatorFactory<R> operatorFactory) {
    // Touch the input's output type so a MissingTypeInfo error surfaces here, not later.
    inputStream.getTransformation().getOutputType();
    final OneInputTransformation<T, R> transform =
            new OneInputTransformation<>(
                    inputStream.getTransformation(),
                    operatorName,
                    operatorFactory,
                    outTypeInfo,
                    inputStream.getEnvironment().getParallelism(),
                    false);
    final NonKeyedPartitionStreamImpl<R> outputStream =
            new NonKeyedPartitionStreamImpl<>(inputStream.getEnvironment(), transform);
    // Register the transformation so it becomes part of the job graph.
    inputStream.getEnvironment().addOperator(transform);
    return outputStream;
}
|
Construct and return a new DataStream with one input operator.
|
transformOneInputOperator
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
Apache-2.0
|
/**
 * Constructs and returns a {@link OneInputTransformation} of a two-output window operator.
 *
 * @param executionConfig the execution config, used to obtain the window serializer
 * @param inputStream the upstream data stream
 * @param inputType type information of the input records (buffered in window state)
 * @param outType1 type information of the first (main) output
 * @param outType2 type information of the second (side) output
 * @param secondOutputTag output tag identifying the second output
 * @param internalWindowFunction the internal two-output window process function
 * @param keySelector selector extracting the state key from input records
 * @param keyType type information of the state key
 * @return the keyed transformation producing the first output type
 */
public static <K, IN, OUT1, OUT2, W extends Window>
        Transformation<OUT1> transformTwoOutputWindow(
                ExecutionConfig executionConfig,
                AbstractDataStream<IN> inputStream,
                TypeInformation<IN> inputType,
                TypeInformation<OUT1> outType1,
                TypeInformation<OUT2> outType2,
                OutputTag<OUT2> secondOutputTag,
                InternalTwoOutputWindowStreamProcessFunction<IN, OUT1, OUT2, W>
                        internalWindowFunction,
                KeySelector<IN, K> keySelector,
                TypeInformation<K> keyType) {
    WindowAssigner<IN, W> assigner = internalWindowFunction.getAssigner();
    // List state buffers the raw input elements per window until the window fires.
    ListStateDescriptor<IN> stateDesc =
            new ListStateDescriptor<>("two-output-window-state", inputType);
    TwoOutputWindowProcessOperator<K, IN, OUT1, OUT2, W> windowProcessOperator =
            new TwoOutputWindowProcessOperator<>(
                    internalWindowFunction,
                    secondOutputTag,
                    // NOTE(review): the two null arguments' meanings are not visible from
                    // here — confirm against the TwoOutputWindowProcessOperator constructor.
                    null,
                    null,
                    assigner,
                    internalWindowFunction.getTrigger(),
                    assigner.getWindowSerializer(executionConfig),
                    stateDesc,
                    internalWindowFunction.getAllowedLateness());
    return StreamUtils.getOneInputKeyedTransformation(
            "TwoOutput-Window",
            inputStream,
            outType1,
            windowProcessOperator,
            keySelector,
            keyType);
}
|
Construct and return a {@link OneInputTransformation} of two output window operator.
|
transformTwoOutputWindow
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
Apache-2.0
|
/**
 * Adds a sink operator to the given input stream.
 *
 * @param inputStream the upstream data stream
 * @param sink the sink; must be a {@code WrappedSink} produced by DataStreamV2SinkUtils
 * @param typeInformation type information of the records written to the sink
 * @return the registered sink transformation
 * @throws UnsupportedOperationException if the sink is not a {@code WrappedSink}
 */
public static <T> DataStreamV2SinkTransformation<T, T> addSinkOperator(
        AbstractDataStream<T> inputStream, Sink<T> sink, TypeInformation<T> typeInformation) {
    // Touch the input's output type so a MissingTypeInfo error surfaces here, not later.
    inputStream.getTransformation().getOutputType();
    if (!(sink instanceof WrappedSink)) {
        throw new UnsupportedOperationException(
                "Unsupported type of sink, please use DataStreamV2SinkUtils to wrap a sink-v2 sink first.");
    }
    final org.apache.flink.api.connector.sink2.Sink<T> sinkV2 =
            ((WrappedSink<T>) sink).getWrappedSink();
    final DataStreamV2SinkTransformation<T, T> transformation =
            new DataStreamV2SinkTransformation<>(
                    inputStream,
                    sinkV2,
                    typeInformation,
                    "Sink",
                    inputStream.getEnvironment().getParallelism(),
                    false);
    // Register the transformation so it becomes part of the job graph.
    inputStream.getEnvironment().addOperator(transformation);
    return transformation;
}
|
Add sink operator to the input stream.
|
addSinkOperator
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
Apache-2.0
|
/** Wraps a {@link NonKeyedPartitionStreamImpl} with a configure handle. */
public static <T> ProcessConfigurableAndNonKeyedPartitionStream<T> wrapWithConfigureHandle(
        NonKeyedPartitionStreamImpl<T> stream) {
    final ProcessConfigurableAndNonKeyedPartitionStreamImpl<T> configurableStream =
            new ProcessConfigurableAndNonKeyedPartitionStreamImpl<>(stream);
    return configurableStream;
}
|
Wrap a {@link NonKeyedPartitionStreamImpl} with configure handle.
|
wrapWithConfigureHandle
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
Apache-2.0
|
/** Wraps a {@link KeyedPartitionStreamImpl} with a configure handle. */
public static <K, T> ProcessConfigurableAndKeyedPartitionStream<K, T> wrapWithConfigureHandle(
        KeyedPartitionStreamImpl<K, T> stream) {
    final ProcessConfigurableAndKeyedPartitionStreamImpl<K, T> configurableStream =
            new ProcessConfigurableAndKeyedPartitionStreamImpl<>(stream);
    return configurableStream;
}
|
Wrap a {@link KeyedPartitionStreamImpl} with configure handle.
|
wrapWithConfigureHandle
|
java
|
apache/flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/main/java/org/apache/flink/datastream/impl/utils/StreamUtils.java
|
Apache-2.0
|
/** Creates an execution environment by delegating to the configured factory function. */
@Override
public ExecutionEnvironment createExecutionEnvironment(Configuration configuration) {
    final ExecutionEnvironment environment =
            createExecutionEnvironmentFunction.apply(configuration);
    return environment;
}
|
Mock implementation of {@link ExecutionEnvironmentFactory} for testing.
|
createExecutionEnvironment
|
java
|
apache/flink
|
flink-datastream/src/test/java/org/apache/flink/datastream/impl/TestingExecutionEnvironmentFactory.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/test/java/org/apache/flink/datastream/impl/TestingExecutionEnvironmentFactory.java
|
Apache-2.0
|
/**
 * Creates a {@link StreamingRuntimeContext} backed by a mock streaming environment for
 * tests (parallelism 2, subtask index 1, job "mockJob"/task "mockTask").
 */
public static StreamingRuntimeContext createStreamingRuntimeContext() {
    return new MockStreamingRuntimeContext(
            // NOTE(review): the meaning of these three leading arguments (boolean, 2, 1) is
            // not visible here — confirm against the MockStreamingRuntimeContext
            // constructor; presumably (checkpointing enabled, parallelism, subtask index).
            false,
            2,
            1,
            new MockEnvironmentBuilder()
                    .setTaskName("mockTask")
                    .setManagedMemorySize(4 * MemoryManager.DEFAULT_PAGE_SIZE)
                    .setParallelism(2)
                    .setMaxParallelism(2)
                    .setSubtaskIndex(1)
                    .setJobType(JobType.STREAMING)
                    .setJobName("mockJob")
                    .build());
}
|
Test utils for things related to context.
|
createStreamingRuntimeContext
|
java
|
apache/flink
|
flink-datastream/src/test/java/org/apache/flink/datastream/impl/context/ContextTestUtils.java
|
https://github.com/apache/flink/blob/master/flink-datastream/src/test/java/org/apache/flink/datastream/impl/context/ContextTestUtils.java
|
Apache-2.0
|
/**
 * Gets the execution environment instance.
 *
 * <p>The implementation class is loaded reflectively so the API module carries no compile
 * time dependency on the implementation module.
 *
 * @return an {@link ExecutionEnvironment} instance
 * @throws ReflectiveOperationException if the implementation class or its factory method
 *     cannot be resolved or invoked
 */
static ExecutionEnvironment getInstance() throws ReflectiveOperationException {
    final Class<?> implClass =
            Class.forName("org.apache.flink.datastream.impl.ExecutionEnvironmentImpl");
    final Object instance = implClass.getMethod("newInstance").invoke(null);
    return (ExecutionEnvironment) instance;
}
|
Get the execution environment instance.
@return A {@link ExecutionEnvironment} instance.
|
getInstance
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/ExecutionEnvironment.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/ExecutionEnvironment.java
|
Apache-2.0
|
/**
 * Wraps the given {@link JoinFunction} in a process function performing an INNER join. The
 * wrapped process function should only be used with keyed streams.
 *
 * @param joinFunction the join function to wrap
 * @return a process function performing the inner join
 */
public static <IN1, IN2, OUT> TwoInputNonBroadcastStreamProcessFunction<IN1, IN2, OUT> join(
        JoinFunction<IN1, IN2, OUT> joinFunction) {
    // Delegate to the general variant with default INNER join semantics.
    final TwoInputNonBroadcastStreamProcessFunction<IN1, IN2, OUT> innerJoin =
            join(joinFunction, JoinType.INNER);
    return innerJoin;
}
|
Wrap the JoinFunction and INNER JoinType within a ProcessFunction to perform the Join
operation. Note that the wrapped process function should only be used with KeyedStream.
|
join
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java
|
Apache-2.0
|
/**
 * Joins two {@link KeyedPartitionStream}s with the given {@link JoinType}.
 *
 * @param leftStream the left keyed stream
 * @param rightStream the right keyed stream
 * @param joinFunction the join function applied to matching pairs
 * @param joinType the type of join to perform
 * @return the joined (non-keyed) result stream
 */
public static <KEY, T1, T2, OUT> NonKeyedPartitionStream<OUT> join(
        KeyedPartitionStream<KEY, T1> leftStream,
        KeyedPartitionStream<KEY, T2> rightStream,
        JoinFunction<T1, T2, OUT> joinFunction,
        JoinType joinType) {
    // Wrap the join function as a two-input process function, then connect both streams.
    final TwoInputNonBroadcastStreamProcessFunction<T1, T2, OUT> joinProcessFunction =
            join(joinFunction, joinType);
    return leftStream.connectAndProcess(rightStream, joinProcessFunction);
}
|
Join two {@link KeyedPartitionStream} with the type of {@link JoinType}.
|
join
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java
|
Apache-2.0
|
/**
 * Inner-joins two {@link NonKeyedPartitionStream}s. Both streams are redistributed by their
 * respective {@link KeySelector}s before joining.
 *
 * @param leftStream the left stream
 * @param leftKeySelector key selector for the left stream
 * @param rightStream the right stream
 * @param rightKeySelector key selector for the right stream
 * @param joinFunction the join function applied to matching pairs
 * @return the joined (non-keyed) result stream
 */
public static <KEY, T1, T2, OUT> NonKeyedPartitionStream<OUT> join(
        NonKeyedPartitionStream<T1> leftStream,
        KeySelector<T1, KEY> leftKeySelector,
        NonKeyedPartitionStream<T2> rightStream,
        KeySelector<T2, KEY> rightKeySelector,
        JoinFunction<T1, T2, OUT> joinFunction) {
    // Delegate to the general variant with default INNER join semantics.
    final JoinType defaultJoinType = JoinType.INNER;
    return join(
            leftStream,
            leftKeySelector,
            rightStream,
            rightKeySelector,
            joinFunction,
            defaultJoinType);
}
|
Inner join two {@link NonKeyedPartitionStream}. The two streams will be redistributed by
{@link KeySelector} respectively.
|
join
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java
|
Apache-2.0
|
/**
 * Joins two {@link NonKeyedPartitionStream}s with the given {@link JoinType}. Both streams
 * are redistributed by their respective {@link KeySelector}s before joining.
 *
 * @param leftStream the left stream
 * @param leftKeySelector key selector for the left stream
 * @param rightStream the right stream
 * @param rightKeySelector key selector for the right stream
 * @param joinFunction the join function applied to matching pairs
 * @param joinType the type of join to perform
 * @return the joined (non-keyed) result stream
 */
public static <KEY, T1, T2, OUT> NonKeyedPartitionStream<OUT> join(
        NonKeyedPartitionStream<T1> leftStream,
        KeySelector<T1, KEY> leftKeySelector,
        NonKeyedPartitionStream<T2> rightStream,
        KeySelector<T2, KEY> rightKeySelector,
        JoinFunction<T1, T2, OUT> joinFunction,
        JoinType joinType) {
    // Key both sides first, then delegate to the keyed-stream join variant.
    final KeyedPartitionStream<KEY, T1> keyedLeft = leftStream.keyBy(leftKeySelector);
    final KeyedPartitionStream<KEY, T2> keyedRight = rightStream.keyBy(rightKeySelector);
    return join(keyedLeft, keyedRight, joinFunction, joinType);
}
|
Join two {@link NonKeyedPartitionStream} with the type of {@link JoinType}. The two streams
will be redistributed by {@link KeySelector} respectively.
|
join
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java
|
Apache-2.0
|
/**
 * Wraps the given {@link WindowStrategy} and window process function into a
 * OneInputStreamProcessFunction performing the window operation. The wrapping is done
 * reflectively via the window-funcs implementation class.
 *
 * @param windowStrategy the window strategy
 * @param windowProcessFunction the window process function
 * @return the wrapped process function
 * @throws RuntimeException if the reflective lookup or invocation fails
 */
public static <IN, OUT> OneInputStreamProcessFunction<IN, OUT> window(
        WindowStrategy windowStrategy,
        OneInputWindowStreamProcessFunction<IN, OUT> windowProcessFunction) {
    try {
        final Object wrapped =
                WINDOW_FUNCS_INSTANCE
                        .getMethod(
                                "window",
                                WindowStrategy.class,
                                OneInputWindowStreamProcessFunction.class)
                        .invoke(null, windowStrategy, windowProcessFunction);
        return (OneInputStreamProcessFunction<IN, OUT>) wrapped;
    } catch (Exception reflectionFailure) {
        throw new RuntimeException(reflectionFailure);
    }
}
|
Wrap the WindowStrategy and OneInputWindowStreamProcessFunction within a
OneInputStreamProcessFunction to perform the window operation.
@param windowStrategy the window strategy
@param windowProcessFunction the window process function
@return the wrapped process function
|
window
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java
|
Apache-2.0
|
/**
 * Wraps the given {@link WindowStrategy} and two-input window process function into a
 * TwoInputNonBroadcastStreamProcessFunction performing the window operation. The wrapping is
 * done reflectively via the window-funcs implementation class.
 *
 * @param windowStrategy the window strategy
 * @param windowProcessFunction the window process function
 * @return the wrapped process function
 * @throws RuntimeException if the reflective lookup or invocation fails
 */
public static <IN1, IN2, OUT> TwoInputNonBroadcastStreamProcessFunction<IN1, IN2, OUT> window(
        WindowStrategy windowStrategy,
        TwoInputNonBroadcastWindowStreamProcessFunction<IN1, IN2, OUT> windowProcessFunction) {
    try {
        final Object wrapped =
                WINDOW_FUNCS_INSTANCE
                        .getMethod(
                                "window",
                                WindowStrategy.class,
                                TwoInputNonBroadcastWindowStreamProcessFunction.class)
                        .invoke(null, windowStrategy, windowProcessFunction);
        return (TwoInputNonBroadcastStreamProcessFunction<IN1, IN2, OUT>) wrapped;
    } catch (Exception reflectionFailure) {
        throw new RuntimeException(reflectionFailure);
    }
}
|
Wrap the WindowStrategy and TwoInputNonBroadcastWindowStreamProcessFunction within a
TwoInputNonBroadcastStreamProcessFunction to perform the window operation.
@param windowStrategy the window strategy
@param windowProcessFunction the window process function
@return the wrapped process function
|
window
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java
|
Apache-2.0
|
/**
 * Wraps the given {@link WindowStrategy} and two-output window process function into a
 * TwoOutputStreamProcessFunction performing the window operation. The wrapping is done
 * reflectively via the window-funcs implementation class.
 *
 * @param windowStrategy the window strategy
 * @param windowProcessFunction the window process function
 * @return the wrapped process function
 * @throws RuntimeException if the reflective lookup or invocation fails
 */
public static <IN, OUT1, OUT2> TwoOutputStreamProcessFunction<IN, OUT1, OUT2> window(
        WindowStrategy windowStrategy,
        TwoOutputWindowStreamProcessFunction<IN, OUT1, OUT2> windowProcessFunction) {
    try {
        final Object wrapped =
                WINDOW_FUNCS_INSTANCE
                        .getMethod(
                                "window",
                                WindowStrategy.class,
                                TwoOutputWindowStreamProcessFunction.class)
                        .invoke(null, windowStrategy, windowProcessFunction);
        return (TwoOutputStreamProcessFunction<IN, OUT1, OUT2>) wrapped;
    } catch (Exception reflectionFailure) {
        throw new RuntimeException(reflectionFailure);
    }
}
|
Wrap the WindowStrategy and TwoOutputWindowStreamProcessFunction within a
TwoOutputStreamProcessFunction to perform the window operation.
@param windowStrategy the window strategy
@param windowProcessFunction the window process function
@return the wrapped process function
|
window
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java
|
Apache-2.0
|
/**
 * Determines whether the received watermark is an event-time watermark.
 *
 * @param watermark the watermark to be checked
 * @return true if the watermark is an event-time watermark; false otherwise
 */
public static boolean isEventTimeWatermark(Watermark watermark) {
    final String identifier = watermark.getIdentifier();
    return isEventTimeWatermark(identifier);
}
|
Determine if the received watermark is an EventTimeWatermark.
@param watermark The watermark to be checked.
@return true if the watermark is an EventTimeWatermark; false otherwise.
|
isEventTimeWatermark
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
Apache-2.0
|
/**
 * Determines whether a watermark identifier denotes an event-time watermark.
 *
 * @param watermarkIdentifier the identifier of the watermark to be checked
 * @return true if the identifier matches the event-time watermark declaration
 */
public static boolean isEventTimeWatermark(String watermarkIdentifier) {
    final String eventTimeIdentifier = EVENT_TIME_WATERMARK_DECLARATION.getIdentifier();
    return watermarkIdentifier.equals(eventTimeIdentifier);
}
|
Determine if the received watermark is an EventTimeWatermark by watermark identifier.
@param watermarkIdentifier The identifier of the watermark to be checked.
@return true if the watermark is an EventTimeWatermark; false otherwise.
|
isEventTimeWatermark
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
Apache-2.0
|
/**
 * Determines whether the received watermark is an idle-status watermark.
 *
 * @param watermark the watermark to be checked
 * @return true if the watermark is an idle-status watermark; false otherwise
 */
public static boolean isIdleStatusWatermark(Watermark watermark) {
    final String identifier = watermark.getIdentifier();
    return isIdleStatusWatermark(identifier);
}
|
Determine if the received watermark is an IdleStatusWatermark.
@param watermark The watermark to be checked.
@return true if the watermark is an IdleStatusWatermark; false otherwise.
|
isIdleStatusWatermark
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
Apache-2.0
|
/**
 * Determines whether a watermark identifier denotes an idle-status watermark.
 *
 * @param watermarkIdentifier the identifier of the watermark to be checked
 * @return true if the identifier matches the idle-status watermark declaration
 */
public static boolean isIdleStatusWatermark(String watermarkIdentifier) {
    final String idleStatusIdentifier = IDLE_STATUS_WATERMARK_DECLARATION.getIdentifier();
    return watermarkIdentifier.equals(idleStatusIdentifier);
}
|
Determine if the received watermark is an IdleStatusWatermark by watermark identifier.
@param watermarkIdentifier The identifier of the watermark to be checked.
@return true if the watermark is an IdleStatusWatermark; false otherwise.
|
isIdleStatusWatermark
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
Apache-2.0
|
/**
 * Creates an {@link EventTimeWatermarkGeneratorBuilder} wrapping the given extractor.
 *
 * @param eventTimeExtractor extractor producing event-time values from data records
 * @param <T> the type of data records
 * @return a builder containing the specified event-time extractor
 */
public static <T> EventTimeWatermarkGeneratorBuilder<T> newWatermarkGeneratorBuilder(
        EventTimeExtractor<T> eventTimeExtractor) {
    final EventTimeWatermarkGeneratorBuilder<T> builder =
            new EventTimeWatermarkGeneratorBuilder<>(eventTimeExtractor);
    return builder;
}
|
Create an instance of {@link EventTimeWatermarkGeneratorBuilder}, which contains a {@code
EventTimeExtractor}.
@param eventTimeExtractor An instance of {@code EventTimeExtractor} used to extract event
time information from data records.
@param <T> The type of data records.
@return An instance of {@code EventTimeWatermarkGeneratorBuilder} containing the specified
event time extractor.
|
newWatermarkGeneratorBuilder
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
Apache-2.0
|
/**
 * Wraps a user-defined {@link OneInputEventTimeStreamProcessFunction} so it gets event-time
 * components (e.g. {@link EventTimeManager}) and the built-in state its timers require. The
 * wrapping is done reflectively via the event-time extension implementation class. Note that
 * registering event timers can only be used with {@link KeyedPartitionStream}.
 *
 * @param processFunction the user-defined process function to wrap
 * @return the wrapped {@link OneInputStreamProcessFunction}
 * @throws RuntimeException if the reflective lookup or invocation fails
 */
public static <IN, OUT> OneInputStreamProcessFunction<IN, OUT> wrapProcessFunction(
        OneInputEventTimeStreamProcessFunction<IN, OUT> processFunction) {
    try {
        final Object wrapped =
                getEventTimeExtensionImplClass()
                        .getMethod(
                                "wrapProcessFunction",
                                OneInputEventTimeStreamProcessFunction.class)
                        .invoke(null, processFunction);
        return (OneInputStreamProcessFunction<IN, OUT>) wrapped;
    } catch (Exception reflectionFailure) {
        throw new RuntimeException(reflectionFailure);
    }
}
|
Wrap the user-defined {@link OneInputEventTimeStreamProcessFunction}, which will provide
related components such as {@link EventTimeManager} and declare the necessary built-in state
required for the Timer, etc. Note that registering event timers of {@link
EventTimeProcessFunction} can only be used with {@link KeyedPartitionStream}.
@param processFunction The user-defined {@link OneInputEventTimeStreamProcessFunction} that
needs to be wrapped.
@return The wrapped {@link OneInputStreamProcessFunction}.
|
wrapProcessFunction
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
Apache-2.0
|
/**
 * Wraps a user-defined {@link TwoOutputEventTimeStreamProcessFunction} so it gets event-time
 * components (e.g. {@link EventTimeManager}) and the built-in state its timers require. The
 * wrapping is done reflectively via the event-time extension implementation class. Note that
 * registering event timers can only be used with {@link KeyedPartitionStream}.
 *
 * @param processFunction the user-defined process function to wrap
 * @return the wrapped {@link TwoOutputStreamProcessFunction}
 * @throws RuntimeException if the reflective lookup or invocation fails
 */
public static <IN, OUT1, OUT2>
        TwoOutputStreamProcessFunction<IN, OUT1, OUT2> wrapProcessFunction(
                TwoOutputEventTimeStreamProcessFunction<IN, OUT1, OUT2> processFunction) {
    try {
        final Object wrapped =
                getEventTimeExtensionImplClass()
                        .getMethod(
                                "wrapProcessFunction",
                                TwoOutputEventTimeStreamProcessFunction.class)
                        .invoke(null, processFunction);
        return (TwoOutputStreamProcessFunction<IN, OUT1, OUT2>) wrapped;
    } catch (Exception reflectionFailure) {
        throw new RuntimeException(reflectionFailure);
    }
}
|
Wrap the user-defined {@link TwoOutputEventTimeStreamProcessFunction}, which will provide related
components such as {@link EventTimeManager} and declare the necessary built-in state required
for the Timer, etc. Note that registering event timers of {@link EventTimeProcessFunction}
can only be used with {@link KeyedPartitionStream}.
@param processFunction The user-defined {@link TwoOutputEventTimeStreamProcessFunction} that
needs to be wrapped.
@return The wrapped {@link TwoOutputStreamProcessFunction}.
|
wrapProcessFunction
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
Apache-2.0
|
/**
 * Wraps a user-defined {@link TwoInputNonBroadcastEventTimeStreamProcessFunction} so it gets
 * event-time components (e.g. {@link EventTimeManager}) and the built-in state its timers
 * require. The wrapping is done reflectively via the event-time extension implementation
 * class. Note that registering event timers can only be used with {@link
 * KeyedPartitionStream}.
 *
 * @param processFunction the user-defined process function to wrap
 * @return the wrapped {@link TwoInputNonBroadcastStreamProcessFunction}
 * @throws RuntimeException if the reflective lookup or invocation fails
 */
public static <IN1, IN2, OUT>
        TwoInputNonBroadcastStreamProcessFunction<IN1, IN2, OUT> wrapProcessFunction(
                TwoInputNonBroadcastEventTimeStreamProcessFunction<IN1, IN2, OUT>
                        processFunction) {
    try {
        final Object wrapped =
                getEventTimeExtensionImplClass()
                        .getMethod(
                                "wrapProcessFunction",
                                TwoInputNonBroadcastEventTimeStreamProcessFunction.class)
                        .invoke(null, processFunction);
        return (TwoInputNonBroadcastStreamProcessFunction<IN1, IN2, OUT>) wrapped;
    } catch (Exception reflectionFailure) {
        throw new RuntimeException(reflectionFailure);
    }
}
|
Wrap the user-defined {@link TwoInputNonBroadcastEventTimeStreamProcessFunction}, which will
provide related components such as {@link EventTimeManager} and declare the necessary
built-in state required for the Timer, etc. Note that registering event timers of {@link
EventTimeProcessFunction} can only be used with {@link KeyedPartitionStream}.
@param processFunction The user-defined {@link
TwoInputNonBroadcastEventTimeStreamProcessFunction} that needs to be wrapped.
@return The wrapped {@link TwoInputNonBroadcastStreamProcessFunction}.
|
wrapProcessFunction
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
Apache-2.0
|
/**
 * Wraps the user-defined {@link TwoInputBroadcastEventTimeStreamProcessFunction} so that
 * event-time components (such as the {@link EventTimeManager}) and the built-in timer state
 * are provided to it. Note that registering event timers of {@link EventTimeProcessFunction}
 * can only be used with {@link KeyedPartitionStream}.
 *
 * @param processFunction the user-defined function to wrap
 * @return the wrapped {@link TwoInputBroadcastStreamProcessFunction}
 */
public static <IN1, IN2, OUT>
        TwoInputBroadcastStreamProcessFunction<IN1, IN2, OUT> wrapProcessFunction(
                TwoInputBroadcastEventTimeStreamProcessFunction<IN1, IN2, OUT>
                        processFunction) {
    try {
        // The implementation lives in flink-datastream, which is not a compile-time
        // dependency of this API module, so it is resolved and invoked reflectively.
        final Class<?> implClass = getEventTimeExtensionImplClass();
        final Object wrapped =
                implClass
                        .getMethod(
                                "wrapProcessFunction",
                                TwoInputBroadcastEventTimeStreamProcessFunction.class)
                        .invoke(null, processFunction);
        return (TwoInputBroadcastStreamProcessFunction<IN1, IN2, OUT>) wrapped;
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
|
Wrap the user-defined {@link TwoInputBroadcastEventTimeStreamProcessFunction}, which will
provide related components such as {@link EventTimeManager} and declare the necessary
built-in state required for the Timer, etc. Note that registering event timers of {@link
EventTimeProcessFunction} can only be used with {@link KeyedPartitionStream}.
@param processFunction The user-defined {@link
TwoInputBroadcastEventTimeStreamProcessFunction} that needs to be wrapped.
@return The wrapped {@link TwoInputBroadcastStreamProcessFunction}.
|
wrapProcessFunction
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
Apache-2.0
|
/**
 * Resolves the runtime implementation class of the event-time extension via reflection.
 *
 * <p>The implementation lives in the {@code flink-datastream} module, which is deliberately
 * not a compile-time dependency of this API module.
 *
 * @return the {@code EventTimeExtensionImpl} class
 * @throws RuntimeException if the implementation class is not on the classpath
 */
private static Class<?> getEventTimeExtensionImplClass() {
    try {
        return Class.forName(
                "org.apache.flink.datastream.impl.extension.eventtime.EventTimeExtensionImpl");
    } catch (ClassNotFoundException e) {
        // Preserve the original exception as the cause for easier troubleshooting.
        throw new RuntimeException(
                "Please ensure that flink-datastream is in your class path.", e);
    }
}
|
Get the implementation class of EventTimeExtension.
|
getEventTimeExtensionImplClass
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java
|
Apache-2.0
|
/**
 * Invoked for each record received from input1.
 *
 * <p>The default behavior stores the record in the built-in window state via {@link
 * TwoInputWindowContext#putRecord1}. Implementations that override this method must take care
 * of the input data themselves.
 *
 * @param record the input record from input1
 * @param output the collector for emitting results
 * @param ctx the partitioned context
 * @param windowContext the window context holding the window's state
 * @throws Exception if the record cannot be processed
 */
default void onRecord1(
        IN1 record,
        Collector<OUT> output,
        PartitionedContext<OUT> ctx,
        TwoInputWindowContext<IN1, IN2> windowContext)
        throws Exception {
    windowContext.putRecord1(record);
}
|
This method will be invoked when a record is received from input1. Its default behavior is to
store data in built-in window state by {@link TwoInputWindowContext#putRecord1}. If the user
overrides this method, they have to take care of the input data themselves.
|
onRecord1
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/function/TwoInputNonBroadcastWindowStreamProcessFunction.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/function/TwoInputNonBroadcastWindowStreamProcessFunction.java
|
Apache-2.0
|
/**
 * Invoked for each record received from input2.
 *
 * <p>The default behavior stores the record in the built-in window state via {@link
 * TwoInputWindowContext#putRecord2}. Implementations that override this method must take care
 * of the input data themselves.
 *
 * @param record the input record from input2
 * @param output the collector for emitting results
 * @param ctx the partitioned context
 * @param windowContext the window context holding the window's state
 * @throws Exception if the record cannot be processed
 */
default void onRecord2(
        IN2 record,
        Collector<OUT> output,
        PartitionedContext<OUT> ctx,
        TwoInputWindowContext<IN1, IN2> windowContext)
        throws Exception {
    windowContext.putRecord2(record);
}
|
This method will be invoked when a record is received from input2. Its default behavior is to
store data in built-in window state by {@link TwoInputWindowContext#putRecord2}. If the user
overrides this method, they have to take care of the input data themselves.
|
onRecord2
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/function/TwoInputNonBroadcastWindowStreamProcessFunction.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/function/TwoInputNonBroadcastWindowStreamProcessFunction.java
|
Apache-2.0
|
/**
 * Invoked when a record is received from input1 after its window has been cleaned up.
 *
 * <p>The default behavior silently drops such late records; override to handle them.
 *
 * @param record the late input record from input1
 * @param output the collector for emitting results
 * @param ctx the partitioned context
 * @throws Exception if the late record cannot be processed
 */
default void onLateRecord1(IN1 record, Collector<OUT> output, PartitionedContext<OUT> ctx)
        throws Exception {}
|
This method will be invoked when a record is received from input1 after the window has been
cleaned.
|
onLateRecord1
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/function/TwoInputNonBroadcastWindowStreamProcessFunction.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/function/TwoInputNonBroadcastWindowStreamProcessFunction.java
|
Apache-2.0
|
/**
 * Invoked when a record is received from input2 after its window has been cleaned up.
 *
 * <p>The default behavior silently drops such late records; override to handle them.
 *
 * @param record the late input record from input2
 * @param output the collector for emitting results
 * @param ctx the partitioned context
 * @throws Exception if the late record cannot be processed
 */
default void onLateRecord2(IN2 record, Collector<OUT> output, PartitionedContext<OUT> ctx)
        throws Exception {}
|
This method will be invoked when a record is received from input2 after the window has been
cleaned.
|
onLateRecord2
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/function/TwoInputNonBroadcastWindowStreamProcessFunction.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/function/TwoInputNonBroadcastWindowStreamProcessFunction.java
|
Apache-2.0
|
/**
 * Explicitly declares the states that are bound to the window. Each window state must be
 * declared here before it can be used.
 *
 * @return all declared window states used by this process function; empty by default
 */
default Set<StateDeclaration> useWindowStates() {
    return Collections.emptySet();
}
|
Explicitly declares states that are bound to the window. Each specific window state must be
declared in this method before it can be used.
@return all declared window states used by this process function.
|
useWindowStates
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/function/WindowProcessFunction.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/function/WindowProcessFunction.java
|
Apache-2.0
|
/**
 * Creates a global window strategy. The global window can be used with GlobalStream,
 * KeyedStream and NonKeyedStream.
 *
 * @return a global window strategy
 */
public static WindowStrategy global() {
    return new GlobalWindowStrategy();
}
|
Creates a global window strategy. Note that the global window can be used in both
GlobalStream, KeyedStream, NonKeyedStream.
@return A global window strategy.
|
global
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
Apache-2.0
|
/**
 * Creates a tumbling time window strategy with the default (event-time) time type.
 *
 * <p>Tumbling time windows can be used in KeyedStream and GlobalStream. On a GlobalStream the
 * stream is first converted into a KeyedStream with a constant key of zero.
 *
 * @param windowSize the size of the window
 * @return a tumbling time window strategy
 */
public static WindowStrategy tumbling(Duration windowSize) {
    return new TumblingTimeWindowStrategy(windowSize);
}
|
Create a tumbling time window strategy with the event time default time type. Note that
tumbling time windows can be used in KeyedStream and GlobalStream. If tumbling time window is
used in a GlobalStream, it will convert the GlobalStream into a KeyedStream with a Key of
zero, and then use the converted KeyedStream to execute the window.
@param windowSize the size of Window.
@return A tumbling time window strategy.
|
tumbling
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
Apache-2.0
|
/**
 * Creates a tumbling time window strategy with an explicit time type.
 *
 * <p>Tumbling time windows can be used in KeyedStream and GlobalStream. On a GlobalStream the
 * stream is first converted into a KeyedStream with a constant key of zero.
 *
 * @param windowSize the size of the window
 * @param timeType the time type of the window
 * @return a tumbling time window strategy
 */
public static WindowStrategy tumbling(Duration windowSize, TimeType timeType) {
    return new TumblingTimeWindowStrategy(windowSize, timeType);
}
|
Create a tumbling time window strategy. Note that tumbling time windows can be used in
KeyedStream and GlobalStream. If tumbling time window is used in a GlobalStream, it will
convert the GlobalStream into a KeyedStream with a Key of zero, and then use the converted
KeyedStream to execute the window.
@param windowSize the size of Window.
@param timeType the time type of Window.
@return A tumbling time window strategy.
|
tumbling
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
Apache-2.0
|
/**
 * Creates a tumbling time window strategy with an explicit time type and allowed lateness.
 *
 * <p>Tumbling time windows can be used in KeyedStream and GlobalStream. On a GlobalStream the
 * stream is first converted into a KeyedStream with a constant key of zero.
 *
 * @param windowSize the size of the window
 * @param timeType the time type of the window
 * @param allowedLateness the allowed lateness of the window
 * @return a tumbling time window strategy
 */
public static WindowStrategy tumbling(
        Duration windowSize, TimeType timeType, Duration allowedLateness) {
    return new TumblingTimeWindowStrategy(windowSize, timeType, allowedLateness);
}
|
Create a tumbling time window strategy. Note that tumbling time windows can be used in
KeyedStream and GlobalStream. If tumbling time window is used in a GlobalStream, it will
convert the GlobalStream into a KeyedStream with a Key of zero, and then use the converted
KeyedStream to execute the window.
@param windowSize the size of Window.
@param timeType the time type of Window.
@param allowedLateness the allowed lateness of Window.
@return A tumbling time window strategy.
|
tumbling
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
Apache-2.0
|
/**
 * Creates a sliding time window strategy with the default (event-time) time type.
 *
 * <p>Sliding time windows can be used in KeyedStream and GlobalStream. On a GlobalStream the
 * stream is first converted into a KeyedStream with a constant key of zero.
 *
 * @param windowSize the size of the window
 * @param windowSlideInterval the slide interval of the window
 * @return a sliding time window strategy
 */
public static WindowStrategy sliding(Duration windowSize, Duration windowSlideInterval) {
    return new SlidingTimeWindowStrategy(windowSize, windowSlideInterval);
}
|
Create a sliding time window strategy with the event time default time type. Note that
sliding time windows can be used in KeyedStream and GlobalStream. If sliding time window is
used in a GlobalStream, it will convert the GlobalStream into a KeyedStream with a Key of
zero, and then use the converted KeyedStream to execute the window.
@param windowSize the size of Window.
@param windowSlideInterval the slide interval of Window.
@return A sliding time window strategy.
|
sliding
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
Apache-2.0
|
/**
 * Creates a sliding time window strategy with an explicit time type.
 *
 * <p>Sliding time windows can be used in KeyedStream and GlobalStream. On a GlobalStream the
 * stream is first converted into a KeyedStream with a constant key of zero.
 *
 * @param windowSize the size of the window
 * @param windowSlideInterval the slide interval of the window
 * @param timeType the time type of the window
 * @return a sliding time window strategy
 */
public static WindowStrategy sliding(
        Duration windowSize, Duration windowSlideInterval, TimeType timeType) {
    return new SlidingTimeWindowStrategy(windowSize, windowSlideInterval, timeType);
}
|
Create a sliding time window strategy. Note that sliding time windows can be used in
KeyedStream and GlobalStream. If sliding time window is used in a GlobalStream, it will
convert the GlobalStream into a KeyedStream with a Key of zero, and then use the converted
KeyedStream to execute the window.
@param windowSize the size of Window.
@param windowSlideInterval the slide interval of Window.
@param timeType the time type of Window.
@return A sliding time window strategy.
|
sliding
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
Apache-2.0
|
/**
 * Creates a sliding time window strategy with an explicit time type and allowed lateness.
 *
 * <p>Sliding time windows can be used in KeyedStream and GlobalStream. On a GlobalStream the
 * stream is first converted into a KeyedStream with a constant key of zero.
 *
 * @param windowSize the size of the window
 * @param windowSlideInterval the slide interval of the window
 * @param timeType the time type of the window
 * @param allowedLateness the allowed lateness of the window
 * @return a sliding time window strategy
 */
public static WindowStrategy sliding(
        Duration windowSize,
        Duration windowSlideInterval,
        TimeType timeType,
        Duration allowedLateness) {
    return new SlidingTimeWindowStrategy(
            windowSize, windowSlideInterval, timeType, allowedLateness);
}
|
Create a sliding time window strategy. Note that sliding time windows can be used in
KeyedStream and GlobalStream. If sliding time window is used in a GlobalStream, it will
convert the GlobalStream into a KeyedStream with a Key of zero, and then use the converted
KeyedStream to execute the window.
@param windowSize the size of Window.
@param windowSlideInterval the slide interval of Window.
@param timeType the time type of Window.
@param allowedLateness the allowed lateness of Window.
@return A sliding time window strategy.
|
sliding
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
Apache-2.0
|
/**
 * Creates a session window strategy with the default (event-time) time type.
 *
 * <p>Session windows can be used in KeyedStream and GlobalStream. On a GlobalStream the
 * stream is first converted into a KeyedStream with a constant key of zero.
 *
 * @param sessionGap the timeout after which a session closes
 * @return a session window strategy
 */
public static WindowStrategy session(Duration sessionGap) {
    return new SessionWindowStrategy(sessionGap);
}
|
Create a session time window strategy with the event time default time type. Note that
session time windows can be used in KeyedStream and GlobalStream. If session time window is
used in a GlobalStream, it will convert the GlobalStream into a KeyedStream with a Key of
zero, and then use the converted KeyedStream to execute the window.
@param sessionGap the timeout of session.
@return A session window strategy.
|
session
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
Apache-2.0
|
/**
 * Creates a session window strategy with an explicit time type.
 *
 * <p>Session windows can be used in KeyedStream and GlobalStream. On a GlobalStream the
 * stream is first converted into a KeyedStream with a constant key of zero.
 *
 * @param sessionGap the timeout after which a session closes
 * @param timeType the time type of the window
 * @return a session window strategy
 */
public static WindowStrategy session(Duration sessionGap, TimeType timeType) {
    return new SessionWindowStrategy(sessionGap, timeType);
}
|
Create a session time window strategy. Note that session time windows can be used in
KeyedStream and GlobalStream. If session time window is used in a GlobalStream, it will
convert the GlobalStream into a KeyedStream with a Key of zero, and then use the converted
KeyedStream to execute the window.
@param sessionGap the timeout of session.
@param timeType the time type of Window.
@return A session window strategy.
|
session
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/window/strategy/WindowStrategy.java
|
Apache-2.0
|
/**
 * Explicitly declares states upfront. Each state must be declared here before it can be used
 * by the process function.
 *
 * @return all declared states used by this process function; empty by default
 */
default Set<StateDeclaration> usesStates() {
    return Collections.emptySet();
}
|
Explicitly declares states upfront. Each specific state must be declared in this method
before it can be used.
@return all declared states used by this process function.
|
usesStates
|
java
|
apache/flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/function/ProcessFunction.java
|
https://github.com/apache/flink/blob/master/flink-datastream-api/src/main/java/org/apache/flink/datastream/api/function/ProcessFunction.java
|
Apache-2.0
|
/** Fails fast on Windows, since the tests in this class execute bash scripts. */
@BeforeAll
static void checkOperatingSystem() {
    final boolean runningOnWindows = OperatingSystem.isWindows();
    assertThat(runningOnWindows)
            .as("This test checks shell scripts which are not available on Windows.")
            .isFalse();
}
|
Abstract test class for executing bash scripts.
|
checkOperatingSystem
|
java
|
apache/flink
|
flink-dist/src/test/java/org/apache/flink/dist/JavaBashTestBase.java
|
https://github.com/apache/flink/blob/master/flink-dist/src/test/java/org/apache/flink/dist/JavaBashTestBase.java
|
Apache-2.0
|
/**
 * Runs the given command and returns its combined stdout/stderr output.
 *
 * <p>NOTE(review): the exit code is not checked, and the process is only awaited implicitly
 * by reading its output stream to EOF. The charset used by {@code IOUtils.toString} appears
 * to be the platform default — TODO confirm this is acceptable for the scripts under test.
 *
 * @param command command to run
 * @return raw script output (stdout and stderr interleaved)
 * @throws IOException if the process cannot be started or its output cannot be read
 */
protected String executeScript(final String[] command) throws IOException {
    ProcessBuilder pb = new ProcessBuilder(command);
    // Merge stderr into stdout so the caller sees all script output in one stream.
    pb.redirectErrorStream(true);
    Process process = pb.start();
    return IOUtils.toString(process.getInputStream());
}
|
Executes the given shell script wrapper and returns its output.
@param command command to run
@return raw script output
|
executeScript
|
java
|
apache/flink
|
flink-dist/src/test/java/org/apache/flink/dist/JavaBashTestBase.java
|
https://github.com/apache/flink/blob/master/flink-dist/src/test/java/org/apache/flink/dist/JavaBashTestBase.java
|
Apache-2.0
|
/**
 * Transforms this configuration group into an HTML formatted table.
 *
 * <p>NOTE(review): the method renders options in the order of the given list; any sorting is
 * presumably done by the caller — confirm against call sites.
 *
 * @param options list of options to include in this group
 * @return string containing the HTML formatted table
 */
private static String toHtmlTable(final List<OptionWithMetaInfo> options) {
    StringBuilder htmlTable = new StringBuilder();
    // Fixed table header; column widths must add up to 100%.
    htmlTable.append("<table class=\"configuration table table-bordered\">\n");
    htmlTable.append("    <thead>\n");
    htmlTable.append("        <tr>\n");
    htmlTable.append("            <th class=\"text-left\" style=\"width: 20%\">Key</th>\n");
    htmlTable.append("            <th class=\"text-left\" style=\"width: 15%\">Default</th>\n");
    htmlTable.append("            <th class=\"text-left\" style=\"width: 10%\">Type</th>\n");
    htmlTable.append(
            "            <th class=\"text-left\" style=\"width: 55%\">Description</th>\n");
    htmlTable.append("        </tr>\n");
    htmlTable.append("    </thead>\n");
    htmlTable.append("    <tbody>\n");
    // One <tr> per option, rendered by toHtmlString.
    for (OptionWithMetaInfo option : options) {
        htmlTable.append(toHtmlString(option));
    }
    htmlTable.append("    </tbody>\n");
    htmlTable.append("</table>\n");
    return htmlTable.toString();
}
|
Transforms this configuration group into HTML formatted table. Options are sorted
alphabetically by key.
@param options list of options to include in this group
@return string containing HTML formatted table
|
toHtmlTable
|
java
|
apache/flink
|
flink-docs/src/main/java/org/apache/flink/docs/configuration/ConfigOptionsDocGenerator.java
|
https://github.com/apache/flink/blob/master/flink-docs/src/main/java/org/apache/flink/docs/configuration/ConfigOptionsDocGenerator.java
|
Apache-2.0
|
/**
 * Returns a {@link Description} listing the enum constants of the given option's type, or
 * {@code null} if the option is not enum-based.
 */
private static @Nullable Description getEnumOptionsDescription(
        OptionWithMetaInfo optionWithMetaInfo) {
    Class<?> clazz = getClazz(optionWithMetaInfo.option);
    if (!clazz.isEnum()) {
        // Non-enum options have no fixed value set to document.
        return null;
    }
    // Collects reflection failures from inside the lambda (which cannot throw checked
    // exceptions); the first failure is kept, later ones are attached as suppressed.
    AtomicReference<IllegalAccessException> exception = new AtomicReference<>(null);
    InlineElement[] optionDescriptions =
            Arrays.stream(clazz.getDeclaredFields())
                    // Only enum constants that are not excluded from the docs.
                    .filter(field -> field.isEnumConstant() && shouldBeDocumented(field))
                    .map(
                            field -> {
                                try {
                                    // Enum constants are static, hence get(null).
                                    return field.get(null);
                                } catch (IllegalAccessException e) {
                                    exception.set(
                                            ExceptionUtils.firstOrSuppressed(
                                                    e, exception.get()));
                                }
                                return null;
                            })
                    .filter(Objects::nonNull)
                    .map(ConfigOptionsDocGenerator::formatEnumOption)
                    .map(
                            elements ->
                                    TextElement.wrap(
                                            elements.stream().toArray(InlineElement[]::new)))
                    .toArray(InlineElement[]::new);
    if (exception.get() != null) {
        // Rethrow after the stream completes so all constants were attempted first.
        throw new RuntimeException(
                "config option should have public access right.", exception.get());
    }
    return Description.builder().text("Possible values:").list(optionDescriptions).build();
}
|
Returns a {@link Description} for the enum constants of the given option in case it is
enum-based, and {@code null} otherwise.
|
getEnumOptionsDescription
|
java
|
apache/flink
|
flink-docs/src/main/java/org/apache/flink/docs/configuration/ConfigOptionsDocGenerator.java
|
https://github.com/apache/flink/blob/master/flink-docs/src/main/java/org/apache/flink/docs/configuration/ConfigOptionsDocGenerator.java
|
Apache-2.0
|
/**
 * Checks whether the given endpoint should show up in the generated documentation.
 *
 * <p>Endpoints annotated with {@link Documentation.ExcludeFromDocumentation} are skipped.
 *
 * @param spec endpoint to check
 * @return true if the endpoint should be documented
 */
public static boolean shouldBeDocumented(
        MessageHeaders<
                        ? extends RequestBody,
                        ? extends ResponseBody,
                        ? extends MessageParameters>
                spec) {
    return !spec.getClass().isAnnotationPresent(Documentation.ExcludeFromDocumentation.class);
}
|
Checks whether the given endpoint should be documented.
@param spec endpoint to check
@return true if the endpoint should be documented
|
shouldBeDocumented
|
java
|
apache/flink
|
flink-docs/src/main/java/org/apache/flink/docs/rest/ApiSpecGeneratorUtils.java
|
https://github.com/apache/flink/blob/master/flink-docs/src/main/java/org/apache/flink/docs/rest/ApiSpecGeneratorUtils.java
|
Apache-2.0
|
/**
 * Finds whether the class declares dynamic fields that need to be documented.
 *
 * @param clazz class to check
 * @return the declared additional-field type if the class is annotated with {@link
 *     FlinkJsonSchema.AdditionalFields}, otherwise {@link Optional#empty()}
 */
public static Optional<Class<?>> findAdditionalFieldType(Class<?> clazz) {
    final FlinkJsonSchema.AdditionalFields annotation =
            clazz.getAnnotation(FlinkJsonSchema.AdditionalFields.class);
    if (annotation == null) {
        return Optional.empty();
    }
    return Optional.of(annotation.type());
}
|
Find whether the class contains dynamic fields that need to be documented.
@param clazz class to check
@return optional that is non-empty if the class is annotated with {@link
FlinkJsonSchema.AdditionalFields}
|
findAdditionalFieldType
|
java
|
apache/flink
|
flink-docs/src/main/java/org/apache/flink/docs/rest/ApiSpecGeneratorUtils.java
|
https://github.com/apache/flink/blob/master/flink-docs/src/main/java/org/apache/flink/docs/rest/ApiSpecGeneratorUtils.java
|
Apache-2.0
|
/**
 * Injects a {@code oneOf} schema for the generic 'operation' field of {@link
 * AsynchronousOperationResult}, which cannot be extracted from swagger automatically.
 *
 * @param openApi the spec to patch in place
 * @param asyncOperationSchemas the concrete schemas the 'operation' field may take
 */
private static void injectAsyncOperationResultSchema(
        final OpenAPI openApi, List<Schema> asyncOperationSchemas) {
    final Schema schema =
            openApi.getComponents()
                    .getSchemas()
                    .get(AsynchronousOperationResult.class.getSimpleName());
    // The schema may be absent if no endpoint in the spec uses AsynchronousOperationResult.
    if (schema != null) {
        schema.getProperties()
                .put(
                        AsynchronousOperationResult.FIELD_NAME_OPERATION,
                        new ComposedSchema().oneOf(asyncOperationSchemas));
    }
}
|
The {@link AsynchronousOperationResult} contains a generic 'operation' field that can't be
properly extracted from swagger. This method injects these manually.
<p>Resulting spec diff:
<pre>
AsynchronousOperationResult:
type: object
properties:
operation:
- type: object
+ oneOf:
+ - $ref: '#/components/schemas/AsynchronousOperationInfo'
+ - $ref: '#/components/schemas/SavepointInfo'
</pre>
|
injectAsyncOperationResultSchema
|
java
|
apache/flink
|
flink-docs/src/main/java/org/apache/flink/docs/rest/OpenApiSpecGenerator.java
|
https://github.com/apache/flink/blob/master/flink-docs/src/main/java/org/apache/flink/docs/rest/OpenApiSpecGenerator.java
|
Apache-2.0
|
/**
 * Resolves the model name for the given class while detecting name clashes between classes
 * that map to the same model name (e.g. a top-level and an inner class).
 *
 * @param cls the class to resolve
 * @return the resolved model name
 * @throws IllegalStateException if a different class already claimed the same model name
 */
@Override
protected String getNameOfClass(Class<?> cls) {
    final String resolvedName = super.getNameOfClass(cls);
    final String currentFqcn = cls.getCanonicalName();
    // Remember which FQCN claimed this model name; put() returns any previous claimant.
    final String previousClaimant = seenClassNamesToFQCN.put(resolvedName, currentFqcn);
    final boolean nameClash = previousClaimant != null && !currentFqcn.equals(previousClaimant);
    if (nameClash) {
        throw new IllegalStateException(
                String.format(
                        "Detected name clash for model name '%s'.%n"
                                + "\tClasses:%n"
                                + "\t\t- %s%n"
                                + "\t\t- %s%n"
                                + "\tEither rename the classes or annotate them with '%s' and set a unique 'name'.",
                        resolvedName,
                        currentFqcn,
                        previousClaimant,
                        Schema.class.getCanonicalName()));
    }
    return resolvedName;
}
|
A {@link TypeNameResolver} that detects name-clashes between top-level and inner classes.
|
getNameOfClass
|
java
|
apache/flink
|
flink-docs/src/main/java/org/apache/flink/docs/rest/OpenApiSpecGenerator.java
|
https://github.com/apache/flink/blob/master/flink-docs/src/main/java/org/apache/flink/docs/rest/OpenApiSpecGenerator.java
|
Apache-2.0
|
/**
 * Generates the Runtime REST API OpenAPI spec, one file per documented API version.
 *
 * @param args args[0] contains the directory into which the generated files are placed
 * @throws IOException if any file operation failed
 */
public static void main(String[] args) throws IOException, ConfigurationException {
    final String outputDirectory = args[0];
    for (final RuntimeRestAPIVersion apiVersion : RuntimeRestAPIVersion.values()) {
        // V0 exists only for testing purposes and is not documented.
        if (apiVersion != RuntimeRestAPIVersion.V0) {
            createDocumentationFile(
                    "Flink JobManager REST API",
                    new DocumentingDispatcherRestEndpoint(),
                    apiVersion,
                    Paths.get(
                            outputDirectory,
                            "rest_" + apiVersion.getURLVersionPrefix() + "_dispatcher.yml"));
        }
    }
}
|
Generates the Runtime REST API OpenAPI spec.
@param args args[0] contains the directory into which the generated files are placed
@throws IOException if any file operation failed
|
main
|
java
|
apache/flink
|
flink-docs/src/main/java/org/apache/flink/docs/rest/RuntimeOpenApiSpecGenerator.java
|
https://github.com/apache/flink/blob/master/flink-docs/src/main/java/org/apache/flink/docs/rest/RuntimeOpenApiSpecGenerator.java
|
Apache-2.0
|
/**
 * Generates the Runtime REST API HTML documentation, one file per documented API version.
 *
 * @param args args[0] contains the directory into which the generated files are placed
 * @throws IOException if any file operation failed
 */
public static void main(String[] args) throws IOException, ConfigurationException {
    final String outputDirectory = args[0];
    for (final RuntimeRestAPIVersion apiVersion : RuntimeRestAPIVersion.values()) {
        // V0 exists only for testing purposes and is not documented.
        if (apiVersion != RuntimeRestAPIVersion.V0) {
            createHtmlFile(
                    new DocumentingDispatcherRestEndpoint(),
                    apiVersion,
                    Paths.get(
                            outputDirectory,
                            "rest_" + apiVersion.getURLVersionPrefix() + "_dispatcher.html"));
        }
    }
}
|
Generates the Runtime REST API documentation.
@param args args[0] contains the directory into which the generated files are placed
@throws IOException if any file operation failed
|
main
|
java
|
apache/flink
|
flink-docs/src/main/java/org/apache/flink/docs/rest/RuntimeRestAPIDocGenerator.java
|
https://github.com/apache/flink/blob/master/flink-docs/src/main/java/org/apache/flink/docs/rest/RuntimeRestAPIDocGenerator.java
|
Apache-2.0
|
/**
 * Generates the SQL Gateway REST API OpenAPI spec, one file per documented API version.
 *
 * @param args args[0] contains the directory into which the generated files are placed
 * @throws IOException if any file operation failed
 */
public static void main(String[] args) throws IOException, ConfigurationException {
    final String outputDirectory = args[0];
    for (final SqlGatewayRestAPIVersion apiVersion : SqlGatewayRestAPIVersion.values()) {
        // V0 exists only for testing purposes and is not documented.
        if (apiVersion != SqlGatewayRestAPIVersion.V0) {
            createDocumentationFile(
                    "Flink SQL Gateway REST API",
                    new DocumentingSqlGatewayRestEndpoint(),
                    apiVersion,
                    Paths.get(
                            outputDirectory,
                            "rest_" + apiVersion.getURLVersionPrefix() + "_sql_gateway.yml"));
        }
    }
}
|
Generates the Sql Gateway REST API OpenAPI spec.
@param args args[0] contains the directory into which the generated files are placed
@throws IOException if any file operation failed
|
main
|
java
|
apache/flink
|
flink-docs/src/main/java/org/apache/flink/docs/rest/SqlGatewayOpenApiSpecGenerator.java
|
https://github.com/apache/flink/blob/master/flink-docs/src/main/java/org/apache/flink/docs/rest/SqlGatewayOpenApiSpecGenerator.java
|
Apache-2.0
|
/**
 * Generates the SQL Gateway REST API HTML documentation, one file per documented API version.
 *
 * @param args args[0] contains the directory into which the generated files are placed
 * @throws IOException if any file operation failed
 */
public static void main(String[] args) throws IOException, ConfigurationException {
    final String outputDirectory = args[0];
    for (final SqlGatewayRestAPIVersion apiVersion : SqlGatewayRestAPIVersion.values()) {
        // V0 exists only for testing purposes and is not documented.
        if (apiVersion != SqlGatewayRestAPIVersion.V0) {
            createHtmlFile(
                    new DocumentingSqlGatewayRestEndpoint(),
                    apiVersion,
                    Paths.get(
                            outputDirectory,
                            "rest_" + apiVersion.getURLVersionPrefix() + "_sql_gateway.html"));
        }
    }
}
|
Generates the Sql Gateway REST API documentation.
@param args args[0] contains the directory into which the generated files are placed
@throws IOException if any file operation failed
|
main
|
java
|
apache/flink
|
flink-docs/src/main/java/org/apache/flink/docs/rest/SqlGatewayRestAPIDocGenerator.java
|
https://github.com/apache/flink/blob/master/flink-docs/src/main/java/org/apache/flink/docs/rest/SqlGatewayRestAPIDocGenerator.java
|
Apache-2.0
|
/**
 * HTML-escapes angle brackets in the given value while preserving literal {@code <wbr>} tags.
 *
 * <p>The {@code <wbr>} tags are temporarily swapped for a placeholder so that their brackets
 * survive the escaping, then restored afterwards.
 *
 * <p>Note: the three tokens are literal strings, so {@code replace} is used instead of the
 * regex-based {@code replaceAll} (the original entities appear to have been mangled by
 * extraction; '<' and '>' are escaped to '&lt;' and '&gt;').
 *
 * @param value the raw value to escape
 * @return the escaped value with {@code <wbr>} tags intact
 */
public static String escapeCharacters(String value) {
    return value.replace("<wbr>", TEMPORARY_PLACEHOLDER)
            .replace("<", "&lt;")
            .replace(">", "&gt;")
            .replace(TEMPORARY_PLACEHOLDER, "<wbr>");
}
|
Placeholder that is used to prevent certain sections from being escaped. We don't need a
sophisticated value but only something that won't show up in config options.
|
escapeCharacters
|
java
|
apache/flink
|
flink-docs/src/main/java/org/apache/flink/docs/util/Utils.java
|
https://github.com/apache/flink/blob/master/flink-docs/src/main/java/org/apache/flink/docs/util/Utils.java
|
Apache-2.0
|
/**
 * Verifies YAML compliance of the config options: no (non-deprecated) option key may be a
 * prefix of another option key, since YAML cannot represent a scalar and a mapping under the
 * same path.
 */
@Test
void testNoKeyPrefixOfOtherKey() throws Exception {
    final Collection<String> allOptions = new ArrayList<>();
    // Collect all non-deprecated option keys across the project.
    new ConfigurationOptionLocator()
            .discoverOptionsAndApply(
                    Paths.get(ConfigOptionsDocGeneratorTest.getProjectRootDir()),
                    (aClass, optionWithMetaInfos) -> {
                        optionWithMetaInfos.stream()
                                .filter(o -> o.field.getAnnotation(Deprecated.class) == null)
                                .map(o -> o.option.key())
                                .forEach(allOptions::add);
                    });
    final List<String> keys = allOptions.stream().sorted().collect(Collectors.toList());
    for (int x = 0; x < keys.size(); x++) {
        final String checkedKey = keys.get(x);
        // After sorting, any key prefixed by checkedKey sorts directly after it, so only
        // the remainder of the list needs to be scanned.
        final Stream<String> stringStream =
                keys.subList(x + 1, keys.size()).stream()
                        .filter(key -> key.startsWith(checkedKey + "."));
        assertThat(stringStream)
                .as("Key of option '" + checkedKey + "' is prefix of another option.")
                .isEmpty();
    }
}
|
Tests for the YAML compliance of our config options.
<p>Note: This test doesn't really belong into flink-docs, but it contains the required utils and
depends on the right modules.
|
testNoKeyPrefixOfOtherKey
|
java
|
apache/flink
|
flink-docs/src/test/java/org/apache/flink/docs/configuration/ConfigOptionsYamlSpecTest.java
|
https://github.com/apache/flink/blob/master/flink-docs/src/test/java/org/apache/flink/docs/configuration/ConfigOptionsYamlSpecTest.java
|
Apache-2.0
|
/** No-op by default; implementations holding resources should override this. */
@Override
default void close() throws Exception {}
|
Changelog handle reader to be used by {@link StateChangeIteratorImpl}.
|
close
|
java
|
apache/flink
|
flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/ChangelogStreamHandleReader.java
|
https://github.com/apache/flink/blob/master/flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/ChangelogStreamHandleReader.java
|
Apache-2.0
|
/**
 * Builds a {@link RetryPolicy} from the given configuration.
 *
 * @param config configuration providing {@code FsStateChangelogOptions.RETRY_POLICY} and the
 *     related retry settings
 * @return the configured retry policy
 * @throws IllegalConfigurationException if the configured policy name is unknown
 */
static RetryPolicy fromConfig(ReadableConfig config) {
    // Read the policy name once; it is needed both for dispatch and for the error message.
    final String retryPolicy = config.get(FsStateChangelogOptions.RETRY_POLICY);
    switch (retryPolicy) {
        case "fixed":
            return fixed(
                    config.get(FsStateChangelogOptions.RETRY_MAX_ATTEMPTS),
                    config.get(FsStateChangelogOptions.UPLOAD_TIMEOUT).toMillis(),
                    config.get(FsStateChangelogOptions.RETRY_DELAY_AFTER_FAILURE).toMillis());
        case "none":
            return NONE;
        default:
            throw new IllegalConfigurationException("Unknown retry policy: " + retryPolicy);
    }
}
|
Retry policy to be used by {@link RetryingExecutor}.
|
fromConfig
|
java
|
apache/flink
|
flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/RetryPolicy.java
|
https://github.com/apache/flink/blob/master/flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/RetryPolicy.java
|
Apache-2.0
|
/**
 * Seizes {@code bytes} of capacity. The caller must first ensure some capacity {@link
 * #hasCapacity() is available}, and must later {@link #releaseCapacity(long) release} it.
 * Called by the Task thread.
 *
 * @param bytes number of bytes to seize
 * @throws IllegalStateException if no capacity is available
 */
public void seizeCapacity(long bytes) throws IllegalStateException {
    checkState(hasCapacity());
    inFlightBytesCounter += bytes;
}
|
Seize <b>bytes</b> capacity. It is the caller responsibility to ensure at least some capacity
{@link #hasCapacity() is available}. <strong>After</strong> this call, the caller is allowed
to actually use the seized capacity. When the capacity is not needed anymore, the caller is
required to {@link #releaseCapacity(long) release} it. Called by the Task thread.
@throws IllegalStateException if capacity is unavailable.
|
seizeCapacity
|
java
|
apache/flink
|
flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/UploadThrottle.java
|
https://github.com/apache/flink/blob/master/flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/UploadThrottle.java
|
Apache-2.0
|
/**
 * Releases previously {@link #seizeCapacity(long) seized} capacity. Called by {@link
 * BatchingStateChangeUploadScheduler} (IO thread).
 *
 * @param bytes number of bytes to release
 */
public void releaseCapacity(long bytes) {
    inFlightBytesCounter -= bytes;
}
|
Release previously {@link #seizeCapacity(long) seized} capacity. Called by {@link
BatchingStateChangeUploadScheduler} (IO thread).
|
releaseCapacity
|
java
|
apache/flink
|
flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/UploadThrottle.java
|
https://github.com/apache/flink/blob/master/flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/UploadThrottle.java
|
Apache-2.0
|
/**
 * Tests whether some upload capacity is still available.
 *
 * @return true if the in-flight byte count is below the configured maximum
 */
public boolean hasCapacity() {
    return inFlightBytesCounter < maxBytesInFlight;
}
|
Test whether some capacity is available.
|
hasCapacity
|
java
|
apache/flink
|
flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/UploadThrottle.java
|
https://github.com/apache/flink/blob/master/flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/UploadThrottle.java
|
Apache-2.0
|
/**
 * Creates a safe drop-in replacement for {@link ChangelogStorageMetricGroup} that does not
 * register anything, for use in tests.
 *
 * @return an unregistered metric group
 */
public static ChangelogStorageMetricGroup createUnregisteredChangelogStorageMetricGroup() {
    return new UnregisteredChangelogStorageMetricGroup();
}
|
A safe drop-in replacement for {@link ChangelogStorageMetricGroup}s.
|
createUnregisteredChangelogStorageMetricGroup
|
java
|
apache/flink
|
flink-dstl/flink-dstl-dfs/src/test/java/org/apache/flink/changelog/fs/UnregisteredChangelogStorageMetricGroup.java
|
https://github.com/apache/flink/blob/master/flink-dstl/flink-dstl-dfs/src/test/java/org/apache/flink/changelog/fs/UnregisteredChangelogStorageMetricGroup.java
|
Apache-2.0
|
/**
 * Entry point of a periodic streaming job used for CLI testing. The bounded stream completes
 * after the configured duration.
 *
 * <p>Parameters: {@code -outputPath} result directory (required); {@code -recordsPerSecond}
 * output record frequency (default 10); {@code -durationInSecond} running duration (default
 * 60); {@code -offsetInSecond} startup delay before the first message (default 0).
 *
 * @param args CLI arguments as described above
 * @throws Exception if the job fails
 */
public static void main(String[] args) throws Exception {
    ParameterTool params = ParameterTool.fromArgs(args);
    String outputPath = params.getRequired("outputPath");
    int recordsPerSecond = params.getInt("recordsPerSecond", 10);
    int duration = params.getInt("durationInSecond", 60);
    int offset = params.getInt("offsetInSecond", 0);
    StreamExecutionEnvironment sEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    sEnv.enableCheckpointing(4000);
    sEnv.getConfig().setAutoWatermarkInterval(1000);
    // execute a simple pass through program.
    PeriodicSourceGenerator generator =
            new PeriodicSourceGenerator(recordsPerSecond, duration, offset);
    DataStream<Tuple> rows = sEnv.addSource(generator);
    // Sum field 0 per key (field 1) over 5-second processing-time tumbling windows.
    DataStream<Tuple> result =
            rows.keyBy(tuple -> tuple.getField(1))
                    .window(TumblingProcessingTimeWindows.of(Duration.ofSeconds(5)))
                    .sum(0);
    // Single sink task so all results land in one file.
    result.sinkTo(
                    FileSink.forRowFormat(
                                    new Path(outputPath + "/result.txt"),
                                    new SimpleStringEncoder<Tuple>())
                            .build())
            .setParallelism(1);
    sEnv.execute();
}
|
This is a periodic streaming job that runs for CLI testing purposes.
<p>The stream is bounded and will complete after the specified duration.
<p>Parameters: -outputPath Sets the path to where the result data is written. -recordsPerSecond
Sets the output record frequency. -durationInSecond Sets the running duration of the job.
-offsetInSecond Sets the startup delay before processing the first message.
|
main
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-cli-test/src/main/java/org/apache/flink/streaming/tests/PeriodicStreamingJob.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-cli-test/src/main/java/org/apache/flink/streaming/tests/PeriodicStreamingJob.java
|
Apache-2.0
|
long incrementAndGet(int key) {
return ++statesPerKey[key - startKey];
}
|
Increments and returns the current sequence number for the given key.
|
incrementAndGet
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-datastream-allround-test/src/main/java/org/apache/flink/streaming/tests/SequenceGeneratorSource.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-datastream-allround-test/src/main/java/org/apache/flink/streaming/tests/SequenceGeneratorSource.java
|
Apache-2.0
|
public String getStateName() {
return stateName;
}
|
The state builder wraps the logic of registering state in user functions, as well as how state is
updated per input element..
|
getStateName
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-datastream-allround-test/src/main/java/org/apache/flink/streaming/tests/artificialstate/builder/ArtificialStateBuilder.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-datastream-allround-test/src/main/java/org/apache/flink/streaming/tests/artificialstate/builder/ArtificialStateBuilder.java
|
Apache-2.0
|
@Override
public void close() throws IOException {
FileUtils.deleteFileOrDirectory(path.toFile());
}
|
Utility class to delete a given {@link Path} when exiting a try-with-resources statement.
|
close
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/AutoClosablePath.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/AutoClosablePath.java
|
Apache-2.0
|
public static WGetBuilder wget(String url) {
return new WGetBuilder(url);
}
|
Utility class for setting up command-line tool usages in a readable fashion.
|
wget
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/CommandLineWrapper.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/CommandLineWrapper.java
|
Apache-2.0
|
public static List<String> readCsvResultFiles(Path path) throws IOException {
File filePath = path.toFile();
// list all the non-hidden files
File[] csvFiles = filePath.listFiles((dir, name) -> !name.startsWith("."));
List<String> result = new ArrayList<>();
if (csvFiles != null) {
for (File file : csvFiles) {
result.addAll(Files.readAllLines(file.toPath()));
}
}
return result;
}
|
Read the all files with the specified path.
|
readCsvResultFiles
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/TestUtils.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/TestUtils.java
|
Apache-2.0
|
public static void restrictTo(final String reason, final OperatingSystem... operatingSystems)
throws AssumptionViolatedException {
final EnumSet<OperatingSystem> allowed = EnumSet.copyOf(Arrays.asList(operatingSystems));
Assume.assumeTrue(reason, allowed.contains(OperatingSystem.getCurrentOperatingSystem()));
}
|
Restricts the execution to the given set of operating systems.
@param reason reason for the restriction
@param operatingSystems allowed operating systems
@throws AssumptionViolatedException if this method is called on a forbidden operating system
|
restrictTo
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/activation/OperatingSystemRestriction.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/activation/OperatingSystemRestriction.java
|
Apache-2.0
|
public static void forbid(final String reason, final OperatingSystem... forbiddenSystems)
throws AssumptionViolatedException {
final OperatingSystem os = OperatingSystem.getCurrentOperatingSystem();
for (final OperatingSystem forbiddenSystem : forbiddenSystems) {
Assume.assumeTrue(reason, os != forbiddenSystem);
}
}
|
Forbids the execution on the given set of operating systems.
@param reason reason for the restriction
@param forbiddenSystems forbidden operating systems
@throws AssumptionViolatedException if this method is called on a forbidden operating system
|
forbid
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/activation/OperatingSystemRestriction.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/activation/OperatingSystemRestriction.java
|
Apache-2.0
|
static DownloadCache get() {
return FactoryUtils.loadAndInvokeFactory(
DownloadCacheFactory.class, DownloadCacheFactory::create, LolCacheFactory::new);
}
|
Returns the configured DownloadCache implementation, or a {@link LolCache} if none is
configured.
@return configured DownloadCache, or {@link LolCache} is none is configured
|
get
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/cache/DownloadCache.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/cache/DownloadCache.java
|
Apache-2.0
|
@Override
public DownloadCache create() throws IOException {
final TemporaryFolder folder = new TemporaryFolder();
folder.create();
return new LolCache(folder);
}
|
A {@link DownloadCacheFactory} for the {@link LolCache}.
|
create
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/cache/LolCacheFactory.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/cache/LolCacheFactory.java
|
Apache-2.0
|
@Override
public DownloadCache create() {
final Optional<Path> tmpDir = TMP_DIR.get();
final Period timeToLive = TIME_TO_LIVE.get(TIME_TO_LIVE_DEFAULT);
if (!tmpDir.isPresent()) {
throw new IllegalArgumentException(
String.format(
"Not loading %s because %s was not set.",
PersistingDownloadCache.class, TMP_DIR.getPropertyName()));
}
return new PersistingDownloadCache(tmpDir.get(), timeToLive);
}
|
A {@link DownloadCacheFactory} for the {@link PersistingDownloadCache}.
|
create
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/cache/PersistingDownloadCacheFactory.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/cache/PersistingDownloadCacheFactory.java
|
Apache-2.0
|
@Override
public DownloadCache create() {
final Optional<Path> tmpDir = TMP_DIR.get();
final Optional<Integer> timeToLive = BUILDS_TO_LIVE.get();
final Optional<Integer> buildNumber = BUILD_NUMBER.get();
if (!tmpDir.isPresent()) {
throw new IllegalArgumentException(
String.format(
"Not loading %s because %s was not set.",
TravisDownloadCache.class, TMP_DIR.getPropertyName()));
}
if (!timeToLive.isPresent()) {
throw new IllegalArgumentException(
String.format(
"Not loading %s because %s was not set.",
TravisDownloadCache.class, BUILDS_TO_LIVE.getPropertyName()));
}
if (!buildNumber.isPresent()) {
throw new IllegalArgumentException(
String.format(
"Not loading %s because %s was not set.",
TravisDownloadCache.class, BUILD_NUMBER.getPropertyName()));
}
return new TravisDownloadCache(tmpDir.get(), timeToLive.get(), buildNumber.get());
}
|
A {@link DownloadCacheFactory} for the {@link TravisDownloadCache}.
|
create
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/cache/TravisDownloadCacheFactory.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/cache/TravisDownloadCacheFactory.java
|
Apache-2.0
|
static FlinkResource get() {
return get(FlinkResourceSetup.builder().build());
}
|
Returns the configured FlinkResource implementation, or a {@link
LocalStandaloneFlinkResource} if none is configured.
@return configured FlinkResource, or {@link LocalStandaloneFlinkResource} is none is
configured
|
get
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/flink/FlinkResource.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/flink/FlinkResource.java
|
Apache-2.0
|
static FlinkResource get(FlinkResourceSetup setup) {
return FactoryUtils.loadAndInvokeFactory(
FlinkResourceFactory.class,
factory -> factory.create(setup),
LocalStandaloneFlinkResourceFactory::new);
}
|
Returns the configured FlinkResource implementation, or a {@link
LocalStandaloneFlinkResource} if none is configured.
@param setup setup instructions for the FlinkResource
@return configured FlinkResource, or {@link LocalStandaloneFlinkResource} is none is
configured
|
get
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/flink/FlinkResource.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/flink/FlinkResource.java
|
Apache-2.0
|
@Override
public FlinkResource create(FlinkResourceSetup setup) {
Optional<Path> logBackupDirectory = DISTRIBUTION_LOG_BACKUP_DIRECTORY.get();
if (!logBackupDirectory.isPresent()) {
LOG.warn(
"Property {} not set, logs will not be backed up in case of test failures.",
DISTRIBUTION_LOG_BACKUP_DIRECTORY.getPropertyName());
}
return new LocalStandaloneFlinkResource(
FileUtils.findFlinkDist(), logBackupDirectory.orElse(null), setup);
}
|
A {@link FlinkResourceFactory} for the {@link LocalStandaloneFlinkResource}.
|
create
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/flink/LocalStandaloneFlinkResourceFactory.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/flink/LocalStandaloneFlinkResourceFactory.java
|
Apache-2.0
|
public static <R, F> R loadAndInvokeFactory(
final Class<F> factoryInterface,
final FactoryInvoker<F, R> factoryInvoker,
final Supplier<F> defaultProvider) {
final ServiceLoader<F> factories = ServiceLoader.load(factoryInterface);
final List<R> instantiatedResources = new ArrayList<>();
final List<Exception> errorsDuringInitialization = new ArrayList<>();
for (F factory : factories) {
try {
R resource = factoryInvoker.invoke(factory);
instantiatedResources.add(resource);
LOG.info("Instantiated {}.", resource.getClass().getSimpleName());
} catch (Exception e) {
LOG.debug(
"Factory {} could not instantiate instance.",
factory.getClass().getSimpleName(),
e);
errorsDuringInitialization.add(e);
}
}
if (instantiatedResources.size() == 1) {
return instantiatedResources.get(0);
}
if (instantiatedResources.isEmpty()) {
try {
return factoryInvoker.invoke(defaultProvider.get());
} catch (Exception e) {
final RuntimeException exception =
new RuntimeException("Could not instantiate any instance.");
final RuntimeException defaultException =
new RuntimeException("Could not instantiate default instance.", e);
exception.addSuppressed(defaultException);
errorsDuringInitialization.forEach(exception::addSuppressed);
throw exception;
}
}
throw new RuntimeException("Multiple instances were created: " + instantiatedResources);
}
|
Loads all factories for the given class using the {@link ServiceLoader} and attempts to
create an instance.
@param factoryInterface factory interface
@param factoryInvoker factory invoker
@param defaultProvider default factory provider
@param <R> resource type
@param <F> factory type
@throws RuntimeException if no or multiple resources could be instantiated
@return created instance
|
loadAndInvokeFactory
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/util/FactoryUtils.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/util/FactoryUtils.java
|
Apache-2.0
|
public static void main(String[] args) throws Exception {
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// we want to go through serialization to check for kryo issues
env.disableOperatorChaining();
env.fromData(new NonPojo()).map(x -> x);
env.execute();
}
|
Simple batch job in pure Java.
|
main
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-scala/src/main/java/org/apache/flink/tests/scala/JavaJob.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-scala/src/main/java/org/apache/flink/tests/scala/JavaJob.java
|
Apache-2.0
|
public static void main(String[] args) throws Exception {
Configuration configuration = new Configuration();
configuration.setString(
PipelineOptions.SERIALIZATION_CONFIG.key(),
"{org.apache.flink.tests.scala.NonPojo: "
+ "{type: kryo, kryo-type: default, class: org.apache.flink.tests.scala.NonPojoSerializer}}");
final StreamExecutionEnvironment env =
StreamExecutionEnvironment.getExecutionEnvironment(configuration);
// we want to go through serialization to check for kryo issues
env.disableOperatorChaining();
env.fromData(new NonPojo()).map(x -> x);
env.execute();
}
|
Simple batch job in pure Java that uses a custom Kryo serializer.
|
main
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-scala/src/main/java/org/apache/flink/tests/scala/JavaJobWithKryoSerializer.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-scala/src/main/java/org/apache/flink/tests/scala/JavaJobWithKryoSerializer.java
|
Apache-2.0
|
public int getSomeInt() {
return someInt;
}
|
Simple type that needs to go through Kryo for serialization.
|
getSomeInt
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-scala/src/main/java/org/apache/flink/tests/scala/NonPojo.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-scala/src/main/java/org/apache/flink/tests/scala/NonPojo.java
|
Apache-2.0
|
@Test
public void testImperativeUdaf() throws Exception {
runAndCheckSQL("scala_free_e2e.sql", Arrays.asList("+I[Bob, 2]", "+I[Alice, 1]"));
}
|
End-to-End tests for table planner scala-free since 1.15. Due to scala-free of table planner
introduced, the class in table planner is not visible in distribution runtime, if we use these
class in execution time, ClassNotFound exception will be thrown. ITCase in table planner can not
cover it, so we should add E2E test for these case.
|
testImperativeUdaf
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-sql/src/test/java/org/apache/flink/table/sql/PlannerScalaFreeITCase.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-sql/src/test/java/org/apache/flink/table/sql/PlannerScalaFreeITCase.java
|
Apache-2.0
|
public TableResult execute() throws Exception {
LOG.info("Starting AsyncScalarFunctionExample");
// Set up Table Environment
EnvironmentSettings settings = EnvironmentSettings.inStreamingMode();
TableEnvironment tableEnv = TableEnvironment.create(settings);
// Register source table
createSourceTable(tableEnv);
// Register sink table
createSinkTable(tableEnv);
// Register the lookup function
registerLookupFunction(tableEnv);
// Create and execute the transformation using Table API
TableResult result = processProducts(tableEnv);
LOG.info("Executing Flink job");
return result;
}
|
Sets up and executes the Flink processing pipeline.
@return TableResult from the execution
|
execute
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-table-api/src/main/java/org/apache/flink/table/test/async/AsyncScalarFunctionExample.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-table-api/src/main/java/org/apache/flink/table/test/async/AsyncScalarFunctionExample.java
|
Apache-2.0
|
protected void createSourceTable(TableEnvironment tableEnv) {
final Schema schema =
Schema.newBuilder()
.column("product_id", DataTypes.STRING())
.column("price", DataTypes.DECIMAL(10, 2))
.column("quantity", DataTypes.INT())
.column("ts", DataTypes.TIMESTAMP(3))
.watermark("ts", "ts - INTERVAL '5' SECOND")
.build();
// Create a temporary mock products table
tableEnv.createTemporaryTable(
"products",
TableDescriptor.forConnector("datagen")
.schema(schema)
.option("number-of-rows", "10")
.option("fields.product_id.kind", "sequence")
.option("fields.product_id.start", "1")
.option("fields.product_id.end", "10")
.option("fields.price.min", "10.00")
.option("fields.price.max", "100.00")
.option("fields.quantity.min", "1")
.option("fields.quantity.max", "10")
.build());
LOG.info("Source table 'products' created");
}
|
Creates the source table using datagen connector.
|
createSourceTable
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-table-api/src/main/java/org/apache/flink/table/test/async/AsyncScalarFunctionExample.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-table-api/src/main/java/org/apache/flink/table/test/async/AsyncScalarFunctionExample.java
|
Apache-2.0
|
protected void createSinkTable(TableEnvironment tableEnv) {
tableEnv.createTemporaryTable(
"enriched_products",
TableDescriptor.forConnector("blackhole")
.schema(
Schema.newBuilder()
.column("product_id", DataTypes.STRING())
.column("price", DataTypes.DECIMAL(10, 2))
.column("quantity", DataTypes.INT())
.column("timestamp", DataTypes.TIMESTAMP(3))
.column("name", DataTypes.STRING())
.build())
.build());
LOG.info("Sink table 'enriched_products' created");
}
|
Creates the sink table using blackhole connector.
|
createSinkTable
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-table-api/src/main/java/org/apache/flink/table/test/async/AsyncScalarFunctionExample.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-table-api/src/main/java/org/apache/flink/table/test/async/AsyncScalarFunctionExample.java
|
Apache-2.0
|
protected TableResult processProducts(TableEnvironment tableEnv) {
Table products = tableEnv.from("products");
Table enrichedProducts =
products.select(
$("product_id"),
$("price"),
$("quantity"),
$("ts").as("timestamp"),
call("lookup_name", $("product_id")).as("name"));
// Execute the query - will fail with NoClassDefFoundError for StringSubstitutor
TableResult result = enrichedProducts.executeInsert("enriched_products");
LOG.info("Product enrichment transformation created");
return result;
}
|
Processes products data using Table API with async lookups. - Performs async lookups -
Enriches the data with product details - Inserts into the sink table
@return TableResult from the execution
|
processProducts
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-table-api/src/main/java/org/apache/flink/table/test/async/AsyncScalarFunctionExample.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-table-api/src/main/java/org/apache/flink/table/test/async/AsyncScalarFunctionExample.java
|
Apache-2.0
|
@Test
public void testAsyncScalarFunction() throws Exception {
// Create and run the example application
AsyncScalarFunctionExample example = new AsyncScalarFunctionExample();
example.execute();
// If we reach here without exceptions, the test passes
}
|
Test that runs the AsyncScalarFunctionExample.
<p>This test verifies that AsyncScalarFunction works correctly. The test passes if the
application runs without errors.
|
testAsyncScalarFunction
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-end-to-end-tests-table-api/src/test/java/org/apache/flink/table/test/async/AsyncScalarFunctionTest.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-end-to-end-tests-table-api/src/test/java/org/apache/flink/table/test/async/AsyncScalarFunctionTest.java
|
Apache-2.0
|
public static void main(String[] args) throws Exception {
final ParameterTool pt = ParameterTool.fromArgs(args);
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
setupEnvironment(env, pt);
final int numStates =
pt.getInt(NUM_LIST_STATES_PER_OP.key(), NUM_LIST_STATES_PER_OP.defaultValue());
final int numPartitionsPerState =
pt.getInt(
NUM_PARTITIONS_PER_LIST_STATE.key(),
NUM_PARTITIONS_PER_LIST_STATE.defaultValue());
Preconditions.checkState(
env.getCheckpointInterval() > 0L, "Checkpointing must be enabled for this test!");
env.addSource(new SimpleEndlessSourceWithBloatedState(numStates, numPartitionsPerState))
.setParallelism(env.getParallelism())
.sinkTo(new DiscardingSink<>())
.setParallelism(1);
env.execute("HeavyDeploymentStressTestProgram");
}
|
End-to-end test for heavy deployment descriptors. This test creates a heavy deployment by
producing inflated meta data for the source's operator state. The state is registered as union
state and will be multiplied in deployment.
|
main
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-heavy-deployment-stress-test/src/main/java/org/apache/flink/deployment/HeavyDeploymentStressTestProgram.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-heavy-deployment-stress-test/src/main/java/org/apache/flink/deployment/HeavyDeploymentStressTestProgram.java
|
Apache-2.0
|
public static void main(String[] args) throws Exception {
final ParameterTool pt = ParameterTool.fromArgs(args);
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(pt.getInt("parallelism", 1));
env.setMaxParallelism(pt.getInt("maxParallelism", pt.getInt("parallelism", 1)));
env.enableCheckpointing(pt.getInt("checkpointInterval", 1000));
Configuration configuration = new Configuration();
configuration.set(RestartStrategyOptions.RESTART_STRATEGY, "fixed-delay");
configuration.set(
RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_ATTEMPTS, Integer.MAX_VALUE);
configuration.set(
RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_DELAY,
Duration.ofMillis(pt.getInt("restartDelay", 0)));
String checkpointDir = pt.getRequired("checkpointDir");
configuration.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, checkpointDir);
env.configure(configuration);
if (pt.getBoolean("externalizedCheckpoints", false)) {
env.getCheckpointConfig()
.setExternalizedCheckpointRetention(
ExternalizedCheckpointRetention.RETAIN_ON_CANCELLATION);
}
boolean killJvmOnFail = pt.getBoolean("killJvmOnFail", false);
String stateBackend = pt.get("stateBackend", "hashmap");
if ("hashmap".equals(stateBackend)) {
env.configure(new Configuration().set(StateBackendOptions.STATE_BACKEND, "hashmap"));
} else if ("rocks".equals(stateBackend)) {
boolean incrementalCheckpoints = pt.getBoolean("incrementalCheckpoints", false);
env.configure(
new Configuration()
.set(StateBackendOptions.STATE_BACKEND, "rocksdb")
.set(
CheckpointingOptions.INCREMENTAL_CHECKPOINTS,
incrementalCheckpoints));
} else {
throw new IllegalArgumentException("Unknown backend: " + stateBackend);
}
// make parameters available in the web interface
env.getConfig().setGlobalJobParameters(pt);
// delay to throttle down the production of the source
long delay = pt.getLong("delay", 0L);
// the maximum number of attempts, before the job finishes with success
int maxAttempts = pt.getInt("maxAttempts", 3);
// size of one artificial value
int valueSize = pt.getInt("valueSize", 10);
env.addSource(new RandomLongSource(maxAttempts, delay))
.keyBy((KeySelector<Long, Long>) aLong -> aLong)
.flatMap(new StateCreatingFlatMap(valueSize, killJvmOnFail))
.addSink(new PrintSinkFunction<>());
env.execute("Sticky Allocation And Local Recovery Test");
}
|
Automatic end-to-end test for local recovery (including sticky allocation).
<p>List of possible input parameters for this job:
<ul>
<li>checkpointDir: the checkpoint directory, required.
<li>parallelism: the parallelism of the job, default 1.
<li>maxParallelism: the maximum parallelism of the job, default 1.
<li>checkpointInterval: the checkpointing interval in milliseconds, default 1000.
<li>restartDelay: the delay of the fixed delay restart strategy, default 0.
<li>externalizedCheckpoints: flag to activate externalized checkpoints, default <code>false
</code>.
<li>stateBackend: choice for state backend between <code>file</code> and <code>rocks</code>,
default <code>file</code>.
<li>killJvmOnFail: flag that determines whether or not an artificial failure induced by the
test kills the JVM or not.
<li>incrementalCheckpoints: flag for incremental checkpoint with rocks state backend, default
<code>false</code>.
<li>delay: sleep delay to throttle down the production of the source, default 0.
<li>maxAttempts: the maximum number of run attempts, before the job finishes with success,
default 3.
<li>valueSize: size of the artificial value for each key in bytes, default 10.
</ul>
|
main
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-local-recovery-and-allocation-test/src/main/java/org/apache/flink/streaming/tests/StickyAllocationAndLocalRecoveryTestJob.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-local-recovery-and-allocation-test/src/main/java/org/apache/flink/streaming/tests/StickyAllocationAndLocalRecoveryTestJob.java
|
Apache-2.0
|
private static int getJvmPid() throws Exception {
java.lang.management.RuntimeMXBean runtime =
java.lang.management.ManagementFactory.getRuntimeMXBean();
java.lang.reflect.Field jvm = runtime.getClass().getDeclaredField("jvm");
jvm.setAccessible(true);
sun.management.VMManagement mgmt = (sun.management.VMManagement) jvm.get(runtime);
java.lang.reflect.Method pidMethod = mgmt.getClass().getDeclaredMethod("getProcessId");
pidMethod.setAccessible(true);
return (int) (Integer) pidMethod.invoke(mgmt);
}
|
This code is copied from Stack Overflow.
<p><a
href="https://stackoverflow.com/questions/35842">https://stackoverflow.com/questions/35842</a>,
answer <a
href="https://stackoverflow.com/a/12066696/9193881">https://stackoverflow.com/a/12066696/9193881</a>
<p>Author: <a href="https://stackoverflow.com/users/446591/brad-mace">Brad Mace</a>)
|
getJvmPid
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-local-recovery-and-allocation-test/src/main/java/org/apache/flink/streaming/tests/StickyAllocationAndLocalRecoveryTestJob.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-local-recovery-and-allocation-test/src/main/java/org/apache/flink/streaming/tests/StickyAllocationAndLocalRecoveryTestJob.java
|
Apache-2.0
|
@Override
public String toString() {
return "MapperTestInfo{"
+ "failingTask="
+ failingTask
+ ", killedJvm="
+ killedJvm
+ ", jvmPid="
+ jvmPid
+ ", taskNameWithSubtask='"
+ taskNameWithSubtask
+ '\''
+ ", allocationId='"
+ allocationId
+ '\''
+ '}';
}
|
The current allocation id of this task.
|
toString
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-local-recovery-and-allocation-test/src/main/java/org/apache/flink/streaming/tests/StickyAllocationAndLocalRecoveryTestJob.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-local-recovery-and-allocation-test/src/main/java/org/apache/flink/streaming/tests/StickyAllocationAndLocalRecoveryTestJob.java
|
Apache-2.0
|
public static void main(String[] args) throws Exception {
final ParameterTool params = ParameterTool.fromArgs(args);
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.fromData("Hello")
.map(
(MapFunction<String, String>)
value -> {
String messageFromPropsFile;
try (InputStream propFile =
ClassLoaderTestProgram.class
.getClassLoader()
.getResourceAsStream(
"parent-child-test.properties")) {
Properties properties = new Properties();
properties.load(propFile);
messageFromPropsFile = properties.getProperty("message");
}
// Enumerate all properties files we can find and store the
// messages in the
// order we find them. The order will be different between
// parent-first and
// child-first classloader mode.
Enumeration<URL> resources =
ClassLoaderTestProgram.class
.getClassLoader()
.getResources("parent-child-test.properties");
StringBuilder orderedProperties = new StringBuilder();
while (resources.hasMoreElements()) {
URL url = resources.nextElement();
try (InputStream in = url.openStream()) {
Properties properties = new Properties();
properties.load(in);
String messageFromEnumeratedPropsFile =
properties.getProperty("message");
orderedProperties.append(
messageFromEnumeratedPropsFile);
}
}
String message = ParentChildTestingVehicle.getMessage();
return message
+ ":"
+ messageFromPropsFile
+ ":"
+ orderedProperties;
})
.sinkTo(new DiscardingSink<>());
env.execute("ClassLoader Test Program");
}
|
End-to-end test program for verifying that the {@code classloader.resolve-order} setting is being
honored by Flink. We test this by creating a {@code ParentChildTestingVehicle} with a single
method that we call in the same package as the {@code ParentChildTestingVehicle} in the "lib"
package (flink-parent-child-classloading-test-lib-package) and verify the message in the
end-to-end test script.
|
main
|
java
|
apache/flink
|
flink-end-to-end-tests/flink-parent-child-classloading-test-program/src/main/java/org/apache/flink/streaming/tests/ClassLoaderTestProgram.java
|
https://github.com/apache/flink/blob/master/flink-end-to-end-tests/flink-parent-child-classloading-test-program/src/main/java/org/apache/flink/streaming/tests/ClassLoaderTestProgram.java
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.