code stringlengths 25 201k | docstring stringlengths 19 96.2k | func_name stringlengths 0 235 | language stringclasses 1
value | repo stringlengths 8 51 | path stringlengths 11 314 | url stringlengths 62 377 | license stringclasses 7
values |
|---|---|---|---|---|---|---|---|
@JsonIgnore
public SortFieldSpec[] getFieldSpecs() {
return fieldSpecs;
} | Gets all {@link SortFieldSpec} in the SortSpec. | getFieldSpecs | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/spec/SortSpec.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/spec/SortSpec.java | Apache-2.0 |
public SortFieldSpec getFieldSpec(int index) {
return fieldSpecs[index];
} | Gets {@link SortFieldSpec} of field at given index. | getFieldSpec | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/spec/SortSpec.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/spec/SortSpec.java | Apache-2.0 |
@JsonIgnore
public int getFieldSize() {
return fieldSpecs.length;
} | Gets num of field in the spec. | getFieldSize | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/spec/SortSpec.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/spec/SortSpec.java | Apache-2.0 |
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
final ExecEdge inputEdge = getInputEdges().get(0);
final Transformation<RowData> inputTransform =
(Transformation<RowData>... | Stream {@link ExecNode} which normalizes a changelog stream which maybe an upsert stream or a
changelog stream containing duplicate events. This node normalize such stream into a regular
changelog stream that contains INSERT/UPDATE_BEFORE/UPDATE_AFTER/DELETE records without
duplication. | translateToPlanInternal | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecChangelogNormalize.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecChangelogNormalize.java | Apache-2.0 |
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
final ExecEdge inputEdge = getInputEdges().get(0);
final Transformation<RowData> inputTransform =
(Transformation<RowData>... | For backward compatibility, if plan was generated without insert-only requirement, insertOnly
will be absent in the json (null) and we interpret that as false to use old code path and
avoid a problematic migration from {@link RowTimeDeduplicateFunction} to {@link
RowTimeDeduplicateKeepFirstRowFunction}. | translateToPlanInternal | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecDeduplicate.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecDeduplicate.java | Apache-2.0 |
@Override
OneInputStreamOperator<RowData, RowData> createDeduplicateOperator() {
int rowtimeIndex = -1;
for (int i = 0; i < inputRowType.getFieldCount(); ++i) {
if (TypeCheckUtils.isRowTime(inputRowType.getTypeAt(i))) {
rowtimeIndex = i;
... | Translator to create event time deduplicate operator. | createDeduplicateOperator | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecDeduplicate.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecDeduplicate.java | Apache-2.0 |
@Override
OneInputStreamOperator<RowData, RowData> createDeduplicateOperator() {
if (isMiniBatchEnabled()) {
checkState(!RankUtil.outputInsertOnlyInDeduplicate(config, keepLastRow));
checkState(!outputInsertOnly);
CountBundleTrigger<RowData> trigger = ... | Translator to create process time deduplicate operator. | createDeduplicateOperator | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecDeduplicate.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecDeduplicate.java | Apache-2.0 |
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
final Transformation<RowData> inputTransform =
(Transformation<RowData>) getInputEdges().get(0).translateToPlan(planner);
... | Stream {@link ExecNode} which will drop the UPDATE_BEFORE messages. This is usually used as an
optimization for the downstream operators that doesn't need the UPDATE_BEFORE messages, but the
upstream operator can't drop it by itself (e.g. the source). | translateToPlanInternal | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecDropUpdateBefore.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecDropUpdateBefore.java | Apache-2.0 |
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
final Transformation<RowData> inputTransform =
(Transformation<RowData>) getInputEdges().get(0).translateToPlan(planner);
... | This {@link ExecNode} represents a change of partitioning of the input elements for stream.
<p>TODO Remove this class once FLINK-21224 is finished. | translateToPlanInternal | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecExchange.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecExchange.java | Apache-2.0 |
@Override
@SuppressWarnings("unchecked")
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
final ExecEdge leftInputEdge = getInputEdges().get(0);
final ExecEdge rightInputEdge = getInputEdges().get(1);
final Transformati... | {@link StreamExecNode} for regular Joins.
<p>Regular joins are the most generic type of join in which any new records or changes to either
side of the join input are visible and are affecting the whole join result. | translateToPlanInternal | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecJoin.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecJoin.java | Apache-2.0 |
protected RowType checkAndConvertInputTypeIfNeeded(RowType inputRowType) {
final List<Integer> rowtimeFieldIndices = new ArrayList<>();
for (int i = 0; i < inputRowType.getFieldCount(); ++i) {
if (TypeCheckUtils.isRowTime(inputRowType.getTypeAt(i))) {
rowtimeFieldIndices.add(... | Stream {@link ExecNode} to write data into an external sink defined by a {@link TableSink}.
@param <T> The return type of the {@link TableSink}. | checkAndConvertInputTypeIfNeeded | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecLegacySink.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecLegacySink.java | Apache-2.0 |
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
final Transformation<RowData> inputTransform =
(Transformation<RowData>) getInputEdges().get(0).translateToPlan(planner);
... | Stream {@link ExecNode} which injects a mini-batch event in the streaming data. The mini-batch
event will be recognized as a boundary between two mini-batches. The following operators will
keep track of the mini-batch events and trigger mini-batch once the mini-batch id is advanced.
<p>NOTE: currently, we leverage the... | translateToPlanInternal | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecMiniBatchAssigner.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecMiniBatchAssigner.java | Apache-2.0 |
private RowType leftTypeForIndex(final List<RowType> inputRowTypes, final int inputIndex) {
if (inputIndex <= 0) {
throw new IllegalArgumentException(
"Input index must be greater than 0 for accumulated left type calculation");
}
final LogicalType[] fieldTypes =
... | Calculates the accumulated {@link RowType} of all inputs to the left of a given input index.
<p>For a multi-way join, the condition for input `i` is evaluated against the combined row of
all inputs from `0` to `i-1`. This method computes the {@link RowType} for this combined row,
which is essential for the code genera... | leftTypeForIndex | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecMultiJoin.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecMultiJoin.java | Apache-2.0 |
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
throw new UnsupportedOperationException("This method is not implemented yet.");
} | Stream {@link ExecNode} for multiple input which contains a sub-graph of {@link ExecNode}s. The
root node of the sub-graph is {@link #rootNode}, and the leaf nodes of the sub-graph are the
output nodes of the {@link #getInputNodes()}.
<p>The following example shows a graph of {@code ExecNode}s with multiple input node... | translateToPlanInternal | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecMultipleInput.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecMultipleInput.java | Apache-2.0 |
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
if (overSpec.getGroups().size() > 1) {
throw new TableException("All aggregates must be computed on the same window.");
}
... | Stream {@link ExecNode} for time-based over operator. | translateToPlanInternal | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecOverAggregate.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecOverAggregate.java | Apache-2.0 |
private KeyedProcessFunction<RowData, RowData, RowData> createUnboundedOverProcessFunction(
CodeGeneratorContext ctx,
List<AggregateCall> aggCalls,
List<RexLiteral> constants,
RowType aggInputRowType,
RowType inputRowType,
boolean isRowsClause,
... | Create an ProcessFunction for unbounded OVER window to evaluate final aggregate value.
@param ctx code generator context
@param aggCalls physical calls to aggregate functions and their output field names
@param constants the constants in aggregates parameters, such as sum(1)
@param aggInputRowType physical type of the... | createUnboundedOverProcessFunction | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecOverAggregate.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecOverAggregate.java | Apache-2.0 |
private KeyedProcessFunction<RowData, RowData, RowData> createBoundedOverProcessFunction(
CodeGeneratorContext ctx,
List<AggregateCall> aggCalls,
List<RexLiteral> constants,
RowType aggInputType,
RowType inputType,
boolean isRowsClause,
... | Create an ProcessFunction for ROWS clause bounded OVER window to evaluate final aggregate
value.
@param ctx code generator context
@param aggCalls physical calls to aggregate functions and their output field names
@param constants the constants in aggregates parameters, such as sum(1)
@param aggInputType physical type... | createBoundedOverProcessFunction | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecOverAggregate.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecOverAggregate.java | Apache-2.0 |
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
if (!config.get(InternalConfigOptions.TABLE_EXEC_NON_TEMPORAL_SORT_ENABLED)) {
throw new TableException("Sort on a non-time-attribute ... | {@link StreamExecNode} for Sort.
<p><b>NOTES:</b> This class is used for testing with bounded source now. If a query is converted
to this node in product environment, an exception will be thrown. | translateToPlanInternal | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecSort.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecSort.java | Apache-2.0 |
@Override
public Transformation<RowData> createInputFormatTransformation(
StreamExecutionEnvironment env,
InputFormat<RowData, ?> inputFormat,
InternalTypeInfo<RowData> outputTypeInfo,
String operatorName) {
// It's better to use StreamExecutionEnvironment.cre... | Stream {@link ExecNode} to read data from an external source defined by a {@link
ScanTableSource}. | createInputFormatTransformation | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecTableSourceScan.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecTableSourceScan.java | Apache-2.0 |
private Transformation<RowData> createSortProcTime(
RowType inputType,
Transformation<RowData> inputTransform,
ExecNodeConfig config,
ClassLoader classLoader) {
// if the order has secondary sorting fields in addition to the proctime
if (sortSpec.getFieldS... | Create Sort logic based on processing time. | createSortProcTime | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecTemporalSort.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecTemporalSort.java | Apache-2.0 |
private Transformation<RowData> createSortRowTime(
RowType inputType,
Transformation<RowData> inputTransform,
ExecNodeConfig config,
ClassLoader classLoader) {
GeneratedRecordComparator rowComparator = null;
if (sortSpec.getFieldSize() > 1) {
/... | Create Sort logic based on row time. | createSortRowTime | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecTemporalSort.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecTemporalSort.java | Apache-2.0 |
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
final ExecEdge inputEdge = getInputEdges().get(0);
final Transformation<RowData> inputTransform =
(Transformation<RowData>... | Stream {@link ExecNode} which generates watermark based on the input elements. | translateToPlanInternal | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecWatermarkAssigner.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecWatermarkAssigner.java | Apache-2.0 |
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
// validate window strategy
if (!windowing.isRowtime()) {
throw new TableException("Processing time Window Deduplication is no... | Stream {@link ExecNode} for Window Deduplicate. | translateToPlanInternal | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecWindowDeduplicate.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecWindowDeduplicate.java | Apache-2.0 |
public static String treeToString(ExecNode<?> node) {
return treeToString(node, new ArrayList<>(), false);
} | Converts an {@link ExecNode} tree to a string as a tree style.
<p>The following tree of {@link ExecNode}
<pre>{@code
Sink
|
Join
/ \
Filter1 Filter2
\ /
Project
|
Scan
}</pre>
<p>would be converted to the tree style as following:
<pre>{@code
Sink
+- Join
:- Filter1
... | treeToString | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | Apache-2.0 |
public static String treeToString(
ExecNode<?> node, List<ExecNode<?>> borders, boolean includingBorders) {
checkNotNull(node, "node should not be null.");
// convert to mutable list
List<ExecNode<?>> borderList =
new ArrayList<>(checkNotNull(borders, "borders should ... | Converts an {@link ExecNode} tree to a string as a tree style.
@param node the ExecNode to convert
@param borders node sets that stop visit when meet them
@param includingBorders Whether print the border nodes
@return the plan of ExecNode | treeToString | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | Apache-2.0 |
public static String dagToString(ExecNodeGraph execGraph) {
return dagToString(execGraph.getRootNodes());
} | Converts an {@link ExecNodeGraph} to a string as a tree style. see {@link
#dagToString(List)}. | dagToString | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | Apache-2.0 |
Integer getReuseId(ExecNode<?> node) {
return reuseIdBuilder.getReuseId(node);
} | Returns reuse id if the given node is a reuse node, else -1. | getReuseId | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | Apache-2.0 |
int addVisitedTimes(ExecNode<?> node) {
return mapNodeToVisitedTimes.compute(node, (k, v) -> v == null ? 1 : v + 1);
} | Updates visited times for given node, return the new times. | addVisitedTimes | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | Apache-2.0 |
@Override
public void visit(ExecNode<?> node) {
if (borders.contains(node)) {
return;
}
// if a node is visited more than once, this node is a reusable node
if (visitedNodes.contains(node)) {
if (!mapReuseNodeToReuseId.containsKey(... | Build reuse id in an ExecNode DAG or tree. | visit | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | Apache-2.0 |
public Integer getReuseId(ExecNode<?> node) {
return mapReuseNodeToReuseId.getOrDefault(node, -1);
} | Returns reuse id if the given node is a reuse node (that means it has multiple outputs),
else -1. | getReuseId | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | Apache-2.0 |
@Override
public void visit(ExecNode<?> node) {
if (updateVisitedTimes) {
reuseInfo.addVisitedTimes(node);
}
if (depth > 0) {
lastChildren
.subList(0, lastChildren.size() - 1)
.forEach(isLast -> s... | Convert ExecNode tree to string as a tree style. | visit | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodePlanDumper.java | Apache-2.0 |
public static <T> void setManagedMemoryWeight(
Transformation<T> transformation, long memoryBytes) {
if (memoryBytes > 0) {
final int weightInMebibyte = Math.max(1, (int) (memoryBytes >> 20));
final Optional<Integer> previousWeight =
transformation.declare... | Sets {Transformation#declareManagedMemoryUseCaseAtOperatorScope(ManagedMemoryUseCase, int)}
using the given bytes for {@link ManagedMemoryUseCase#OPERATOR}. | setManagedMemoryWeight | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodeUtil.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodeUtil.java | Apache-2.0 |
public static String getMultipleInputDescription(
ExecNode<?> rootNode,
List<ExecNode<?>> inputNodes,
List<InputProperty> inputProperties) {
String members =
ExecNodePlanDumper.treeToString(rootNode, inputNodes, true).replace("\n", "\\n");
StringBuilde... | Return description for multiple input node. | getMultipleInputDescription | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodeUtil.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodeUtil.java | Apache-2.0 |
public static void makeLegacySourceTransformationsBounded(Transformation<?> transformation) {
if (transformation instanceof LegacySourceTransformation) {
((LegacySourceTransformation<?>) transformation).setBoundedness(Boundedness.BOUNDED);
}
transformation.getInputs().forEach(ExecNod... | The planner might have more information than expressed in legacy source transformations. This
enforces planner information about boundedness to the affected transformations. | makeLegacySourceTransformationsBounded | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodeUtil.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/utils/ExecNodeUtil.java | Apache-2.0 |
protected void visitInputs(ExecNode<?> node) {
node.getInputEdges().forEach(n -> n.getSource().accept(this));
} | Implement of {@link ExecNodeVisitor}. An exec node may be visited multiple times if it's the
input of multiple nodes. | visitInputs | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/visitor/ExecNodeVisitorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/visitor/ExecNodeVisitorImpl.java | Apache-2.0 |
public static RexCall toUdfCall(RexCall call) {
final BridgingSqlFunction function = ShortcutUtils.unwrapBridgingSqlFunction(call);
assert function != null;
final List<StaticArgument> staticArgs =
function.getTypeInference()
.getStaticArguments()
... | Removes all columns added by the {@link SystemTypeInference} and returns a call that
corresponds to the signature of the UDF. | toUdfCall | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/physical/stream/StreamPhysicalProcessTableFunction.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/physical/stream/StreamPhysicalProcessTableFunction.java | Apache-2.0 |
private List<RelHint> mergeQueryHintsIfNecessary(List<RelHint> hints) {
List<RelHint> result = new ArrayList<>();
Map<String, Map<String, String>> kvHintsMap = new HashMap<>();
Map<String, String> listHintsMap = new HashMap<>();
for (RelHint hint : hints) {
String hintName =... | For KV hint like state ttl hint or lookup join hint, we need to merge the hints if there are
multiple hints and choose the first value with same key. | mergeQueryHintsIfNecessary | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/optimize/QueryHintsResolver.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/optimize/QueryHintsResolver.java | Apache-2.0 |
public static List<RelNode> resolvePhysicalPlan(
List<RelNode> expanded, TableConfig tableConfig) {
OptimizerConfigOptions.NonDeterministicUpdateStrategy handling =
tableConfig
.getConfiguration()
.get(
O... | Try to resolve the NDU problem if configured {@link
OptimizerConfigOptions#TABLE_OPTIMIZER_NONDETERMINISTIC_UPDATE_STRATEGY} is in `TRY_RESOLVE`
mode. Will raise an error if the NDU issues in the given plan can not be completely solved. | resolvePhysicalPlan | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/optimize/StreamNonDeterministicPhysicalPlanResolver.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/optimize/StreamNonDeterministicPhysicalPlanResolver.java | Apache-2.0 |
private static Join tryInjectRuntimeFilter(Join join) {
// check supported join type
if (!(isSuitableJoinType(join.getJoinType()))) {
return join;
}
// check supported join implementation
if (!(join instanceof BatchPhysicalHashJoin)
&& !(join instanc... | Judge whether the join is suitable, and try to inject runtime filter for it.
@param join the join node
@return the new join node with runtime filter. | tryInjectRuntimeFilter | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/optimize/program/FlinkRuntimeFilterProgram.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/optimize/program/FlinkRuntimeFilterProgram.java | Apache-2.0 |
private static RelNode createNewProbeWithRuntimeFilter(
RelNode buildSide,
RelNode probeSide,
ImmutableIntList buildIndices,
ImmutableIntList probeIndices) {
Optional<Double> buildRowCountOpt = getEstimatedRowCount(buildSide);
checkState(buildRowCountOpt.i... | Inject runtime filter and return the new probe side (without exchange).
@param buildSide the build side
@param probeSide the probe side
@param buildIndices the build projection
@param probeIndices the probe projection
@return the new probe side | createNewProbeWithRuntimeFilter | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/optimize/program/FlinkRuntimeFilterProgram.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/optimize/program/FlinkRuntimeFilterProgram.java | Apache-2.0 |
private static Optional<BuildSideInfo> findSuitableBuildSide(
RelNode rel,
ImmutableIntList buildIndices,
BiFunction<RelNode, ImmutableIntList, Boolean> buildSideChecker) {
if (rel instanceof Exchange) {
// found the desired exchange, inject builder here
... | Find a suitable build side. In order not to affect MultiInput, when the original build side
of runtime filter is not an {@link Exchange}, we need to push down the builder, until we find
an exchange and inject the builder there.
@param rel the original build side
@param buildIndices build indices
@param buildSideChecke... | findSuitableBuildSide | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/optimize/program/FlinkRuntimeFilterProgram.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/optimize/program/FlinkRuntimeFilterProgram.java | Apache-2.0 |
private static ImmutableIntList getInputIndices(
List<RexNode> projects, ImmutableIntList outputIndices) {
List<Integer> inputIndices = new ArrayList<>();
for (int k : outputIndices) {
RexNode rexNode = projects.get(k);
if (!(rexNode instanceof RexInputRef)) {
... | Try to map project output indices to it's input indices.If the output indices can't be fully
mapped to input, return empty. | getInputIndices | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/optimize/program/FlinkRuntimeFilterProgram.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/optimize/program/FlinkRuntimeFilterProgram.java | Apache-2.0 |
@Override
public RelNode visit(RelNode rel) {
if (rel instanceof Calc && rel.getInput(0) instanceof CommonPhysicalTableSourceScan) {
// if there is already one Calc, we should merge it and new projection node.
Calc calc = (Calc) rel;
RelNode input = calc.getInput();
... | Replace {@link CommonPhysicalTableSourceScan} with {@link Calc}. | visit | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/reuse/ReplaceScanWithCalcShuttle.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/reuse/ReplaceScanWithCalcShuttle.java | Apache-2.0 |
public List<RelNode> reuseDuplicatedScan(List<RelNode> relNodes) {
ReusableScanVisitor visitor = new ReusableScanVisitor();
relNodes.forEach(visitor::go);
for (List<CommonPhysicalTableSourceScan> reusableNodes :
visitor.digestToReusableScans().values()) {
if (reusabl... | Reuse sources.
<p>When there are projection and metadata push down, the generated source cannot be reused
because of the difference in digests. To make the source reusable, this class does the following:
<ul>
<li>First, find the same source, regardless of their projection and metadata push down.
<li>Union projections ... | reuseDuplicatedScan | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/reuse/ScanReuser.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/reuse/ScanReuser.java | Apache-2.0 |
private static RowType applyPhysicalAndMetadataPushDown(
DynamicTableSource source,
RowType originType,
List<SourceAbilitySpec> sourceAbilitySpecs,
int[][] physicalAndMetaFields,
int[][] projectedPhysicalFields,
List<String> usedMetadataNames) {
... | Generate sourceAbilitySpecs and newProducedType by projected physical fields and metadata
keys. | applyPhysicalAndMetadataPushDown | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/reuse/ScanReuser.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/reuse/ScanReuser.java | Apache-2.0 |
public static int indexOf(int[][] projectedFields, int[] fieldIndices) {
for (int i = 0; i < projectedFields.length; i++) {
int[] nested = projectedFields[i];
if (Arrays.equals(nested, fieldIndices)) {
return i;
}
}
throw new TableException(
            ... | Finds the index of a specific projected field in the nested array which is made up of all
projected fields index paths. | indexOf | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/reuse/ScanReuserUtils.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/reuse/ScanReuserUtils.java | Apache-2.0 |
public static List<SourceAbilitySpec> abilitySpecsWithoutEscaped(TableSourceTable table) {
List<SourceAbilitySpec> ret = new ArrayList<>();
SourceAbilitySpec[] specs = table.abilitySpecs();
for (SourceAbilitySpec spec : specs) {
if (!isEscapeDigest(spec)) {
ret.add(sp... | Returns a list of {@link SourceAbilitySpec} instances associated with a given {@link
TableSourceTable} instance, excluding some particular abilities, such as {@link
ProjectPushDownSpec}. These abilities don't need before do scan reuse. | abilitySpecsWithoutEscaped | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/reuse/ScanReuserUtils.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/reuse/ScanReuserUtils.java | Apache-2.0 |
public static boolean containsRexNodeSpecAfterProjection(CommonPhysicalTableSourceScan table) {
SourceAbilitySpec[] specs = table.tableSourceTable().abilitySpecs();
boolean hasProjection = false;
for (SourceAbilitySpec spec : specs) {
if (spec instanceof ProjectPushDownSpec || spec i... | Contains {@link SourceAbilitySpec#needAdjustFieldReferenceAfterProjection()} spec after
projection push down except {@link WatermarkPushDownSpec}. We have customized the solution
for watermark push down. | containsRexNodeSpecAfterProjection | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/reuse/ScanReuserUtils.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/reuse/ScanReuserUtils.java | Apache-2.0 |
@Override
public void onMatch(RelOptRuleCall call) {
TableScan oldRel = call.rel(0);
RelOptTable table = oldRel.getTable();
LogicalTableScan newRel =
LogicalTableScan.create(oldRel.getCluster(), table, oldRel.getHints());
call.transformTo(newRel);
} | Rule that converts an EnumerableTableScan into a LogicalTableScan. We need this rule because
Calcite creates an EnumerableTableScan when parsing a SQL query. We convert it into a
LogicalTableScan so we can merge the optimization process with any plan that might be created by
the Table API. | onMatch | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/EnumerableToLogicalTableScan.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/EnumerableToLogicalTableScan.java | Apache-2.0 |
@Override
default RelOptRule toRule() {
return new EventTimeTemporalJoinRewriteRule(this);
} | Configuration for {@link EventTimeTemporalJoinRewriteRule}.
<p>Operator tree:
<pre>{@code
Join (event time temporal)
/ \
RelNode [Calc]
\
Snapshot
\
[Calc]
\
WatermarkAssigner
\
[Calc]
... | toRule | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/EventTimeTemporalJoinRewriteRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/EventTimeTemporalJoinRewriteRule.java | Apache-2.0 |
public void onMatch(RelOptRuleCall call) {
final Aggregate aggregate = call.rel(0);
if (!AggregateUtil.containsAccurateDistinctCall(aggregate.getAggCallList())) {
return;
}
// Check unsupported aggregate which contains both approximate distinct call and
// accurate di... | Instance of the rule that operates only on logical expressions and generates a join. | onMatch | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkAggregateExpandDistinctAggregatesRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkAggregateExpandDistinctAggregatesRule.java | Apache-2.0 |
private RelBuilder convertMonopole(
RelBuilder relBuilder, Aggregate aggregate, List<Integer> argList, int filterArg) {
// For example,
// SELECT deptno, COUNT(DISTINCT sal), SUM(DISTINCT sal)
// FROM emp
// GROUP BY deptno
//
// becomes
//
... | Converts an aggregate relational expression that contains just one distinct aggregate
function (or perhaps several over the same arguments) and no non-distinct aggregate
functions. | convertMonopole | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkAggregateExpandDistinctAggregatesRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkAggregateExpandDistinctAggregatesRule.java | Apache-2.0 |
private Pair<Aggregate, List<RexNode>> toRegularAggregate(Aggregate aggregate) {
Tuple2<int[], Seq<AggregateCall>> auxGroupAndRegularAggCalls =
AggregateUtil.checkAndSplitAggCalls(aggregate);
final int[] auxGroup = auxGroupAndRegularAggCalls._1;
final Seq<AggregateCall> regularAg... | Convert aggregate with AUXILIARY_GROUP to regular aggregate. Return original aggregate and
null project if the given aggregate does not contain AUXILIARY_GROUP, else new aggregate
without AUXILIARY_GROUP and a project to permute output columns if needed. | toRegularAggregate | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkAggregateJoinTransposeRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkAggregateJoinTransposeRule.java | Apache-2.0 |
private static RelNode findBestOrder(RelBuilder relBuilder, LoptMultiJoin multiJoin) {
// Reorder all the inner join type input factors in the multiJoin.
List<Map<Set<Integer>, JoinPlan>> foundPlansForInnerJoin =
reorderInnerJoin(relBuilder, multiJoin);
Map<Set<Integer>, JoinPla... | Find best join reorder using bushy join reorder strategy. We will first try to reorder all
the inner join type input factors in the multiJoin. Then, we will add all outer join factors
to the top of reordered join tree generated by the first step. If there are factors, which
join condition is true, we will add these fac... | findBestOrder | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkBushyJoinReorderRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkBushyJoinReorderRule.java | Apache-2.0 |
private static RelNode createTopProject(
RelBuilder relBuilder,
LoptMultiJoin multiJoin,
JoinPlan finalPlan,
List<String> fieldNames) {
List<RexNode> newProjExprs = new ArrayList<>();
RexBuilder rexBuilder = multiJoin.getMultiJoinRel().getCluster().getRexB... | Creates the topmost projection that will sit on top of the selected join ordering. The
projection needs to match the original join ordering. Also, places any post-join filters on
top of the project. | createTopProject | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkBushyJoinReorderRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkBushyJoinReorderRule.java | Apache-2.0 |
@Override
public boolean matches(RelOptRuleCall call) {
final LogicalFilter topFilter = call.rel(0);
final LogicalCalc bottomCalc = call.rel(1);
return FlinkRelUtil.isMergeable(topFilter, bottomCalc);
} | Extends calcite's FilterCalcMergeRule, modification: only merge the two neighbouring {@link
Filter} and {@link Calc} if each non-deterministic {@link RexNode} of bottom {@link Calc} should
appear at most once in the implicit project list and condition of top {@link Filter}. | matches | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkFilterCalcMergeRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkFilterCalcMergeRule.java | Apache-2.0 |
private List<RexNode> getConjunctions(Filter filter) {
List<RexNode> conjunctions = conjunctions(filter.getCondition());
RexBuilder rexBuilder = filter.getCluster().getRexBuilder();
for (int i = 0; i < conjunctions.size(); i++) {
RexNode node = conjunctions.get(i);
if (no... | Get conjunctions of filter's condition but with collapsed {@code IS NOT DISTINCT FROM}
expressions if needed.
@param filter filter containing condition
@return condition conjunctions with collapsed {@code IS NOT DISTINCT FROM} expressions if any
@see RelOptUtil#conjunctions(RexNode) | getConjunctions | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkFilterJoinRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkFilterJoinRule.java | Apache-2.0 |
@Value.Default
default boolean isSmart() {
return false;
} | Whether to try to strengthen join-type, default false. | isSmart | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkFilterJoinRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkFilterJoinRule.java | Apache-2.0 |
private List<RelNode> combineInputs(
Join join,
RelNode left,
RelNode right,
List<ImmutableBitSet> projFieldsList,
List<int[]> joinFieldRefCountsList,
List<Boolean> inputNullGenFieldList) {
final List<RelNode> newInputs = new ArrayList<>();... | Combines the inputs into a LogicalJoin into an array of inputs.
@param join original join
@param left left input into join
@param right right input into join
@param projFieldsList returns a list of the new combined projection fields
@param joinFieldRefCountsList returns a list of the new combined join field reference ... | combineInputs | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | Apache-2.0 |
private void combineOuterJoins(
Join joinRel,
List<RelNode> combinedInputs,
RelNode left,
RelNode right,
List<Pair<JoinRelType, RexNode>> joinSpecs,
List<Boolean> inputNullGenFieldList) {
JoinRelType joinType = joinRel.getJoinType();
... | Combines the outer join conditions and join types from the left and right join inputs. If the
join itself is either a left or right outer join, then the join condition corresponding to
the join is also set in the position corresponding to the null-generating input into the
join. The join type is also set.
@param joinR... | combineOuterJoins | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | Apache-2.0 |
private void copyOuterJoinInfo(
MultiJoin multiJoin,
List<Pair<JoinRelType, RexNode>> destJoinSpecs,
int adjustmentAmount,
List<RelDataTypeField> srcFields,
List<RelDataTypeField> destFields) {
final List<Pair<JoinRelType, RexNode>> srcJoinSpecs =
... | Copies outer join data from a source MultiJoin to a new set of arrays. Also adjusts the
conditions to reflect the new position of an input if that input ends up being shifted to the
right.
@param multiJoin the source MultiJoin
@param destJoinSpecs the list where the join types and conditions will be copied
@param adju... | copyOuterJoinInfo | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | Apache-2.0 |
private List<RexNode> combineJoinFilters(
Join join, RelNode left, RelNode right, List<Boolean> inputNullGenFieldList) {
JoinRelType joinType = join.getJoinType();
JoinInfo joinInfo = join.analyzeCondition();
ImmutableIntList leftKeys = joinInfo.leftKeys;
ImmutableIntList rig... | Combines the join filters from the left and right inputs (if they are MultiJoinRels) with the
join filter in the joinrel into a single AND'd join filter, unless the inputs correspond to
null generating inputs in an outer join.
@param join Join
@param left Left input of the join
@param right Right input of the join
@re... | combineJoinFilters | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | Apache-2.0 |
private boolean canCombine(
RelNode input,
ImmutableIntList joinKeys,
JoinRelType joinType,
boolean nullGenerating,
boolean isLeft,
List<Boolean> inputNullGenFieldList,
int beginIndex) {
if (input instanceof MultiJoin) {
... | Returns whether an input can be merged into a given relational expression without changing
semantics.
@param input input into a join
@param nullGenerating true if the input is null generating
@return true if the input can be combined into a parent MultiJoin | canCombine | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | Apache-2.0 |
private RexNode shiftRightFilter(
Join joinRel, RelNode left, MultiJoin right, RexNode rightFilter) {
if (rightFilter == null) {
return null;
}
int nFieldsOnLeft = left.getRowType().getFieldList().size();
int nFieldsOnRight = right.getRowType().getFieldList().siz... | Shifts a filter originating from the right child of the LogicalJoin to the right, to reflect
the filter now being applied on the resulting MultiJoin.
@param joinRel the original LogicalJoin
@param left the left child of the LogicalJoin
@param right the right child of the LogicalJoin
@param rightFilter the filter origi... | shiftRightFilter | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | Apache-2.0 |
private com.google.common.collect.ImmutableMap<Integer, ImmutableIntList>
addOnJoinFieldRefCounts(
List<RelNode> multiJoinInputs,
int nTotalFields,
RexNode joinCondition,
List<int[]> origJoinFieldRefCounts) {
// count th... | Adds on to the existing join condition reference counts the references from the new join
condition.
@param multiJoinInputs inputs into the new MultiJoin
@param nTotalFields total number of fields in the MultiJoin
@param joinCondition the new join condition
@param origJoinFieldRefCounts existing join condition referenc... | addOnJoinFieldRefCounts | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | Apache-2.0 |
private List<RexNode> combinePostJoinFilters(Join joinRel, RelNode left, RelNode right) {
final List<RexNode> filters = new ArrayList<>();
if (right instanceof MultiJoin) {
final MultiJoin multiRight = (MultiJoin) right;
filters.add(
shiftRightFilter(joinRel, ... | Combines the post-join filters from the left and right inputs (if they are MultiJoinRels)
into a single AND'd filter.
@param joinRel the original LogicalJoin
@param left left child of the LogicalJoin
@param right right child of the LogicalJoin
@return combined post-join filters AND'd together | combinePostJoinFilters | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | Apache-2.0 |
public Void visitInputRef(RexInputRef inputRef) {
refCounts[inputRef.getIndex()]++;
return null;
} | Visitor that keeps a reference count of the inputs used by an expression. | visitInputRef | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkJoinToMultiJoinRule.java | Apache-2.0 |
@Override
public boolean matches(RelOptRuleCall call) {
final Project topProject = call.rel(0);
final LogicalCalc bottomCalc = call.rel(1);
return FlinkRelUtil.isMergeable(topProject, bottomCalc);
} | Extends calcite's ProjectMergeRule, modification: only merge the two neighbouring {@link Project}
and {@link Calc} if each non-deterministic {@link RexNode} of bottom {@link Calc} should appear
at most once in the project list of top {@link Project}. | matches | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkProjectCalcMergeRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkProjectCalcMergeRule.java | Apache-2.0 |
public void onMatch(RelOptRuleCall call) {
Project origProj = call.rel(0);
final Join join = call.rel(1);
if (!join.getJoinType().projectsRight()) {
return; // TODO: support SEMI/ANTI join later
}
// locate all fields referenced in the projection and join condition;
... | Creates a ProjectJoinTransposeRule with an explicit condition.
@param preserveExprCondition Condition for expressions that should be preserved in the
projection | onMatch | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkProjectJoinTransposeRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkProjectJoinTransposeRule.java | Apache-2.0 |
@Override
public boolean matches(RelOptRuleCall call) {
final Project topProject = call.rel(0);
final Project bottomProject = call.rel(1);
return FlinkRelUtil.isMergeable(topProject, bottomProject);
} | Extends calcite's ProjectMergeRule, modification: only merge the two neighbouring {@link
Project}s if each non-deterministic {@link RexNode} of bottom {@link Project} should appear at
most once in the project list of top {@link Project}. | matches | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkProjectMergeRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkProjectMergeRule.java | Apache-2.0 |
private RexNode adjustCondition(LogicalProject project, Join join) {
// create two RexPrograms -- the bottom one representing a
// concatenation of the project and the RHS of the semi/anti join and the
// top one representing the semi/anti join condition
RexBuilder rexBuilder = project.... | Pulls the project above the semi/anti join and returns the resulting semi/anti join
condition. As a result, the semi/anti join condition should be modified such that references
to the LHS of a semi/anti join should now reference the children of the project that's on the
LHS.
@param project LogicalProject on the LHS of... | adjustCondition | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkSemiAntiJoinProjectTransposeRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkSemiAntiJoinProjectTransposeRule.java | Apache-2.0 |
private boolean hasEqualsRefsOfDifferentTypes(
RelDataTypeFactory typeFactory, RexNode predicate) {
List<RexNode> conjunctions = RelOptUtil.conjunctions(predicate);
return conjunctions.stream()
.filter(node -> node instanceof RexCall && node.getKind() == SqlKind.EQUALS)
... | Returns true if two input refs of an equal call have different types in join condition, else
false. | hasEqualsRefsOfDifferentTypes | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/JoinConditionTypeCoerceRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/JoinConditionTypeCoerceRule.java | Apache-2.0 |
protected Tuple2<SupportsFilterPushDown.Result, TableSourceTable>
resolveFiltersAndCreateTableSourceTable(
RexNode[] convertiblePredicates,
TableSourceTable oldTableSourceTable,
TableScan scan,
RelBuilder relBuilder) {
/... | Resolves filters using the underlying sources {@link SupportsFilterPushDown} and creates a
new {@link TableSourceTable} with the supplied predicates.
@param convertiblePredicates Predicates to resolve
@param oldTableSourceTable TableSourceTable to copy
@param scan Underlying table scan to push to
@param relBuilder Bui... | resolveFiltersAndCreateTableSourceTable | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/PushFilterIntoSourceScanRuleBase.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/PushFilterIntoSourceScanRuleBase.java | Apache-2.0 |
protected boolean canPushdownFilter(TableSourceTable tableSourceTable) {
return tableSourceTable != null
&& tableSourceTable.tableSource() instanceof SupportsFilterPushDown
&& Arrays.stream(tableSourceTable.abilitySpecs())
                        .noneMatch(spec -> spec instanceo... | Determines whether we can push down the filter into the source. We cannot push a filter twice;
make sure FilterPushDownSpec has not been assigned as a capability.
@param tableSourceTable Table scan to attempt to push into
@return Whether we can push or not | canPushdownFilter | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/PushFilterIntoSourceScanRuleBase.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/PushFilterIntoSourceScanRuleBase.java | Apache-2.0 |
private Tuple2<List<RexNode>, List<RexNode>> partitionJoinFilters(MultiJoin multiJoin) {
List<RexNode> joinFilters = RelOptUtil.conjunctions(multiJoin.getJoinFilter());
Map<Boolean, List<RexNode>> partitioned =
joinFilters.stream()
.collect(Collectors.partitionin... | Partitions MultiJoin condition in equi join filters and non-equi join filters. | partitionJoinFilters | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/RewriteMultiJoinConditionRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/RewriteMultiJoinConditionRule.java | Apache-2.0 |
public void onMatch(RelOptRuleCall call) {
LogicalJoin join = call.rel(0);
RexNode condition = join.getCondition();
if (join.getCondition().isAlwaysTrue()) {
return;
}
RexNode simpleCondExp =
FlinkRexUtil.simplify(
join.getClu... | Planner rule that apply various simplifying transformations on join condition. e.g. reduce same
expressions: a=b AND b=a -> a=b, simplify boolean expressions: x = 1 AND FALSE -> FALSE, simplify
cast expressions: CAST('123' as integer) -> 123 | onMatch | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SimplifyJoinConditionRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SimplifyJoinConditionRule.java | Apache-2.0 |
private RelNode addProjectionForIn(RelNode relNode) {
if (relNode instanceof LogicalProject) {
return relNode;
}
RelDataType rowType = relNode.getRowType();
final List<RexNode> projects = new ArrayList<>();
for (int i = 0; i < rowType.getFieldCount(); ++i) {
... | Adds Projection to adjust the field index for join condition.
    <p>e.g. SQL: SELECT * FROM l WHERE b IN (SELECT COUNT(*) FROM r WHERE l.c = r.f), the rel in
SubQuery is `LogicalAggregate(group=[{}], EXPR$1=[COUNT()])`. After decorrelated, it was
changed to `LogicalAggregate(group=[{0}], EXPR$0=[COUNT()])`, and the output ... | addProjectionForIn | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | Apache-2.0 |
Frame getInvoke(RelNode r) {
return dispatcher.invoke(r);
} | Pull out all correlation conditions from a given subquery to top level, and rebuild the
subquery rel tree without correlation conditions.
<p>`public` is for reflection. We use ReflectiveVisitor instead of RelShuttle because
RelShuttle returns RelNode. | getInvoke | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | Apache-2.0 |
public Frame decorrelateRel(LogicalProject rel) {
final RelNode oldInput = rel.getInput();
Frame frame = getInvoke(oldInput);
if (frame == null) {
// If input has not been rewritten, do not rewrite this rel.
return null;
}
fina... | Rewrite LogicalProject.
<p>Rewrite logic: Pass along any correlated variables coming from the input.
@param rel the project rel to rewrite | decorrelateRel | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | Apache-2.0 |
public Frame decorrelateRel(LogicalFilter rel) {
final RelNode oldInput = rel.getInput();
Frame frame = getInvoke(oldInput);
if (frame == null) {
// If input has not been rewritten, do not rewrite this rel.
return null;
}
// Co... | Rewrite LogicalFilter.
<p>Rewrite logic: 1. If a Filter references a correlated field in its filter condition,
rewrite the Filter so that it references only non-correlated fields, and the conditions that
reference correlated fields will be pushed to its output. 2. If Filter does not reference correlated
variables, simply rewrite the f... | decorrelateRel | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | Apache-2.0 |
public Frame decorrelateRel(LogicalAggregate rel) {
// Aggregate itself should not reference corVars.
assert !cm.mapRefRelToCorRef.containsKey(rel);
final RelNode oldInput = rel.getInput();
final Frame frame = getInvoke(oldInput);
if (frame == null) {
... | Rewrites a {@link LogicalAggregate}.
<p>Rewrite logic: 1. Permute the group by keys to the front. 2. If the input of an
aggregate produces correlated variables, add them to the group list. 3. Change aggCalls
to reference the new project.
@param rel Aggregate to rewrite | decorrelateRel | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | Apache-2.0 |
public Frame decorrelateRel(Sort rel) {
// Sort itself should not reference corVars.
assert !cm.mapRefRelToCorRef.containsKey(rel);
// Sort only references field positions in collations field.
// The collations field in the newRel now need to refer to the
// ... | Rewrite Sort.
<p>Rewrite logic: change the collations field to reference the new input.
@param rel Sort to be rewritten | decorrelateRel | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | Apache-2.0 |
private void checkCorCondition(final LogicalFilter filter) {
if (mapSubQueryNodeToCorSet.containsKey(filter) && !hasUnsupportedCorCondition) {
final List<RexNode> corConditions = new ArrayList<>();
final List<RexNode> unsupportedCorConditions = new ArrayList<>();
... | check whether the predicate on filter has unsupported correlation condition. e.g. SELECT
* FROM l WHERE a IN (SELECT c FROM r WHERE l.b = r.d OR r.d > 10) | checkCorCondition | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | Apache-2.0 |
public Pair<RelNode, RexNode> getSubQueryEquivalent(RexSubQuery subQuery) {
return subQueryMap.get(subQuery);
} | Result describing the relational expression after decorrelation and where to find the
equivalent non-correlated expressions and correlated conditions. | getSubQueryEquivalent | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java | Apache-2.0 |
public static RexNode replaceGroupAuxiliaries(
RexNode node, LogicalWindow window, RelBuilder builder) {
RexBuilder rexBuilder = builder.getRexBuilder();
WindowType windowType = getWindowType(window);
if (node instanceof RexCall) {
RexCall c = (RexCall) node;
... | Replace group auxiliaries with field references. | replaceGroupAuxiliaries | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/WindowPropertiesRules.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/WindowPropertiesRules.java | Apache-2.0 |
private static boolean isWindowStart(RexNode node) {
if (node instanceof RexCall) {
RexCall c = (RexCall) node;
if (c.getOperator().isGroupAuxiliary()) {
return c.getOperator() == FlinkSqlOperatorTable.TUMBLE_START
|| c.getOperator() == FlinkSqlOpe... | Checks if a RexNode is a window start auxiliary function. | isWindowStart | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/WindowPropertiesRules.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/WindowPropertiesRules.java | Apache-2.0 |
private static boolean isWindowEnd(RexNode node) {
if (node instanceof RexCall) {
RexCall c = (RexCall) node;
if (c.getOperator().isGroupAuxiliary()) {
return c.getOperator() == FlinkSqlOperatorTable.TUMBLE_END
|| c.getOperator() == FlinkSqlOperato... | Checks if a RexNode is a window end auxiliary function. | isWindowEnd | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/WindowPropertiesRules.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/WindowPropertiesRules.java | Apache-2.0 |
private static boolean isWindowRowtime(RexNode node) {
if (node instanceof RexCall) {
RexCall c = (RexCall) node;
if (c.getOperator().isGroupAuxiliary()) {
return c.getOperator() == FlinkSqlOperatorTable.TUMBLE_ROWTIME
|| c.getOperator() == FlinkSq... | Checks if a RexNode is a window rowtime auxiliary function. | isWindowRowtime | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/WindowPropertiesRules.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/WindowPropertiesRules.java | Apache-2.0 |
private int[] extractUsedInputFields(
StreamPhysicalCalc calc,
StreamPhysicalChangelogNormalize changelogNormalize,
Set<Integer> primaryKeyIndices) {
RexProgram program = calc.getProgram();
List<RexNode> projectsAndCondition =
program.getProjectList().... | Extracts input fields which are used in the Calc node and the ChangelogNormalize node. | extractUsedInputFields | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/PushCalcPastChangelogNormalizeRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/PushCalcPastChangelogNormalizeRule.java | Apache-2.0 |
private void partitionPrimaryKeyPredicates(
List<RexNode> predicates,
Set<Integer> primaryKeyIndices,
List<RexNode> primaryKeyPredicates,
List<RexNode> remainingPredicates) {
for (RexNode predicate : predicates) {
int[] inputRefs = extractRefInputField... | Separates the given {@param predicates} into filters which affect only the primary key and
anything else. | partitionPrimaryKeyPredicates | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/PushCalcPastChangelogNormalizeRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/PushCalcPastChangelogNormalizeRule.java | Apache-2.0 |
private StreamPhysicalChangelogNormalize pushCalcThroughChangelogNormalize(
RelOptRuleCall call,
List<RexNode> primaryKeyPredicates,
List<RexNode> otherPredicates,
int[] usedInputFields) {
final StreamPhysicalChangelogNormalize changelogNormalize = call.rel(1);
... | Pushes {@param primaryKeyPredicates} and used fields project into the {@link
StreamPhysicalChangelogNormalize}. | pushCalcThroughChangelogNormalize | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/PushCalcPastChangelogNormalizeRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/PushCalcPastChangelogNormalizeRule.java | Apache-2.0 |
private StreamPhysicalCalc projectUsedFieldsWithConditions(
RelBuilder relBuilder, RelNode input, List<RexNode> conditions, int[] usedFields) {
final RelDataType inputRowType = input.getRowType();
final List<String> inputFieldNames = inputRowType.getFieldNames();
final RexProgramBuil... | Builds a new {@link StreamPhysicalCalc} on the input node with the given {@param conditions}
and a used fields projection. | projectUsedFieldsWithConditions | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/PushCalcPastChangelogNormalizeRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/PushCalcPastChangelogNormalizeRule.java | Apache-2.0 |
private void transformWithRemainingPredicates(
RelOptRuleCall call,
StreamPhysicalChangelogNormalize changelogNormalize,
int[] usedInputFields) {
final StreamPhysicalCalc calc = call.rel(0);
final RelBuilder relBuilder = call.builder();
final RexProgramBuilder... | Transforms the {@link RelOptRuleCall} to use {@param changelogNormalize} as the new input to
a {@link StreamPhysicalCalc} which uses {@param predicates} for the condition. | transformWithRemainingPredicates | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/PushCalcPastChangelogNormalizeRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/PushCalcPastChangelogNormalizeRule.java | Apache-2.0 |
private Map<Integer, Integer> buildFieldsMapping(int[] projectedInputRefs) {
final Map<Integer, Integer> fieldsOldToNewIndexMapping = new HashMap<>();
for (int i = 0; i < projectedInputRefs.length; i++) {
fieldsOldToNewIndexMapping.put(projectedInputRefs[i], i);
}
return fiel... | Build field reference mapping from old field index to new field index after projection. | buildFieldsMapping | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/PushCalcPastChangelogNormalizeRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/PushCalcPastChangelogNormalizeRule.java | Apache-2.0 |
@SafeVarargs
private final RelNode buildTreeInOrder(
RelNode leafNode, Tuple2<RelNode, RelTraitSet>... nodeAndTraits) {
checkArgument(nodeAndTraits.length >= 1);
RelNode inputNode = leafNode;
RelNode currentNode = null;
for (Tuple2<RelNode, RelTraitSet> nodeAndTrait : nod... | Build a new {@link RelNode} tree in the given nodes order which is in bottom-up direction. | buildTreeInOrder | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/WatermarkAssignerChangelogNormalizeTransposeRule.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/WatermarkAssignerChangelogNormalizeTransposeRule.java | Apache-2.0 |
public FlinkStatistic getStatistic() {
return this.statistic;
} | Returns the statistic of this table. | getStatistic | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/schema/FlinkPreparingTableBase.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/schema/FlinkPreparingTableBase.java | Apache-2.0 |
public SqlMonotonicity getMonotonicity(String columnName) {
return SqlMonotonicity.NOT_MONOTONIC;
} | Obtains whether a given column is monotonic.
@param columnName Column name
@return True if the given column is monotonic | getMonotonicity | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/schema/FlinkPreparingTableBase.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/schema/FlinkPreparingTableBase.java | Apache-2.0 |
public boolean isTemporal() {
return true;
} | We recognize all tables in FLink are temporal as they are changeable. | isTemporal | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/schema/FlinkPreparingTableBase.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/schema/FlinkPreparingTableBase.java | Apache-2.0 |
public double getRowCount() {
Double rowCnt = getStatistic().getRowCount();
return rowCnt == null ? DEFAULT_ROWCOUNT : rowCnt;
} | Returns an estimate of the number of rows in the table. | getRowCount | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/schema/FlinkPreparingTableBase.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/schema/FlinkPreparingTableBase.java | Apache-2.0 |
public Optional<Set<ImmutableBitSet>> uniqueKeysSet() {
Set<? extends Set<String>> uniqueKeys = statistic.getUniqueKeys();
if (uniqueKeys == null) {
return Optional.empty();
} else if (uniqueKeys.size() == 0) {
return Optional.of(ImmutableSet.of());
} else {
... | Returns unique keySets of current table. | uniqueKeysSet | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/schema/FlinkPreparingTableBase.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/schema/FlinkPreparingTableBase.java | Apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.