code stringlengths 25 201k | docstring stringlengths 19 96.2k | func_name stringlengths 0 235 | language stringclasses 1 value | repo stringlengths 8 51 | path stringlengths 11 314 | url stringlengths 62 377 | license stringclasses 7 values |
|---|---|---|---|---|---|---|---|
/**
 * Strips a top-level AS operator from every element of the given list,
 * replacing elements in place; the (mutated) list itself is returned.
 */
public static SqlNodeList stripListAs(SqlNodeList nodeList) {
    for (int idx = 0; idx < nodeList.size(); idx++) {
        final SqlNode original = nodeList.get(idx);
        final SqlNode stripped = stripAs(original);
        // Only write back when stripping actually produced a different node
        // (reference comparison, as in the original).
        if (original != stripped) {
            nodeList.set(idx, stripped);
        }
    }
    return nodeList;
} | Modifies a list of nodes, removing AS from each if present.
@see #stripAs | stripListAs | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/SqlUtil.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/SqlUtil.java | Apache-2.0 |
/**
 * Returns the chain of ancestors of the first node matching {@code predicate}
 * inside the tree rooted at {@code root}. The first element of the result is
 * {@code root} and the last is the matching node; throws {@link AssertionError}
 * when no node matches.
 */
public static ImmutableList<SqlNode> getAncestry(
SqlNode root, Predicate<SqlNode> predicate, Predicate<SqlNode> postPredicate) {
try {
// Genealogist signals success by throwing Util.FoundOne carrying the
// ancestor path; falling through the visit means no node matched.
new Genealogist(predicate, postPredicate).visitChild(root);
throw new AssertionError("not found: " + predicate + " in " + root);
} catch (Util.FoundOne e) {
//noinspection unchecked
return (ImmutableList<SqlNode>)
Objects.requireNonNull(e.getNode(), "Genealogist result");
}
} | Returns a list of ancestors of {@code predicate} within a given {@code SqlNode} tree.
<p>The first element of the list is {@code root}, and the last is the node that matched
{@code predicate}. Throws if no node matches. | getAncestry | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/SqlUtil.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/SqlUtil.java | Apache-2.0 |
/**
 * Converts parsed SQL hints into {@link RelHint}s, keeping only those that pass
 * validation against the given {@code hintStrategies}. Returns an empty list
 * when {@code sqlHints} is null or empty.
 *
 * @param hintStrategies the hint strategies used to validate each hint
 * @param sqlHints the parsed hint nodes, each expected to be a {@code SqlHint}
 * @return the validated {@code RelHint}s, possibly empty, never null
 */
public static List<RelHint> getRelHint(
HintStrategyTable hintStrategies, @Nullable SqlNodeList sqlHints) {
if (sqlHints == null || sqlHints.size() == 0) {
return ImmutableList.of();
}
final ImmutableList.Builder<RelHint> relHints = ImmutableList.builder();
for (SqlNode node : sqlHints) {
assert node instanceof SqlHint;
final SqlHint sqlHint = (SqlHint) node;
final String hintName = sqlHint.getName();
final RelHint.Builder builder = RelHint.builder(hintName);
// Copy the hint options into the builder according to how the hint
// was written: no options, a plain list, or key-value pairs.
switch (sqlHint.getOptionFormat()) {
case EMPTY:
// do nothing.
break;
case LITERAL_LIST:
case ID_LIST:
builder.hintOptions(sqlHint.getOptionList());
break;
case KV_LIST:
builder.hintOptions(sqlHint.getOptionKVPairs());
break;
default:
throw new AssertionError("Unexpected hint option format");
}
final RelHint relHint = builder.build();
if (hintStrategies.validateHint(relHint)) {
// Skips the hint if the validation fails.
relHints.add(relHint);
}
}
return relHints.build();
} | Returns an immutable list of {@link RelHint} from sql hints, with a given inherit path from
the root node.
<p>The inherit path would be empty list.
@param hintStrategies The hint strategies to validate the sql hints
@param sqlHints The sql hints nodes
@return the {@code RelHint} list | getRelHint | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/SqlUtil.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/SqlUtil.java | Apache-2.0 |
/**
 * Attaches the given hints to {@code rel}, after filtering them through the
 * supplied hint strategies. Returns a hinted copy when any hints survive the
 * filter, otherwise the original node unchanged.
 */
public static RelNode attachRelHint(
        HintStrategyTable hintStrategies, List<RelHint> hints, Hintable rel) {
    final RelNode relNode = (RelNode) rel;
    final List<RelHint> applicableHints = hintStrategies.apply(hints, relNode);
    return applicableHints.isEmpty() ? relNode : rel.attachHints(applicableHints);
} | Attach the {@code hints} to {@code rel} with specified hint strategies.
@param hintStrategies The strategies to filter the hints
@param hints The original hints to be attached
@return A copy of {@code rel} if there are any hints can be attached given the hint
strategies, or the original node if such hints don't exist | attachRelHint | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/SqlUtil.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/SqlUtil.java | Apache-2.0 |
/**
 * Recursively builds a balanced binary call tree over {@code operands[start, end)}
 * using the given binary operator. {@code start} is inclusive, {@code end} exclusive.
 */
private static SqlNode createBalancedCall(
        SqlOperator op, SqlParserPos pos, List<SqlNode> operands, int start, int end) {
    assert start < end && end <= operands.size();
    // Base case: a single operand needs no call wrapper.
    if (end - start == 1) {
        return operands.get(start);
    }
    // Split the range in half and combine the two balanced sub-trees.
    final int mid = start + (end - start) / 2;
    final SqlNode left = createBalancedCall(op, pos, operands, start, mid);
    final SqlNode right = createBalancedCall(op, pos, operands, mid, end);
    return op.createCall(pos, left, right);
} | Creates a balanced binary call from sql node list, start inclusive, end exclusive. | createBalancedCall | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/SqlUtil.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/SqlUtil.java | Apache-2.0 |
/** Renders this type's name; the unknown type always prints as "UNKNOWN". */
@Override
protected void generateTypeString(StringBuilder sb, boolean withDetail) {
// No extra detail is emitted even when withDetail is requested.
sb.append("UNKNOWN");
} | The unknown type. Similar to the NULL type, but is only equal to itself. | generateTypeString | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/type/SqlTypeFactoryImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/type/SqlTypeFactoryImpl.java | Apache-2.0 |
/**
 * Returns the namespace registered for the given node, never null; throws
 * (via requireNonNull) when no namespace has been registered for it.
 *
 * @param node node to compute the namespace for
 * @see #getNamespace(SqlNode)
 */
@API(since = "1.27", status = API.Status.INTERNAL)
SqlValidatorNamespace getNamespaceOrThrow(SqlNode node) {
return requireNonNull(getNamespace(node), () -> "namespace for " + node);
} | Namespace for the given node.
@param node node to compute the namespace for
@return namespace for the given node, never null
@see #getNamespace(SqlNode) | getNamespaceOrThrow | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Returns the namespace registered for the given node within a scope, never
 * null; throws (via requireNonNull) when none has been registered.
 *
 * @param node node to compute the namespace for
 * @param scope namespace scope, may be null
 * @see #getNamespace(SqlNode)
 */
@API(since = "1.27", status = API.Status.INTERNAL)
SqlValidatorNamespace getNamespaceOrThrow(SqlNode node, @Nullable SqlValidatorScope scope) {
return requireNonNull(
getNamespace(node, scope), () -> "namespace for " + node + ", scope " + scope);
} | Namespace for the given node.
@param node node to compute the namespace for
@param scope namespace scope
@return namespace for the given node, never null
@see #getNamespace(SqlNode) | getNamespaceOrThrow | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Extension hook for subclasses that convert UPDATE into MERGE via self-join.
 * The default returns null, meaning no such conversion takes place.
 *
 * @param table identifier for the table being updated
 * @param alias alias to use for qualifying columns, or null for unqualified refs
 * @return a unique-identifier expression, or null to prevent conversion
 */
protected @Nullable SqlNode getSelfJoinExprForUpdate(SqlNode table, String alias) {
return null;
} | Allows a subclass to provide information about how to convert an UPDATE into a MERGE via
self-join. If this method returns null, then no such conversion takes place. Otherwise, this
method should return a suitable unique identifier expression for the given table.
@param table identifier for table being updated
@param alias alias to use for qualifying columns in expression, or null for unqualified
references; if this is equal to {@value #UPDATE_SRC_ALIAS}, then column references have
been anonymized to "SYS$ANONx", where x is the 1-based column number.
@return expression for unique identifier, or null to prevent conversion | getSelfJoinExprForUpdate | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Builds the SELECT that putatively feeds rows into an UPDATE statement:
 * "SELECT *, expr0, expr1, ... FROM target WHERE condition", giving every
 * SET expression a generated, ordinal-based alias.
 *
 * @param call call to the UPDATE operator
 * @return the generated select statement
 */
protected SqlSelect createSourceSelectForUpdate(SqlUpdate call) {
    final SqlNodeList selectList = new SqlNodeList(SqlParserPos.ZERO);
    selectList.add(SqlIdentifier.star(SqlParserPos.ZERO));
    // Alias every SET expression with a generated unique name so that
    // "SET X = Y" cannot introduce a duplicate column named Y.
    int ordinal = 0;
    for (SqlNode expression : call.getSourceExpressionList()) {
        final String generatedAlias = SqlUtil.deriveAliasFromOrdinal(ordinal);
        selectList.add(SqlValidatorUtil.addAlias(expression, generatedAlias));
        ordinal++;
    }
    SqlNode fromClause = call.getTargetTable();
    final SqlIdentifier tableAlias = call.getAlias();
    if (tableAlias != null) {
        fromClause = SqlValidatorUtil.addAlias(fromClause, tableAlias.getSimple());
    }
    // Only the select list, FROM and WHERE slots are populated; every other
    // clause of the SqlSelect is left null.
    return new SqlSelect(
            SqlParserPos.ZERO,
            null,
            selectList,
            fromClause,
            call.getCondition(),
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null);
} | Creates the SELECT statement that putatively feeds rows into an UPDATE statement to be
updated.
@param call Call to the UPDATE operator
@return select statement | createSourceSelectForUpdate | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Builds the SELECT that putatively feeds rows into a DELETE statement:
 * "SELECT * FROM target WHERE condition".
 *
 * @param call call to the DELETE operator
 * @return the generated select statement
 */
protected SqlSelect createSourceSelectForDelete(SqlDelete call) {
    final SqlNodeList selectList = new SqlNodeList(SqlParserPos.ZERO);
    selectList.add(SqlIdentifier.star(SqlParserPos.ZERO));
    SqlNode fromClause = call.getTargetTable();
    final SqlIdentifier tableAlias = call.getAlias();
    if (tableAlias != null) {
        fromClause = SqlValidatorUtil.addAlias(fromClause, tableAlias.getSimple());
    }
    // Only the select list, FROM and WHERE slots are populated; every other
    // clause of the SqlSelect is left null.
    return new SqlSelect(
            SqlParserPos.ZERO,
            null,
            selectList,
            fromClause,
            call.getCondition(),
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null);
} | Creates the SELECT statement that putatively feeds rows into a DELETE statement to be
deleted.
@param call Call to the DELETE operator
@return select statement | createSourceSelectForDelete | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Registers a new namespace and, when a parent scope is supplied, adds it as a
 * child of that scope under the given alias. Derived classes may override to
 * tinker with namespaces as they are created.
 *
 * @param usingScope parent scope (which will look things up in this namespace), may be null
 * @param alias alias by which the parent refers to this namespace; required when usingScope != null
 * @param ns the namespace to register
 * @param forceNullable whether to force the namespace's type to be nullable
 */
protected void registerNamespace(
@Nullable SqlValidatorScope usingScope,
@Nullable String alias,
SqlValidatorNamespace ns,
boolean forceNullable) {
// Index the namespace by its parse-tree node; the node must exist.
namespaces.put(requireNonNull(ns.getNode(), () -> "ns.getNode() for " + ns), ns);
if (usingScope != null) {
assert alias != null
: "Registering namespace "
+ ns
+ ", into scope "
+ usingScope
+ ", so alias must not be null";
usingScope.addChild(ns, alias, forceNullable);
}
} | Registers a new namespace, and adds it as a child of its parent scope. Derived class can
override this method to tinker with namespaces as they are created.
@param usingScope Parent scope (which will want to look for things in this namespace)
@param alias Alias by which parent will refer to this namespace
@param ns Namespace
@param forceNullable Whether to force the type of namespace to be nullable | registerNamespace | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Factory for the namespace of a SELECT node; derived classes may override.
 *
 * @param select select node
 * @param enclosingNode enclosing node
 * @return select namespace
 */
protected SelectNamespace createSelectNamespace(SqlSelect select, SqlNode enclosingNode) {
return new SelectNamespace(this, select, enclosingNode);
} | Creates a namespace for a <code>SELECT</code> node. Derived class may override this factory
method.
@param select Select node
@param enclosingNode Enclosing node
@return Select namespace | createSelectNamespace | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Factory for the namespace of a set operation (UNION, INTERSECT, EXCEPT);
 * derived classes may override.
 *
 * @param call call to the set operation
 * @param enclosingNode enclosing node
 * @return set operation namespace
 */
protected SetopNamespace createSetopNamespace(SqlCall call, SqlNode enclosingNode) {
return new SetopNamespace(this, call, enclosingNode);
} | Creates a namespace for a set operation (<code>UNION</code>, <code>
INTERSECT</code>, or <code>EXCEPT</code>). Derived class may override this factory method.
@param call Call to set operation
@param enclosingNode Enclosing node
@return Set operation namespace | createSetopNamespace | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Registers a query in a parent scope; convenience overload that always
 * performs update validation (delegates with checkUpdate = true).
 *
 * @param parentScope parent scope which this scope turns to in order to resolve objects
 * @param usingScope scope whose child list this scope should add itself to, may be null
 * @param node query node
 * @param enclosingNode enclosing node of the query
 * @param alias name of this query within its parent; must be non-null if usingScope != null
 * @param forceNullable whether to force the type of the namespace to be nullable
 */
private void registerQuery(
SqlValidatorScope parentScope,
@Nullable SqlValidatorScope usingScope,
SqlNode node,
SqlNode enclosingNode,
@Nullable String alias,
boolean forceNullable) {
Preconditions.checkArgument(usingScope == null || alias != null);
registerQuery(parentScope, usingScope, node, enclosingNode, alias, forceNullable, true);
} | Registers a query in a parent scope.
@param parentScope Parent scope which this scope turns to in order to resolve objects
@param usingScope Scope whose child list this scope should add itself to
@param node Query node
@param alias Name of this query within its parent. Must be specified if usingScope != null | registerQuery | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Returns the parse tree node that makes {@code select} an aggregate query,
 * or null if it is not one. Checks GROUP BY first, then HAVING, then any
 * aggregate function call found by {@code getAgg}.
 */
protected @Nullable SqlNode getAggregate(SqlSelect select) {
    final SqlNode groupBy = select.getGroup();
    if (groupBy != null) {
        return groupBy;
    }
    final SqlNode having = select.getHaving();
    return having != null ? having : getAgg(select);
} | Returns the parse tree node (GROUP BY, HAVING, or an aggregate function call) that causes
{@code select} to be an aggregate query, or null if it is not an aggregate query.
<p>The node is useful context for error messages, but you cannot assume that the node is the
only aggregate function. | getAggregate | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Throws a validation error if {@code node} contains an aggregate or windowed
 * aggregate function, which is illegal in the given clause (e.g. "WHERE",
 * "GROUP BY", "ON"). Does nothing when no such call is found.
 */
private void validateNoAggs(AggFinder aggFinder, SqlNode node, String clause) {
    final SqlCall aggCall = aggFinder.findAgg(node);
    if (aggCall == null) {
        // No offending aggregate found; nothing to report.
        return;
    }
    final SqlOperator operator = aggCall.getOperator();
    if (operator == SqlStdOperatorTable.OVER) {
        // Windowed aggregates get a dedicated error message.
        throw newValidationError(aggCall, RESOURCE.windowedAggregateIllegalInClause(clause));
    }
    if (operator.isGroup() || operator.isGroupAuxiliary()) {
        // Group functions must appear in the GROUP BY clause.
        throw newValidationError(
                aggCall, RESOURCE.groupFunctionMustAppearInGroupByClause(operator.getName()));
    }
    throw new validationErrorHelper(aggCall, clause);
} | Throws an error if there is an aggregate or windowed aggregate in the given clause.
@param aggFinder Finder for the particular kind(s) of aggregate function
@param node Parse tree
@param clause Name of clause: "WHERE", "GROUP BY", "ON" | validateNoAggs | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Validates a column referenced in a USING clause, or an inferred join key in
 * a NATURAL join, against one input (left or right) of the join; returns the
 * column's type.
 *
 * @param id single-part identifier naming the join column
 * @param leftOrRight the join input the column must resolve in
 * @param scope scope used for roll-up checking
 * @param natural whether this is a NATURAL join (affects cross-type handling)
 * @return the resolved column's type
 */
private RelDataType validateCommonInputJoinColumn(
SqlIdentifier id, SqlNode leftOrRight, SqlValidatorScope scope, boolean natural) {
Preconditions.checkArgument(id.names.size() == 1);
final String name = id.names.get(0);
final SqlValidatorNamespace namespace = getNamespaceOrThrow(leftOrRight);
final RelDataType rowType = namespace.getRowType();
final SqlNameMatcher nameMatcher = catalogReader.nameMatcher();
final RelDataTypeField field = nameMatcher.field(rowType, name);
if (field == null) {
throw newValidationError(id, RESOURCE.columnNotFound(name));
}
// For a non-natural join over a cross type, check each constituent row type
// separately; otherwise check the single row type.
Collection<RelDataType> rowTypes;
if (!natural && rowType instanceof RelCrossType) {
final RelCrossType crossType = (RelCrossType) rowType;
rowTypes = new ArrayList<>(crossType.getTypes());
} else {
rowTypes = Collections.singleton(rowType);
}
// The column must be unambiguous within every constituent row type.
for (RelDataType rowType0 : rowTypes) {
if (nameMatcher.frequency(rowType0.getFieldNames(), name) > 1) {
throw newValidationError(id, RESOURCE.columnInUsingNotUnique(name));
}
}
checkRollUpInUsing(id, leftOrRight, scope);
return field.getType();
} | Validates a column in a USING clause, or an inferred join key in a NATURAL join, in the left
or right input to the join. | validateCommonInputJoinColumn | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Validates that a query can deliver the modality (RELATION vs STREAM) it
 * promises. Only called on the top-most SELECT or set operator in the tree;
 * recurses into set-operation operands.
 */
private void validateModality(SqlNode query) {
final SqlModality modality = deduceModality(query);
if (query instanceof SqlSelect) {
final SqlSelect select = (SqlSelect) query;
validateModality(select, modality, true);
} else if (query.getKind() == SqlKind.VALUES) {
// A VALUES clause cannot be streamed.
switch (modality) {
case STREAM:
throw newValidationError(query, Static.RESOURCE.cannotStreamValues());
default:
break;
}
} else {
// Remaining case: a set operation (UNION/INTERSECT/EXCEPT); all of its
// inputs must share the same modality as the whole query.
assert query.isA(SqlKind.SET_QUERY);
final SqlCall call = (SqlCall) query;
for (SqlNode operand : call.getOperandList()) {
if (deduceModality(operand) != modality) {
throw newValidationError(
operand, Static.RESOURCE.streamSetOpInconsistentInputs());
}
validateModality(operand);
}
}
} | Validates that a query can deliver the modality it promises. Only called on the top-most
SELECT or set operator in the tree. | validateModality | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Validates the ORDER BY clause of a SELECT statement: expands each order
 * item in the order scope (so select-list aliases are visible), writes the
 * expanded list back into the select, then validates every item.
 *
 * @param select select statement whose ORDER BY is validated; mutated in place
 */
protected void validateOrderList(SqlSelect select) {
// ORDER BY is validated in a scope where aliases in the SELECT clause
// are visible. For example, "SELECT empno AS x FROM emp ORDER BY x"
// is valid.
SqlNodeList orderList = select.getOrderList();
if (orderList == null) {
return;
}
// Intermediate ORDER BY is only legal for cursor expressions (or when the
// dialect explicitly allows it).
if (!shouldAllowIntermediateOrderBy()) {
if (!cursorSet.contains(select)) {
throw newValidationError(select, RESOURCE.invalidOrderByPos());
}
}
final SqlValidatorScope orderScope = getOrderScope(select);
requireNonNull(orderScope, "orderScope");
// First expand every order item, then replace the whole list on the
// select before validating, so validation sees the expanded forms.
List<SqlNode> expandList = new ArrayList<>();
for (SqlNode orderItem : orderList) {
SqlNode expandedOrderItem = expand(orderItem, orderScope);
expandList.add(expandedOrderItem);
}
SqlNodeList expandedOrderList = new SqlNodeList(expandList, orderList.getParserPosition());
select.setOrderBy(expandedOrderList);
for (SqlNode orderItem : expandedOrderList) {
validateOrderItem(select, orderItem);
}
} | Validates the ORDER BY clause of a SELECT statement.
@param select Select statement | validateOrderList | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Validates an expression: rejects aggregates that require OVER but lack it,
 * table-function calls in scalar position, and (unless 'naked measures' are
 * enabled) measure references outside aggregating scopes; then delegates to
 * the expression's own validation and the scope's validation.
 *
 * @param expr expression to validate
 * @param scope scope in which the expression occurs
 */
private void validateExpr(SqlNode expr, SqlValidatorScope scope) {
if (expr instanceof SqlCall) {
final SqlOperator op = ((SqlCall) expr).getOperator();
if (op.isAggregator() && op.requiresOver()) {
throw newValidationError(expr, RESOURCE.absentOverClause());
}
if (op instanceof SqlTableFunction) {
throw RESOURCE.cannotCallTableFunctionHere(op.getName()).ex();
}
}
// Unless 'naked measures' are enabled, a non-aggregating query cannot
// reference measure columns. (An aggregating query can use them as
// argument to the AGGREGATE function.)
if (!config.nakedMeasures()
&& !(scope instanceof AggregatingScope)
&& scope.isMeasureRef(expr)) {
throw newValidationError(expr, RESOURCE.measureMustBeInAggregateQuery());
}
// Call on the expression to validate itself.
expr.validateExpr(this, scope);
// Perform any validation specific to the scope. For example, an
// aggregating scope requires that expressions are valid aggregations.
scope.validateExpr(expr);
} | Validates an expression.
@param expr Expression
@param scope Scope in which expression occurs | validateExpr | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Processes a sub-query found in a select list: checks it is a scalar
 * sub-query (exactly one output column) and appends the appropriate entries
 * to the three output lists used to build the final row type.
 *
 * @param parentSelect base SqlSelect item
 * @param selectItem child SqlSelect from the select list
 * @param expandedSelectItems select items after processing; appended to
 * @param aliasList user or generated aliases; appended to
 * @param fieldList (alias, type) entries for each select item; appended to
 */
private void handleScalarSubQuery(
SqlSelect parentSelect,
SqlSelect selectItem,
List<SqlNode> expandedSelectItems,
Set<String> aliasList,
List<Map.Entry<String, RelDataType>> fieldList) {
// A scalar sub-query only has one output column.
if (1 != SqlNonNullableAccessors.getSelectList(selectItem).size()) {
throw newValidationError(selectItem, RESOURCE.onlyScalarSubQueryAllowed());
}
// No expansion in this routine just append to list.
expandedSelectItems.add(selectItem);
// Get or generate alias and add to list.
final String alias = SqlValidatorUtil.alias(selectItem, aliasList.size());
aliasList.add(alias);
final SelectScope scope = (SelectScope) getWhereScope(parentSelect);
final RelDataType type = deriveType(scope, selectItem);
setValidatedNodeType(selectItem, type);
// We do not want to pass on the RelRecordType returned
// by the sub-query. Just the type of the single expression
// in the sub-query select list.
assert type instanceof RelRecordType;
RelRecordType rec = (RelRecordType) type;
RelDataType nodeType = rec.getFieldList().get(0).getType();
// Scalar sub-queries may produce no row, so the result type is nullable.
nodeType = typeFactory.createTypeWithNullability(nodeType, true);
fieldList.add(Pair.of(alias, nodeType));
} | Processes SubQuery found in Select list. Checks that is actually Scalar sub-query and makes
proper entries in each of the 3 lists used to create the final rowType entry.
@param parentSelect base SqlSelect item
@param selectItem child SqlSelect from select list
@param expandedSelectItems Select items after processing
@param aliasList built from user or system values
@param fieldList Built up entries for each select list entry | handleScalarSubQuery | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Validates INSERT values against the constraint of a modifiable view. A
 * no-op unless the target table unwraps to a {@link ModifiableViewTable} and
 * the source is a call (i.e. a VALUES expression).
 *
 * @param validatorTable table that may wrap a ModifiableViewTable
 * @param source the values being inserted
 * @param targetRowType the target type for the view
 */
private void checkConstraint(
SqlValidatorTable validatorTable, SqlNode source, RelDataType targetRowType) {
final ModifiableViewTable modifiableViewTable =
validatorTable.unwrap(ModifiableViewTable.class);
if (modifiableViewTable != null && source instanceof SqlCall) {
final Table table = modifiableViewTable.getTable();
final RelDataType tableRowType = table.getRowType(typeFactory);
final List<RelDataTypeField> tableFields = tableRowType.getFieldList();
// Get the mapping from column indexes of the underlying table
// to the target columns and view constraints.
final Map<Integer, RelDataTypeField> tableIndexToTargetField =
SqlValidatorUtil.getIndexToFieldMap(tableFields, targetRowType);
final Map<Integer, RexNode> projectMap =
RelOptUtil.getColumnConstraints(
modifiableViewTable, targetRowType, typeFactory);
// Determine columns (indexed to the underlying table) that need
// to be validated against the view constraint.
@SuppressWarnings("RedundantCast")
final ImmutableBitSet targetColumns =
ImmutableBitSet.of((Iterable<Integer>) tableIndexToTargetField.keySet());
@SuppressWarnings("RedundantCast")
final ImmutableBitSet constrainedColumns =
ImmutableBitSet.of((Iterable<Integer>) projectMap.keySet());
@SuppressWarnings("assignment.type.incompatible")
List<@KeyFor({"tableIndexToTargetField", "projectMap"}) Integer>
constrainedTargetColumns = targetColumns.intersect(constrainedColumns).asList();
// Validate insert values against the view constraint.
final List<SqlNode> values = ((SqlCall) source).getOperandList();
for (final int colIndex : constrainedTargetColumns) {
final String colName = tableFields.get(colIndex).getName();
final RelDataTypeField targetField = tableIndexToTargetField.get(colIndex);
// Check every inserted row's value for this column.
for (SqlNode row : values) {
final SqlCall call = (SqlCall) row;
final SqlNode sourceValue = call.operand(targetField.getIndex());
final ValidationError validationError =
new ValidationError(
sourceValue,
RESOURCE.viewConstraintNotSatisfied(
colName, Util.last(validatorTable.getQualifiedName())));
RelOptUtil.validateValueAgainstConstraint(
sourceValue, projectMap.get(colIndex), validationError);
}
}
}
} | Validates insert values against the constraint of a modifiable view.
@param validatorTable Table that may wrap a ModifiableViewTable
@param source The values being inserted
@param targetRowType The target type for the view | checkConstraint | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Validates UPDATE assignments against the constraint of a modifiable view.
 * A no-op unless the target table unwraps to a {@link ModifiableViewTable}.
 *
 * @param validatorTable a {@link SqlValidatorTable} that may wrap a ModifiableViewTable
 * @param update the UPDATE parse tree node
 * @param targetRowType the target type
 */
private void checkConstraint(
SqlValidatorTable validatorTable, SqlUpdate update, RelDataType targetRowType) {
final ModifiableViewTable modifiableViewTable =
validatorTable.unwrap(ModifiableViewTable.class);
if (modifiableViewTable != null) {
final Table table = modifiableViewTable.getTable();
final RelDataType tableRowType = table.getRowType(typeFactory);
final Map<Integer, RexNode> projectMap =
RelOptUtil.getColumnConstraints(
modifiableViewTable, targetRowType, typeFactory);
final Map<String, Integer> nameToIndex =
SqlValidatorUtil.mapNameToIndex(tableRowType.getFieldList());
// Validate update values against the view constraint.
final List<String> targetNames =
SqlIdentifier.simpleNames(update.getTargetColumnList());
final List<SqlNode> sources = update.getSourceExpressionList();
// Pair each SET target column with its source expression and check
// the expression against that column's constraint, if any.
Pair.forEach(
targetNames,
sources,
(columnName, expr) -> {
final Integer columnIndex = nameToIndex.get(columnName);
if (projectMap.containsKey(columnIndex)) {
final RexNode columnConstraint = projectMap.get(columnIndex);
final ValidationError validationError =
new ValidationError(
expr,
RESOURCE.viewConstraintNotSatisfied(
columnName,
Util.last(validatorTable.getQualifiedName())));
RelOptUtil.validateValueAgainstConstraint(
expr, columnConstraint, validationError);
}
});
}
} | Validates updates against the constraint of a modifiable view.
@param validatorTable A {@link SqlValidatorTable} that may wrap a ModifiableViewTable
@param update The UPDATE parse tree node
@param targetRowType The target type | checkConstraint | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Checks the field count of an INSERT's source against the target row type,
 * and that every column's strategy is respected (non-nullable columns are
 * targeted; always-generated columns are not explicitly written).
 *
 * @param node target table sql identifier (error position)
 * @param table target table
 * @param strategies column strategies of the target table, indexed by field
 * @param targetRowTypeToValidate row type to validate, mainly for column strategies
 * @param realTargetRowType target row type excluding non-insertable (virtual) columns
 * @param source source node
 * @param logicalSourceRowType source node row type
 * @param logicalTargetRowType logical target row type (target columns only, or a
 *     left-prefix subset when the dialect allows subset insert)
 */
private void checkFieldCount(
SqlNode node,
SqlValidatorTable table,
List<ColumnStrategy> strategies,
RelDataType targetRowTypeToValidate,
RelDataType realTargetRowType,
SqlNode source,
RelDataType logicalSourceRowType,
RelDataType logicalTargetRowType) {
final int sourceFieldCount = logicalSourceRowType.getFieldCount();
final int targetFieldCount = logicalTargetRowType.getFieldCount();
final int targetRealFieldCount = realTargetRowType.getFieldCount();
if (sourceFieldCount != targetFieldCount && sourceFieldCount != targetRealFieldCount) {
// Allows the source row fields count to be equal with either
// the logical or the real(excludes columns that can not insert into)
// target row fields count.
throw newValidationError(
node, RESOURCE.unmatchInsertColumn(targetFieldCount, sourceFieldCount));
}
// Ensure that non-nullable fields are targeted.
for (final RelDataTypeField field : table.getRowType().getFieldList()) {
final RelDataTypeField targetField =
targetRowTypeToValidate.getField(field.getName(), true, false);
switch (strategies.get(field.getIndex())) {
case NOT_NULLABLE:
// A NOT NULL column without a default must appear in the target list.
assert !field.getType().isNullable();
if (targetField == null) {
throw newValidationError(node, RESOURCE.columnNotNullable(field.getName()));
}
break;
case NULLABLE:
assert field.getType().isNullable();
break;
case VIRTUAL:
case STORED:
// Generated columns may only be "written" via an explicit DEFAULT.
if (targetField != null
&& !isValuesWithDefault(source, targetField.getIndex())) {
throw newValidationError(
node, RESOURCE.insertIntoAlwaysGenerated(field.getName()));
}
break;
default:
break;
}
}
} | Check the field count of sql insert source and target node row type.
@param node target table sql identifier
@param table target table
@param strategies column strategies of target table
@param targetRowTypeToValidate row type to validate mainly for column strategies
@param realTargetRowType target table row type exclusive virtual columns
@param source source node
@param logicalSourceRowType source node row type
@param logicalTargetRowType logical target row type, contains only target columns if they are
specified or if the sql dialect allows subset insert, make a subset of fields(start from
the left first field) whose length is equals with the source row type fields number | checkFieldCount | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
/**
 * Checks the type assignment of an INSERT or UPDATE query: first tries exact
 * struct equality (sans nullability), then implicit type coercion if enabled,
 * and finally falls back to a field-by-field assignability check, raising a
 * validation error on the first incompatible pair.
 *
 * @param sourceScope scope of the query source, used to infer node types; may be null
 * @param table target table
 * @param sourceRowType source row type
 * @param targetRowType target row type (with all virtual columns, or none)
 * @param query the INSERT or UPDATE query
 */
protected void checkTypeAssignment(
@Nullable SqlValidatorScope sourceScope,
SqlValidatorTable table,
RelDataType sourceRowType,
RelDataType targetRowType,
final SqlNode query) {
// NOTE jvs 23-Feb-2006: subclasses may allow for extra targets
// representing system-maintained columns, so stop after all sources
// matched
boolean isUpdateModifiableViewTable = false;
if (query instanceof SqlUpdate) {
// For UPDATE, compare only the trailing fields that correspond to the
// SET target columns.
final SqlNodeList targetColumnList =
requireNonNull(((SqlUpdate) query).getTargetColumnList());
final int targetColumnCount = targetColumnList.size();
targetRowType =
SqlTypeUtil.extractLastNFields(typeFactory, targetRowType, targetColumnCount);
sourceRowType =
SqlTypeUtil.extractLastNFields(typeFactory, sourceRowType, targetColumnCount);
isUpdateModifiableViewTable = table.unwrap(ModifiableViewTable.class) != null;
}
if (SqlTypeUtil.equalAsStructSansNullability(
typeFactory, sourceRowType, targetRowType, null)) {
// Returns early if source and target row type equals sans nullability.
return;
}
if (config.typeCoercionEnabled() && !isUpdateModifiableViewTable) {
// Try type coercion first if implicit type coercion is allowed.
boolean coerced =
typeCoercion.querySourceCoercion(
sourceScope, sourceRowType, targetRowType, query);
if (coerced) {
return;
}
}
// Fall back to default behavior: compare the type families.
List<RelDataTypeField> sourceFields = sourceRowType.getFieldList();
List<RelDataTypeField> targetFields = targetRowType.getFieldList();
final int sourceCount = sourceFields.size();
for (int i = 0; i < sourceCount; ++i) {
RelDataType sourceType = sourceFields.get(i).getType();
RelDataType targetType = targetFields.get(i).getType();
if (!SqlTypeUtil.canAssignFrom(targetType, sourceType)) {
SqlNode node = getNthExpr(query, i, sourceCount);
// Dynamic parameters are typed later; skip them here.
if (node instanceof SqlDynamicParam) {
continue;
}
String targetTypeString;
String sourceTypeString;
// Include charset detail in the message only when it is the cause
// of the mismatch.
if (SqlTypeUtil.areCharacterSetsMismatched(sourceType, targetType)) {
sourceTypeString = sourceType.getFullTypeString();
targetTypeString = targetType.getFullTypeString();
} else {
sourceTypeString = sourceType.toString();
targetTypeString = targetType.toString();
}
throw newValidationError(
node,
RESOURCE.typeNotAssignable(
targetFields.get(i).getName(), targetTypeString,
sourceFields.get(i).getName(), sourceTypeString));
}
}
} | Checks the type assignment of an INSERT or UPDATE query.
<p>Skip the virtual columns(can not insert into) type assignment check if the source fields
count equals with the real target table fields count, see how #checkFieldCount was used.
@param sourceScope Scope of query source which is used to infer node type
@param table Target table
@param sourceRowType Source row type
@param targetRowType Target row type, it should either contain all the virtual columns (can
not insert into) or exclude all the virtual columns
@param query The query | checkTypeAssignment | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
    /**
     * Validates that the requested access type is allowed on the given table.
     *
     * <p>No-op when {@code table} is null; otherwise throws a validation error anchored at
     * {@code node} if the table's allowed-access set does not permit {@code requiredAccess}.
     *
     * @param node SQL node to attach the validation error to if access is denied
     * @param table Table being accessed, or null
     * @param requiredAccess Access requested on the table (e.g. SELECT, INSERT)
     */
    private void validateAccess(
            SqlNode node, @Nullable SqlValidatorTable table, SqlAccessEnum requiredAccess) {
        if (table != null) {
            SqlAccessType access = table.getAllowedAccess();
            if (!access.allowsAccess(requiredAccess)) {
                throw newValidationError(
                        node,
                        RESOURCE.accessNotAllowed(
                                requiredAccess.name(), table.getQualifiedName().toString()));
            }
        }
    } | Validates access to a table.
@param table Table
@param requiredAccess Access requested on table | validateAccess | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
    /**
     * Validates a snapshot (FOR SYSTEM_TIME AS OF) expression over a table.
     *
     * <p>Only acts when {@code node} is a SNAPSHOT call: checks that the period expression is of
     * a supported timestamp type, and that the underlying table is temporal (skipping that check
     * for CTEs, which have no concrete underlying table).
     *
     * @param node The node to validate
     * @param scope Validator scope used to derive the type of the period expression
     * @param ns The namespace used to look up the target table
     */
    private void validateSnapshot(
            SqlNode node, @Nullable SqlValidatorScope scope, SqlValidatorNamespace ns) {
        if (node.getKind() == SqlKind.SNAPSHOT) {
            SqlSnapshot snapshot = (SqlSnapshot) node;
            SqlNode period = snapshot.getPeriod();
            RelDataType dataType = deriveType(requireNonNull(scope, "scope"), period);
            // ----- FLINK MODIFICATION BEGIN -----
            if (!(dataType.getSqlTypeName() == SqlTypeName.TIMESTAMP
                    || dataType.getSqlTypeName() == SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE)) {
                throw newValidationError(
                        period,
                        Static.RESOURCE.illegalExpressionForTemporal(
                                dataType.getSqlTypeName().getName()));
            }
            if (ns instanceof IdentifierNamespace && ns.resolve() instanceof WithItemNamespace) {
                // If the snapshot is used over a CTE, then we don't have a concrete underlying
                // table to operate on. This will be rechecked later in the planner rules.
                return;
            }
            // ----- FLINK MODIFICATION END -----
            SqlValidatorTable table = getTable(ns);
            if (!table.isTemporal()) {
                List<String> qualifiedName = table.getQualifiedName();
                String tableName = qualifiedName.get(qualifiedName.size() - 1);
                throw newValidationError(
                        snapshot.getTableRef(), Static.RESOURCE.notTemporalTable(tableName));
            }
        }
    } | Validates snapshot to a table.
@param node The node to validate
@param scope Validator scope to derive type
@param ns The namespace to lookup table | validateSnapshot | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
    /**
     * Supplies a validation error built from the node and exception captured when this
     * instance was created.
     */
    @Override
    public CalciteContextException get() {
        return newValidationError(sqlNode, validatorException);
    } | Throws a validator exception with access to the validator context. The exception is
determined when an instance is created. | get | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
    /**
     * Visits an identifier, first attempting to expand it as a common column of a USING/NATURAL
     * join (which yields a COALESCE over both sides); otherwise falls back to the default
     * identifier expansion.
     */
    @Override
    public @Nullable SqlNode visit(SqlIdentifier id) {
        final SqlNode node =
                expandCommonColumn(select, id, (SelectScope) getScope(), validator);
        // A different node means the identifier was a common column and was rewritten.
        if (node != id) {
            return node;
        } else {
            return super.visit(id);
        }
    } | Converts an expression into canonical form by fully-qualifying any identifiers. For common
columns in USING, it will be converted to COALESCE(A.col, B.col) AS col. | visit | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
    /**
     * Applies this visitor to {@code node} and returns the (non-null) result.
     *
     * @param node node to rewrite
     * @return rewritten node; never null
     */
    public SqlNode go(SqlNode node) {
        return requireNonNull(
                node.accept(this), () -> "NavigationModifier returned for " + node);
    } | Modify the nodes in navigation function such as FIRST, LAST, PREV AND NEXT.
    /**
     * Looks up a field of {@code rowType} by name using the catalog reader's name matcher;
     * asserts that the field exists.
     *
     * @param name field name to resolve
     * @return the resolved field; never null
     */
    private RelDataTypeField field(String name) {
        RelDataTypeField field = catalogReader.nameMatcher().field(rowType, name);
        assert field != null : "field " + name + " was not found in " + rowType;
        return field;
    } | Permutation of fields in NATURAL JOIN or USING.
    /**
     * Reorders {@code selectItems} and {@code fields} in place according to {@code sources}.
     *
     * <p>When a source group has more than one position (a common column of a USING/NATURAL
     * join), the items are merged into a single COALESCE expression aliased to the common name,
     * with the least restrictive comparison type; the result is nullable only if both inputs
     * are nullable. Both lists are cleared and rebuilt.
     *
     * @param selectItems select items to permute (modified in place)
     * @param fields corresponding (name, type) pairs (modified in place)
     */
    public void permute(
            List<SqlNode> selectItems, List<Map.Entry<String, RelDataType>> fields) {
        // Identity permutation: nothing to do.
        if (trivial) {
            return;
        }
        final List<SqlNode> oldSelectItems = ImmutableList.copyOf(selectItems);
        selectItems.clear();
        final List<Map.Entry<String, RelDataType>> oldFields = ImmutableList.copyOf(fields);
        fields.clear();
        for (ImmutableIntList source : sources) {
            final int p0 = source.get(0);
            Map.Entry<String, RelDataType> field = oldFields.get(p0);
            final String name = field.getKey();
            RelDataType type = field.getValue();
            SqlNode selectItem = oldSelectItems.get(p0);
            // Fold any remaining positions of this group into a COALESCE.
            for (int p1 : Util.skip(source)) {
                final Map.Entry<String, RelDataType> field1 = oldFields.get(p1);
                final SqlNode selectItem1 = oldSelectItems.get(p1);
                final RelDataType type1 = field1.getValue();
                // output is nullable only if both inputs are
                final boolean nullable = type.isNullable() && type1.isNullable();
                RelDataType currentType = type;
                final RelDataType type2 =
                        requireNonNull(
                                SqlTypeUtil.leastRestrictiveForComparison(
                                        typeFactory, type, type1),
                                () ->
                                        "leastRestrictiveForComparison for types "
                                                + currentType
                                                + " and "
                                                + type1);
                selectItem =
                        SqlStdOperatorTable.AS.createCall(
                                SqlParserPos.ZERO,
                                SqlStdOperatorTable.COALESCE.createCall(
                                        SqlParserPos.ZERO,
                                        maybeCast(selectItem, type, type2),
                                        maybeCast(selectItem1, type1, type2)),
                                new SqlIdentifier(name, SqlParserPos.ZERO));
                type = typeFactory.createTypeWithNullability(type2, nullable);
            }
            fields.add(Pair.of(name, type));
            selectItems.add(selectItem);
        }
    } | Moves fields according to the permutation.
    /**
     * Returns whether aliases in this clause should be replaced by their expanded values.
     *
     * <p>GROUP BY and HAVING defer to the SQL conformance of {@code config}; QUALIFY always
     * replaces aliases.
     *
     * @param config validator configuration supplying the conformance
     * @return true if aliases should be expanded for this clause
     */
    boolean shouldReplaceAliases(Config config) {
        switch (this) {
            case GROUP_BY:
                return config.conformance().isGroupByAlias();
            case HAVING:
                return config.conformance().isHavingAlias();
            case QUALIFY:
                return true;
            default:
                // No other clause values are expected here.
                throw Util.unexpected(this);
        }
    } | Determines if the extender should replace aliases with expanded values. For example:
<blockquote>
<pre>{@code
SELECT a + a as twoA
GROUP BY twoA
}</pre>
</blockquote>
<p>turns into
<blockquote>
<pre>{@code
SELECT a + a as twoA
GROUP BY a + a
}</pre>
</blockquote>
<p>This is determined both by the clause and the config.
@param config The configuration
@return Whether we should replace the alias with its expanded value | shouldReplaceAliases | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | Apache-2.0 |
    /**
     * Decorrelates a query using a default {@link RelBuilder}.
     *
     * @deprecated Use the overload that accepts a {@link RelBuilder}; to be removed before 2.0.
     * @param rootRel root of the plan to decorrelate
     * @return decorrelated plan
     */
    @Deprecated // to be removed before 2.0
    public static RelNode decorrelateQuery(RelNode rootRel) {
        final RelBuilder relBuilder =
                RelFactories.LOGICAL_BUILDER.create(rootRel.getCluster(), null);
        return decorrelateQuery(rootRel, relBuilder);
    } | Built during decorrelation, of rel to all the newly created correlated variables in its
output, and to map old input positions to new input positions. This is from the view point of
the parent rel of a new rel. | decorrelateQuery | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | Apache-2.0 |
    /**
     * Adds a value generator to the frame's input if {@code rel} references correlated
     * variables that the input does not already provide; otherwise returns the frame unchanged.
     *
     * @param rel relational expression whose correlated-variable needs are checked
     * @param frame current decorrelation frame for {@code rel}'s input
     * @return possibly-augmented frame
     */
    private Frame maybeAddValueGenerator(RelNode rel, Frame frame) {
        final CorelMap cm1 = new CorelMapBuilder().build(frame.r, rel);
        // No correlated references at all: nothing to provide.
        if (!cm1.mapRefRelToCorRef.containsKey(rel)) {
            return frame;
        }
        final Collection<CorRef> needs = cm1.mapRefRelToCorRef.get(rel);
        final ImmutableSortedSet<CorDef> haves = frame.corDefOutputs.keySet();
        // All needed correlated variables are already produced by the input.
        if (hasAll(needs, haves)) {
            return frame;
        }
        return decorrelateInputWithValueGenerator(rel, frame);
    } | Adds a value generator to satisfy the correlating variables used by a relational expression,
if those variables are not already provided by its input. | maybeAddValueGenerator | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | Apache-2.0 |
    /**
     * Returns whether every {@code CorRef} in {@code corRefs} is satisfied by at least one
     * {@code CorDef} in {@code corDefs}.
     *
     * @param corRefs required correlated references
     * @param corDefs available correlated definitions
     * @return true if all references are satisfied
     */
    private static boolean hasAll(Collection<CorRef> corRefs, Collection<CorDef> corDefs) {
        for (CorRef corRef : corRefs) {
            if (!has(corDefs, corRef)) {
                return false;
            }
        }
        return true;
    } | Returns whether all of a collection of {@link CorRef}s are satisfied by at least one of a
collection of {@link CorDef}s. | hasAll | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | Apache-2.0 |
    /**
     * Pulls {@code project} above {@code join} (the project was the join's RHS input),
     * projecting all LHS fields followed by the original project expressions, with correlation
     * expressions removed and nullability enforced for expressions coming from the
     * null-generating side.
     *
     * @param join Join
     * @param project Original project as the right-hand input of the join
     * @param nullIndicatorPos Position of the null indicator field in the join's row type
     * @return the subtree with the new Project at the root
     */
    private RelNode projectJoinOutputWithNullability(
            Join join, Project project, int nullIndicatorPos) {
        final RelDataTypeFactory typeFactory = join.getCluster().getTypeFactory();
        final RelNode left = join.getLeft();
        final JoinRelType joinType = join.getJoinType();
        // Reference to the null indicator; its type is forced nullable.
        RexInputRef nullIndicator =
                new RexInputRef(
                        nullIndicatorPos,
                        typeFactory.createTypeWithNullability(
                                join.getRowType().getFieldList().get(nullIndicatorPos).getType(),
                                true));
        // now create the new project
        List<Pair<RexNode, String>> newProjExprs = new ArrayList<>();
        // project everything from the LHS and then those from the original
        // projRel
        List<RelDataTypeField> leftInputFields = left.getRowType().getFieldList();
        for (int i = 0; i < leftInputFields.size(); i++) {
            newProjExprs.add(RexInputRef.of2(i, leftInputFields));
        }
        // Marked where the projected expr is coming from so that the types will
        // become nullable for the original projections which are now coming out
        // of the nullable side of the OJ.
        boolean projectPulledAboveLeftCorrelator = joinType.generatesNullsOnRight();
        for (Pair<RexNode, String> pair : project.getNamedProjects()) {
            RexNode newProjExpr =
                    removeCorrelationExpr(
                            pair.left, projectPulledAboveLeftCorrelator, nullIndicator);
            newProjExprs.add(Pair.of(newProjExpr, pair.right));
        }
        return relBuilder
                .push(join)
                .projectNamed(Pair.left(newProjExprs), Pair.right(newProjExprs), true)
                .build();
    } | Pulls project above the join from its RHS input. Enforces nullability for join output.
@param join Join
@param project Original project as the right-hand input of the join
@param nullIndicatorPos Position of null indicator
@return the subtree with the new Project at the root | projectJoinOutputWithNullability | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | Apache-2.0 |
    /**
     * Pulls {@code project} above {@code correlate} (the project was the correlate's RHS
     * input), projecting all LHS fields followed by the original project expressions with
     * correlation expressions removed; positions in {@code isCount} identify COUNT aggregate
     * calls that need special rewriting.
     *
     * @param correlate Correlate
     * @param project the original project as the RHS input of the correlate
     * @param isCount Positions which are calls to the COUNT aggregation function
     * @return the subtree with the new Project at the root
     */
    private RelNode aggregateCorrelatorOutput(
            Correlate correlate, Project project, Set<Integer> isCount) {
        final RelNode left = correlate.getLeft();
        final JoinRelType joinType = correlate.getJoinType();
        // now create the new project
        final List<Pair<RexNode, String>> newProjects = new ArrayList<>();
        // Project everything from the LHS and then those from the original
        // project
        final List<RelDataTypeField> leftInputFields = left.getRowType().getFieldList();
        for (int i = 0; i < leftInputFields.size(); i++) {
            newProjects.add(RexInputRef.of2(i, leftInputFields));
        }
        // Marked where the projected expr is coming from so that the types will
        // become nullable for the original projections which are now coming out
        // of the nullable side of the OJ.
        boolean projectPulledAboveLeftCorrelator = joinType.generatesNullsOnRight();
        for (Pair<RexNode, String> pair : project.getNamedProjects()) {
            RexNode newProjExpr =
                    removeCorrelationExpr(pair.left, projectPulledAboveLeftCorrelator, isCount);
            newProjects.add(Pair.of(newProjExpr, pair.right));
        }
        return relBuilder
                .push(correlate)
                .projectNamed(Pair.left(newProjects), Pair.right(newProjects), true)
                .build();
    } | Pulls a {@link Project} above a {@link Correlate} from its RHS input. Enforces nullability
for join output.
@param correlate Correlate
@param project the original project as the RHS input of the join
@param isCount Positions which are calls to the <code>COUNT</code> aggregation function
@return the subtree with the new Project at the root | aggregateCorrelatorOutput | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | Apache-2.0 |
    /**
     * Checks that all correlated references in {@code project} and {@code filter} refer only to
     * correlated variables provided by {@code correlate}, and that every correlated reference
     * in the filter condition is used as a join key.
     *
     * @param correlate Correlate
     * @param project The original Project as the RHS input of the join, or null
     * @param filter Filter, or null
     * @param correlatedJoinKeys Correlated join keys extracted from the filter; non-null when
     *     {@code filter} is non-null
     * @return true if filter and project only reference correlated variables provided by
     *     {@code correlate}
     */
    private boolean checkCorVars(
            Correlate correlate,
            @Nullable Project project,
            @Nullable Filter filter,
            @Nullable List<RexFieldAccess> correlatedJoinKeys) {
        if (filter != null) {
            assert correlatedJoinKeys != null;
            // check that all correlated refs in the filter condition are
            // used in the join(as field access).
            Set<CorRef> corVarInFilter = Sets.newHashSet(cm.mapRefRelToCorRef.get(filter));
            for (RexFieldAccess correlatedJoinKey : correlatedJoinKeys) {
                corVarInFilter.remove(cm.mapFieldAccessToCorRef.get(correlatedJoinKey));
            }
            // Anything left over is a correlated ref not used as a join key.
            if (!corVarInFilter.isEmpty()) {
                return false;
            }
            // Check that the correlated variables referenced in these
            // comparisons do come from the Correlate.
            corVarInFilter.addAll(cm.mapRefRelToCorRef.get(filter));
            for (CorRef corVar : corVarInFilter) {
                if (cm.mapCorToCorRel.get(corVar.corr) != correlate) {
                    return false;
                }
            }
        }
        // if project has any correlated reference, make sure they are also
        // provided by the current correlate. They will be projected out of the LHS
        // of the correlate.
        if ((project != null) && cm.mapRefRelToCorRef.containsKey(project)) {
            for (CorRef corVar : cm.mapRefRelToCorRef.get(project)) {
                if (cm.mapCorToCorRel.get(corVar.corr) != correlate) {
                    return false;
                }
            }
        }
        return true;
    } | Checks whether the correlations in projRel and filter are related to the correlated variables
provided by corRel.
@param correlate Correlate
@param project The original Project as the RHS input of the join
@param filter Filter
@param correlatedJoinKeys Correlated join keys
@return true if filter and proj only references corVar provided by corRel | checkCorVars | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | Apache-2.0 |
    /**
     * Removes the mapping from {@code correlate}'s correlation id to {@code correlate} from the
     * correlation map.
     *
     * @param correlate Correlate whose correlated variable is removed
     */
    private void removeCorVarFromTree(Correlate correlate) {
        cm.mapCorToCorRel.remove(correlate.getCorrelationId(), correlate);
    } | Removes correlated variables from the tree at root corRel.
@param correlate Correlate | removeCorVarFromTree | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | Apache-2.0 |
    /**
     * Builds the rule configuration matching an Aggregate over a Project over an Aggregate,
     * used to remove a SINGLE_VALUE aggregate wrapping a scalar sub-query.
     *
     * @param f factory used to build relational expressions
     * @return immutable rule configuration
     */
    static RemoveSingleAggregateRuleConfig config(RelBuilderFactory f) {
        return ImmutableRemoveSingleAggregateRuleConfig.builder()
                .withRelBuilderFactory(f)
                // Operand tree: Aggregate <- Project <- Aggregate.
                .withOperandSupplier(
                        b0 ->
                                b0.operand(Aggregate.class)
                                        .oneInput(
                                                b1 ->
                                                        b1.operand(Project.class)
                                                                .oneInput(
                                                                        b2 ->
                                                                                b2.operand(
                                                                                                Aggregate
                                                                                                        .class)
                                                                                        .anyInputs())))
                .build();
    } | Rule to remove an Aggregate with SINGLE_VALUE. For cases like:
<p>Aggregate(SINGLE_VALUE) Project(single expression) Aggregate
<p>For instance (subtree taken from TPCH query 17):
<p>LogicalAggregate(group=[{}], agg#0=[SINGLE_VALUE($0)])
LogicalProject(EXPR$0=[*(0.2:DECIMAL(2, 1), $0)]) LogicalAggregate(group=[{}],
agg#0=[AVG($0)]) LogicalProject(L_QUANTITY=[$4]) LogicalFilter(condition=[=($1,
$cor0.P_PARTKEY)]) LogicalTableScan(table=[[TPCH_01, LINEITEM]])
<p>Will be converted into:
<p>LogicalProject($f0=[*(0.2:DECIMAL(2, 1), $0)]) LogicalAggregate(group=[{}],
agg#0=[AVG($0)]) LogicalProject(L_QUANTITY=[$4]) LogicalFilter(condition=[=($1,
$cor0.P_PARTKEY)]) LogicalTableScan(table=[[TPCH_01, LINEITEM]]) | config | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | Apache-2.0 |
    /**
     * Builds the rule configuration matching a Correlate whose right input is an Aggregate over
     * a Project, used to remove correlations for scalar projects.
     *
     * @param decorrelator decorrelator owning the rule
     * @param relBuilderFactory factory used to build relational expressions
     * @return immutable rule configuration
     */
    static RemoveCorrelationForScalarProjectRuleConfig config(
            RelDecorrelator decorrelator, RelBuilderFactory relBuilderFactory) {
        return ImmutableRemoveCorrelationForScalarProjectRuleConfig.builder()
                .withRelBuilderFactory(relBuilderFactory)
                // Operand tree: Correlate(any, Aggregate <- Project <- any).
                .withOperandSupplier(
                        b0 ->
                                b0.operand(Correlate.class)
                                        .inputs(
                                                b1 -> b1.operand(RelNode.class).anyInputs(),
                                                b2 ->
                                                        b2.operand(Aggregate.class)
                                                                .oneInput(
                                                                        b3 ->
                                                                                b3.operand(
                                                                                                Project
                                                                                                        .class)
                                                                                        .oneInput(
                                                                                                b4 ->
                                                                                                        b4.operand(
                                                                                                                        RelNode
                                                                                                                                .class)
                                                                                                                .anyInputs()))))
                .withDecorrelator(decorrelator)
                .build();
    } | Planner rule that removes correlations for scalar projects.
    /**
     * Builds the rule configuration matching a Correlate whose right input is a Project over a
     * simple Aggregate over a Project, used to remove correlations for scalar aggregates.
     *
     * @param d decorrelator owning the rule
     * @param relBuilderFactory factory used to build relational expressions
     * @return immutable rule configuration
     */
    static RemoveCorrelationForScalarAggregateRuleConfig config(
            RelDecorrelator d, RelBuilderFactory relBuilderFactory) {
        return ImmutableRemoveCorrelationForScalarAggregateRuleConfig.builder()
                .withRelBuilderFactory(relBuilderFactory)
                // Operand tree: Correlate(any, Project <- Aggregate(simple) <- Project <- any).
                .withOperandSupplier(
                        b0 ->
                                b0.operand(Correlate.class)
                                        .inputs(
                                                b1 -> b1.operand(RelNode.class).anyInputs(),
                                                b2 ->
                                                        b2.operand(Project.class)
                                                                .oneInput(
                                                                        b3 ->
                                                                                b3.operand(
                                                                                                Aggregate
                                                                                                        .class)
                                                                                        .predicate(
                                                                                                Aggregate
                                                                                                        ::isSimple)
                                                                                        .oneInput(
                                                                                                b4 ->
                                                                                                        b4.operand(
                                                                                                                        Project
                                                                                                                                .class)
                                                                                                                .oneInput(
                                                                                                                        b5 ->
                                                                                                                                b5.operand(
                                                                                                                                                RelNode
                                                                                                                                                        .class)
                                                                                                                                        .anyInputs())))))
                .withDecorrelator(d)
                .build();
    } | Planner rule that removes correlations for scalar aggregates.
    /**
     * Builds the rule configuration for adjusting projects when COUNT aggregates are present.
     *
     * <p>Depending on {@code flavor}, the matched right input of the Correlate is either a
     * Project over an Aggregate (true) or a bare Aggregate (false).
     *
     * @param flavor whether to match an intermediate Project above the Aggregate
     * @param decorrelator decorrelator owning the rule
     * @param relBuilderFactory factory used to build relational expressions
     * @return immutable rule configuration
     */
    static AdjustProjectForCountAggregateRuleConfig config(
            boolean flavor, RelDecorrelator decorrelator, RelBuilderFactory relBuilderFactory) {
        return ImmutableAdjustProjectForCountAggregateRuleConfig.builder()
                .withRelBuilderFactory(relBuilderFactory)
                .withOperandSupplier(
                        b0 ->
                                b0.operand(Correlate.class)
                                        .inputs(
                                                b1 -> b1.operand(RelNode.class).anyInputs(),
                                                b2 ->
                                                        flavor
                                                                ? b2.operand(Project.class)
                                                                        .oneInput(
                                                                                b3 ->
                                                                                        b3.operand(
                                                                                                        Aggregate
                                                                                                                .class)
                                                                                                .anyInputs())
                                                                : b2.operand(Aggregate.class)
                                                                        .anyInputs()))
                .withFlavor(flavor)
                .withDecorrelator(decorrelator)
                .build();
    } | Planner rule that adjusts projects when counts are added.
    /**
     * Returns the visitor on which the method dispatcher dispatches each decorrelateRel call;
     * by default this instance. Subclasses may override to supply themselves.
     */
    protected RelDecorrelator getVisitor() {
        return this;
    } | Returns the {@code visitor} on which the {@code MethodDispatcher} dispatches each {@code
decorrelateRel} method, the default implementation returns this instance, if you got a
sub-class, override this method to replace the {@code visitor} as the sub-class instance. | getVisitor | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | Apache-2.0 |
    /**
     * Returns the rules applied to the plan after decorrelation; never null. Default is an
     * empty list.
     */
    protected Collection<RelOptRule> getPostDecorrelateRules() {
        return Collections.emptyList();
    } | Returns the rules applied on the rel after decorrelation, never null.
    /**
     * Returns the number of dynamic parameters encountered during translation; must only be
     * called after the query has been converted.
     *
     * @return number of dynamic parameters
     */
    public int getDynamicParamCount() {
        return dynamicParamSqlNodes.size();
    } | Returns the number of dynamic parameters encountered during translation; this must only be
called after {@link #convertQuery}.
@return number of dynamic parameters | getDynamicParamCount | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
    /**
     * Returns the type inferred for a dynamic parameter.
     *
     * @param index 0-based index of dynamic parameter
     * @return inferred type; never null
     */
    public RelDataType getDynamicParamType(int index) {
        SqlNode sqlNode = dynamicParamSqlNodes.get(index);
        // A null entry means no type was ever recorded for this parameter.
        if (sqlNode == null) {
            throw Util.needToImplement("dynamic param type inference");
        }
        return validator().getValidatedNodeType(sqlNode);
    } | Returns the type inferred for a dynamic parameter.
@param index 0-based index of dynamic parameter
@return inferred type, never null | getDynamicParamType | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
    /**
     * Returns the current count of dynamic parameters in an EXPLAIN PLAN statement, optionally
     * incrementing the counter afterwards.
     *
     * @param increment if true, increment the count
     * @return the count before the optional increment
     */
    public int getDynamicParamCountInExplain(boolean increment) {
        int retVal = explainParamCount;
        if (increment) {
            ++explainParamCount;
        }
        return retVal;
    } | Returns the current count of the number of dynamic parameters in an EXPLAIN PLAN statement.
@param increment if true, increment the count
@return the current count before the optional increment | getDynamicParamCountInExplain | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
    /**
     * Returns the mapping from non-correlated sub-queries to the constant expressions they were
     * converted to.
     */
    public Map<SqlNode, RexNode> getMapConvertedNonCorrSubqs() {
        return mapConvertedNonCorrSubqs;
    } | Returns the mapping of non-correlated sub-queries that have been converted to the constants
that they evaluate to. | getMapConvertedNonCorrSubqs | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
    /**
     * Sets a new SubQueryConverter. To have any effect, this must be called before any convert
     * method.
     *
     * @param converter new SubQueryConverter
     */
    public void setSubQueryConverter(SubQueryConverter converter) {
        subQueryConverter = converter;
    } | Sets a new SubQueryConverter. To have any effect, this must be called before any convert
method.
@param converter new SubQueryConverter | setSubQueryConverter | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
    /**
     * Performs decorrelation on {@code rootRel} if decorrelation is enabled in the config;
     * otherwise returns the input unchanged. If decorrelation produced a new plan, verifies
     * that its type still matches the validated query type.
     *
     * @param query Query (used to check the converted type)
     * @param rootRel Root relational expression
     * @return New root relational expression after decorrelation
     */
    public RelNode decorrelate(SqlNode query, RelNode rootRel) {
        if (!config.isDecorrelationEnabled()) {
            return rootRel;
        }
        final RelNode result = decorrelateQuery(rootRel);
        // Only re-check the type if decorrelation actually changed the plan.
        if (result != rootRel) {
            checkConvertedType(query, result);
        }
        return result;
    } | If sub-query is correlated and decorrelation is enabled, performs decorrelation.
@param query Query
@param rootRel Root relational expression
@return New root relational expression after decorrelation | decorrelate | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
    /**
     * Trims fields that are not used by their consumer, when enabled in the config.
     *
     * <p>If the plan is not required to stay ordered, any collation traits present before
     * trimming are restored on the trimmed plan.
     *
     * @param ordered Whether the relational expression must produce results in a particular
     *     order (typically because it has an ORDER BY at top level)
     * @param rootRel Relational expression that is at the root of the tree
     * @return Trimmed relational expression
     */
    public RelNode trimUnusedFields(boolean ordered, RelNode rootRel) {
        // Trim fields that are not used by their consumer.
        if (config.isTrimUnusedFields()) {
            final RelFieldTrimmer trimmer = newFieldTrimmer();
            final List<RelCollation> collations =
                    rootRel.getTraitSet().getTraits(RelCollationTraitDef.INSTANCE);
            rootRel = trimmer.trim(rootRel);
            // Re-apply pre-trim collations unless the result must stay ordered anyway.
            if (!ordered
                    && collations != null
                    && !collations.isEmpty()
                    && !collations.equals(ImmutableList.of(RelCollations.EMPTY))) {
                final RelTraitSet traitSet =
                        rootRel.getTraitSet().replace(RelCollationTraitDef.INSTANCE, collations);
                rootRel = rootRel.copy(traitSet, rootRel.getInputs());
            }
            if (SQL2REL_LOGGER.isDebugEnabled()) {
                SQL2REL_LOGGER.debug(
                        RelOptUtil.dumpPlan(
                                "Plan after trimming unused fields",
                                rootRel,
                                SqlExplainFormat.TEXT,
                                SqlExplainLevel.EXPPLAN_ATTRIBUTES));
            }
        }
        return rootRel;
    } | Walks over a tree of relational expressions, replacing each {@link RelNode} with a 'slimmed
down' relational expression that projects only the fields required by its consumer.
<p>This may make things easier for the optimizer, by removing crud that would expand the
search space, but this is difficult for the optimizer itself to do, because optimizer rules
must preserve the number and type of fields. Hence, this transform operates on the
entire tree, similar to the {@link RelStructuredTypeFlattener type-flattening transform}.
<p>Currently this functionality is disabled in farrago/luciddb; the default implementation of
this method does nothing.
@param ordered Whether the relational expression must produce results in a particular order
(typically because it has an ORDER BY at top level)
@param rootRel Relational expression that is at the root of the tree
@return Trimmed relational expression | trimUnusedFields | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
    /**
     * Factory method for creating a translation workspace (Blackboard).
     *
     * @param scope validator scope for the translation, or null
     * @param nameToNodeMap map of names to pre-built expressions, or null
     * @param top whether this blackboard is for the top-level query
     * @return a new Blackboard
     */
    protected Blackboard createBlackboard(
            @Nullable SqlValidatorScope scope,
            @Nullable Map<String, RexNode> nameToNodeMap,
            boolean top) {
        return new Blackboard(scope, nameToNodeMap, top);
    } | Factory method for creating translation workspace.
    /**
     * Converts a query's ORDER BY clause, if any, by placing a LogicalSort on top of the
     * current root.
     *
     * <p>Returns without adding a sort when the sort should be removed for a sub-query, or when
     * the ORDER BY list is empty and there is no meaningful OFFSET/FETCH. If extra expressions
     * were added to the project list only for sorting and this is not the top node, a final
     * Project removes them again.
     *
     * @param select Query
     * @param bb Blackboard
     * @param collation Collation list
     * @param orderExprList Order-by expressions not present in the select list
     * @param offset Expression for number of rows to discard before returning first row, or null
     * @param fetch Expression for number of rows to fetch, or null
     */
    protected void convertOrder(
            SqlSelect select,
            Blackboard bb,
            RelCollation collation,
            List<SqlNode> orderExprList,
            @Nullable SqlNode offset,
            @Nullable SqlNode fetch) {
        if (removeSortInSubQuery(bb.top)
                || select.getOrderList() == null
                || select.getOrderList().isEmpty()) {
            assert removeSortInSubQuery(bb.top) || collation.getFieldCollations().isEmpty();
            // OFFSET 0 (or absent) with no FETCH: the sort carries no information.
            if ((offset == null
                            || (offset instanceof SqlLiteral
                                    && Objects.equals(
                                            ((SqlLiteral) offset).bigDecimalValue(),
                                            BigDecimal.ZERO)))
                    && fetch == null) {
                return;
            }
        }
        // Create a sorter using the previously constructed collations.
        bb.setRoot(
                LogicalSort.create(
                        bb.root(),
                        collation,
                        offset == null ? null : convertExpression(offset),
                        fetch == null ? null : convertExpression(fetch)),
                false);
        // If extra expressions were added to the project list for sorting,
        // add another project to remove them. But make the collation empty, because
        // we can't represent the real collation.
        //
        // If it is the top node, use the real collation, but don't trim fields.
        if (orderExprList.size() > 0 && !bb.top) {
            final List<RexNode> exprs = new ArrayList<>();
            final RelDataType rowType = bb.root().getRowType();
            final int fieldCount = rowType.getFieldCount() - orderExprList.size();
            for (int i = 0; i < fieldCount; i++) {
                exprs.add(rexBuilder.makeInputRef(bb.root(), i));
            }
            bb.setRoot(
                    LogicalProject.create(
                            bb.root(),
                            ImmutableList.of(),
                            exprs,
                            rowType.getFieldNames().subList(0, fieldCount),
                            ImmutableSet.of()),
                    false);
        }
    } | Converts a query's ORDER BY clause, if any.
<p>Ignores the ORDER BY clause if the query is not top-level and FETCH or OFFSET are not
present.
@param select Query
@param bb Blackboard
@param collation Collation list
@param orderExprList Method populates this list with orderBy expressions not present in
selectList
@param offset Expression for number of rows to discard before returning first row
@param fetch Expression for number of rows to fetch | convertOrder | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
    /**
     * Returns whether the sort should be removed for the subsequent query conversion: only when
     * the config enables removal and this is not the top-level query.
     *
     * @param top Whether the rel to convert is the root of the query
     */
    private boolean removeSortInSubQuery(boolean top) {
        return config.isRemoveSortInSubQuery() && !top;
    } | Returns whether we should remove the sort for the subsequent query conversion.
@param top Whether the rel to convert is the root of the query | removeSortInSubQuery | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
    /**
     * Pushes NOT operators down into IN / NOT IN operators, recursively rewriting AND/OR/CASE
     * using De Morgan's laws so that each IN becomes NOT IN and vice versa.
     *
     * <p>Nodes that are not calls, or contain no IN operator, are returned unchanged. Every
     * newly created call is registered with the validator via {@code reg} so it has a derived
     * type.
     *
     * @param scope Scope where {@code sqlNode} occurs
     * @param sqlNode the root node from which to look for NOT operators
     * @return the transformed SqlNode representation with NOT pushed down
     */
    private static SqlNode pushDownNotForIn(SqlValidatorScope scope, SqlNode sqlNode) {
        if (!(sqlNode instanceof SqlCall) || !containsInOperator(sqlNode)) {
            return sqlNode;
        }
        final SqlCall sqlCall = (SqlCall) sqlNode;
        switch (sqlCall.getKind()) {
            case AND:
            case OR:
                // Recurse into each operand; the operator itself is unchanged.
                final List<SqlNode> operands = new ArrayList<>();
                for (SqlNode operand : sqlCall.getOperandList()) {
                    operands.add(pushDownNotForIn(scope, operand));
                }
                final SqlCall newCall =
                        sqlCall.getOperator().createCall(sqlCall.getParserPosition(), operands);
                return reg(scope, newCall);
            case NOT:
                assert sqlCall.operand(0) instanceof SqlCall;
                final SqlCall call = sqlCall.operand(0);
                switch (sqlCall.operand(0).getKind()) {
                    case CASE:
                        // NOT(CASE ...) => CASE with each THEN/ELSE negated.
                        final SqlCase caseNode = (SqlCase) call;
                        final SqlNodeList thenOperands = new SqlNodeList(SqlParserPos.ZERO);
                        for (SqlNode thenOperand : caseNode.getThenOperands()) {
                            final SqlCall not =
                                    SqlStdOperatorTable.NOT.createCall(
                                            SqlParserPos.ZERO, thenOperand);
                            thenOperands.add(pushDownNotForIn(scope, reg(scope, not)));
                        }
                        SqlNode elseOperand =
                                requireNonNull(
                                        caseNode.getElseOperand(),
                                        "getElseOperand for " + caseNode);
                        if (!SqlUtil.isNull(elseOperand)) {
                            // "not(unknown)" is "unknown", so no need to simplify
                            final SqlCall not =
                                    SqlStdOperatorTable.NOT.createCall(
                                            SqlParserPos.ZERO, elseOperand);
                            elseOperand = pushDownNotForIn(scope, reg(scope, not));
                        }
                        return reg(
                                scope,
                                SqlStdOperatorTable.CASE.createCall(
                                        SqlParserPos.ZERO,
                                        caseNode.getValueOperand(),
                                        caseNode.getWhenOperands(),
                                        thenOperands,
                                        elseOperand));
                    case AND:
                        // De Morgan: NOT(a AND b) => NOT(a) OR NOT(b).
                        final List<SqlNode> orOperands = new ArrayList<>();
                        for (SqlNode operand : call.getOperandList()) {
                            orOperands.add(
                                    pushDownNotForIn(
                                            scope,
                                            reg(
                                                    scope,
                                                    SqlStdOperatorTable.NOT.createCall(
                                                            SqlParserPos.ZERO, operand))));
                        }
                        return reg(
                                scope,
                                SqlStdOperatorTable.OR.createCall(SqlParserPos.ZERO, orOperands));
                    case OR:
                        // De Morgan: NOT(a OR b) => NOT(a) AND NOT(b).
                        final List<SqlNode> andOperands = new ArrayList<>();
                        for (SqlNode operand : call.getOperandList()) {
                            andOperands.add(
                                    pushDownNotForIn(
                                            scope,
                                            reg(
                                                    scope,
                                                    SqlStdOperatorTable.NOT.createCall(
                                                            SqlParserPos.ZERO, operand))));
                        }
                        return reg(
                                scope,
                                SqlStdOperatorTable.AND.createCall(SqlParserPos.ZERO, andOperands));
                    case NOT:
                        // Double negation: NOT(NOT(x)) => x.
                        assert call.operandCount() == 1;
                        return pushDownNotForIn(scope, call.operand(0));
                    case NOT_IN:
                        // NOT(NOT IN) => IN.
                        return reg(
                                scope,
                                SqlStdOperatorTable.IN.createCall(
                                        SqlParserPos.ZERO, call.getOperandList()));
                    case IN:
                        // NOT(IN) => NOT IN.
                        return reg(
                                scope,
                                SqlStdOperatorTable.NOT_IN.createCall(
                                        SqlParserPos.ZERO, call.getOperandList()));
                    default:
                        break;
                }
                break;
            default:
                break;
        }
        return sqlNode;
    } | Push down all the NOT logical operators into any IN/NOT IN operators.
@param scope Scope where {@code sqlNode} occurs
@param sqlNode the root node from which to look for NOT operators
@return the transformed SqlNode representation with NOT pushed down. | pushDownNotForIn | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Registers with the validator a {@link SqlNode} that has been created during the
 * Sql-to-Rel process.
 *
 * <p>Deriving the node's type is what registers it; the node itself is returned
 * unchanged so calls can be chained inline.
 *
 * @param scope scope in which the node occurs
 * @param e newly created node
 * @return the same node, now known to the validator
 */
private static SqlNode reg(SqlValidatorScope scope, SqlNode e) {
        scope.getValidator().deriveType(scope, e);
        return e;
    }
process. | reg | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
private boolean convertNonCorrelatedSubQuery(
SubQuery subQuery, Blackboard bb, RelNode converted, boolean isExists) {
SqlCall call = (SqlBasicCall) subQuery.node;
if (subQueryConverter.canConvertSubQuery() && isSubQueryNonCorrelated(converted, bb)) {
// First check if the sub-query has already been converted
// because it's a nested sub-query. If so, don't re-evaluate
// it again.
RexNode constExpr = mapConvertedNonCorrSubqs.get(call);
if (constExpr == null) {
constExpr =
subQueryConverter.convertSubQuery(call, this, isExists, config.isExplain());
}
if (constExpr != null) {
subQuery.expr = constExpr;
mapConvertedNonCorrSubqs.put(call, constExpr);
return true;
}
}
return false;
} | Determines if a sub-query is non-correlated and if so, converts it to a constant.
@param subQuery the call that references the sub-query
@param bb blackboard used to convert the sub-query
@param converted RelNode tree corresponding to the sub-query
@param isExists true if the sub-query is part of an EXISTS expression
@return Whether the sub-query can be converted to a constant | convertNonCorrelatedSubQuery | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Converts the RelNode tree for a select statement to a select that produces a single
 * value.
 *
 * <p>If the query is statically guaranteed to produce at most one row (a single
 * scalar-valued select item with no GROUP BY, a FETCH of 0 or 1, or a single-value
 * VALUES), the plan is returned unchanged; otherwise it is wrapped in a SINGLE_VALUE
 * aggregate.
 *
 * @param query the query
 * @param plan the original RelNode tree corresponding to the statement
 * @return the converted RelNode tree
 */
public RelNode convertToSingleValueSubq(SqlNode query, RelNode plan) {
        // Check whether query is guaranteed to produce a single value.
        if (query instanceof SqlSelect) {
            SqlSelect select = (SqlSelect) query;
            SqlNodeList selectList = select.getSelectList();
            SqlNodeList groupList = select.getGroup();
            if ((selectList.size() == 1) && ((groupList == null) || (groupList.size() == 0))) {
                SqlNode selectExpr = selectList.get(0);
                if (selectExpr instanceof SqlCall) {
                    SqlCall selectExprCall = (SqlCall) selectExpr;
                    if (Util.isSingleValue(selectExprCall)) {
                        return plan;
                    }
                }
                // If there is a limit with 0 or 1,
                // it is ensured to produce a single value
                SqlNode fetch = select.getFetch();
                if (fetch instanceof SqlNumericLiteral) {
                    long value = ((SqlNumericLiteral) fetch).getValueAs(Long.class);
                    if (value < 2) {
                        return plan;
                    }
                }
            }
        } else if (query instanceof SqlCall) {
            // If the query is (values ...),
            // it is necessary to look into the operands to determine
            // whether SingleValueAgg is necessary
            SqlCall exprCall = (SqlCall) query;
            if (exprCall.getOperator() instanceof SqlValuesOperator
                    && Util.isSingleValue(exprCall)) {
                return plan;
            }
        }
        // If not, project SingleValueAgg
        return RelOptUtil.createSingleValueAggRel(cluster, plan);
    }
@param query the query
@param plan the original RelNode tree corresponding to the statement
@return the converted RelNode tree | convertToSingleValueSubq | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Converts "x IN (1, 2, ...)" to "x=1 OR x=2 OR ...", and the quantified variants:
 * NOT IN becomes NOT(OR ...), ALL becomes a conjunction of comparisons, SOME a
 * disjunction. Multi-column keys compare against ROW constructors component-wise.
 *
 * @param bb blackboard used to convert the right-hand-side values
 * @param leftKeys LHS
 * @param valuesList RHS
 * @param op The operator (IN, NOT IN, &gt; SOME, ...)
 * @return converted expression, or null when the composed conjunction/disjunction
 *     collapses to nothing
 */
private @Nullable RexNode convertInToOr(
        final Blackboard bb,
        final List<RexNode> leftKeys,
        SqlNodeList valuesList,
        SqlInOperator op) {
        final List<RexNode> comparisons = new ArrayList<>();
        for (SqlNode rightVals : valuesList) {
            RexNode rexComparison;
            // Quantified operators (e.g. "> SOME") carry their own comparison kind;
            // plain IN/NOT IN compare with equality.
            final SqlOperator comparisonOp;
            if (op instanceof SqlQuantifyOperator) {
                comparisonOp =
                        RelOptUtil.op(
                                ((SqlQuantifyOperator) op).comparisonKind,
                                SqlStdOperatorTable.EQUALS);
            } else {
                comparisonOp = SqlStdOperatorTable.EQUALS;
            }
            if (leftKeys.size() == 1) {
                rexComparison =
                        rexBuilder.makeCall(
                                comparisonOp,
                                leftKeys.get(0),
                                ensureSqlType(
                                        leftKeys.get(0).getType(),
                                        bb.convertExpression(rightVals)));
            } else {
                // Composite key: RHS must be a ROW constructor of matching arity;
                // AND together the per-component comparisons.
                assert rightVals instanceof SqlCall;
                final SqlBasicCall call = (SqlBasicCall) rightVals;
                assert (call.getOperator() instanceof SqlRowOperator)
                        && call.operandCount() == leftKeys.size();
                rexComparison =
                        RexUtil.composeConjunction(
                                rexBuilder,
                                Util.transform(
                                        Pair.zip(leftKeys, call.getOperandList()),
                                        pair ->
                                                rexBuilder.makeCall(
                                                        comparisonOp,
                                                        pair.left,
                                                        // TODO: remove requireNonNull when
                                                        // checkerframework issue resolved
                                                        ensureSqlType(
                                                                requireNonNull(
                                                                                pair.left,
                                                                                "pair.left")
                                                                        .getType(),
                                                                bb.convertExpression(
                                                                        pair.right)))));
            }
            comparisons.add(rexComparison);
        }
        switch (op.kind) {
            case ALL:
                return RexUtil.composeConjunction(rexBuilder, comparisons, true);
            case NOT_IN:
                return rexBuilder.makeCall(
                        SqlStdOperatorTable.NOT,
                        RexUtil.composeDisjunction(rexBuilder, comparisons));
            case IN:
            case SOME:
                return RexUtil.composeDisjunction(rexBuilder, comparisons, true);
            default:
                throw new AssertionError();
        }
    }
@param leftKeys LHS
@param valuesList RHS
@param op The operator (IN, NOT IN, > SOME, ...)
@return converted expression | convertInToOr | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
private RexNode ensureSqlType(RelDataType type, RexNode node) {
if (type.getSqlTypeName() == node.getType().getSqlTypeName()
|| (type.getSqlTypeName() == SqlTypeName.VARCHAR
&& node.getType().getSqlTypeName() == SqlTypeName.CHAR)) {
return node;
}
return rexBuilder.ensureType(type, node, true);
} | Ensures that an expression has a given {@link SqlTypeName}, applying a cast if necessary. If
the expression already has the right type family, returns the expression unchanged. | ensureSqlType | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Builds a list of all <code>IN</code> or <code>EXISTS</code> operators inside SQL
 * parse tree. Does not traverse inside queries.
 *
 * @param bb blackboard
 * @param node the SQL parse tree
 * @param logic Whether the answer needs to be in full 3-valued logic (TRUE, FALSE,
 *     UNKNOWN) will be required, or whether we can accept an approximation (say
 *     representing UNKNOWN as FALSE)
 * @param registerOnlyScalarSubQueries if set to true and the parse tree corresponds to
 *     a variation of a select node, only register it if it's a scalar sub-query
 * @param clause A clause inside which sub-query is searched
 */
private void findSubQueries(
        Blackboard bb,
        SqlNode node,
        RelOptUtil.Logic logic,
        boolean registerOnlyScalarSubQueries,
        SqlImplementor.Clause clause) {
        final SqlKind kind = node.getKind();
        switch (kind) {
            case EXISTS:
            case UNIQUE:
            case SELECT:
            case MULTISET_QUERY_CONSTRUCTOR:
            case MULTISET_VALUE_CONSTRUCTOR:
            case ARRAY_QUERY_CONSTRUCTOR:
            case MAP_QUERY_CONSTRUCTOR:
            case CURSOR:
            case SET_SEMANTICS_TABLE:
            case SCALAR_QUERY:
                // Query-like nodes are registered directly (no traversal inside them).
                if (!registerOnlyScalarSubQueries || (kind == SqlKind.SCALAR_QUERY)) {
                    bb.registerSubQuery(node, RelOptUtil.Logic.TRUE_FALSE, clause);
                }
                return;
            case IN:
                break;
            case NOT_IN:
            case NOT:
                // Negation flips the logic required from the sub-query's result.
                logic = logic.negate();
                break;
            default:
                break;
        }
        if (node instanceof SqlCall) {
            switch (kind) {
                    // Do no change logic for AND, IN and NOT IN expressions;
                    // but do change logic for OR, NOT and others;
                    // EXISTS was handled already.
                case AND:
                case IN:
                case NOT_IN:
                    break;
                default:
                    logic = RelOptUtil.Logic.TRUE_FALSE_UNKNOWN;
                    break;
            }
            for (SqlNode operand : ((SqlCall) node).getOperandList()) {
                if (operand != null) {
                    // In the case of an IN expression, locate scalar
                    // sub-queries so we can convert them to constants
                    findSubQueries(
                            bb,
                            operand,
                            logic,
                            kind == SqlKind.IN
                                    || kind == SqlKind.NOT_IN
                                    || kind == SqlKind.SOME
                                    || kind == SqlKind.ALL
                                    || registerOnlyScalarSubQueries,
                            clause);
                }
            }
        } else if (node instanceof SqlNodeList) {
            for (SqlNode child : (SqlNodeList) node) {
                findSubQueries(
                        bb,
                        child,
                        logic,
                        kind == SqlKind.IN
                                || kind == SqlKind.NOT_IN
                                || kind == SqlKind.SOME
                                || kind == SqlKind.ALL
                                || registerOnlyScalarSubQueries,
                        clause);
            }
        }
        // Now that we've located any scalar sub-queries inside the IN
        // expression, register the IN expression itself. We need to
        // register the scalar sub-queries first so they can be converted
        // before the IN expression is converted.
        switch (kind) {
            case IN:
            case NOT_IN:
            case SOME:
            case ALL:
                switch (logic) {
                    case TRUE_FALSE_UNKNOWN:
                        RelDataType type = validator().getValidatedNodeTypeIfKnown(node);
                        if (type == null) {
                            // The node might not be validated if we still don't know type of the
                            // node.
                            // Therefore return directly.
                            return;
                        } else {
                            break;
                        }
                    case UNKNOWN_AS_FALSE:
                        logic = RelOptUtil.Logic.TRUE;
                        break;
                    default:
                        break;
                }
                bb.registerSubQuery(node, logic, clause);
                break;
            default:
                break;
        }
    }
Does not traverse inside queries.
@param bb blackboard
@param node the SQL parse tree
@param logic Whether the answer needs to be in full 3-valued logic (TRUE, FALSE, UNKNOWN)
will be required, or whether we can accept an approximation (say representing UNKNOWN as
FALSE)
@param registerOnlyScalarSubQueries if set to true and the parse tree corresponds to a
variation of a select node, only register it if it's a scalar sub-query
@param clause A clause inside which sub-query is searched | findSubQueries | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
public RexNode convertExpression(SqlNode node) {
Map<String, RelDataType> nameToTypeMap = Collections.emptyMap();
final ParameterScope scope =
new ParameterScope((SqlValidatorImpl) validator(), nameToTypeMap);
final Blackboard bb = createBlackboard(scope, null, false);
replaceSubQueries(bb, node, RelOptUtil.Logic.TRUE_FALSE_UNKNOWN);
return bb.convertExpression(node);
} | Converts an expression from {@link SqlNode} to {@link RexNode} format.
@param node Expression to translate
@return Converted expression | convertExpression | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
public RexNode convertExpression(SqlNode node, Map<String, RexNode> nameToNodeMap) {
final Map<String, RelDataType> nameToTypeMap = new HashMap<>();
for (Map.Entry<String, RexNode> entry : nameToNodeMap.entrySet()) {
nameToTypeMap.put(entry.getKey(), entry.getValue().getType());
}
final ParameterScope scope =
new ParameterScope((SqlValidatorImpl) validator(), nameToTypeMap);
final Blackboard bb = createBlackboard(scope, nameToNodeMap, false);
replaceSubQueries(bb, node, RelOptUtil.Logic.TRUE_FALSE_UNKNOWN);
return bb.convertExpression(node);
} | Converts an expression from {@link SqlNode} to {@link RexNode} format, mapping identifier
references to predefined expressions.
@param node Expression to translate
@param nameToNodeMap map from String to {@link RexNode}; when an {@link SqlIdentifier} is
encountered, it is used as a key and translated to the corresponding value from this map
@return Converted expression | convertExpression | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Tells this converter that the SqlNode tree being converted contains query hints, so
 * that a query block alias will be attached to the root node of each query block.
 *
 * <p>The {@code containsQueryHints} flag is false by default to be compatible with
 * previous behavior, so the planner can reuse some nodes.
 *
 * <p>TODO At present, it is a relatively hacked way
 */
public void containsQueryHints() {
        containsQueryHints = true;
    }
alias will be attached to the root node of the query block.
<p>The `containsQueryHints` is false default to be compatible with previous behavior and then
planner can reuse some node.
<p>TODO At present, it is a relatively hacked way | containsQueryHints | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Determines whether a sub-query is non-correlated. Note that a non-correlated
 * sub-query can contain correlated references, provided those references do not
 * reference select statements that are parents of the sub-query.
 *
 * @param subq the sub-query
 * @param bb blackboard used while converting the sub-query, i.e., the blackboard of
 *     the parent query of this sub-query
 * @return true if the sub-query is non-correlated
 */
private boolean isSubQueryNonCorrelated(RelNode subq, Blackboard bb) {
        // Examine every correlation variable used inside the sub-query and check
        // where the referenced relation was originally resolved.
        Set<CorrelationId> correlatedVariables = RelOptUtil.getVariablesUsed(subq);
        for (CorrelationId correlName : correlatedVariables) {
            DeferredLookup lookup =
                    requireNonNull(
                            mapCorrelToDeferred.get(correlName),
                            () -> "correlation variable is not found: " + correlName);
            String originalRelName = lookup.getOriginalRelName();
            final SqlNameMatcher nameMatcher =
                    lookup.bb.scope().getValidator().getCatalogReader().nameMatcher();
            final SqlValidatorScope.ResolvedImpl resolved = new SqlValidatorScope.ResolvedImpl();
            lookup.bb
                    .scope()
                    .resolve(ImmutableList.of(originalRelName), nameMatcher, false, resolved);
            SqlValidatorScope ancestorScope = resolved.only().scope;
            // If the correlated reference is in a scope that's "above" the
            // sub-query, then this is a correlated sub-query.
            SqlValidatorScope parentScope = bb.scope;
            do {
                if (ancestorScope == parentScope) {
                    return false;
                }
                if (parentScope instanceof DelegatingScope) {
                    parentScope = ((DelegatingScope) parentScope).getParent();
                } else {
                    break;
                }
            } while (parentScope != null);
        }
        return true;
    }
contain correlated references, provided those references do not reference select statements
that are parents of the sub-query.
@param subq the sub-query
@param bb blackboard used while converting the sub-query, i.e., the blackboard of the parent
query of this sub-query
@return true if the sub-query is non-correlated | isSubQueryNonCorrelated | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Returns whether to trim unused fields as part of the conversion process.
 *
 * @return Whether to trim unused fields
 * @deprecated Check {@code config.isTrimUnusedFields()} instead.
 */
@Deprecated // to be removed before 2.0
public boolean isTrimUnusedFields() {
        return config.isTrimUnusedFields();
    }
@return Whether to trim unused fields | isTrimUnusedFields | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Recursively converts a query to a relational expression.
 *
 * <p>Dispatches on the statement kind; each branch delegates to the dedicated
 * converter for that statement type.
 *
 * @param query Query
 * @param top Whether this query is the top-level query of the statement
 * @param targetRowType Target row type, or null
 * @return Relational expression
 * @throws AssertionError if {@code query} is not a query or DML statement
 */
protected RelRoot convertQueryRecursive(
        SqlNode query, boolean top, @Nullable RelDataType targetRowType) {
        final SqlKind kind = query.getKind();
        switch (kind) {
            case SELECT:
                return RelRoot.of(convertSelect((SqlSelect) query, top), kind);
            case INSERT:
                return RelRoot.of(convertInsert((SqlInsert) query), kind);
            case DELETE:
                return RelRoot.of(convertDelete((SqlDelete) query), kind);
            case UPDATE:
                return RelRoot.of(convertUpdate((SqlUpdate) query), kind);
            case MERGE:
                return RelRoot.of(convertMerge((SqlMerge) query), kind);
            case UNION:
            case INTERSECT:
            case EXCEPT:
                return RelRoot.of(convertSetOp((SqlCall) query), kind);
            case WITH:
                return convertWith((SqlWith) query, top);
            case VALUES:
                return RelRoot.of(convertValues((SqlCall) query, targetRowType), kind);
            default:
                throw new AssertionError("not a query: " + query);
        }
    }
@param query Query
@param top Whether this query is the top-level query of the statement
@param targetRowType Target row type, or null
@return Relational expression | convertQueryRecursive | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
private RelNode createModify(RelOptTable targetTable, RelNode source) {
final ModifiableTable modifiableTable = targetTable.unwrap(ModifiableTable.class);
if (modifiableTable != null && modifiableTable == targetTable.unwrap(Table.class)) {
return modifiableTable.toModificationRel(
cluster,
targetTable,
catalogReader,
source,
LogicalTableModify.Operation.INSERT,
null,
null,
false);
}
final ModifiableView modifiableView = targetTable.unwrap(ModifiableView.class);
if (modifiableView != null) {
final Table delegateTable = modifiableView.getTable();
final RelDataType delegateRowType = delegateTable.getRowType(typeFactory);
final RelOptTable delegateRelOptTable =
RelOptTableImpl.create(
null, delegateRowType, delegateTable, modifiableView.getTablePath());
final RelNode newSource =
createSource(targetTable, source, modifiableView, delegateRowType);
return createModify(delegateRelOptTable, newSource);
}
return LogicalTableModify.create(
targetTable,
catalogReader,
source,
LogicalTableModify.Operation.INSERT,
null,
null,
false);
} | Creates a relational expression to modify a table or modifiable view. | createModify | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Creates a source for an INSERT statement.
 *
 * <p>If the column list is not specified, source expressions match target columns in
 * order.
 *
 * <p>If the column list is specified, source expressions are mapped to target columns
 * by name via targetColumnList, and may not cover the entire target table. So, we'll
 * make up a full row, using a combination of default values and the source
 * expressions provided.
 *
 * @param call Insert expression
 * @param source Source relational expression
 * @return Converted INSERT statement
 */
protected RelNode convertColumnList(final SqlInsert call, RelNode source) {
        RelDataType sourceRowType = source.getRowType();
        final RexNode sourceRef = rexBuilder.makeRangeReference(sourceRowType, 0, false);
        final List<String> targetColumnNames = new ArrayList<>();
        final List<RexNode> columnExprs = new ArrayList<>();
        collectInsertTargets(call, sourceRef, targetColumnNames, columnExprs);
        final RelOptTable targetTable = getTargetTable(call);
        final RelDataType targetRowType = RelOptTableImpl.realRowType(targetTable);
        final List<RelDataTypeField> targetFields = targetRowType.getFieldList();
        final List<@Nullable RexNode> sourceExps =
                new ArrayList<>(Collections.nCopies(targetFields.size(), null));
        final List<@Nullable String> fieldNames =
                new ArrayList<>(Collections.nCopies(targetFields.size(), null));
        final InitializerExpressionFactory initializerFactory =
                getInitializerFactory(getNamespace(call).getTable());
        // Walk the name list and place the associated value in the
        // expression list according to the ordinal value returned from
        // the table construct, leaving nulls in the list for columns
        // that are not referenced.
        final SqlNameMatcher nameMatcher = catalogReader.nameMatcher();
        for (Pair<String, RexNode> p : Pair.zip(targetColumnNames, columnExprs)) {
            RelDataTypeField field = nameMatcher.field(targetRowType, p.left);
            assert field != null : "column " + p.left + " not found";
            sourceExps.set(field.getIndex(), p.right);
        }
        // Lazily create a blackboard that contains all non-generated columns.
        final Supplier<Blackboard> bb =
                () -> createInsertBlackboard(targetTable, sourceRef, targetColumnNames);
        // Walk the expression list and get default values for any columns
        // that were not supplied in the statement. Get field names too.
        for (int i = 0; i < targetFields.size(); ++i) {
            final RelDataTypeField field = targetFields.get(i);
            final String fieldName = field.getName();
            fieldNames.set(i, fieldName);
            RexNode sourceExpression = sourceExps.get(i);
            if (sourceExpression == null || sourceExpression.getKind() == SqlKind.DEFAULT) {
                sourceExpression =
                        initializerFactory.newColumnDefaultValue(targetTable, i, bb.get());
                // bare nulls are dangerous in the wrong hands
                sourceExpression = castNullLiteralIfNeeded(sourceExpression, field.getType());
                sourceExps.set(i, sourceExpression);
            }
        }
        // sourceExps should not contain nulls (see the loop above)
        @SuppressWarnings("assignment.type.incompatible")
        List<RexNode> nonNullExprs = sourceExps;
        return relBuilder.push(source).projectNamed(nonNullExprs, fieldNames, false).build();
    }
<p>If the column list is not specified, source expressions match target columns in order.
<p>If the column list is specified, Source expressions are mapped to target columns by name
via targetColumnList, and may not cover the entire target table. So, we'll make up a full
row, using a combination of default values and the source expressions provided.
@param call Insert expression
@param source Source relational expression
@return Converted INSERT statement | convertColumnList | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Converts an identifier into an expression in a given scope. For example, the
 * "empno" in "select empno from emp join dept" becomes "emp.empno".
 *
 * <p>Handles nullary calls such as CURRENT_USER, pattern-variable prefixes inside
 * MATCH_RECOGNIZE, field accesses along a qualified name, nullability introduced by
 * outer joins, and recording of correlation-variable accesses.
 */
private RexNode convertIdentifier(Blackboard bb, SqlIdentifier identifier) {
        // first check for reserved identifiers like CURRENT_USER
        final SqlCall call = bb.getValidator().makeNullaryCall(identifier);
        if (call != null) {
            return bb.convertExpression(call);
        }
        // In MATCH_RECOGNIZE, a leading name component may be a pattern variable.
        String pv = null;
        if (bb.isPatternVarRef && identifier.names.size() > 1) {
            pv = identifier.names.get(0);
        }
        final SqlQualified qualified;
        if (bb.scope != null) {
            qualified = bb.scope.fullyQualify(identifier);
        } else {
            qualified = SqlQualified.create(null, 1, null, identifier);
        }
        final Pair<RexNode, @Nullable BiFunction<RexNode, String, RexNode>> e0 =
                bb.lookupExp(qualified);
        RexNode e = e0.left;
        // Walk the remaining name components as field accesses.
        for (String name : qualified.suffix()) {
            if (e == e0.left && e0.right != null) {
                e = e0.right.apply(e, name);
            } else {
                final boolean caseSensitive = true; // name already fully-qualified
                if (identifier.isStar() && bb.scope instanceof MatchRecognizeScope) {
                    e = rexBuilder.makeFieldAccess(e, 0);
                } else {
                    e = rexBuilder.makeFieldAccess(e, name, caseSensitive);
                }
            }
        }
        if (e instanceof RexInputRef) {
            // adjust the type to account for nulls introduced by outer joins
            e = adjustInputRef(bb, (RexInputRef) e);
            if (pv != null) {
                e = RexPatternFieldRef.of(pv, (RexInputRef) e);
            }
        }
        if (e0.left instanceof RexCorrelVariable) {
            // Remember which field of the correlation variable was accessed.
            assert e instanceof RexFieldAccess;
            final RexNode prev =
                    bb.mapCorrelateToRex.put(((RexCorrelVariable) e0.left).id, (RexFieldAccess) e);
            assert prev == null;
        }
        return e;
    }
"select empno from emp join dept" becomes "emp.empno". | convertIdentifier | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Adjusts the type of a reference to an input field to account for nulls introduced
 * by outer joins; and adjusts the offset to match the physical implementation.
 *
 * <p>NOTE(review): when the root field's type differs from the ref's type beyond mere
 * nullability, the ref is returned unchanged — presumably a deliberate deviation from
 * upstream Calcite; confirm before changing.
 *
 * @param bb Blackboard
 * @param inputRef Input ref
 * @return Adjusted input ref
 */
protected RexNode adjustInputRef(Blackboard bb, RexInputRef inputRef) {
        RelDataTypeField field = bb.getRootField(inputRef);
        if (field != null) {
            if (!SqlTypeUtil.equalSansNullability(
                    typeFactory, field.getType(), inputRef.getType())) {
                return inputRef;
            }
            return rexBuilder.makeInputRef(field.getType(), inputRef.getIndex());
        }
        return inputRef;
    }
joins; and adjusts the offset to match the physical implementation.
@param bb Blackboard
@param inputRef Input ref
@return Adjusted input ref | adjustInputRef | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Converts a WITH sub-query into a relational expression.
 *
 * @param with the WITH statement
 * @param top whether this query is the top-level query of the statement
 * @return root of the converted relational expression
 */
public RelRoot convertWith(SqlWith with, boolean top) {
        return convertQuery(with.body, false, top);
    }
/**
 * Converts a values clause (as in "INSERT INTO T(x,y) VALUES (1,2)") into a relational
 * expression.
 *
 * <p>First attempts a direct conversion to {@link LogicalValues}; if that fails (e.g.
 * because a row contains sub-queries or non-literal expressions), each row constructor
 * is converted to a single-row project and the rows are combined with UNION ALL.
 *
 * @param bb Blackboard
 * @param values Call to SQL VALUES operator
 * @param targetRowType Target row type
 */
private void convertValuesImpl(
        Blackboard bb, SqlCall values, @Nullable RelDataType targetRowType) {
        // Attempt direct conversion to LogicalValues; if that fails, deal with
        // fancy stuff like sub-queries below.
        RelNode valuesRel =
                convertRowValues(bb, values, values.getOperandList(), true, targetRowType);
        if (valuesRel != null) {
            bb.setRoot(valuesRel, true);
            return;
        }
        for (SqlNode rowConstructor1 : values.getOperandList()) {
            SqlCall rowConstructor = (SqlCall) rowConstructor1;
            Blackboard tmpBb = createBlackboard(bb.scope, null, false);
            replaceSubQueries(tmpBb, rowConstructor, RelOptUtil.Logic.TRUE_FALSE_UNKNOWN);
            final List<Pair<RexNode, String>> exps = new ArrayList<>();
            Ord.forEach(
                    rowConstructor.getOperandList(),
                    (operand, i) ->
                            exps.add(
                                    Pair.of(
                                            tmpBb.convertExpression(operand),
                                            SqlValidatorUtil.alias(operand, i))));
            RelNode in = (null == tmpBb.root) ? LogicalValues.createOneRow(cluster) : tmpBb.root;
            relBuilder.push(in).project(Pair.left(exps), Pair.right(exps));
        }
        bb.setRoot(relBuilder.union(true, values.getOperandList().size()).build(), true);
    }
expression.
@param bb Blackboard
@param values Call to SQL VALUES operator
@param targetRowType Target row type | convertValuesImpl | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Re-registers the {@code registered} rels with the given root node and returns the
 * new root node.
 *
 * <p>Each previously registered rel is wrapped in a SINGLE_VALUE aggregate unless the
 * metadata can prove it produces at most one row (columns unique over the empty key
 * set), then registered again with its original join type and keys.
 *
 * @param root The given root, never leaf
 * @return new root after the registration
 */
public RelNode reRegister(RelNode root) {
            setRoot(root, false);
            List<RegisterArgs> registerCopy = registered;
            registered = new ArrayList<>();
            for (RegisterArgs reg : registerCopy) {
                RelNode relNode = reg.rel;
                relBuilder.push(relNode);
                final RelMetadataQuery mq = relBuilder.getCluster().getMetadataQuery();
                final Boolean unique = mq.areColumnsUnique(relBuilder.peek(), ImmutableBitSet.of());
                if (unique == null || !unique) {
                    relBuilder.aggregate(
                            relBuilder.groupKey(),
                            relBuilder.aggregateCall(
                                    SqlStdOperatorTable.SINGLE_VALUE, relBuilder.field(0)));
                }
                register(relBuilder.build(), reg.joinType, reg.leftKeys);
            }
            return requireNonNull(this.root, "root");
        }
@param root The given root, never leaf
@return new root after the registration | reRegister | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Sets the root from a list of input relational expressions; delegates to
 * {@code setRoot(inputs, null, false)}.
 */
void setRoot(List<RelNode> inputs) {
            setRoot(inputs, null, false);
        }
was derived using dataset substitution.
<p>The default implementation is not interested in such notifications, and does nothing.
@param datasetName Dataset name | setRoot | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
/**
 * Returns an expression with which to reference a from-list item; throws if not
 * found.
 *
 * <p>If the identifier is pre-bound in {@code nameToNodeMap}, that expression is
 * returned directly. Otherwise the prefix is resolved in the current scope; a
 * reference into an already-converted input becomes a range/field reference, while a
 * reference into a not-yet-converted (correlated) relation becomes a correlation
 * variable with a deferred lookup.
 *
 * @param qualified The alias of the FROM item
 * @return a {@link RexFieldAccess} or {@link RexRangeRef}, never null; paired with an
 *     optional function that turns a field name into a field access on the result
 */
Pair<RexNode, @Nullable BiFunction<RexNode, String, RexNode>> lookupExp(
                SqlQualified qualified) {
            if (nameToNodeMap != null && qualified.prefixLength == 1) {
                RexNode node = nameToNodeMap.get(qualified.identifier.names.get(0));
                if (node == null) {
                    throw new AssertionError(
                            "Unknown identifier '"
                                    + qualified.identifier
                                    + "' encountered while expanding expression");
                }
                return Pair.of(node, null);
            }
            final SqlNameMatcher nameMatcher =
                    scope().getValidator().getCatalogReader().nameMatcher();
            final SqlValidatorScope.ResolvedImpl resolved = new SqlValidatorScope.ResolvedImpl();
            scope().resolve(qualified.prefix(), nameMatcher, false, resolved);
            if (resolved.count() != 1) {
                throw new AssertionError(
                        "no unique expression found for "
                                + qualified
                                + "; count is "
                                + resolved.count());
            }
            final SqlValidatorScope.Resolve resolve = resolved.only();
            final RelDataType rowType = resolve.rowType();
            // Found in current query's from list. Find which from item.
            // We assume that the order of the from clause items has been
            // preserved.
            final SqlValidatorScope ancestorScope = resolve.scope;
            boolean isParent = ancestorScope != scope;
            if ((inputs != null) && !isParent) {
                final LookupContext rels = new LookupContext(this, inputs, systemFieldList.size());
                final RexNode node = lookup(resolve.path.steps().get(0).i, rels);
                assert node != null;
                return Pair.of(
                        node,
                        (e, fieldName) -> {
                            final RelDataTypeField field =
                                    requireNonNull(
                                            rowType.getField(fieldName, true, false),
                                            () -> "field " + fieldName);
                            return rexBuilder.makeFieldAccess(e, field.getIndex());
                        });
            } else {
                // We're referencing a relational expression which has not been
                // converted yet. This occurs when from items are correlated,
                // e.g. "select from emp as emp join emp.getDepts() as dept".
                // Create a temporary expression.
                DeferredLookup lookup = new DeferredLookup(this, qualified.identifier.names.get(0));
                final CorrelationId correlId = cluster.createCorrel();
                mapCorrelToDeferred.put(correlId, lookup);
                if (resolve.path.steps().get(0).i < 0) {
                    return Pair.of(rexBuilder.makeCorrel(rowType, correlId), null);
                } else {
                    // Build a row type spanning all children of the ancestor scope,
                    // widening nullable children, and remember the field offsets of
                    // the resolved child so suffix lookups can index into it.
                    final RelDataTypeFactory.Builder builder = typeFactory.builder();
                    final ListScope ancestorScope1 =
                            (ListScope) requireNonNull(resolve.scope, "resolve.scope");
                    final ImmutableMap.Builder<String, Integer> fields = ImmutableMap.builder();
                    int i = 0;
                    int offset = 0;
                    for (SqlValidatorNamespace c : ancestorScope1.getChildren()) {
                        if (ancestorScope1.isChildNullable(i)) {
                            for (final RelDataTypeField f : c.getRowType().getFieldList()) {
                                builder.add(
                                        f.getName(),
                                        typeFactory.createTypeWithNullability(f.getType(), true));
                            }
                        } else {
                            builder.addAll(c.getRowType().getFieldList());
                        }
                        if (i == resolve.path.steps().get(0).i) {
                            for (RelDataTypeField field : c.getRowType().getFieldList()) {
                                fields.put(field.getName(), field.getIndex() + offset);
                            }
                        }
                        ++i;
                        offset += c.getRowType().getFieldCount();
                    }
                    final RexNode c = rexBuilder.makeCorrel(builder.uniquify().build(), correlId);
                    final ImmutableMap<String, Integer> fieldMap = fields.build();
                    return Pair.of(
                            c,
                            (e, fieldName) -> {
                                final int j =
                                        requireNonNull(
                                                fieldMap.get(fieldName), "field " + fieldName);
                                return rexBuilder.makeFieldAccess(e, j);
                            });
                }
            }
        }
@param qualified The alias of the FROM item
@return a {@link RexFieldAccess} or {@link RexRangeRef}, never null | lookupExp | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
// Deprecated 3-argument overload kept for source compatibility. Delegates to the
// 4-argument convertSortExpression, supplying sortToRexFieldCollation as the
// strategy that turns the extracted flags into a RexFieldCollation.
@Deprecated // to be removed before 2.0
public RexFieldCollation convertSortExpression(
SqlNode expr,
RelFieldCollation.Direction direction,
RelFieldCollation.NullDirection nullDirection) {
return convertSortExpression(
expr, direction, nullDirection, this::sortToRexFieldCollation);
} | Converts an item in an ORDER BY clause inside a window (OVER) clause, extracting DESC,
NULLS LAST and NULLS FIRST flags first. | convertSortExpression | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
// Looks up the (relational expression, ordinal-of-first-field) pair previously
// registered in relOffsetList for the FROM-clause item at the given offset.
// NOTE(review): assumes relOffsetList was populated in FROM-clause order — see class setup.
Pair<RelNode, Integer> findRel(int offset) {
return relOffsetList.get(offset);
} | Returns the relational expression with a given offset, and the ordinal in the combined
row of its first field.
<p>For example, in {@code Emp JOIN Dept}, findRel(1) returns the relational expression
for {@code Dept} and offset 6 (because {@code Emp} has 6 fields, therefore the first
field of {@code Dept} is field 6).
@param offset Offset of relational expression in FROM clause
@return Relational expression and the ordinal of its first field | findRel | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
// Rewrites an aggregate function call found inside an OVER window into a windowed
// RexNode. Non-aggregate calls are delegated to the superclass visitor. When
// ENABLE_HISTOGRAM_AGG is set and the aggregate has a histogram counterpart
// (MIN/MAX/FIRST_VALUE/LAST_VALUE), the call becomes
// HISTOGRAM_XXX(HISTOGRAM_AGG(x)) over the window, with casts to/from the
// histogram-compatible type; otherwise the aggregate is emitted directly, with
// nullable SUM mapped to SUM0 + nullWhenCountZero.
@Override
public RexNode visitCall(RexCall call) {
final SqlOperator op = call.getOperator();
if (!(op instanceof SqlAggFunction)) {
return super.visitCall(call);
}
final SqlAggFunction aggOp = (SqlAggFunction) op;
final RelDataType type = call.getType();
List<RexNode> exprs = call.getOperands();
// Histogram rewrite applies only when enabled AND this aggregate has a
// histogram variant; getHistogramOp returns null otherwise.
SqlFunction histogramOp = !ENABLE_HISTOGRAM_AGG ? null : getHistogramOp(aggOp);
if (histogramOp != null) {
final RelDataType histogramType = computeHistogramType(type);
// For DECIMAL, since it's already represented as a bigint we
// want to do a reinterpretCast instead of a cast to avoid
// losing any precision.
boolean reinterpretCast = type.getSqlTypeName() == SqlTypeName.DECIMAL;
// Replace original expression with CAST of not one
// of the supported types
if (histogramType != type) {
exprs = new ArrayList<>(exprs);
exprs.set(
0,
reinterpretCast
? rexBuilder.makeReinterpretCast(
histogramType,
exprs.get(0),
rexBuilder.makeLiteral(false))
: rexBuilder.makeCast(histogramType, exprs.get(0)));
}
// Build HISTOGRAM_AGG(x) OVER (window) ...
RexNode over =
relBuilder
.aggregateCall(SqlStdOperatorTable.HISTOGRAM_AGG, exprs)
.distinct(distinct)
.ignoreNulls(ignoreNulls)
.over()
.partitionBy(partitionKeys)
.orderBy(orderKeys)
.let(
c ->
rows
? c.rowsBetween(lowerBound, upperBound)
: c.rangeBetween(lowerBound, upperBound))
.allowPartial(allowPartial)
.toRex();
// ... then wrap it in the extraction function, e.g. HISTOGRAM_MIN(...).
RexNode histogramCall =
rexBuilder.makeCall(histogramType, histogramOp, ImmutableList.of(over));
// If needed, post Cast result back to original
// type.
if (histogramType != type) {
if (reinterpretCast) {
histogramCall =
rexBuilder.makeReinterpretCast(
type, histogramCall, rexBuilder.makeLiteral(false));
} else {
histogramCall = rexBuilder.makeCast(type, histogramCall);
}
}
return histogramCall;
} else {
// Nullable SUM is emitted as SUM0 with nullWhenCountZero(true), which
// presumably restores SUM's NULL-on-empty-window semantics — SUM0 alone
// would return 0.
boolean needSum0 = aggOp == SqlStdOperatorTable.SUM && type.isNullable();
SqlAggFunction aggOpToUse = needSum0 ? SqlStdOperatorTable.SUM0 : aggOp;
return relBuilder
.aggregateCall(aggOpToUse, exprs)
.distinct(distinct)
.ignoreNulls(ignoreNulls)
.over()
.partitionBy(partitionKeys)
.orderBy(orderKeys)
.let(
c ->
rows
? c.rowsBetween(lowerBound, upperBound)
: c.rangeBetween(lowerBound, upperBound))
.allowPartial(allowPartial)
.nullWhenCountZero(needSum0)
.toRex();
}
} | Whether to convert calls to MIN(x) to HISTOGRAM_MIN(HISTOGRAM(x)). Histograms allow
rolling computation, but require more space. | visitCall | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
// Maps an aggregate function to its histogram-based counterpart
// (MIN -> HISTOGRAM_MIN, MAX -> HISTOGRAM_MAX, FIRST_VALUE/LAST_VALUE
// likewise). Returns null for aggregates with no histogram variant, which
// callers treat as "convert the aggregate directly".
@Nullable SqlFunction getHistogramOp(SqlAggFunction aggFunction) {
if (aggFunction == SqlStdOperatorTable.MIN) {
return SqlStdOperatorTable.HISTOGRAM_MIN;
} else if (aggFunction == SqlStdOperatorTable.MAX) {
return SqlStdOperatorTable.HISTOGRAM_MAX;
} else if (aggFunction == SqlStdOperatorTable.FIRST_VALUE) {
return SqlStdOperatorTable.HISTOGRAM_FIRST_VALUE;
} else if (aggFunction == SqlStdOperatorTable.LAST_VALUE) {
return SqlStdOperatorTable.HISTOGRAM_LAST_VALUE;
} else {
return null;
}
} | Returns the histogram operator corresponding to a given aggregate function.
<p>For example, <code>getHistogramOp
({@link SqlStdOperatorTable#MIN})</code> returns {@link
SqlStdOperatorTable#HISTOGRAM_MIN}.
@param aggFunction An aggregate function
@return Its histogram function, or null | getHistogramOp | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
// Chooses the value type used inside a histogram: exact numerics are widened
// to BIGINT and approximate numerics to DOUBLE; any type that is already
// BIGINT/DOUBLE, or is not numeric, passes through unchanged. The caller
// casts operands to this type before HISTOGRAM_AGG and casts the result back.
private RelDataType computeHistogramType(RelDataType type) {
if (SqlTypeUtil.isExactNumeric(type) && type.getSqlTypeName() != SqlTypeName.BIGINT) {
return typeFactory.createSqlType(SqlTypeName.BIGINT);
} else if (SqlTypeUtil.isApproximateNumeric(type)
&& type.getSqlTypeName() != SqlTypeName.DOUBLE) {
return typeFactory.createSqlType(SqlTypeName.DOUBLE);
} else {
return type;
}
} | Returns the type for a histogram function. It is either the actual type or an an
approximation to it. | computeHistogramType | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
// Collects aggregate calls from a SqlNode tree into `list`, unwrapping the
// decorator operators FILTER / WITHIN DISTINCT / WITHIN GROUP and recording
// their auxiliary clauses in filterList / distinctList / orderList so callers
// can run replaceSubQueries on them. OVER calls and sub-queries are not
// traversed: window aggregates and nested SELECTs are handled elsewhere.
@Override
public Void visit(SqlCall call) {
// ignore window aggregates and ranking functions (associated with OVER operator)
if (call.getOperator().getKind() == SqlKind.OVER) {
return null;
}
if (call.getOperator().getKind() == SqlKind.FILTER) {
// the WHERE in a FILTER must be tracked too so we can call replaceSubQueries on it.
// see https://issues.apache.org/jira/browse/CALCITE-1910
final SqlNode aggCall = call.getOperandList().get(0);
final SqlNode whereCall = call.getOperandList().get(1);
list.add(aggCall);
filterList.add(whereCall);
return null;
}
if (call.getOperator().getKind() == SqlKind.WITHIN_DISTINCT) {
// AGG(x) WITHIN DISTINCT (keys): track both the aggregate and its keys.
final SqlNode aggCall = call.getOperandList().get(0);
final SqlNodeList distinctList = (SqlNodeList) call.getOperandList().get(1);
list.add(aggCall);
distinctList.getList().forEach(this.distinctList::add);
return null;
}
if (call.getOperator().getKind() == SqlKind.WITHIN_GROUP) {
// AGG(x) WITHIN GROUP (ORDER BY ...): track the aggregate and its sort keys.
final SqlNode aggCall = call.getOperandList().get(0);
final SqlNodeList orderList = (SqlNodeList) call.getOperandList().get(1);
list.add(aggCall);
this.orderList.addAll(orderList);
return null;
}
if (call.getOperator().isAggregator()) {
list.add(call);
return null;
}
// Don't traverse into sub-queries, even if they contain aggregate
// functions.
if (call instanceof SqlSelect) {
return null;
}
return call.getOperator().acceptCall(this, call);
} | Visitor that collects all aggregate functions in a {@link SqlNode} tree.
// Config flag: sub-query decorrelation is on unless explicitly disabled.
@Value.Default
default boolean isDecorrelationEnabled() {
return true;
} | Returns the {@code decorrelationEnabled} option. Controls whether to disable sub-query
decorrelation when needed. e.g. if outer joins are not supported. | isDecorrelationEnabled | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
// Config flag: trimming of unused fields during conversion is off by default.
@Value.Default
default boolean isTrimUnusedFields() {
return false;
} | Returns the {@code trimUnusedFields} option. Controls whether to trim unused fields as
part of the conversion process. | isTrimUnusedFields | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
// Config flag: by default the statement is NOT part of an EXPLAIN PLAN.
@Value.Default
default boolean isExplain() {
return false;
} | Returns the {@code explain} option. Describes whether the current statement is part of an
EXPLAIN PLAN statement. | isExplain | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | Apache-2.0 |
// Config flag: wrapping with JSON_TYPE_OPERATOR between JSON functions is on
// by default.
@Value.Default
default boolean isAddJsonTypeOperatorEnabled() {
return true;
} | Whether add {@link SqlStdOperatorTable#JSON_TYPE_OPERATOR} for between json functions.
// NVL(a, b) / IFNULL(a, b) is rewritten as
//   CASE WHEN a IS NOT NULL THEN CAST(a ...) ELSE CAST(b ...) END
// where each branch is cast to the validated result type, keeping each
// operand's own nullability so the CASE type is as tight as possible.
private static RexNode convertNvl(SqlRexContext cx, SqlCall call) {
final RexBuilder rexBuilder = cx.getRexBuilder();
final RexNode operand0 = cx.convertExpression(call.getOperandList().get(0));
final RexNode operand1 = cx.convertExpression(call.getOperandList().get(1));
final RelDataType type = cx.getValidator().getValidatedNodeType(call);
// Preserve Operand Nullability
return rexBuilder.makeCall(
type,
SqlStdOperatorTable.CASE,
ImmutableList.of(
rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_NULL, operand0),
rexBuilder.makeCast(
cx.getTypeFactory()
.createTypeWithNullability(
type, operand0.getType().isNullable()),
operand0),
rexBuilder.makeCast(
cx.getTypeFactory()
.createTypeWithNullability(
type, operand1.getType().isNullable()),
operand1)));
} | Converts a call to the {@code NVL} function (and also its synonym, {@code IFNULL}).
// DECODE(value, s1, r1, s2, r2, ..., [default]) becomes a CASE whose WHEN
// branches compare `value` with each search term using IS NOT DISTINCT FROM
// (so NULL matches NULL, per DECODE semantics). An even operand count means a
// trailing default operand exists; otherwise the ELSE branch is a typed NULL.
private static RexNode convertDecode(SqlRexContext cx, SqlCall call) {
final RexBuilder rexBuilder = cx.getRexBuilder();
final List<RexNode> operands =
convertOperands(cx, call, SqlOperandTypeChecker.Consistency.NONE);
final RelDataType type = cx.getValidator().getValidatedNodeType(call);
final List<RexNode> exprs = new ArrayList<>();
// Operands are laid out as (value, search1, result1, search2, result2, ...).
for (int i = 1; i < operands.size() - 1; i += 2) {
exprs.add(
RelOptUtil.isDistinctFrom(rexBuilder, operands.get(0), operands.get(i), true));
exprs.add(operands.get(i + 1));
}
if (operands.size() % 2 == 0) {
exprs.add(Util.last(operands));
} else {
exprs.add(rexBuilder.makeNullLiteral(type));
}
return rexBuilder.makeCall(type, SqlStdOperatorTable.CASE, exprs);
} | Converts a call to the DECODE function.
// IF(b, x, y) maps directly onto CASE WHEN b THEN x ELSE y END: the three
// converted operands are passed straight through as CASE operands.
private static RexNode convertIf(SqlRexContext cx, SqlCall call) {
final RexBuilder rexBuilder = cx.getRexBuilder();
final List<RexNode> operands =
convertOperands(cx, call, SqlOperandTypeChecker.Consistency.NONE);
final RelDataType type = cx.getValidator().getValidatedNodeType(call);
return rexBuilder.makeCall(type, SqlStdOperatorTable.CASE, operands);
} | Converts a call to the IF function.
<p>{@code IF(b, x, y)} → {@code CASE WHEN b THEN x ELSE y END}. | convertIf | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java | Apache-2.0 |
// x BETWEEN y AND z is expanded to comparisons:
//   ASYMMETRIC: x >= y AND x <= z
//   SYMMETRIC:  (x >= y AND x <= z) OR (x >= z AND x <= y)
// and wrapped in NOT when the operator is the negated form (NOT BETWEEN).
// Operand consistency for conversion is taken from the operator's own
// operand-type checker when available.
public RexNode convertBetween(SqlRexContext cx, SqlBetweenOperator op, SqlCall call) {
SqlOperandTypeChecker operandTypeChecker = op.getOperandTypeChecker();
final SqlOperandTypeChecker.Consistency consistency =
operandTypeChecker == null
? SqlOperandTypeChecker.Consistency.NONE
: operandTypeChecker.getConsistency();
final List<RexNode> list = convertOperands(cx, call, consistency);
final RexNode x = list.get(SqlBetweenOperator.VALUE_OPERAND);
final RexNode y = list.get(SqlBetweenOperator.LOWER_OPERAND);
final RexNode z = list.get(SqlBetweenOperator.UPPER_OPERAND);
final RexBuilder rexBuilder = cx.getRexBuilder();
RexNode ge1 = ge(rexBuilder, x, y);
RexNode le1 = le(rexBuilder, x, z);
RexNode and1 = and(rexBuilder, ge1, le1);
RexNode res;
final SqlBetweenOperator.Flag symmetric = op.flag;
switch (symmetric) {
case ASYMMETRIC:
res = and1;
break;
case SYMMETRIC:
// SYMMETRIC also accepts bounds given in reverse order.
RexNode ge2 = ge(rexBuilder, x, z);
RexNode le2 = le(rexBuilder, x, y);
RexNode and2 = and(rexBuilder, ge2, le2);
res = or(rexBuilder, and1, and2);
break;
default:
throw Util.unexpected(symmetric);
}
final SqlBetweenOperator betweenOp = (SqlBetweenOperator) call.getOperator();
if (betweenOp.isNegated()) {
res = rexBuilder.makeCall(SqlStdOperatorTable.NOT, res);
}
return res;
} | Converts a BETWEEN expression.
<p>Called automatically via reflection. | convertBetween | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java | Apache-2.0 |
// Concatenates the chained literal fragments eagerly into one SqlLiteral and
// converts that single literal; the operator argument is only needed for the
// reflective dispatch signature.
public RexNode convertLiteralChain(SqlRexContext cx, SqlLiteralChainOperator op, SqlCall call) {
Util.discard(cx);
SqlLiteral sum = SqlLiteralChainOperator.concatenateOperands(call);
return cx.convertLiteral(sum);
} | Converts a LiteralChain expression: that is, concatenates the operands immediately, to
produce a single literal string.
<p>Called automatically via reflection. | convertLiteralChain | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java | Apache-2.0 |
// Ordinary ROW(...) calls fall through to the generic convertCall path. The
// special case is a ROW validated as COLUMN_LIST: its operands are simple
// identifiers, which are turned into string literals and wrapped in a
// COLUMN_LIST call.
public RexNode convertRow(SqlRexContext cx, SqlRowOperator op, SqlCall call) {
if (cx.getValidator().getValidatedNodeType(call).getSqlTypeName()
!= SqlTypeName.COLUMN_LIST) {
return convertCall(cx, call);
}
final RexBuilder rexBuilder = cx.getRexBuilder();
final List<RexNode> columns = new ArrayList<>();
for (String operand : SqlIdentifier.simpleNames(call.getOperandList())) {
columns.add(rexBuilder.makeLiteral(operand));
}
final RelDataType type =
rexBuilder.deriveReturnType(SqlStdOperatorTable.COLUMN_LIST, columns);
return rexBuilder.makeCall(type, SqlStdOperatorTable.COLUMN_LIST, columns);
} | Converts a ROW.
<p>Called automatically via reflection. | convertRow | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java | Apache-2.0 |
// Rewrites LTRIM(x) / RTRIM(x) as TRIM(flag, ' ', x): the surrounding
// convertlet's `flag` selects LEADING or TRAILING, and the trimmed character
// is a single space.
@Override
public RexNode convertCall(SqlRexContext cx, SqlCall call) {
final RexBuilder rexBuilder = cx.getRexBuilder();
final RexNode operand = cx.convertExpression(call.getOperandList().get(0));
return rexBuilder.makeCall(
SqlStdOperatorTable.TRIM,
rexBuilder.makeFlag(flag),
rexBuilder.makeLiteral(" "),
operand);
} | Convertlet that converts {@code LTRIM} and {@code RTRIM} to {@code TRIM}.
// Expands GREATEST/LEAST into a CASE: the first branch returns NULL if any
// operand is NULL; each subsequent branch returns operand i when it compares
// strictly greater (GREATEST) or less (LEAST) than every later operand; the
// final ELSE is the last operand.
@Override
public RexNode convertCall(SqlRexContext cx, SqlCall call) {
// Translate
// GREATEST(a, b, c, d)
// to
// CASE
// WHEN a IS NULL OR b IS NULL OR c IS NULL OR d IS NULL
// THEN NULL
// WHEN a > b AND a > c AND a > d
// THEN a
// WHEN b > c AND b > d
// THEN b
// WHEN c > d
// THEN c
// ELSE d
// END
final RexBuilder rexBuilder = cx.getRexBuilder();
final RelDataType type = cx.getValidator().getValidatedNodeType(call);
final SqlBinaryOperator op;
switch (call.getKind()) {
case GREATEST:
op = SqlStdOperatorTable.GREATER_THAN;
break;
case LEAST:
op = SqlStdOperatorTable.LESS_THAN;
break;
default:
throw new AssertionError();
}
final List<RexNode> exprs =
convertOperands(cx, call, SqlOperandTypeChecker.Consistency.NONE);
final List<RexNode> list = new ArrayList<>();
final List<RexNode> orList = new ArrayList<>();
// First WHEN: any NULL operand yields a NULL result.
for (RexNode expr : exprs) {
orList.add(rexBuilder.makeCall(SqlStdOperatorTable.IS_NULL, expr));
}
list.add(RexUtil.composeDisjunction(rexBuilder, orList));
list.add(rexBuilder.makeNullLiteral(type));
// One WHEN per operand (except the last), testing it against all later operands.
for (int i = 0; i < exprs.size() - 1; i++) {
RexNode expr = exprs.get(i);
final List<RexNode> andList = new ArrayList<>();
for (int j = i + 1; j < exprs.size(); j++) {
final RexNode expr2 = exprs.get(j);
andList.add(rexBuilder.makeCall(op, expr, expr2));
}
list.add(RexUtil.composeConjunction(rexBuilder, andList));
list.add(expr);
}
list.add(exprs.get(exprs.size() - 1));
return rexBuilder.makeCall(type, SqlStdOperatorTable.CASE, list);
} | Convertlet that converts {@code GREATEST} and {@code LEAST}.
// Thin adapter: delegates FLOOR/CEIL conversion to the shared
// convertFloorCeil helper.
@Override
public RexNode convertCall(SqlRexContext cx, SqlCall call) {
return convertFloorCeil(cx, call);
} | Convertlet that handles {@code FLOOR} and {@code CEIL} functions.
// Converts SUBSTR(value, start [, length]) into SUBSTRING calls whose start
// and length are normalized per the dialect selected by `library`
// (PostgreSQL, BigQuery, MySQL, or the Oracle-style default) — see the
// dialect-by-dialect translations spelled out in the comment below. The
// normalization CASE expressions handle start = 0, negative start
// (counted from the end via length(value) + 1), and negative length
// (clamped to 0); BigQuery additionally short-circuits to the whole value
// when start falls before the beginning.
@Override
public RexNode convertCall(SqlRexContext cx, SqlCall call) {
// Translate
// SUBSTR(value, start, length)
//
// to the following if we want PostgreSQL semantics:
// SUBSTRING(value, start, length)
//
// to the following if we want Oracle semantics:
// SUBSTRING(
// value
// FROM CASE
// WHEN start = 0
// THEN 1
// WHEN start + (length(value) + 1) < 1
// THEN length(value) + 1
// WHEN start < 0
// THEN start + (length(value) + 1)
// ELSE start)
// FOR CASE WHEN length < 0 THEN 0 ELSE length END)
//
// to the following in MySQL:
// SUBSTRING(
// value
// FROM CASE
// WHEN start = 0
// THEN length(value) + 1 -- different from Oracle
// WHEN start + (length(value) + 1) < 1
// THEN length(value) + 1
// WHEN start < 0
// THEN start + length(value) + 1
// ELSE start)
// FOR CASE WHEN length < 0 THEN 0 ELSE length END)
//
// to the following if we want BigQuery semantics:
// CASE
// WHEN start + (length(value) + 1) < 1
// THEN value
// ELSE SUBSTRING(
// value
// FROM CASE
// WHEN start = 0
// THEN 1
// WHEN start < 0
// THEN start + length(value) + 1
// ELSE start)
// FOR CASE WHEN length < 0 THEN 0 ELSE length END)
final RexBuilder rexBuilder = cx.getRexBuilder();
final List<RexNode> exprs =
convertOperands(cx, call, SqlOperandTypeChecker.Consistency.NONE);
final RexNode value = exprs.get(0);
final RexNode start = exprs.get(1);
final RelDataType startType = start.getType();
final RexLiteral zeroLiteral = rexBuilder.makeLiteral(0, startType);
final RexLiteral oneLiteral = rexBuilder.makeLiteral(1, startType);
// Length in the value's own units: bytes for binary, characters for text.
final RexNode valueLength =
SqlTypeUtil.isBinary(value.getType())
? rexBuilder.makeCall(SqlStdOperatorTable.OCTET_LENGTH, value)
: rexBuilder.makeCall(SqlStdOperatorTable.CHAR_LENGTH, value);
final RexNode valueLengthPlusOne =
rexBuilder.makeCall(SqlStdOperatorTable.PLUS, valueLength, oneLiteral);
final RexNode newStart;
switch (library) {
case POSTGRESQL:
if (call.operandCount() == 2) {
// 2-arg PostgreSQL SUBSTR clamps start to at least 1.
newStart =
rexBuilder.makeCall(
SqlStdOperatorTable.CASE,
rexBuilder.makeCall(
SqlStdOperatorTable.LESS_THAN, start, oneLiteral),
oneLiteral,
start);
} else {
newStart = start;
}
break;
case BIG_QUERY:
newStart =
rexBuilder.makeCall(
SqlStdOperatorTable.CASE,
rexBuilder.makeCall(
SqlStdOperatorTable.EQUALS, start, zeroLiteral),
oneLiteral,
rexBuilder.makeCall(
SqlStdOperatorTable.LESS_THAN, start, zeroLiteral),
rexBuilder.makeCall(
SqlStdOperatorTable.PLUS, start, valueLengthPlusOne),
start);
break;
default:
// Oracle/MySQL shape; they differ only in what start = 0 maps to.
newStart =
rexBuilder.makeCall(
SqlStdOperatorTable.CASE,
rexBuilder.makeCall(
SqlStdOperatorTable.EQUALS, start, zeroLiteral),
library == SqlLibrary.MYSQL ? valueLengthPlusOne : oneLiteral,
rexBuilder.makeCall(
SqlStdOperatorTable.LESS_THAN,
rexBuilder.makeCall(
SqlStdOperatorTable.PLUS,
start,
valueLengthPlusOne),
oneLiteral),
valueLengthPlusOne,
rexBuilder.makeCall(
SqlStdOperatorTable.LESS_THAN, start, zeroLiteral),
rexBuilder.makeCall(
SqlStdOperatorTable.PLUS, start, valueLengthPlusOne),
start);
break;
}
if (call.operandCount() == 2) {
return rexBuilder.makeCall(SqlStdOperatorTable.SUBSTRING, value, newStart);
}
assert call.operandCount() == 3;
final RexNode length = exprs.get(2);
final RexNode newLength;
switch (library) {
case POSTGRESQL:
newLength = length;
break;
default:
// Non-PostgreSQL dialects clamp a negative length to 0.
newLength =
rexBuilder.makeCall(
SqlStdOperatorTable.CASE,
rexBuilder.makeCall(
SqlStdOperatorTable.LESS_THAN, length, zeroLiteral),
zeroLiteral,
length);
}
final RexNode substringCall =
rexBuilder.makeCall(SqlStdOperatorTable.SUBSTRING, value, newStart, newLength);
switch (library) {
case BIG_QUERY:
return rexBuilder.makeCall(
SqlStdOperatorTable.CASE,
rexBuilder.makeCall(
SqlStdOperatorTable.LESS_THAN,
rexBuilder.makeCall(
SqlStdOperatorTable.PLUS, start, valueLengthPlusOne),
oneLiteral),
value,
substringCall);
default:
return substringCall;
}
} | Convertlet that handles the {@code SUBSTR} function; various dialects have slightly different
specifications. PostgreSQL seems to comply with the ISO standard for the {@code SUBSTRING}
function, and therefore Calcite's default behavior matches PostgreSQL. | convertCall | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java | Apache-2.0 |
// Converts both TIMESTAMPADD(unit, count, ts) (JDBC style, 3 operands) and
// TIMESTAMP_ADD(ts, interval) (BigQuery style, 2 operands) into
// ts + count * INTERVAL '1' unit. Custom (named) time frames instead become a
// TIMESTAMP_ADD call carrying the frame name as a literal, evaluated at
// runtime. MICROSECOND/NANOSECOND counts are rescaled through the unit
// multiplier since interval literals cannot express sub-millisecond units
// directly.
@Override
public RexNode convertCall(SqlRexContext cx, SqlCall call) {
// TIMESTAMPADD(unit, count, timestamp)
// => timestamp + count * INTERVAL '1' UNIT
// TIMESTAMP_ADD(timestamp, interval)
// => timestamp + interval
// "timestamp" may be of type TIMESTAMP or TIMESTAMP WITH LOCAL TIME ZONE.
final RexBuilder rexBuilder = cx.getRexBuilder();
SqlIntervalQualifier qualifier;
final RexNode op1;
final RexNode op2;
switch (call.operandCount()) {
case 2:
// BigQuery-style 'TIMESTAMP_ADD(timestamp, interval)'
final SqlBasicCall operandCall = call.operand(1);
qualifier = operandCall.operand(1);
op1 = cx.convertExpression(operandCall.operand(0));
op2 = cx.convertExpression(call.operand(0));
break;
default:
// JDBC-style 'TIMESTAMPADD(unit, count, timestamp)'
qualifier = call.operand(0);
op1 = cx.convertExpression(call.operand(1));
op2 = cx.convertExpression(call.operand(2));
}
final TimeFrame timeFrame = cx.getValidator().validateTimeFrame(qualifier);
// EPOCH is the fallback unit used to flag a custom (named) time frame.
final TimeUnit unit = first(timeFrame.unit(), TimeUnit.EPOCH);
final RelDataType type = cx.getValidator().getValidatedNodeType(call);
if (unit == TimeUnit.EPOCH && qualifier.timeFrameName != null) {
// Custom time frames have a different path. They are kept as names,
// and then handled by Java functions such as
// SqlFunctions.customTimestampAdd.
final RexLiteral timeFrameName = rexBuilder.makeLiteral(qualifier.timeFrameName);
// If the TIMESTAMPADD call has type TIMESTAMP and op2 has type DATE
// (which can happen for sub-day time frames such as HOUR), cast op2 to
// TIMESTAMP.
final RexNode op2b = rexBuilder.makeCast(type, op2, false);
return rexBuilder.makeCall(
type,
SqlStdOperatorTable.TIMESTAMP_ADD,
ImmutableList.of(timeFrameName, op1, op2b));
}
if (qualifier.getUnit() != unit) {
qualifier = new SqlIntervalQualifier(unit, null, qualifier.getParserPosition());
}
RexNode interval2Add;
switch (unit) {
case MICROSECOND:
case NANOSECOND:
// Scale through the unit multiplier; RoundingMode.UNNECESSARY asserts
// the division is exact.
interval2Add =
divide(
rexBuilder,
multiply(
rexBuilder,
rexBuilder.makeIntervalLiteral(
BigDecimal.ONE, qualifier),
op1),
BigDecimal.ONE.divide(
unit.multiplier, RoundingMode.UNNECESSARY));
break;
default:
interval2Add =
multiply(
rexBuilder,
rexBuilder.makeIntervalLiteral(unit.multiplier, qualifier),
op1);
}
return rexBuilder.makeCall(SqlStdOperatorTable.DATETIME_PLUS, op2, interval2Add);
} | Convertlet that handles the 3-argument {@code TIMESTAMPADD} function and the 2-argument
BigQuery-style {@code TIMESTAMP_ADD} function. | convertCall | java | apache/flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java | https://github.com/apache/flink/blob/master/flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java | Apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.