| id (int64, 0–190k) | prompt (string, length 21–13.4M) | docstring (string, length 1–12k, nullable ⌀) |
|---|---|---|
152,875 | from __future__ import annotations
import typing as t
from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
DATE_ADD_OR_SUB,
Dialect,
JSON_EXTRACT_TYPE,
any_value_to_max_sql,
bool_xor_sql,
datestrtodate_sql,
build_formatted_time,
filter_array_using_unnest,
json_extract_segments,
json_path_key_only_name,
max_or_greatest,
merge_without_target_sql,
min_or_least,
no_last_day_sql,
no_map_from_entries_sql,
no_paren_current_date_sql,
no_pivot_sql,
no_trycast_sql,
build_json_extract_path,
build_timestamp_trunc,
rename_func,
str_position_sql,
struct_extract_sql,
timestamptrunc_sql,
timestrtotime_sql,
trim_sql,
ts_or_ds_add_cast,
)
from sqlglot.helper import seq_get
from sqlglot.parser import binary_range_parser
from sqlglot.tokens import TokenType
class Postgres(Dialect):
    """sqlglot Dialect implementation for PostgreSQL.

    Bundles the Postgres-specific tokenizer, parser and SQL generator together
    with dialect-wide settings (index base, NULL ordering, time formats).
    """

    # Postgres array subscripts and string positions are 1-based.
    INDEX_OFFSET = 1
    # Division follows operand types (int / int -> int).
    TYPED_DIVISION = True
    # CONCAT treats NULL arguments as empty strings.
    CONCAT_COALESCE = True
    # NULLs sort as if larger than any non-NULL value.
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"

    # Postgres TO_CHAR/TO_DATE format tokens -> Python strftime directives.
    TIME_MAPPING = {
        "AM": "%p",
        "PM": "%p",
        "D": "%u",  # 1-based day of week
        "DD": "%d",  # day of month
        "DDD": "%j",  # zero padded day of year
        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
        "FMDDD": "%-j",  # day of year
        "FMHH12": "%-I",  # 9
        "FMHH24": "%-H",  # 9
        "FMMI": "%-M",  # Minute
        "FMMM": "%-m",  # 1
        "FMSS": "%-S",  # Second
        "HH12": "%I",  # 09
        "HH24": "%H",  # 09
        "MI": "%M",  # zero padded minute
        "MM": "%m",  # 01
        "OF": "%z",  # utc offset
        "SS": "%S",  # zero padded second
        "TMDay": "%A",  # TM is locale dependent
        "TMDy": "%a",
        "TMMon": "%b",  # Sep
        "TMMonth": "%B",  # September
        "TZ": "%Z",  # uppercase timezone name
        "US": "%f",  # zero padded microsecond
        "WW": "%U",  # 1-based week of year
        "YY": "%y",  # 15
        "YYYY": "%Y",  # 2015
    }

    class Tokenizer(tokens.Tokenizer):
        # Postgres typed string literals, e.g. B'1010', X'FF', E'\n'.
        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
        # Dollar-quoted strings: $tag$ ... $tag$.
        HEREDOC_STRINGS = ["$"]
        HEREDOC_TAG_IS_IDENTIFIER = True
        HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            # Pattern-match / containment operator spellings.
            "~~": TokenType.LIKE,
            "~~*": TokenType.ILIKE,
            "~*": TokenType.IRLIKE,
            "~": TokenType.RLIKE,
            "@@": TokenType.DAT,  # text-search match operator
            "@>": TokenType.AT_GT,
            "<@": TokenType.LT_AT,
            "|/": TokenType.PIPE_SLASH,  # square root
            "||/": TokenType.DPIPE_SLASH,  # cube root
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "BIGSERIAL": TokenType.BIGSERIAL,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "CONSTRAINT TRIGGER": TokenType.COMMAND,
            "DECLARE": TokenType.COMMAND,
            "DO": TokenType.COMMAND,
            "EXEC": TokenType.COMMAND,
            "HSTORE": TokenType.HSTORE,
            "JSONB": TokenType.JSONB,
            "MONEY": TokenType.MONEY,
            "REFRESH": TokenType.COMMAND,
            "REINDEX": TokenType.COMMAND,
            "RESET": TokenType.COMMAND,
            "REVOKE": TokenType.COMMAND,
            "SERIAL": TokenType.SERIAL,
            "SMALLSERIAL": TokenType.SMALLSERIAL,
            "NAME": TokenType.NAME,
            "TEMP": TokenType.TEMPORARY,
            "CSTRING": TokenType.PSEUDO_TYPE,
            "OID": TokenType.OBJECT_IDENTIFIER,
            "ONLY": TokenType.ONLY,
            "OPERATOR": TokenType.OPERATOR,
            # Object-identifier alias types (regclass and friends).
            "REGCLASS": TokenType.OBJECT_IDENTIFIER,
            "REGCOLLATION": TokenType.OBJECT_IDENTIFIER,
            "REGCONFIG": TokenType.OBJECT_IDENTIFIER,
            "REGDICTIONARY": TokenType.OBJECT_IDENTIFIER,
            "REGNAMESPACE": TokenType.OBJECT_IDENTIFIER,
            "REGOPER": TokenType.OBJECT_IDENTIFIER,
            "REGOPERATOR": TokenType.OBJECT_IDENTIFIER,
            "REGPROC": TokenType.OBJECT_IDENTIFIER,
            "REGPROCEDURE": TokenType.OBJECT_IDENTIFIER,
            "REGROLE": TokenType.OBJECT_IDENTIFIER,
            "REGTYPE": TokenType.OBJECT_IDENTIFIER,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.HEREDOC_STRING,
        }

        VAR_SINGLE_TOKENS = {"$"}

    class Parser(parser.Parser):
        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "SET": lambda self: self.expression(exp.SetConfigProperty, this=self._parse_set()),
        }
        # "INPUT" is not a Postgres property; drop the inherited parser.
        PROPERTY_PARSERS.pop("INPUT")

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE_TRUNC": build_timestamp_trunc,
            "GENERATE_SERIES": _build_generate_series,
            "JSON_EXTRACT_PATH": build_json_extract_path(exp.JSONExtract),
            "JSON_EXTRACT_PATH_TEXT": build_json_extract_path(exp.JSONExtractScalar),
            "MAKE_TIME": exp.TimeFromParts.from_arg_list,
            "MAKE_TIMESTAMP": exp.TimestampFromParts.from_arg_list,
            "NOW": exp.CurrentTimestamp.from_arg_list,
            "TO_CHAR": build_formatted_time(exp.TimeToStr, "postgres"),
            "TO_TIMESTAMP": _build_to_timestamp,
            "UNNEST": exp.Explode.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
        }

        BITWISE = {
            **parser.Parser.BITWISE,
            TokenType.HASH: exp.BitwiseXor,  # '#' is bitwise XOR in Postgres
        }

        EXPONENT = {
            TokenType.CARET: exp.Pow,  # '^' is exponentiation, not XOR
        }

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
            TokenType.DAT: lambda self, this: self.expression(
                exp.MatchAgainst, this=self._parse_bitwise(), expressions=[this]
            ),
            TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
            TokenType.OPERATOR: lambda self, this: self._parse_operator(this),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.END: lambda self: self._parse_commit_or_rollback(),
        }

        JSON_ARROWS_REQUIRE_JSON_TYPE = True

        def _parse_operator(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            """Parse one or more OPERATOR(<op>) applications, e.g. a OPERATOR(pg_catalog.+) b."""
            while True:
                if not self._match(TokenType.L_PAREN):
                    break

                op = ""
                # Concatenate raw token text until the closing paren to form the operator name.
                while self._curr and not self._match(TokenType.R_PAREN):
                    op += self._curr.text
                    self._advance()

                this = self.expression(
                    exp.Operator,
                    comments=self._prev_comments,
                    this=this,
                    operator=op,
                    expression=self._parse_bitwise(),
                )

                # Keep left-folding while further OPERATOR keywords follow.
                if not self._match(TokenType.OPERATOR):
                    break

            return this

        def _parse_date_part(self) -> exp.Expression:
            """Parse DATE_PART(part, value) into an Extract expression."""
            part = self._parse_type()
            self._match(TokenType.COMMA)
            value = self._parse_bitwise()

            if part and part.is_string:
                # Quoted part names are normalized to bare variables.
                part = exp.var(part.name)

            return self.expression(exp.Extract, this=part, expression=value)

    class Generator(generator.Generator):
        SINGLE_STRING_INTERVAL = True
        RENAME_TABLE_WITH_DB = False
        LOCKING_READS_SUPPORTED = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        NVL2_SUPPORTED = False
        # Positional query parameters are rendered as $1, $2, ...
        PARAMETER_TOKEN = "$"
        TABLESAMPLE_SIZE_IS_ROWS = False
        TABLESAMPLE_SEED_KEYWORD = "REPEATABLE"
        SUPPORTS_SELECT_INTO = True
        JSON_TYPE_REQUIRED_FOR_EXTRACTION = True
        SUPPORTS_UNLOGGED_TABLES = True
        LIKE_PROPERTY_INSIDE_SCHEMA = True
        MULTI_ARG_DISTINCT = False
        CAN_IMPLEMENT_ARRAY_ANY = True

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.TINYINT: "SMALLINT",  # Postgres has no 1-byte int
            exp.DataType.Type.FLOAT: "REAL",
            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
            exp.DataType.Type.BINARY: "BYTEA",
            exp.DataType.Type.VARBINARY: "BYTEA",
            exp.DataType.Type.DATETIME: "TIMESTAMP",
        }

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.AnyValue: any_value_to_max_sql,
            # ARRAY(SELECT ...) for subqueries, ARRAY[...] for literals.
            exp.Array: lambda self, e: (
                f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
                if isinstance(seq_get(e.expressions, 0), exp.Select)
                else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]"
            ),
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            exp.ArrayContained: lambda self, e: self.binary(e, "<@"),
            exp.ArrayContains: lambda self, e: self.binary(e, "@>"),
            exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"),
            exp.ArrayFilter: filter_array_using_unnest,
            # ARRAY_LENGTH requires a dimension argument; default to 1.
            exp.ArraySize: lambda self, e: self.func("ARRAY_LENGTH", e.this, e.expression or "1"),
            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
            exp.ColumnDef: transforms.preprocess([_auto_increment_to_serial, _serial_to_generated]),
            exp.CurrentDate: no_paren_current_date_sql,
            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
            exp.CurrentUser: lambda *_: "CURRENT_USER",
            exp.DateAdd: _date_add_sql("+"),
            exp.DateDiff: _date_diff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DataType: _datatype_sql,
            exp.DateSub: _date_add_sql("-"),
            exp.Explode: rename_func("UNNEST"),
            exp.GroupConcat: _string_agg_sql,
            exp.JSONExtract: _json_extract_sql("JSON_EXTRACT_PATH", "->"),
            exp.JSONExtractScalar: _json_extract_sql("JSON_EXTRACT_PATH_TEXT", "->>"),
            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
            exp.ParseJSON: lambda self, e: self.sql(exp.cast(e.this, exp.DataType.Type.JSON)),
            exp.JSONPathKey: json_path_key_only_name,
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONPathSubscript: lambda self, e: self.json_path_part(e.this),
            exp.LastDay: no_last_day_sql,
            exp.LogicalOr: rename_func("BOOL_OR"),
            exp.LogicalAnd: rename_func("BOOL_AND"),
            exp.Max: max_or_greatest,
            exp.MapFromEntries: no_map_from_entries_sql,
            exp.Min: min_or_least,
            exp.Merge: merge_without_target_sql,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: no_pivot_sql,
            exp.Pow: lambda self, e: self.binary(e, "^"),
            exp.Rand: rename_func("RANDOM"),
            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_semi_and_anti_joins,
                    transforms.eliminate_qualify,
                ]
            ),
            exp.StrPosition: str_position_sql,
            exp.StrToDate: lambda self, e: self.func("TO_DATE", e.this, self.format_time(e)),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.StructExtract: struct_extract_sql,
            exp.Substring: _substring_sql,
            exp.TimeFromParts: rename_func("MAKE_TIME"),
            exp.TimestampFromParts: rename_func("MAKE_TIMESTAMP"),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func("TO_CHAR", e.this, self.format_time(e)),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: trim_sql,
            exp.TryCast: no_trycast_sql,
            exp.TsOrDsAdd: _date_add_sql("+"),
            exp.TsOrDsDiff: _date_diff_sql,
            exp.UnixToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this),
            exp.VariancePop: rename_func("VAR_POP"),
            exp.Variance: rename_func("VAR_SAMP"),
            exp.Xor: bool_xor_sql,
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST; JSON arrays use JSON_ARRAY_ELEMENTS instead."""
            if len(expression.expressions) == 1:
                from sqlglot.optimizer.annotate_types import annotate_types

                this = annotate_types(expression.expressions[0])
                if this.is_type("array<json>"):
                    # Strip wrapping casts, then re-cast the bare value to JSON.
                    while isinstance(this, exp.Cast):
                        this = this.this

                    arg = self.sql(exp.cast(this, exp.DataType.Type.JSON))
                    alias = self.sql(expression, "alias")
                    alias = f" AS {alias}" if alias else ""

                    if expression.args.get("offset"):
                        self.unsupported("Unsupported JSON_ARRAY_ELEMENTS with offset")

                    return f"JSON_ARRAY_ELEMENTS({arg}){alias}"

            return super().unnest_sql(expression)

        def bracket_sql(self, expression: exp.Bracket) -> str:
            """Forms like ARRAY[1, 2, 3][3] aren't allowed; we need to wrap the ARRAY."""
            if isinstance(expression.this, exp.Array):
                expression.set("this", exp.paren(expression.this, copy=False))

            return super().bracket_sql(expression)

        def matchagainst_sql(self, expression: exp.MatchAgainst) -> str:
            """Render full-text match as '<expr> @@ <query>' terms OR'd together."""
            this = self.sql(expression, "this")
            expressions = [f"{self.sql(e)} @@ {this}" for e in expression.expressions]
            sql = " OR ".join(expressions)
            return f"({sql})" if len(expressions) > 1 else sql
def _string_agg_sql(self: Postgres.Generator, expression: exp.GroupConcat) -> str:
    """Render a GroupConcat node as Postgres STRING_AGG, keeping any ORDER BY."""
    sep = expression.args.get("separator") or exp.Literal.string(",")

    arg = expression.this
    ordering = ""
    if isinstance(arg, exp.Order):
        if arg.this:
            # Detach the aggregated expression from the Order wrapper.
            arg = arg.this.pop()
        # The rendered Order clause carries its own leading space.
        ordering = self.sql(expression.this)

    return f"STRING_AGG({self.format_args(arg, sep)}{ordering})"
152,876 | from __future__ import annotations
import typing as t
from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
DATE_ADD_OR_SUB,
Dialect,
JSON_EXTRACT_TYPE,
any_value_to_max_sql,
bool_xor_sql,
datestrtodate_sql,
build_formatted_time,
filter_array_using_unnest,
json_extract_segments,
json_path_key_only_name,
max_or_greatest,
merge_without_target_sql,
min_or_least,
no_last_day_sql,
no_map_from_entries_sql,
no_paren_current_date_sql,
no_pivot_sql,
no_trycast_sql,
build_json_extract_path,
build_timestamp_trunc,
rename_func,
str_position_sql,
struct_extract_sql,
timestamptrunc_sql,
timestrtotime_sql,
trim_sql,
ts_or_ds_add_cast,
)
from sqlglot.helper import seq_get
from sqlglot.parser import binary_range_parser
from sqlglot.tokens import TokenType
class Postgres(Dialect):
    """sqlglot Dialect implementation for PostgreSQL.

    Bundles the Postgres-specific tokenizer, parser and SQL generator together
    with dialect-wide settings (index base, NULL ordering, time formats).
    """

    # Postgres array subscripts and string positions are 1-based.
    INDEX_OFFSET = 1
    # Division follows operand types (int / int -> int).
    TYPED_DIVISION = True
    # CONCAT treats NULL arguments as empty strings.
    CONCAT_COALESCE = True
    # NULLs sort as if larger than any non-NULL value.
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"

    # Postgres TO_CHAR/TO_DATE format tokens -> Python strftime directives.
    TIME_MAPPING = {
        "AM": "%p",
        "PM": "%p",
        "D": "%u",  # 1-based day of week
        "DD": "%d",  # day of month
        "DDD": "%j",  # zero padded day of year
        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
        "FMDDD": "%-j",  # day of year
        "FMHH12": "%-I",  # 9
        "FMHH24": "%-H",  # 9
        "FMMI": "%-M",  # Minute
        "FMMM": "%-m",  # 1
        "FMSS": "%-S",  # Second
        "HH12": "%I",  # 09
        "HH24": "%H",  # 09
        "MI": "%M",  # zero padded minute
        "MM": "%m",  # 01
        "OF": "%z",  # utc offset
        "SS": "%S",  # zero padded second
        "TMDay": "%A",  # TM is locale dependent
        "TMDy": "%a",
        "TMMon": "%b",  # Sep
        "TMMonth": "%B",  # September
        "TZ": "%Z",  # uppercase timezone name
        "US": "%f",  # zero padded microsecond
        "WW": "%U",  # 1-based week of year
        "YY": "%y",  # 15
        "YYYY": "%Y",  # 2015
    }

    class Tokenizer(tokens.Tokenizer):
        # Postgres typed string literals, e.g. B'1010', X'FF', E'\n'.
        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
        # Dollar-quoted strings: $tag$ ... $tag$.
        HEREDOC_STRINGS = ["$"]
        HEREDOC_TAG_IS_IDENTIFIER = True
        HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            # Pattern-match / containment operator spellings.
            "~~": TokenType.LIKE,
            "~~*": TokenType.ILIKE,
            "~*": TokenType.IRLIKE,
            "~": TokenType.RLIKE,
            "@@": TokenType.DAT,  # text-search match operator
            "@>": TokenType.AT_GT,
            "<@": TokenType.LT_AT,
            "|/": TokenType.PIPE_SLASH,  # square root
            "||/": TokenType.DPIPE_SLASH,  # cube root
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "BIGSERIAL": TokenType.BIGSERIAL,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "CONSTRAINT TRIGGER": TokenType.COMMAND,
            "DECLARE": TokenType.COMMAND,
            "DO": TokenType.COMMAND,
            "EXEC": TokenType.COMMAND,
            "HSTORE": TokenType.HSTORE,
            "JSONB": TokenType.JSONB,
            "MONEY": TokenType.MONEY,
            "REFRESH": TokenType.COMMAND,
            "REINDEX": TokenType.COMMAND,
            "RESET": TokenType.COMMAND,
            "REVOKE": TokenType.COMMAND,
            "SERIAL": TokenType.SERIAL,
            "SMALLSERIAL": TokenType.SMALLSERIAL,
            "NAME": TokenType.NAME,
            "TEMP": TokenType.TEMPORARY,
            "CSTRING": TokenType.PSEUDO_TYPE,
            "OID": TokenType.OBJECT_IDENTIFIER,
            "ONLY": TokenType.ONLY,
            "OPERATOR": TokenType.OPERATOR,
            # Object-identifier alias types (regclass and friends).
            "REGCLASS": TokenType.OBJECT_IDENTIFIER,
            "REGCOLLATION": TokenType.OBJECT_IDENTIFIER,
            "REGCONFIG": TokenType.OBJECT_IDENTIFIER,
            "REGDICTIONARY": TokenType.OBJECT_IDENTIFIER,
            "REGNAMESPACE": TokenType.OBJECT_IDENTIFIER,
            "REGOPER": TokenType.OBJECT_IDENTIFIER,
            "REGOPERATOR": TokenType.OBJECT_IDENTIFIER,
            "REGPROC": TokenType.OBJECT_IDENTIFIER,
            "REGPROCEDURE": TokenType.OBJECT_IDENTIFIER,
            "REGROLE": TokenType.OBJECT_IDENTIFIER,
            "REGTYPE": TokenType.OBJECT_IDENTIFIER,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.HEREDOC_STRING,
        }

        VAR_SINGLE_TOKENS = {"$"}

    class Parser(parser.Parser):
        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "SET": lambda self: self.expression(exp.SetConfigProperty, this=self._parse_set()),
        }
        # "INPUT" is not a Postgres property; drop the inherited parser.
        PROPERTY_PARSERS.pop("INPUT")

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE_TRUNC": build_timestamp_trunc,
            "GENERATE_SERIES": _build_generate_series,
            "JSON_EXTRACT_PATH": build_json_extract_path(exp.JSONExtract),
            "JSON_EXTRACT_PATH_TEXT": build_json_extract_path(exp.JSONExtractScalar),
            "MAKE_TIME": exp.TimeFromParts.from_arg_list,
            "MAKE_TIMESTAMP": exp.TimestampFromParts.from_arg_list,
            "NOW": exp.CurrentTimestamp.from_arg_list,
            "TO_CHAR": build_formatted_time(exp.TimeToStr, "postgres"),
            "TO_TIMESTAMP": _build_to_timestamp,
            "UNNEST": exp.Explode.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
        }

        BITWISE = {
            **parser.Parser.BITWISE,
            TokenType.HASH: exp.BitwiseXor,  # '#' is bitwise XOR in Postgres
        }

        EXPONENT = {
            TokenType.CARET: exp.Pow,  # '^' is exponentiation, not XOR
        }

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
            TokenType.DAT: lambda self, this: self.expression(
                exp.MatchAgainst, this=self._parse_bitwise(), expressions=[this]
            ),
            TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
            TokenType.OPERATOR: lambda self, this: self._parse_operator(this),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.END: lambda self: self._parse_commit_or_rollback(),
        }

        JSON_ARROWS_REQUIRE_JSON_TYPE = True

        def _parse_operator(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            """Parse one or more OPERATOR(<op>) applications, e.g. a OPERATOR(pg_catalog.+) b."""
            while True:
                if not self._match(TokenType.L_PAREN):
                    break

                op = ""
                # Concatenate raw token text until the closing paren to form the operator name.
                while self._curr and not self._match(TokenType.R_PAREN):
                    op += self._curr.text
                    self._advance()

                this = self.expression(
                    exp.Operator,
                    comments=self._prev_comments,
                    this=this,
                    operator=op,
                    expression=self._parse_bitwise(),
                )

                # Keep left-folding while further OPERATOR keywords follow.
                if not self._match(TokenType.OPERATOR):
                    break

            return this

        def _parse_date_part(self) -> exp.Expression:
            """Parse DATE_PART(part, value) into an Extract expression."""
            part = self._parse_type()
            self._match(TokenType.COMMA)
            value = self._parse_bitwise()

            if part and part.is_string:
                # Quoted part names are normalized to bare variables.
                part = exp.var(part.name)

            return self.expression(exp.Extract, this=part, expression=value)

    class Generator(generator.Generator):
        SINGLE_STRING_INTERVAL = True
        RENAME_TABLE_WITH_DB = False
        LOCKING_READS_SUPPORTED = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        NVL2_SUPPORTED = False
        # Positional query parameters are rendered as $1, $2, ...
        PARAMETER_TOKEN = "$"
        TABLESAMPLE_SIZE_IS_ROWS = False
        TABLESAMPLE_SEED_KEYWORD = "REPEATABLE"
        SUPPORTS_SELECT_INTO = True
        JSON_TYPE_REQUIRED_FOR_EXTRACTION = True
        SUPPORTS_UNLOGGED_TABLES = True
        LIKE_PROPERTY_INSIDE_SCHEMA = True
        MULTI_ARG_DISTINCT = False
        CAN_IMPLEMENT_ARRAY_ANY = True

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.TINYINT: "SMALLINT",  # Postgres has no 1-byte int
            exp.DataType.Type.FLOAT: "REAL",
            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
            exp.DataType.Type.BINARY: "BYTEA",
            exp.DataType.Type.VARBINARY: "BYTEA",
            exp.DataType.Type.DATETIME: "TIMESTAMP",
        }

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.AnyValue: any_value_to_max_sql,
            # ARRAY(SELECT ...) for subqueries, ARRAY[...] for literals.
            exp.Array: lambda self, e: (
                f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
                if isinstance(seq_get(e.expressions, 0), exp.Select)
                else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]"
            ),
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            exp.ArrayContained: lambda self, e: self.binary(e, "<@"),
            exp.ArrayContains: lambda self, e: self.binary(e, "@>"),
            exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"),
            exp.ArrayFilter: filter_array_using_unnest,
            # ARRAY_LENGTH requires a dimension argument; default to 1.
            exp.ArraySize: lambda self, e: self.func("ARRAY_LENGTH", e.this, e.expression or "1"),
            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
            exp.ColumnDef: transforms.preprocess([_auto_increment_to_serial, _serial_to_generated]),
            exp.CurrentDate: no_paren_current_date_sql,
            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
            exp.CurrentUser: lambda *_: "CURRENT_USER",
            exp.DateAdd: _date_add_sql("+"),
            exp.DateDiff: _date_diff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DataType: _datatype_sql,
            exp.DateSub: _date_add_sql("-"),
            exp.Explode: rename_func("UNNEST"),
            exp.GroupConcat: _string_agg_sql,
            exp.JSONExtract: _json_extract_sql("JSON_EXTRACT_PATH", "->"),
            exp.JSONExtractScalar: _json_extract_sql("JSON_EXTRACT_PATH_TEXT", "->>"),
            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
            exp.ParseJSON: lambda self, e: self.sql(exp.cast(e.this, exp.DataType.Type.JSON)),
            exp.JSONPathKey: json_path_key_only_name,
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONPathSubscript: lambda self, e: self.json_path_part(e.this),
            exp.LastDay: no_last_day_sql,
            exp.LogicalOr: rename_func("BOOL_OR"),
            exp.LogicalAnd: rename_func("BOOL_AND"),
            exp.Max: max_or_greatest,
            exp.MapFromEntries: no_map_from_entries_sql,
            exp.Min: min_or_least,
            exp.Merge: merge_without_target_sql,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: no_pivot_sql,
            exp.Pow: lambda self, e: self.binary(e, "^"),
            exp.Rand: rename_func("RANDOM"),
            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_semi_and_anti_joins,
                    transforms.eliminate_qualify,
                ]
            ),
            exp.StrPosition: str_position_sql,
            exp.StrToDate: lambda self, e: self.func("TO_DATE", e.this, self.format_time(e)),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.StructExtract: struct_extract_sql,
            exp.Substring: _substring_sql,
            exp.TimeFromParts: rename_func("MAKE_TIME"),
            exp.TimestampFromParts: rename_func("MAKE_TIMESTAMP"),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func("TO_CHAR", e.this, self.format_time(e)),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: trim_sql,
            exp.TryCast: no_trycast_sql,
            exp.TsOrDsAdd: _date_add_sql("+"),
            exp.TsOrDsDiff: _date_diff_sql,
            exp.UnixToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this),
            exp.VariancePop: rename_func("VAR_POP"),
            exp.Variance: rename_func("VAR_SAMP"),
            exp.Xor: bool_xor_sql,
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST; JSON arrays use JSON_ARRAY_ELEMENTS instead."""
            if len(expression.expressions) == 1:
                from sqlglot.optimizer.annotate_types import annotate_types

                this = annotate_types(expression.expressions[0])
                if this.is_type("array<json>"):
                    # Strip wrapping casts, then re-cast the bare value to JSON.
                    while isinstance(this, exp.Cast):
                        this = this.this

                    arg = self.sql(exp.cast(this, exp.DataType.Type.JSON))
                    alias = self.sql(expression, "alias")
                    alias = f" AS {alias}" if alias else ""

                    if expression.args.get("offset"):
                        self.unsupported("Unsupported JSON_ARRAY_ELEMENTS with offset")

                    return f"JSON_ARRAY_ELEMENTS({arg}){alias}"

            return super().unnest_sql(expression)

        def bracket_sql(self, expression: exp.Bracket) -> str:
            """Forms like ARRAY[1, 2, 3][3] aren't allowed; we need to wrap the ARRAY."""
            if isinstance(expression.this, exp.Array):
                expression.set("this", exp.paren(expression.this, copy=False))

            return super().bracket_sql(expression)

        def matchagainst_sql(self, expression: exp.MatchAgainst) -> str:
            """Render full-text match as '<expr> @@ <query>' terms OR'd together."""
            this = self.sql(expression, "this")
            expressions = [f"{self.sql(e)} @@ {this}" for e in expression.expressions]
            sql = " OR ".join(expressions)
            return f"({sql})" if len(expressions) > 1 else sql
def _datatype_sql(self: Postgres.Generator, expression: exp.DataType) -> str:
    """Render ARRAY types with Postgres' trailing-brackets syntax (e.g. INT[])."""
    if expression.is_type("array"):
        if expression.expressions:
            return f"{self.expressions(expression, flat=True)}[]"
        # No element type given -> bare ARRAY keyword.
        return "ARRAY"

    return self.datatype_sql(expression)
152,877 | from __future__ import annotations
import typing as t
from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
DATE_ADD_OR_SUB,
Dialect,
JSON_EXTRACT_TYPE,
any_value_to_max_sql,
bool_xor_sql,
datestrtodate_sql,
build_formatted_time,
filter_array_using_unnest,
json_extract_segments,
json_path_key_only_name,
max_or_greatest,
merge_without_target_sql,
min_or_least,
no_last_day_sql,
no_map_from_entries_sql,
no_paren_current_date_sql,
no_pivot_sql,
no_trycast_sql,
build_json_extract_path,
build_timestamp_trunc,
rename_func,
str_position_sql,
struct_extract_sql,
timestamptrunc_sql,
timestrtotime_sql,
trim_sql,
ts_or_ds_add_cast,
)
from sqlglot.helper import seq_get
from sqlglot.parser import binary_range_parser
from sqlglot.tokens import TokenType
def _auto_increment_to_serial(expression: exp.Expression) -> exp.Expression:
    """Rewrite an AUTO_INCREMENT column into the equivalent Postgres SERIAL type."""
    auto = expression.find(exp.AutoIncrementColumnConstraint)

    if auto:
        # Drop the AUTO_INCREMENT constraint; the serial type implies it.
        expression.args["constraints"].remove(auto.parent)

        kind = expression.args["kind"]
        serial_for = {
            exp.DataType.Type.INT: exp.DataType.Type.SERIAL,
            exp.DataType.Type.SMALLINT: exp.DataType.Type.SMALLSERIAL,
            exp.DataType.Type.BIGINT: exp.DataType.Type.BIGSERIAL,
        }
        replacement = serial_for.get(kind.this)
        if replacement is not None:
            kind.replace(exp.DataType(this=replacement))

    return expression
152,878 | from __future__ import annotations
import typing as t
from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
DATE_ADD_OR_SUB,
Dialect,
JSON_EXTRACT_TYPE,
any_value_to_max_sql,
bool_xor_sql,
datestrtodate_sql,
build_formatted_time,
filter_array_using_unnest,
json_extract_segments,
json_path_key_only_name,
max_or_greatest,
merge_without_target_sql,
min_or_least,
no_last_day_sql,
no_map_from_entries_sql,
no_paren_current_date_sql,
no_pivot_sql,
no_trycast_sql,
build_json_extract_path,
build_timestamp_trunc,
rename_func,
str_position_sql,
struct_extract_sql,
timestamptrunc_sql,
timestrtotime_sql,
trim_sql,
ts_or_ds_add_cast,
)
from sqlglot.helper import seq_get
from sqlglot.parser import binary_range_parser
from sqlglot.tokens import TokenType
def _serial_to_generated(expression: exp.Expression) -> exp.Expression:
    """Rewrite SERIAL-family types into INT ... GENERATED BY DEFAULT AS IDENTITY NOT NULL."""
    if not isinstance(expression, exp.ColumnDef):
        return expression

    kind = expression.kind
    if not kind:
        return expression

    int_for = {
        exp.DataType.Type.SERIAL: exp.DataType.Type.INT,
        exp.DataType.Type.SMALLSERIAL: exp.DataType.Type.SMALLINT,
        exp.DataType.Type.BIGSERIAL: exp.DataType.Type.BIGINT,
    }
    target = int_for.get(kind.this)

    if target is not None:
        expression.args["kind"].replace(exp.DataType(this=target))

        constraints = expression.args["constraints"]
        generated = exp.ColumnConstraint(kind=exp.GeneratedAsIdentityColumnConstraint(this=False))
        notnull = exp.ColumnConstraint(kind=exp.NotNullColumnConstraint())

        # Prepend NOT NULL first, then IDENTITY, so IDENTITY ends up leading.
        if notnull not in constraints:
            constraints.insert(0, notnull)
        if generated not in constraints:
            constraints.insert(0, generated)

    return expression
152,879 | from __future__ import annotations
import typing as t
from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
DATE_ADD_OR_SUB,
Dialect,
JSON_EXTRACT_TYPE,
any_value_to_max_sql,
bool_xor_sql,
datestrtodate_sql,
build_formatted_time,
filter_array_using_unnest,
json_extract_segments,
json_path_key_only_name,
max_or_greatest,
merge_without_target_sql,
min_or_least,
no_last_day_sql,
no_map_from_entries_sql,
no_paren_current_date_sql,
no_pivot_sql,
no_trycast_sql,
build_json_extract_path,
build_timestamp_trunc,
rename_func,
str_position_sql,
struct_extract_sql,
timestamptrunc_sql,
timestrtotime_sql,
trim_sql,
ts_or_ds_add_cast,
)
from sqlglot.helper import seq_get
from sqlglot.parser import binary_range_parser
from sqlglot.tokens import TokenType
def _build_generate_series(args: t.List) -> exp.GenerateSeries:
    """Build a GenerateSeries node from GENERATE_SERIES(...) arguments.

    Normalizes step values like '1 day' or INTERVAL '1 day' into INTERVAL '1' day.
    (A dangling, body-less `def seq_get(...)` header that preceded this function
    was an extraction artifact and has been removed — it was a SyntaxError and
    shadowed the `seq_get` imported from sqlglot.helper.)
    """
    step = seq_get(args, 2)

    if step is None:
        # Postgres allows calls with just two arguments -- the "step" argument defaults to 1
        return exp.GenerateSeries.from_arg_list(args)

    if step.is_string:
        # e.g. '1 day' -> INTERVAL '1' day
        args[2] = exp.to_interval(step.this)
    elif isinstance(step, exp.Interval) and not step.args.get("unit"):
        # e.g. INTERVAL '1 day' (unit embedded in the literal) -> INTERVAL '1' day
        args[2] = exp.to_interval(step.this.this)

    return exp.GenerateSeries.from_arg_list(args)
152,880 | from __future__ import annotations
import typing as t
from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
DATE_ADD_OR_SUB,
Dialect,
JSON_EXTRACT_TYPE,
any_value_to_max_sql,
bool_xor_sql,
datestrtodate_sql,
build_formatted_time,
filter_array_using_unnest,
json_extract_segments,
json_path_key_only_name,
max_or_greatest,
merge_without_target_sql,
min_or_least,
no_last_day_sql,
no_map_from_entries_sql,
no_paren_current_date_sql,
no_pivot_sql,
no_trycast_sql,
build_json_extract_path,
build_timestamp_trunc,
rename_func,
str_position_sql,
struct_extract_sql,
timestamptrunc_sql,
timestrtotime_sql,
trim_sql,
ts_or_ds_add_cast,
)
from sqlglot.helper import seq_get
from sqlglot.parser import binary_range_parser
from sqlglot.tokens import TokenType
def build_formatted_time(
    exp_class: t.Type[E], dialect: str, default: t.Optional[bool | str] = None
) -> t.Callable[[t.List], E]:
    """Helper used for time expressions.

    Args:
        exp_class: the expression class to instantiate.
        dialect: target sql dialect.
        default: the default format, True being time.

    Returns:
        A callable that can be used to return the appropriately formatted time expression.
    """

    def _builder(args: t.List):
        # Fall back to the dialect TIME_FORMAT (default=True) or the given default format.
        fmt = seq_get(args, 1) or (
            Dialect[dialect].TIME_FORMAT if default is True else default or None
        )
        return exp_class(this=seq_get(args, 0), format=Dialect[dialect].format_time(fmt))

    return _builder
def _build_to_timestamp(args: t.List) -> exp.UnixToTime | exp.StrToTime:
    """Map TO_TIMESTAMP to UnixToTime (single numeric arg) or StrToTime (text, format)."""
    if len(args) != 1:
        # https://www.postgresql.org/docs/current/functions-formatting.html
        return build_formatted_time(exp.StrToTime, "postgres")(args)

    # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TABLE
    return exp.UnixToTime.from_arg_list(args)
152,881 | from __future__ import annotations
import typing as t
from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
DATE_ADD_OR_SUB,
Dialect,
JSON_EXTRACT_TYPE,
any_value_to_max_sql,
bool_xor_sql,
datestrtodate_sql,
build_formatted_time,
filter_array_using_unnest,
json_extract_segments,
json_path_key_only_name,
max_or_greatest,
merge_without_target_sql,
min_or_least,
no_last_day_sql,
no_map_from_entries_sql,
no_paren_current_date_sql,
no_pivot_sql,
no_trycast_sql,
build_json_extract_path,
build_timestamp_trunc,
rename_func,
str_position_sql,
struct_extract_sql,
timestamptrunc_sql,
timestrtotime_sql,
trim_sql,
ts_or_ds_add_cast,
)
from sqlglot.helper import seq_get
from sqlglot.parser import binary_range_parser
from sqlglot.tokens import TokenType
class Postgres(Dialect):
    """sqlglot dialect definition for PostgreSQL (tokenizer, parser, generator)."""
    # Postgres arrays are 1-indexed.
    INDEX_OFFSET = 1
    # Dialect feature flags; their exact semantics are defined on the base Dialect.
    TYPED_DIVISION = True
    CONCAT_COALESCE = True
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    # Translation table: Postgres TO_CHAR/TO_TIMESTAMP format elements ->
    # strftime-style codes used internally by sqlglot.
    TIME_MAPPING = {
        "AM": "%p",
        "PM": "%p",
        "D": "%u",  # 1-based day of week
        "DD": "%d",  # day of month
        "DDD": "%j",  # zero padded day of year
        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
        "FMDDD": "%-j",  # day of year
        "FMHH12": "%-I",  # 9
        "FMHH24": "%-H",  # 9
        "FMMI": "%-M",  # Minute
        "FMMM": "%-m",  # 1
        "FMSS": "%-S",  # Second
        "HH12": "%I",  # 09
        "HH24": "%H",  # 09
        "MI": "%M",  # zero padded minute
        "MM": "%m",  # 01
        "OF": "%z",  # utc offset
        "SS": "%S",  # zero padded second
        "TMDay": "%A",  # TM is locale dependent
        "TMDy": "%a",
        "TMMon": "%b",  # Sep
        "TMMonth": "%B",  # September
        "TZ": "%Z",  # uppercase timezone name
        "US": "%f",  # zero padded microsecond
        "WW": "%U",  # 1-based week of year
        "YY": "%y",  # 15
        "YYYY": "%Y",  # 2015
    }
    class Tokenizer(tokens.Tokenizer):
        # Postgres-style typed string literals: b'...' bit, x'...' hex, e'...' escape.
        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
        # Dollar-quoted strings ($tag$ ... $tag$) are tokenized as heredocs.
        HEREDOC_STRINGS = ["$"]
        HEREDOC_TAG_IS_IDENTIFIER = True
        # A lone `$` that isn't a heredoc falls back to a parameter token ($1, $2, ...).
        HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER
        # Postgres-specific operators (LIKE/ILIKE/regex/text-search/array
        # containment) plus keywords routed to dedicated token types.
        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "~~": TokenType.LIKE,
            "~~*": TokenType.ILIKE,
            "~*": TokenType.IRLIKE,
            "~": TokenType.RLIKE,
            "@@": TokenType.DAT,
            "@>": TokenType.AT_GT,
            "<@": TokenType.LT_AT,
            "|/": TokenType.PIPE_SLASH,
            "||/": TokenType.DPIPE_SLASH,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "BIGSERIAL": TokenType.BIGSERIAL,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "CONSTRAINT TRIGGER": TokenType.COMMAND,
            "DECLARE": TokenType.COMMAND,
            "DO": TokenType.COMMAND,
            "EXEC": TokenType.COMMAND,
            "HSTORE": TokenType.HSTORE,
            "JSONB": TokenType.JSONB,
            "MONEY": TokenType.MONEY,
            "REFRESH": TokenType.COMMAND,
            "REINDEX": TokenType.COMMAND,
            "RESET": TokenType.COMMAND,
            "REVOKE": TokenType.COMMAND,
            "SERIAL": TokenType.SERIAL,
            "SMALLSERIAL": TokenType.SMALLSERIAL,
            "NAME": TokenType.NAME,
            "TEMP": TokenType.TEMPORARY,
            "CSTRING": TokenType.PSEUDO_TYPE,
            "OID": TokenType.OBJECT_IDENTIFIER,
            "ONLY": TokenType.ONLY,
            "OPERATOR": TokenType.OPERATOR,
            "REGCLASS": TokenType.OBJECT_IDENTIFIER,
            "REGCOLLATION": TokenType.OBJECT_IDENTIFIER,
            "REGCONFIG": TokenType.OBJECT_IDENTIFIER,
            "REGDICTIONARY": TokenType.OBJECT_IDENTIFIER,
            "REGNAMESPACE": TokenType.OBJECT_IDENTIFIER,
            "REGOPER": TokenType.OBJECT_IDENTIFIER,
            "REGOPERATOR": TokenType.OBJECT_IDENTIFIER,
            "REGPROC": TokenType.OBJECT_IDENTIFIER,
            "REGPROCEDURE": TokenType.OBJECT_IDENTIFIER,
            "REGROLE": TokenType.OBJECT_IDENTIFIER,
            "REGTYPE": TokenType.OBJECT_IDENTIFIER,
        }
        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.HEREDOC_STRING,
        }
        VAR_SINGLE_TOKENS = {"$"}
    class Parser(parser.Parser):
        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "SET": lambda self: self.expression(exp.SetConfigProperty, this=self._parse_set()),
        }
        # NOTE(review): the inherited INPUT property parser is removed here —
        # presumably it conflicts with Postgres syntax; confirm against upstream.
        PROPERTY_PARSERS.pop("INPUT")
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE_TRUNC": build_timestamp_trunc,
            "GENERATE_SERIES": _build_generate_series,
            "JSON_EXTRACT_PATH": build_json_extract_path(exp.JSONExtract),
            "JSON_EXTRACT_PATH_TEXT": build_json_extract_path(exp.JSONExtractScalar),
            "MAKE_TIME": exp.TimeFromParts.from_arg_list,
            "MAKE_TIMESTAMP": exp.TimestampFromParts.from_arg_list,
            "NOW": exp.CurrentTimestamp.from_arg_list,
            "TO_CHAR": build_formatted_time(exp.TimeToStr, "postgres"),
            "TO_TIMESTAMP": _build_to_timestamp,
            "UNNEST": exp.Explode.from_arg_list,
        }
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
        }
        # `#` is bitwise XOR in Postgres; `^` is exponentiation.
        BITWISE = {
            **parser.Parser.BITWISE,
            TokenType.HASH: exp.BitwiseXor,
        }
        EXPONENT = {
            TokenType.CARET: exp.Pow,
        }
        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
            TokenType.DAT: lambda self, this: self.expression(
                exp.MatchAgainst, this=self._parse_bitwise(), expressions=[this]
            ),
            TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
            TokenType.OPERATOR: lambda self, this: self._parse_operator(this),
        }
        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.END: lambda self: self._parse_commit_or_rollback(),
        }
        JSON_ARROWS_REQUIRE_JSON_TYPE = True
        def _parse_operator(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            """Parse Postgres OPERATOR(...) syntax, e.g. `a OPERATOR(pg_catalog.+) b`."""
            while True:
                if not self._match(TokenType.L_PAREN):
                    break
                op = ""
                # Accumulate the raw token text between the parens as the operator name.
                while self._curr and not self._match(TokenType.R_PAREN):
                    op += self._curr.text
                    self._advance()
                this = self.expression(
                    exp.Operator,
                    comments=self._prev_comments,
                    this=this,
                    operator=op,
                    expression=self._parse_bitwise(),
                )
                # Chain left-associatively while another OPERATOR keyword follows.
                if not self._match(TokenType.OPERATOR):
                    break
            return this
        def _parse_date_part(self) -> exp.Expression:
            """Parse DATE_PART(part, value) into an Extract expression."""
            part = self._parse_type()
            self._match(TokenType.COMMA)
            value = self._parse_bitwise()
            if part and part.is_string:
                # Postgres accepts the part as a string literal; normalize it to a var.
                part = exp.var(part.name)
            return self.expression(exp.Extract, this=part, expression=value)
    class Generator(generator.Generator):
        SINGLE_STRING_INTERVAL = True
        RENAME_TABLE_WITH_DB = False
        LOCKING_READS_SUPPORTED = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        NVL2_SUPPORTED = False
        PARAMETER_TOKEN = "$"
        TABLESAMPLE_SIZE_IS_ROWS = False
        TABLESAMPLE_SEED_KEYWORD = "REPEATABLE"
        SUPPORTS_SELECT_INTO = True
        JSON_TYPE_REQUIRED_FOR_EXTRACTION = True
        SUPPORTS_UNLOGGED_TABLES = True
        LIKE_PROPERTY_INSIDE_SCHEMA = True
        MULTI_ARG_DISTINCT = False
        CAN_IMPLEMENT_ARRAY_ANY = True
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }
        # Type renames for generation; types absent here keep the default names.
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.TINYINT: "SMALLINT",
            exp.DataType.Type.FLOAT: "REAL",
            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
            exp.DataType.Type.BINARY: "BYTEA",
            exp.DataType.Type.VARBINARY: "BYTEA",
            exp.DataType.Type.DATETIME: "TIMESTAMP",
        }
        # Expression-node -> SQL rendering overrides for Postgres.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.AnyValue: any_value_to_max_sql,
            exp.Array: lambda self, e: (
                f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
                if isinstance(seq_get(e.expressions, 0), exp.Select)
                else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]"
            ),
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            exp.ArrayContained: lambda self, e: self.binary(e, "<@"),
            exp.ArrayContains: lambda self, e: self.binary(e, "@>"),
            exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"),
            exp.ArrayFilter: filter_array_using_unnest,
            exp.ArraySize: lambda self, e: self.func("ARRAY_LENGTH", e.this, e.expression or "1"),
            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
            exp.ColumnDef: transforms.preprocess([_auto_increment_to_serial, _serial_to_generated]),
            exp.CurrentDate: no_paren_current_date_sql,
            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
            exp.CurrentUser: lambda *_: "CURRENT_USER",
            exp.DateAdd: _date_add_sql("+"),
            exp.DateDiff: _date_diff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DataType: _datatype_sql,
            exp.DateSub: _date_add_sql("-"),
            exp.Explode: rename_func("UNNEST"),
            exp.GroupConcat: _string_agg_sql,
            exp.JSONExtract: _json_extract_sql("JSON_EXTRACT_PATH", "->"),
            exp.JSONExtractScalar: _json_extract_sql("JSON_EXTRACT_PATH_TEXT", "->>"),
            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
            exp.ParseJSON: lambda self, e: self.sql(exp.cast(e.this, exp.DataType.Type.JSON)),
            exp.JSONPathKey: json_path_key_only_name,
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONPathSubscript: lambda self, e: self.json_path_part(e.this),
            exp.LastDay: no_last_day_sql,
            exp.LogicalOr: rename_func("BOOL_OR"),
            exp.LogicalAnd: rename_func("BOOL_AND"),
            exp.Max: max_or_greatest,
            exp.MapFromEntries: no_map_from_entries_sql,
            exp.Min: min_or_least,
            exp.Merge: merge_without_target_sql,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: no_pivot_sql,
            exp.Pow: lambda self, e: self.binary(e, "^"),
            exp.Rand: rename_func("RANDOM"),
            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_semi_and_anti_joins,
                    transforms.eliminate_qualify,
                ]
            ),
            exp.StrPosition: str_position_sql,
            exp.StrToDate: lambda self, e: self.func("TO_DATE", e.this, self.format_time(e)),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.StructExtract: struct_extract_sql,
            exp.Substring: _substring_sql,
            exp.TimeFromParts: rename_func("MAKE_TIME"),
            exp.TimestampFromParts: rename_func("MAKE_TIMESTAMP"),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func("TO_CHAR", e.this, self.format_time(e)),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: trim_sql,
            exp.TryCast: no_trycast_sql,
            exp.TsOrDsAdd: _date_add_sql("+"),
            exp.TsOrDsDiff: _date_diff_sql,
            exp.UnixToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this),
            exp.VariancePop: rename_func("VAR_POP"),
            exp.Variance: rename_func("VAR_SAMP"),
            exp.Xor: bool_xor_sql,
        }
        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST; a single array<json> argument becomes JSON_ARRAY_ELEMENTS."""
            if len(expression.expressions) == 1:
                # Local import — presumably to avoid a circular dependency; confirm.
                from sqlglot.optimizer.annotate_types import annotate_types
                this = annotate_types(expression.expressions[0])
                if this.is_type("array<json>"):
                    # Strip any casts so we re-cast the innermost value to JSON below.
                    while isinstance(this, exp.Cast):
                        this = this.this
                    arg = self.sql(exp.cast(this, exp.DataType.Type.JSON))
                    alias = self.sql(expression, "alias")
                    alias = f" AS {alias}" if alias else ""
                    if expression.args.get("offset"):
                        self.unsupported("Unsupported JSON_ARRAY_ELEMENTS with offset")
                    return f"JSON_ARRAY_ELEMENTS({arg}){alias}"
            return super().unnest_sql(expression)
        def bracket_sql(self, expression: exp.Bracket) -> str:
            """Forms like ARRAY[1, 2, 3][3] aren't allowed; we need to wrap the ARRAY."""
            if isinstance(expression.this, exp.Array):
                expression.set("this", exp.paren(expression.this, copy=False))
            return super().bracket_sql(expression)
        def matchagainst_sql(self, expression: exp.MatchAgainst) -> str:
            """Render full-text match as `query @@ document`, OR-ing multiple documents."""
            this = self.sql(expression, "this")
            expressions = [f"{self.sql(e)} @@ {this}" for e in expression.expressions]
            sql = " OR ".join(expressions)
            return f"({sql})" if len(expressions) > 1 else sql
JSON_EXTRACT_TYPE = t.Union[exp.JSONExtract, exp.JSONExtractScalar]
def json_extract_segments(
    name: str, quoted_index: bool = True, op: t.Optional[str] = None
) -> t.Callable[[Generator, JSON_EXTRACT_TYPE], str]:
    """
    Build a generator transform that renders JSON extraction either as a
    function call (e.g. JSON_EXTRACT_PATH(x, 'a', 'b')) or, when `op` is given,
    as a chain of binary operators (e.g. x -> 'a' -> 'b').

    Args:
        name: fallback/primary function name to render.
        quoted_index: whether subscript segments are wrapped in dialect quotes.
        op: optional binary operator to join the segments with instead of a call.
    """
    def _json_extract_segments(self: Generator, expression: JSON_EXTRACT_TYPE) -> str:
        json_path = expression.expression
        if not isinstance(json_path, exp.JSONPath):
            # No structured path to split up — emit a plain renamed function call.
            return rename_func(name)(self, expression)

        rendered: t.List[str] = []
        for part in json_path.expressions:
            part_sql = self.sql(part)
            if not part_sql:
                continue
            # Path parts are quoted, except unquoted subscripts when requested.
            should_quote = isinstance(part, exp.JSONPathPart) and (
                quoted_index or not isinstance(part, exp.JSONPathSubscript)
            )
            if should_quote:
                part_sql = f"{self.dialect.QUOTE_START}{part_sql}{self.dialect.QUOTE_END}"
            rendered.append(part_sql)

        if op:
            return f" {op} ".join([self.sql(expression.this), *rendered])
        return self.func(name, expression.this, *rendered)

    return _json_extract_segments
def _json_extract_sql(
    name: str, op: str
) -> t.Callable[[Postgres.Generator, JSON_EXTRACT_TYPE], str]:
    """
    Build a JSON extraction renderer: arrow-operator syntax when the expression
    is marked as operating purely on JSON types, function syntax otherwise.
    """
    def _generate(self: Postgres.Generator, expression: JSON_EXTRACT_TYPE) -> str:
        if expression.args.get("only_json_types"):
            # All operands are JSON-typed, so the arrow operators are valid.
            builder = json_extract_segments(name, quoted_index=False, op=op)
        else:
            builder = json_extract_segments(name)
        return builder(self, expression)

    return _generate
152,882 | from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
The provided code snippet includes necessary dependencies for implementing the `unalias_group` function. Write a Python function `def unalias_group(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Replace references to select aliases in GROUP BY clauses. Example: >>> import sqlglot >>> sqlglot.parse_one("SELECT a AS b FROM x GROUP BY b").transform(unalias_group).sql() 'SELECT a AS b FROM x GROUP BY 1' Args: expression: the expression that will be transformed. Returns: The transformed expression.
Here is the function:
def unalias_group(expression: exp.Expression) -> exp.Expression:
    """
    Replace references to select aliases in GROUP BY clauses.

    Example:
        >>> import sqlglot
        >>> sqlglot.parse_one("SELECT a AS b FROM x GROUP BY b").transform(unalias_group).sql()
        'SELECT a AS b FROM x GROUP BY 1'

    Args:
        expression: the expression that will be transformed.

    Returns:
        The transformed expression.
    """
    parent = expression.parent
    if not (isinstance(expression, exp.Group) and isinstance(parent, exp.Select)):
        return expression

    # Map each select alias to its 1-based ordinal in the projection list.
    alias_positions: dict = {}
    for position, select in enumerate(parent.expressions, start=1):
        if isinstance(select, exp.Alias):
            alias_positions[select.alias] = position

    for item in expression.expressions:
        # Only bare (table-less) columns can refer to select aliases.
        if (
            isinstance(item, exp.Column)
            and not item.table
            and item.name in alias_positions
        ):
            item.replace(exp.Literal.number(alias_positions[item.name]))

    return expression
152,883 | from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
def find_new_name(taken: t.Collection[str], base: str) -> str:
    """
    Search for a name based on `base` that is not present in `taken`.

    Args:
        taken: A collection of taken names.
        base: Base name to alter.

    Returns:
        The new, available name.
    """
    if base not in taken:
        return base

    # Append an increasing numeric suffix, starting at 2, until it's free.
    suffix = 2
    candidate = f"{base}_{suffix}"
    while candidate in taken:
        suffix += 1
        candidate = f"{base}_{suffix}"
    return candidate
The provided code snippet includes necessary dependencies for implementing the `eliminate_distinct_on` function. Write a Python function `def eliminate_distinct_on(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Convert SELECT DISTINCT ON statements to a subquery with a window function. This is useful for dialects that don't support SELECT DISTINCT ON but support window functions. Args: expression: the expression that will be transformed. Returns: The transformed expression.
Here is the function:
def eliminate_distinct_on(expression: exp.Expression) -> exp.Expression:
    """
    Convert SELECT DISTINCT ON statements to a subquery with a window function.
    This is useful for dialects that don't support SELECT DISTINCT ON but support window functions.
    Args:
        expression: the expression that will be transformed.
    Returns:
        The transformed expression.
    """
    if (
        isinstance(expression, exp.Select)
        and expression.args.get("distinct")
        and expression.args["distinct"].args.get("on")
        and isinstance(expression.args["distinct"].args["on"], exp.Tuple)
    ):
        # pop() detaches the DISTINCT node from the select, so the inner query
        # becomes a plain SELECT; we keep only its ON columns for partitioning.
        distinct_cols = expression.args["distinct"].pop().args["on"].expressions
        outer_selects = expression.selects
        # Synthetic column name for ROW_NUMBER(); must not clash with projections.
        row_number = find_new_name(expression.named_selects, "_row_number")
        window = exp.Window(this=exp.RowNumber(), partition_by=distinct_cols)
        order = expression.args.get("order")
        if order:
            # Move the existing ORDER BY into the window (pop() removes it from
            # the inner select, matching DISTINCT ON's tie-breaking semantics).
            window.set("order", order.pop())
        else:
            # No explicit ordering: order by the DISTINCT ON columns themselves.
            window.set("order", exp.Order(expressions=[c.copy() for c in distinct_cols]))
        window = exp.alias_(window, row_number)
        expression.select(window, copy=False)
        # Wrap: outer select keeps the original projections and keeps only the
        # first row of each partition.
        return (
            exp.select(*outer_selects, copy=False)
            .from_(expression.subquery("_t", copy=False), copy=False)
            .where(exp.column(row_number).eq(1), copy=False)
        )
    return expression
152,884 | from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
def find_new_name(taken: t.Collection[str], base: str) -> str:
    """
    Search for a name based on `base` that is not present in `taken`.

    Args:
        taken: A collection of taken names.
        base: Base name to alter.

    Returns:
        The new, available name.
    """
    candidate = base
    counter = 1
    # First conflict switches to `base_2`, then `base_3`, and so on.
    while candidate in taken:
        counter += 1
        candidate = f"{base}_{counter}"
    return candidate
The provided code snippet includes necessary dependencies for implementing the `eliminate_qualify` function. Write a Python function `def eliminate_qualify(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Convert SELECT statements that contain the QUALIFY clause into subqueries, filtered equivalently. The idea behind this transformation can be seen in Snowflake's documentation for QUALIFY: https://docs.snowflake.com/en/sql-reference/constructs/qualify Some dialects don't support window functions in the WHERE clause, so we need to include them as projections in the subquery, in order to refer to them in the outer filter using aliases. Also, if a column is referenced in the QUALIFY clause but is not selected, we need to include it too, otherwise we won't be able to refer to it in the outer query's WHERE clause.
Here is the function:
def eliminate_qualify(expression: exp.Expression) -> exp.Expression:
    """
    Convert SELECT statements that contain the QUALIFY clause into subqueries, filtered equivalently.
    The idea behind this transformation can be seen in Snowflake's documentation for QUALIFY:
    https://docs.snowflake.com/en/sql-reference/constructs/qualify
    Some dialects don't support window functions in the WHERE clause, so we need to include them as
    projections in the subquery, in order to refer to them in the outer filter using aliases. Also,
    if a column is referenced in the QUALIFY clause but is not selected, we need to include it too,
    otherwise we won't be able to refer to it in the outer query's WHERE clause.
    """
    if isinstance(expression, exp.Select) and expression.args.get("qualify"):
        taken = set(expression.named_selects)
        # Give anonymous projections synthetic aliases so the outer query can
        # reference every column by name.
        for select in expression.selects:
            if not select.alias_or_name:
                alias = find_new_name(taken, "_c")
                select.replace(exp.alias_(select, alias))
                taken.add(alias)
        outer_selects = exp.select(*[select.alias_or_name for select in expression.selects])
        # pop() detaches the QUALIFY clause; its predicate moves to the outer WHERE.
        qualify_filters = expression.args["qualify"].pop().this
        # For SELECT *, only window functions need hoisting; otherwise bare
        # columns referenced by QUALIFY may also be missing from the projections.
        select_candidates = exp.Window if expression.is_star else (exp.Window, exp.Column)
        for expr in qualify_filters.find_all(select_candidates):
            if isinstance(expr, exp.Window):
                # Project the window function under an alias and refer to that
                # alias in the outer filter.
                alias = find_new_name(expression.named_selects, "_w")
                expression.select(exp.alias_(expr, alias), copy=False)
                column = exp.column(alias)
                if isinstance(expr.parent, exp.Qualify):
                    # The window function IS the whole predicate — replace it wholesale.
                    qualify_filters = column
                else:
                    expr.replace(column)
            elif expr.name not in expression.named_selects:
                # Column used by QUALIFY but not selected: add it to the subquery.
                expression.select(expr.copy(), copy=False)
        return outer_selects.from_(expression.subquery(alias="_t", copy=False), copy=False).where(
            qualify_filters, copy=False
        )
    return expression
152,885 | from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
The provided code snippet includes necessary dependencies for implementing the `remove_precision_parameterized_types` function. Write a Python function `def remove_precision_parameterized_types(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Some dialects only allow the precision for parameterized types to be defined in the DDL and not in other expressions. This transform removes the precision from parameterized types in expressions.
Here is the function:
def remove_precision_parameterized_types(expression: exp.Expression) -> exp.Expression:
    """
    Strip precision/length parameters from parameterized data types.

    Some dialects only allow the precision for parameterized types to be defined
    in the DDL and not in other expressions, e.g. CAST(x AS VARCHAR(10)) becomes
    CAST(x AS VARCHAR).
    """
    for data_type in expression.find_all(exp.DataType):
        kept = []
        for child in data_type.expressions:
            if not isinstance(child, exp.DataTypeParam):
                kept.append(child)
        data_type.set("expressions", kept)
    return expression
152,886 | from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
The provided code snippet includes necessary dependencies for implementing the `unnest_to_explode` function. Write a Python function `def unnest_to_explode(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Convert cross join unnest into lateral view explode.
Here is the function:
def unnest_to_explode(expression: exp.Expression) -> exp.Expression:
    """Convert cross join unnest into lateral view explode.

    Args:
        expression: the expression that will be transformed.

    Returns:
        The transformed expression (mutated in place).
    """
    if isinstance(expression, exp.Select):
        # Iterate over a snapshot: the loop body removes joins from the live
        # list, and removing from a list while iterating it skips the element
        # that follows each removed join (e.g. two consecutive UNNEST joins).
        for join in list(expression.args.get("joins") or []):
            unnest = join.this
            if isinstance(unnest, exp.Unnest):
                alias = unnest.args.get("alias")
                # An offset turns UNNEST into POSEXPLODE (value + position).
                udtf = exp.Posexplode if unnest.args.get("offset") else exp.Explode
                expression.args["joins"].remove(join)
                # Pair each unnested expression with its alias column, if any.
                for e, column in zip(unnest.expressions, alias.columns if alias else []):
                    expression.append(
                        "laterals",
                        exp.Lateral(
                            this=udtf(this=e),
                            view=True,
                            alias=exp.TableAlias(this=alias.this, columns=[column]),  # type: ignore
                        ),
                    )
    return expression
152,887 | from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
if t.TYPE_CHECKING:
from sqlglot.generator import Generator
def find_new_name(taken: t.Collection[str], base: str) -> str:
    """
    Search for a name based on `base` that is not present in `taken`.

    Args:
        taken: A collection of taken names.
        base: Base name to alter.

    Returns:
        The new, available name.
    """
    if base not in taken:
        return base

    # Probe suffixed candidates `base_2`, `base_3`, ... until one is free.
    i = 2
    while f"{base}_{i}" in taken:
        i += 1
    return f"{base}_{i}"
class Scope:
"""
Selection scope.
Attributes:
expression (exp.Select|exp.Union): Root expression of this scope
sources (dict[str, exp.Table|Scope]): Mapping of source name to either
a Table expression or another Scope instance. For example:
SELECT * FROM x {"x": Table(this="x")}
SELECT * FROM x AS y {"y": Table(this="x")}
SELECT * FROM (SELECT ...) AS y {"y": Scope(...)}
lateral_sources (dict[str, exp.Table|Scope]): Sources from laterals
For example:
SELECT c FROM x LATERAL VIEW EXPLODE (a) AS c;
The LATERAL VIEW EXPLODE gets x as a source.
cte_sources (dict[str, Scope]): Sources from CTES
outer_columns (list[str]): If this is a derived table or CTE, and the outer query
defines a column list for the alias of this scope, this is that list of columns.
For example:
SELECT * FROM (SELECT ...) AS y(col1, col2)
The inner query would have `["col1", "col2"]` for its `outer_columns`
parent (Scope): Parent scope
scope_type (ScopeType): Type of this scope, relative to it's parent
subquery_scopes (list[Scope]): List of all child scopes for subqueries
cte_scopes (list[Scope]): List of all child scopes for CTEs
derived_table_scopes (list[Scope]): List of all child scopes for derived_tables
udtf_scopes (list[Scope]): List of all child scopes for user defined tabular functions
table_scopes (list[Scope]): derived_table_scopes + udtf_scopes, in the order that they're defined
union_scopes (list[Scope, Scope]): If this Scope is for a Union expression, this will be
a list of the left and right child scopes.
"""
def __init__(
self,
expression,
sources=None,
outer_columns=None,
parent=None,
scope_type=ScopeType.ROOT,
lateral_sources=None,
cte_sources=None,
):
self.expression = expression
self.sources = sources or {}
self.lateral_sources = lateral_sources or {}
self.cte_sources = cte_sources or {}
self.sources.update(self.lateral_sources)
self.sources.update(self.cte_sources)
self.outer_columns = outer_columns or []
self.parent = parent
self.scope_type = scope_type
self.subquery_scopes = []
self.derived_table_scopes = []
self.table_scopes = []
self.cte_scopes = []
self.union_scopes = []
self.udtf_scopes = []
self.clear_cache()
def clear_cache(self):
self._collected = False
self._raw_columns = None
self._derived_tables = None
self._udtfs = None
self._tables = None
self._ctes = None
self._subqueries = None
self._selected_sources = None
self._columns = None
self._external_columns = None
self._join_hints = None
self._pivots = None
self._references = None
def branch(
self, expression, scope_type, sources=None, cte_sources=None, lateral_sources=None, **kwargs
):
"""Branch from the current scope to a new, inner scope"""
return Scope(
expression=expression.unnest(),
sources=sources.copy() if sources else None,
parent=self,
scope_type=scope_type,
cte_sources={**self.cte_sources, **(cte_sources or {})},
lateral_sources=lateral_sources.copy() if lateral_sources else None,
**kwargs,
)
def _collect(self):
self._tables = []
self._ctes = []
self._subqueries = []
self._derived_tables = []
self._udtfs = []
self._raw_columns = []
self._join_hints = []
for node in self.walk(bfs=False):
if node is self.expression:
continue
if isinstance(node, exp.Column) and not isinstance(node.this, exp.Star):
self._raw_columns.append(node)
elif isinstance(node, exp.Table) and not isinstance(node.parent, exp.JoinHint):
self._tables.append(node)
elif isinstance(node, exp.JoinHint):
self._join_hints.append(node)
elif isinstance(node, exp.UDTF):
self._udtfs.append(node)
elif isinstance(node, exp.CTE):
self._ctes.append(node)
elif _is_derived_table(node) and isinstance(
node.parent, (exp.From, exp.Join, exp.Subquery)
):
self._derived_tables.append(node)
elif isinstance(node, exp.UNWRAPPED_QUERIES):
self._subqueries.append(node)
self._collected = True
def _ensure_collected(self):
if not self._collected:
self._collect()
def walk(self, bfs=True, prune=None):
return walk_in_scope(self.expression, bfs=bfs, prune=None)
def find(self, *expression_types, bfs=True):
return find_in_scope(self.expression, expression_types, bfs=bfs)
def find_all(self, *expression_types, bfs=True):
return find_all_in_scope(self.expression, expression_types, bfs=bfs)
def replace(self, old, new):
"""
Replace `old` with `new`.
This can be used instead of `exp.Expression.replace` to ensure the `Scope` is kept up-to-date.
Args:
old (exp.Expression): old node
new (exp.Expression): new node
"""
old.replace(new)
self.clear_cache()
def tables(self):
"""
List of tables in this scope.
Returns:
list[exp.Table]: tables
"""
self._ensure_collected()
return self._tables
def ctes(self):
"""
List of CTEs in this scope.
Returns:
list[exp.CTE]: ctes
"""
self._ensure_collected()
return self._ctes
def derived_tables(self):
"""
List of derived tables in this scope.
For example:
SELECT * FROM (SELECT ...) <- that's a derived table
Returns:
list[exp.Subquery]: derived tables
"""
self._ensure_collected()
return self._derived_tables
def udtfs(self):
"""
List of "User Defined Tabular Functions" in this scope.
Returns:
list[exp.UDTF]: UDTFs
"""
self._ensure_collected()
return self._udtfs
def subqueries(self):
"""
List of subqueries in this scope.
For example:
SELECT * FROM x WHERE a IN (SELECT ...) <- that's a subquery
Returns:
list[exp.Select | exp.Union]: subqueries
"""
self._ensure_collected()
return self._subqueries
def columns(self):
"""
List of columns in this scope.
Returns:
list[exp.Column]: Column instances in this scope, plus any
Columns that reference this scope from correlated subqueries.
"""
if self._columns is None:
self._ensure_collected()
columns = self._raw_columns
external_columns = [
column
for scope in itertools.chain(self.subquery_scopes, self.udtf_scopes)
for column in scope.external_columns
]
named_selects = set(self.expression.named_selects)
self._columns = []
for column in columns + external_columns:
ancestor = column.find_ancestor(
exp.Select, exp.Qualify, exp.Order, exp.Having, exp.Hint, exp.Table, exp.Star
)
if (
not ancestor
or column.table
or isinstance(ancestor, exp.Select)
or (isinstance(ancestor, exp.Table) and not isinstance(ancestor.this, exp.Func))
or (
isinstance(ancestor, exp.Order)
and (
isinstance(ancestor.parent, exp.Window)
or column.name not in named_selects
)
)
):
self._columns.append(column)
return self._columns
def selected_sources(self):
"""
Mapping of nodes and sources that are actually selected from in this scope.
That is, all tables in a schema are selectable at any point. But a
table only becomes a selected source if it's included in a FROM or JOIN clause.
Returns:
dict[str, (exp.Table|exp.Select, exp.Table|Scope)]: selected sources and nodes
"""
if self._selected_sources is None:
result = {}
for name, node in self.references:
if name in result:
raise OptimizeError(f"Alias already used: {name}")
if name in self.sources:
result[name] = (node, self.sources[name])
self._selected_sources = result
return self._selected_sources
def references(self) -> t.List[t.Tuple[str, exp.Expression]]:
if self._references is None:
self._references = []
for table in self.tables:
self._references.append((table.alias_or_name, table))
for expression in itertools.chain(self.derived_tables, self.udtfs):
self._references.append(
(
expression.alias,
expression if expression.args.get("pivots") else expression.unnest(),
)
)
return self._references
def external_columns(self):
"""
Columns that appear to reference sources in outer scopes.
Returns:
list[exp.Column]: Column instances that don't reference
sources in the current scope.
"""
if self._external_columns is None:
if isinstance(self.expression, exp.Union):
left, right = self.union_scopes
self._external_columns = left.external_columns + right.external_columns
else:
self._external_columns = [
c for c in self.columns if c.table not in self.selected_sources
]
return self._external_columns
def unqualified_columns(self):
"""
Unqualified columns in the current scope.
Returns:
list[exp.Column]: Unqualified columns
"""
return [c for c in self.columns if not c.table]
def join_hints(self):
"""
Hints that exist in the scope that reference tables
Returns:
list[exp.JoinHint]: Join hints that are referenced within the scope
"""
if self._join_hints is None:
return []
return self._join_hints
def pivots(self):
if not self._pivots:
self._pivots = [
pivot for _, node in self.references for pivot in node.args.get("pivots") or []
]
return self._pivots
def source_columns(self, source_name):
"""
Get all columns in the current scope for a particular source.
Args:
source_name (str): Name of the source
Returns:
list[exp.Column]: Column instances that reference `source_name`
"""
return [column for column in self.columns if column.table == source_name]
def is_subquery(self):
"""Determine if this scope is a subquery"""
return self.scope_type == ScopeType.SUBQUERY
def is_derived_table(self):
"""Determine if this scope is a derived table"""
return self.scope_type == ScopeType.DERIVED_TABLE
def is_union(self):
"""Determine if this scope is a union"""
return self.scope_type == ScopeType.UNION
def is_cte(self):
"""Determine if this scope is a common table expression"""
return self.scope_type == ScopeType.CTE
def is_root(self):
"""Determine if this is the root scope"""
return self.scope_type == ScopeType.ROOT
def is_udtf(self):
"""Determine if this scope is a UDTF (User Defined Table Function)"""
return self.scope_type == ScopeType.UDTF
def is_correlated_subquery(self):
"""Determine if this scope is a correlated subquery"""
return bool(
(self.is_subquery or (self.parent and isinstance(self.parent.expression, exp.Lateral)))
and self.external_columns
)
def rename_source(self, old_name, new_name):
"""Rename a source in this scope"""
columns = self.sources.pop(old_name or "", [])
self.sources[new_name] = columns
def add_source(self, name, source):
"""Add a source to this scope"""
self.sources[name] = source
self.clear_cache()
def remove_source(self, name):
"""Remove a source from this scope"""
self.sources.pop(name, None)
self.clear_cache()
def __repr__(self):
return f"Scope<{self.expression.sql()}>"
def traverse(self):
    """
    Traverse the scope tree from this node.

    Yields:
        Scope: scope instances in depth-first-search post-order
    """
    # Iterative DFS: scopes are recorded in pop order (parent before the
    # children pushed after it); yielding the reversed list produces
    # post-order without recursion.
    stack = [self]
    result = []
    while stack:
        scope = stack.pop()
        result.append(scope)
        stack.extend(
            itertools.chain(
                scope.cte_scopes,
                scope.union_scopes,
                scope.table_scopes,
                scope.subquery_scopes,
            )
        )
    yield from reversed(result)
def ref_count(self):
    """
    Count the number of times each scope in this tree is referenced.

    Returns:
        dict[int, int]: Mapping of Scope instance ID to reference count
    """
    # defaultdict(int) is the idiomatic spelling of defaultdict(lambda: 0).
    scope_ref_count = defaultdict(int)

    for scope in self.traverse():
        for _, source in scope.selected_sources.values():
            # Keyed by object identity (id) of the source node.
            scope_ref_count[id(source)] += 1

    return scope_ref_count
The provided code snippet includes necessary dependencies for implementing the `explode_to_unnest` function. Write a Python function `def explode_to_unnest(index_offset: int = 0) -> t.Callable[[exp.Expression], exp.Expression]` to solve the following problem:
Convert explode/posexplode into unnest.
Here is the function:
def explode_to_unnest(index_offset: int = 0) -> t.Callable[[exp.Expression], exp.Expression]:
    """Convert explode/posexplode into unnest."""

    def _explode_to_unnest(expression: exp.Expression) -> exp.Expression:
        # Only SELECT statements are rewritten; anything else passes through.
        if isinstance(expression, exp.Select):
            from sqlglot.optimizer.scope import Scope

            # Names already in use, so freshly generated aliases don't clash.
            taken_select_names = set(expression.named_selects)
            taken_source_names = {name for name, _ in Scope(expression).references}

            def new_name(names: t.Set[str], name: str) -> str:
                # Generate a fresh name and immediately reserve it.
                name = find_new_name(names, name)
                names.add(name)
                return name

            arrays: t.List[exp.Condition] = []
            series_alias = new_name(taken_select_names, "pos")
            # A generated integer series used to index into each unnested array.
            series = exp.alias_(
                exp.Unnest(
                    expressions=[exp.GenerateSeries(start=exp.Literal.number(index_offset))]
                ),
                new_name(taken_source_names, "_u"),
                table=[series_alias],
            )

            # we use list here because expression.selects is mutated inside the loop
            for select in list(expression.selects):
                explode = select.find(exp.Explode)

                if explode:
                    pos_alias = ""
                    explode_alias = ""

                    # Normalize the projection into an Alias node so that the
                    # explode's output always has a name we control.
                    if isinstance(select, exp.Alias):
                        explode_alias = select.args["alias"]
                        alias = select
                    elif isinstance(select, exp.Aliases):
                        pos_alias = select.aliases[0]
                        explode_alias = select.aliases[1]
                        alias = select.replace(exp.alias_(select.this, "", copy=False))
                    else:
                        alias = select.replace(exp.alias_(select, ""))
                        explode = alias.find(exp.Explode)
                        assert explode

                    is_posexplode = isinstance(explode, exp.Posexplode)
                    explode_arg = explode.this

                    if isinstance(explode, exp.ExplodeOuter):
                        # EXPLODE OUTER keeps a row even for empty/NULL arrays:
                        # substitute a one-element array when the input is empty.
                        bracket = explode_arg[0]
                        bracket.set("safe", True)
                        bracket.set("offset", True)
                        explode_arg = exp.func(
                            "IF",
                            exp.func(
                                "ARRAY_SIZE", exp.func("COALESCE", explode_arg, exp.Array())
                            ).eq(0),
                            exp.array(bracket, copy=False),
                            explode_arg,
                        )

                    # This ensures that we won't use [POS]EXPLODE's argument as a new selection
                    if isinstance(explode_arg, exp.Column):
                        taken_select_names.add(explode_arg.output_name)

                    unnest_source_alias = new_name(taken_source_names, "_u")

                    if not explode_alias:
                        explode_alias = new_name(taken_select_names, "col")

                        if is_posexplode:
                            pos_alias = new_name(taken_select_names, "pos")

                    if not pos_alias:
                        pos_alias = new_name(taken_select_names, "pos")

                    alias.set("alias", exp.to_identifier(explode_alias))

                    # Project the unnested value only on the row where the
                    # generated series position matches the array offset.
                    series_table_alias = series.args["alias"].this
                    column = exp.If(
                        this=exp.column(series_alias, table=series_table_alias).eq(
                            exp.column(pos_alias, table=unnest_source_alias)
                        ),
                        true=exp.column(explode_alias, table=unnest_source_alias),
                    )

                    explode.replace(column)

                    if is_posexplode:
                        # POSEXPLODE also projects the position, right after the value.
                        expressions = expression.expressions
                        expressions.insert(
                            expressions.index(alias) + 1,
                            exp.If(
                                this=exp.column(series_alias, table=series_table_alias).eq(
                                    exp.column(pos_alias, table=unnest_source_alias)
                                ),
                                true=exp.column(pos_alias, table=unnest_source_alias),
                            ).as_(pos_alias),
                        )
                        expression.set("expressions", expressions)

                    if not arrays:
                        # The series source is attached once, on the first explode.
                        if expression.args.get("from"):
                            expression.join(series, copy=False, join_type="CROSS")
                        else:
                            expression.from_(series, copy=False)

                    size: exp.Condition = exp.ArraySize(this=explode_arg.copy())
                    arrays.append(size)

                    # trino doesn't support left join unnest with on conditions
                    # if it did, this would be much simpler
                    expression.join(
                        exp.alias_(
                            exp.Unnest(
                                expressions=[explode_arg.copy()],
                                offset=exp.to_identifier(pos_alias),
                            ),
                            unnest_source_alias,
                            table=[explode_alias],
                        ),
                        join_type="CROSS",
                        copy=False,
                    )

                    if index_offset != 1:
                        size = size - 1

                    # Keep only rows where the series position lines up with the
                    # unnest offset (or pins at the last element for shorter arrays).
                    expression.where(
                        exp.column(series_alias, table=series_table_alias)
                        .eq(exp.column(pos_alias, table=unnest_source_alias))
                        .or_(
                            (exp.column(series_alias, table=series_table_alias) > size).and_(
                                exp.column(pos_alias, table=unnest_source_alias).eq(size)
                            )
                        ),
                        copy=False,
                    )

            if arrays:
                # The series must run to the length of the longest exploded array.
                end: exp.Condition = exp.Greatest(this=arrays[0], expressions=arrays[1:])

                if index_offset != 1:
                    end = end - (1 - index_offset)

                series.expressions[0].set("end", end)

        return expression

    return _explode_to_unnest
from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
PERCENTILES = (exp.PercentileCont, exp.PercentileDisc)
The provided code snippet includes necessary dependencies for implementing the `add_within_group_for_percentiles` function. Write a Python function `def add_within_group_for_percentiles(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Transforms percentiles by adding a WITHIN GROUP clause to them.
Here is the function:
def add_within_group_for_percentiles(expression: exp.Expression) -> exp.Expression:
    """Transforms percentiles by adding a WITHIN GROUP clause to them."""
    if not isinstance(expression, PERCENTILES):
        return expression
    if isinstance(expression.parent, exp.WithinGroup) or not expression.expression:
        return expression

    # The percentile's input column becomes the ORDER BY key, while the
    # quantile argument moves into the function's `this` slot.
    order_key = expression.this.pop()
    expression.set("this", expression.expression.pop())
    ordering = exp.Order(expressions=[exp.Ordered(this=order_key)])
    return exp.WithinGroup(this=expression, expression=ordering)
from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
if t.TYPE_CHECKING:
from sqlglot.generator import Generator
PERCENTILES = (exp.PercentileCont, exp.PercentileDisc)
The provided code snippet includes necessary dependencies for implementing the `remove_within_group_for_percentiles` function. Write a Python function `def remove_within_group_for_percentiles(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Transforms percentiles by getting rid of their corresponding WITHIN GROUP clause.
Here is the function:
def remove_within_group_for_percentiles(expression: exp.Expression) -> exp.Expression:
    """Transforms percentiles by getting rid of their corresponding WITHIN GROUP clause."""
    if not (
        isinstance(expression, exp.WithinGroup)
        and isinstance(expression.this, PERCENTILES)
        and isinstance(expression.expression, exp.Order)
    ):
        return expression

    quantile = expression.this.this
    # The ORDER BY key inside WITHIN GROUP is the value being aggregated.
    input_value = t.cast(exp.Ordered, expression.find(exp.Ordered)).this
    return expression.replace(exp.ApproxQuantile(this=input_value, quantile=quantile))
from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
def name_sequence(prefix: str) -> t.Callable[[], str]:
"""Returns a name generator given a prefix (e.g. a0, a1, a2, ... if the prefix is "a")."""
sequence = count()
return lambda: f"{prefix}{next(sequence)}"
The provided code snippet includes necessary dependencies for implementing the `add_recursive_cte_column_names` function. Write a Python function `def add_recursive_cte_column_names(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Uses projection output names in recursive CTE definitions to define the CTEs' columns.
Here is the function:
def add_recursive_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """Uses projection output names in recursive CTE definitions to define the CTEs' columns."""
    if not (isinstance(expression, exp.With) and expression.recursive):
        return expression

    next_name = name_sequence("_c_")

    for cte in expression.expressions:
        if cte.args["alias"].columns:
            # Columns were declared explicitly — nothing to infer.
            continue

        query = cte.this
        if isinstance(query, exp.Union):
            # Infer names from the first (anchor) member of the union.
            query = query.this

        inferred = [
            exp.to_identifier(projection.alias_or_name or next_name())
            for projection in query.selects
        ]
        cte.args["alias"].set("columns", inferred)

    return expression
from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
The provided code snippet includes necessary dependencies for implementing the `epoch_cast_to_ts` function. Write a Python function `def epoch_cast_to_ts(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Replace 'epoch' in casts by the equivalent date literal.
Here is the function:
def epoch_cast_to_ts(expression: exp.Expression) -> exp.Expression:
    """Replace 'epoch' in casts by the equivalent date literal."""
    if not isinstance(expression, (exp.Cast, exp.TryCast)):
        return expression

    casts_epoch = expression.name.lower() == "epoch"
    to_temporal = expression.to.this in exp.DataType.TEMPORAL_TYPES

    if casts_epoch and to_temporal:
        # 'epoch' is shorthand for the Unix epoch start timestamp.
        expression.this.replace(exp.Literal.string("1970-01-01 00:00:00"))

    return expression
from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
The provided code snippet includes necessary dependencies for implementing the `eliminate_semi_and_anti_joins` function. Write a Python function `def eliminate_semi_and_anti_joins(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Convert SEMI and ANTI joins into equivalent forms that use EXIST instead.
Here is the function:
def eliminate_semi_and_anti_joins(expression: exp.Expression) -> exp.Expression:
    """Convert SEMI and ANTI joins into equivalent forms that use EXIST instead."""
    if not isinstance(expression, exp.Select):
        return expression

    for join in expression.args.get("joins") or []:
        on_condition = join.args.get("on")
        if not on_condition or join.kind not in ("SEMI", "ANTI"):
            continue

        # Rewrite the join as a correlated EXISTS subquery over its ON clause.
        subquery = exp.select("1").from_(join.this).where(on_condition)
        predicate = exp.Exists(this=subquery)
        if join.kind == "ANTI":
            predicate = predicate.not_(copy=False)

        join.pop()
        expression.where(predicate, copy=False)

    return expression
from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
The provided code snippet includes necessary dependencies for implementing the `eliminate_full_outer_join` function. Write a Python function `def eliminate_full_outer_join(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Converts a query with a FULL OUTER join to a union of identical queries that use LEFT/RIGHT OUTER joins instead. This transformation currently only works for queries that have a single FULL OUTER join.
Here is the function:
def eliminate_full_outer_join(expression: exp.Expression) -> exp.Expression:
    """
    Converts a query with a FULL OUTER join to a union of identical queries that
    use LEFT/RIGHT OUTER joins instead. This transformation currently only works
    for queries that have a single FULL OUTER join.
    """
    if not isinstance(expression, exp.Select):
        return expression

    full_outer_joins = [
        (index, join)
        for index, join in enumerate(expression.args.get("joins") or [])
        if join.side == "FULL"
    ]

    if len(full_outer_joins) != 1:
        return expression

    # Copy first, then make the original the LEFT arm and the copy the
    # RIGHT arm of the resulting union.
    right_arm = expression.copy()
    expression.set("limit", None)
    index, full_outer_join = full_outer_joins[0]
    full_outer_join.set("side", "left")
    right_arm.args["joins"][index].set("side", "right")
    right_arm.args.pop("with", None)  # remove CTEs from RIGHT side

    return exp.union(expression, right_arm, copy=False)
from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
The provided code snippet includes necessary dependencies for implementing the `move_ctes_to_top_level` function. Write a Python function `def move_ctes_to_top_level(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Some dialects (e.g. Hive, T-SQL, Spark prior to version 3) only allow CTEs to be defined at the top-level, so for example queries like: SELECT * FROM (WITH t(c) AS (SELECT 1) SELECT * FROM t) AS subq are invalid in those dialects. This transformation can be used to ensure all CTEs are moved to the top level so that the final SQL code is valid from a syntax standpoint. TODO: handle name clashes whilst moving CTEs (it can get quite tricky & costly).
Here is the function:
def move_ctes_to_top_level(expression: exp.Expression) -> exp.Expression:
    """
    Some dialects (e.g. Hive, T-SQL, Spark prior to version 3) only allow CTEs to be
    defined at the top-level, so for example queries like:

        SELECT * FROM (WITH t(c) AS (SELECT 1) SELECT * FROM t) AS subq

    are invalid in those dialects. This transformation can be used to ensure all CTEs are
    moved to the top level so that the final SQL code is valid from a syntax standpoint.

    TODO: handle name clashes whilst moving CTEs (it can get quite tricky & costly).
    """
    top_level_with = expression.args.get("with")

    for node in expression.find_all(exp.With):
        if node.parent is expression:
            # Already top-level — nothing to move.
            continue

        inner_with = node.pop()

        if top_level_with:
            # Merge: a recursive inner WITH makes the merged WITH recursive.
            if inner_with.recursive:
                top_level_with.set("recursive", True)
            top_level_with.expressions.extend(inner_with.expressions)
        else:
            top_level_with = inner_with
            expression.set("with", top_level_with)

    return expression
from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
The provided code snippet includes necessary dependencies for implementing the `ensure_bools` function. Write a Python function `def ensure_bools(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Converts numeric values used in conditions into explicit boolean expressions.
Here is the function:
def ensure_bools(expression: exp.Expression) -> exp.Expression:
    """Converts numeric values used in conditions into explicit boolean expressions."""
    from sqlglot.optimizer.canonicalize import ensure_bools

    def _coerce_to_bool(node: exp.Expression) -> None:
        # Numbers, numeric-typed nodes and untyped columns become `<node> <> 0`.
        needs_coercion = (
            node.is_number
            or node.is_type(exp.DataType.Type.UNKNOWN, *exp.DataType.NUMERIC_TYPES)
            or (isinstance(node, exp.Column) and not node.type)
        )
        if needs_coercion:
            node.replace(node.neq(0))

    for node in expression.walk():
        ensure_bools(node, _coerce_to_bool)

    return expression
from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
def remove_unique_constraints(expression: exp.Expression) -> exp.Expression:
    """Drop every UNIQUE column constraint from a CREATE statement."""
    assert isinstance(expression, exp.Create)

    for constraint in expression.find_all(exp.UniqueColumnConstraint):
        parent = constraint.parent
        if parent:
            parent.pop()

    return expression
from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
if t.TYPE_CHECKING:
from sqlglot.generator import Generator
def ctas_with_tmp_tables_to_create_tmp_view(
    expression: exp.Expression,
    tmp_storage_provider: t.Callable[[exp.Expression], exp.Expression] = lambda e: e,
) -> exp.Expression:
    """Rewrite CREATE TEMPORARY TABLE ... AS <query> into CREATE TEMPORARY VIEW."""
    assert isinstance(expression, exp.Create)

    properties = expression.args.get("properties")
    property_list = properties.expressions if properties else []
    temporary = any(isinstance(prop, exp.TemporaryProperty) for prop in property_list)

    if expression.kind != "TABLE" or not temporary:
        return expression

    # CTAS with temp tables map to CREATE TEMPORARY VIEW
    if expression.expression:
        return exp.Create(
            kind="TEMPORARY VIEW",
            this=expression.this,
            expression=expression.expression,
        )

    # Plain temp table with no query: let the caller decide storage handling.
    return tmp_storage_provider(expression)
from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
The provided code snippet includes necessary dependencies for implementing the `move_schema_columns_to_partitioned_by` function. Write a Python function `def move_schema_columns_to_partitioned_by(expression: exp.Expression) -> exp.Expression` to solve the following problem:
In Hive, the PARTITIONED BY property acts as an extension of a table's schema. When the PARTITIONED BY value is an array of column names, they are transformed into a schema. The corresponding columns are removed from the create statement.
Here is the function:
def move_schema_columns_to_partitioned_by(expression: exp.Expression) -> exp.Expression:
    """
    In Hive, the PARTITIONED BY property acts as an extension of a table's schema. When the
    PARTITIONED BY value is an array of column names, they are transformed into a schema.
    The corresponding columns are removed from the create statement.
    """
    assert isinstance(expression, exp.Create)
    has_schema = isinstance(expression.this, exp.Schema)
    is_partitionable = expression.kind in {"TABLE", "VIEW"}

    if has_schema and is_partitionable:
        prop = expression.find(exp.PartitionedByProperty)
        # Only rewrite when PARTITIONED BY is a plain list of names, i.e. not
        # already a schema of typed column definitions.
        if prop and prop.this and not isinstance(prop.this, exp.Schema):
            schema = expression.this
            # Case-insensitive match between partition names and schema columns.
            columns = {v.name.upper() for v in prop.this.expressions}
            partitions = [col for col in schema.expressions if col.name.upper() in columns]
            # Partition columns are removed from the main schema and become the
            # PARTITIONED BY schema instead.
            schema.set("expressions", [e for e in schema.expressions if e not in partitions])
            prop.replace(exp.PartitionedByProperty(this=exp.Schema(expressions=partitions)))
            expression.set("this", schema)

    return expression
from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
The provided code snippet includes necessary dependencies for implementing the `move_partitioned_by_to_schema_columns` function. Write a Python function `def move_partitioned_by_to_schema_columns(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Spark 3 supports both "HIVEFORMAT" and "DATASOURCE" formats for CREATE TABLE. Currently, SQLGlot uses the DATASOURCE format for Spark 3.
Here is the function:
def move_partitioned_by_to_schema_columns(expression: exp.Expression) -> exp.Expression:
    """
    Spark 3 supports both "HIVEFORMAT" and "DATASOURCE" formats for CREATE TABLE.

    Currently, SQLGlot uses the DATASOURCE format for Spark 3.
    """
    assert isinstance(expression, exp.Create)

    prop = expression.find(exp.PartitionedByProperty)
    if not (prop and prop.this and isinstance(prop.this, exp.Schema)):
        return expression

    # Every partition entry must be a typed column definition.
    if not all(isinstance(e, exp.ColumnDef) and e.kind for e in prop.this.expressions):
        return expression

    # The property keeps only the column names; the typed definitions move
    # into the table schema.
    name_tuple = exp.Tuple(
        expressions=[exp.to_identifier(e.this) for e in prop.this.expressions]
    )

    schema = expression.this
    for column_def in prop.this.expressions:
        schema.append("expressions", column_def)

    prop.set("this", name_tuple)
    return expression
from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
The provided code snippet includes necessary dependencies for implementing the `struct_kv_to_alias` function. Write a Python function `def struct_kv_to_alias(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Converts struct arguments to aliases, e.g. STRUCT(1 AS y).
Here is the function:
def struct_kv_to_alias(expression: exp.Expression) -> exp.Expression:
    """Converts struct arguments to aliases, e.g. STRUCT(1 AS y)."""
    if isinstance(expression, exp.Struct):
        rewritten = []
        for arg in expression.expressions:
            if isinstance(arg, exp.PropertyEQ):
                # key := value becomes value AS key
                rewritten.append(exp.alias_(arg.expression, arg.this))
            else:
                rewritten.append(arg)
        expression.set("expressions", rewritten)

    return expression
from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name, name_sequence
if t.TYPE_CHECKING:
from sqlglot.generator import Generator
class Generator(metaclass=_Generator):
"""
Generator converts a given syntax tree to the corresponding SQL string.
Args:
pretty: Whether to format the produced SQL string.
Default: False.
identify: Determines when an identifier should be quoted. Possible values are:
False (default): Never quote, except in cases where it's mandatory by the dialect.
True or 'always': Always quote.
'safe': Only quote identifiers that are case insensitive.
normalize: Whether to normalize identifiers to lowercase.
Default: False.
pad: The pad size in a formatted string.
Default: 2.
indent: The indentation size in a formatted string.
Default: 2.
normalize_functions: How to normalize function names. Possible values are:
"upper" or True (default): Convert names to uppercase.
"lower": Convert names to lowercase.
False: Disables function name normalization.
unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.
Default ErrorLevel.WARN.
max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.
This is only relevant if unsupported_level is ErrorLevel.RAISE.
Default: 3
leading_comma: Whether the comma is leading or trailing in select expressions.
This is only relevant when generating in pretty mode.
Default: False
max_text_width: The max number of characters in a segment before creating new lines in pretty mode.
The default is on the smaller end because the length only represents a segment and not the true
line length.
Default: 80
comments: Whether to preserve comments in the output SQL code.
Default: True
"""
TRANSFORMS: t.Dict[t.Type[exp.Expression], t.Callable[..., str]] = {
**JSON_PATH_PART_TRANSFORMS,
exp.AutoRefreshProperty: lambda self, e: f"AUTO REFRESH {self.sql(e, 'this')}",
exp.BackupProperty: lambda self, e: f"BACKUP {self.sql(e, 'this')}",
exp.CaseSpecificColumnConstraint: lambda _,
e: f"{'NOT ' if e.args.get('not_') else ''}CASESPECIFIC",
exp.CharacterSetColumnConstraint: lambda self, e: f"CHARACTER SET {self.sql(e, 'this')}",
exp.CharacterSetProperty: lambda self,
e: f"{'DEFAULT ' if e.args.get('default') else ''}CHARACTER SET={self.sql(e, 'this')}",
exp.ClusteredColumnConstraint: lambda self,
e: f"CLUSTERED ({self.expressions(e, 'this', indent=False)})",
exp.CollateColumnConstraint: lambda self, e: f"COLLATE {self.sql(e, 'this')}",
exp.CommentColumnConstraint: lambda self, e: f"COMMENT {self.sql(e, 'this')}",
exp.CopyGrantsProperty: lambda *_: "COPY GRANTS",
exp.DateAdd: lambda self, e: self.func(
"DATE_ADD", e.this, e.expression, exp.Literal.string(e.text("unit"))
),
exp.DateFormatColumnConstraint: lambda self, e: f"FORMAT {self.sql(e, 'this')}",
exp.DefaultColumnConstraint: lambda self, e: f"DEFAULT {self.sql(e, 'this')}",
exp.EncodeColumnConstraint: lambda self, e: f"ENCODE {self.sql(e, 'this')}",
exp.ExcludeColumnConstraint: lambda self, e: f"EXCLUDE {self.sql(e, 'this').lstrip()}",
exp.ExecuteAsProperty: lambda self, e: self.naked_property(e),
exp.ExternalProperty: lambda *_: "EXTERNAL",
exp.GlobalProperty: lambda *_: "GLOBAL",
exp.HeapProperty: lambda *_: "HEAP",
exp.IcebergProperty: lambda *_: "ICEBERG",
exp.InheritsProperty: lambda self, e: f"INHERITS ({self.expressions(e, flat=True)})",
exp.InlineLengthColumnConstraint: lambda self, e: f"INLINE LENGTH {self.sql(e, 'this')}",
exp.InputModelProperty: lambda self, e: f"INPUT{self.sql(e, 'this')}",
exp.IntervalSpan: lambda self, e: f"{self.sql(e, 'this')} TO {self.sql(e, 'expression')}",
exp.JSONExtract: lambda self, e: self.func(
"JSON_EXTRACT", e.this, e.expression, *e.expressions
),
exp.JSONExtractScalar: lambda self, e: self.func(
"JSON_EXTRACT_SCALAR", e.this, e.expression, *e.expressions
),
exp.LanguageProperty: lambda self, e: self.naked_property(e),
exp.LocationProperty: lambda self, e: self.naked_property(e),
exp.LogProperty: lambda _, e: f"{'NO ' if e.args.get('no') else ''}LOG",
exp.MaterializedProperty: lambda *_: "MATERIALIZED",
exp.NonClusteredColumnConstraint: lambda self,
e: f"NONCLUSTERED ({self.expressions(e, 'this', indent=False)})",
exp.NoPrimaryIndexProperty: lambda *_: "NO PRIMARY INDEX",
exp.NotForReplicationColumnConstraint: lambda *_: "NOT FOR REPLICATION",
exp.OnCommitProperty: lambda _,
e: f"ON COMMIT {'DELETE' if e.args.get('delete') else 'PRESERVE'} ROWS",
exp.OnProperty: lambda self, e: f"ON {self.sql(e, 'this')}",
exp.OnUpdateColumnConstraint: lambda self, e: f"ON UPDATE {self.sql(e, 'this')}",
exp.OutputModelProperty: lambda self, e: f"OUTPUT{self.sql(e, 'this')}",
exp.PathColumnConstraint: lambda self, e: f"PATH {self.sql(e, 'this')}",
exp.RemoteWithConnectionModelProperty: lambda self,
e: f"REMOTE WITH CONNECTION {self.sql(e, 'this')}",
exp.ReturnsProperty: lambda self, e: self.naked_property(e),
exp.SampleProperty: lambda self, e: f"SAMPLE BY {self.sql(e, 'this')}",
exp.SetConfigProperty: lambda self, e: self.sql(e, "this"),
exp.SetProperty: lambda _, e: f"{'MULTI' if e.args.get('multi') else ''}SET",
exp.SettingsProperty: lambda self, e: f"SETTINGS{self.seg('')}{(self.expressions(e))}",
exp.SharingProperty: lambda self, e: f"SHARING={self.sql(e, 'this')}",
exp.SqlReadWriteProperty: lambda _, e: e.name,
exp.SqlSecurityProperty: lambda _,
e: f"SQL SECURITY {'DEFINER' if e.args.get('definer') else 'INVOKER'}",
exp.StabilityProperty: lambda _, e: e.name,
exp.TemporaryProperty: lambda *_: "TEMPORARY",
exp.TitleColumnConstraint: lambda self, e: f"TITLE {self.sql(e, 'this')}",
exp.Timestamp: lambda self, e: self.func("TIMESTAMP", e.this, e.expression),
exp.ToTableProperty: lambda self, e: f"TO {self.sql(e.this)}",
exp.TransformModelProperty: lambda self, e: self.func("TRANSFORM", *e.expressions),
exp.TransientProperty: lambda *_: "TRANSIENT",
exp.UppercaseColumnConstraint: lambda *_: "UPPERCASE",
exp.UnloggedProperty: lambda *_: "UNLOGGED",
exp.VarMap: lambda self, e: self.func("MAP", e.args["keys"], e.args["values"]),
exp.VolatileProperty: lambda *_: "VOLATILE",
exp.WithJournalTableProperty: lambda self, e: f"WITH JOURNAL TABLE={self.sql(e, 'this')}",
exp.WithOperator: lambda self, e: f"{self.sql(e, 'this')} WITH {self.sql(e, 'op')}",
}
# Whether null ordering is supported in order by
# True: Full Support, None: No support, False: No support in window specifications
NULL_ORDERING_SUPPORTED: t.Optional[bool] = True
# Whether ignore nulls is inside the agg or outside.
# FIRST(x IGNORE NULLS) OVER vs FIRST (x) IGNORE NULLS OVER
IGNORE_NULLS_IN_FUNC = False
# Whether locking reads (i.e. SELECT ... FOR UPDATE/SHARE) are supported
LOCKING_READS_SUPPORTED = False
# Always do union distinct or union all
EXPLICIT_UNION = False
# Wrap derived values in parens, usually standard but spark doesn't support it
WRAP_DERIVED_VALUES = True
# Whether create function uses an AS before the RETURN
CREATE_FUNCTION_RETURN_AS = True
# Whether MERGE ... WHEN MATCHED BY SOURCE is allowed
MATCHED_BY_SOURCE = True
# Whether the INTERVAL expression works only with values like '1 day'
SINGLE_STRING_INTERVAL = False
# Whether the plural form of date parts like day (i.e. "days") is supported in INTERVALs
INTERVAL_ALLOWS_PLURAL_FORM = True
# Whether limit and fetch are supported (possible values: "ALL", "LIMIT", "FETCH")
LIMIT_FETCH = "ALL"
# Whether limit and fetch allows expresions or just limits
LIMIT_ONLY_LITERALS = False
# Whether a table is allowed to be renamed with a db
RENAME_TABLE_WITH_DB = True
# The separator for grouping sets and rollups
GROUPINGS_SEP = ","
# The string used for creating an index on a table
INDEX_ON = "ON"
# Whether join hints should be generated
JOIN_HINTS = True
# Whether table hints should be generated
TABLE_HINTS = True
# Whether query hints should be generated
QUERY_HINTS = True
# What kind of separator to use for query hints
QUERY_HINT_SEP = ", "
# Whether comparing against booleans (e.g. x IS TRUE) is supported
IS_BOOL_ALLOWED = True
# Whether to include the "SET" keyword in the "INSERT ... ON DUPLICATE KEY UPDATE" statement
DUPLICATE_KEY_UPDATE_WITH_SET = True
# Whether to generate the limit as TOP <value> instead of LIMIT <value>
LIMIT_IS_TOP = False
# Whether to generate INSERT INTO ... RETURNING or INSERT INTO RETURNING ...
RETURNING_END = True
# Whether to generate the (+) suffix for columns used in old-style join conditions
COLUMN_JOIN_MARKS_SUPPORTED = False
# Whether to generate an unquoted value for EXTRACT's date part argument
EXTRACT_ALLOWS_QUOTES = True
# Whether TIMETZ / TIMESTAMPTZ will be generated using the "WITH TIME ZONE" syntax
TZ_TO_WITH_TIME_ZONE = False
# Whether the NVL2 function is supported
NVL2_SUPPORTED = True
# --- Generator capability flags; concrete dialect generators override these. ---
# https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax
SELECT_KINDS: t.Tuple[str, ...] = ("STRUCT", "VALUE")
# Whether VALUES statements can be used as derived tables.
# MySQL 5 and Redshift do not allow this, so when False, it will convert
# SELECT * VALUES into SELECT UNION
VALUES_AS_TABLE = True
# Whether the word COLUMN is included when adding a column with ALTER TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = True
# UNNEST WITH ORDINALITY (presto) instead of UNNEST WITH OFFSET (bigquery)
UNNEST_WITH_ORDINALITY = True
# Whether FILTER (WHERE cond) can be used for conditional aggregation
AGGREGATE_FILTER_SUPPORTED = True
# Whether JOIN sides (LEFT, RIGHT) are supported in conjunction with SEMI/ANTI join kinds
SEMI_ANTI_JOIN_WITH_SIDE = True
# Whether to include the type of a computed column in the CREATE DDL
COMPUTED_COLUMN_WITH_TYPE = True
# Whether CREATE TABLE .. COPY .. is supported. False means we'll generate CLONE instead of COPY
SUPPORTS_TABLE_COPY = True
# Whether parentheses are required around the table sample's expression
TABLESAMPLE_REQUIRES_PARENS = True
# Whether a table sample clause's size needs to be followed by the ROWS keyword
TABLESAMPLE_SIZE_IS_ROWS = True
# The keyword(s) to use when generating a sample clause
TABLESAMPLE_KEYWORDS = "TABLESAMPLE"
# Whether the TABLESAMPLE clause supports a method name, like BERNOULLI
TABLESAMPLE_WITH_METHOD = True
# The keyword to use when specifying the seed of a sample clause
TABLESAMPLE_SEED_KEYWORD = "SEED"
# Whether COLLATE is a function instead of a binary operator
COLLATE_IS_FUNC = False
# Whether data types support additional specifiers like e.g. CHAR or BYTE (oracle)
DATA_TYPE_SPECIFIERS_ALLOWED = False
# Whether conditions require booleans WHERE x = 0 vs WHERE x
ENSURE_BOOLS = False
# Whether the "RECURSIVE" keyword is required when defining recursive CTEs
CTE_RECURSIVE_KEYWORD_REQUIRED = True
# Whether CONCAT requires >1 arguments
SUPPORTS_SINGLE_ARG_CONCAT = True
# Whether LAST_DAY function supports a date part argument
LAST_DAY_SUPPORTS_DATE_PART = True
# Whether named columns are allowed in table aliases
SUPPORTS_TABLE_ALIAS_COLUMNS = True
# Whether UNPIVOT aliases are Identifiers (False means they're Literals)
UNPIVOT_ALIASES_ARE_IDENTIFIERS = True
# What delimiter to use for separating JSON key/value pairs
JSON_KEY_VALUE_PAIR_SEP = ":"
# INSERT OVERWRITE TABLE x override
INSERT_OVERWRITE = " OVERWRITE TABLE"
# Whether the SELECT .. INTO syntax is used instead of CTAS
SUPPORTS_SELECT_INTO = False
# Whether UNLOGGED tables can be created
SUPPORTS_UNLOGGED_TABLES = False
# Whether the CREATE TABLE LIKE statement is supported
SUPPORTS_CREATE_TABLE_LIKE = True
# Whether the LikeProperty needs to be specified inside of the schema clause
LIKE_PROPERTY_INSIDE_SCHEMA = False
# Whether DISTINCT can be followed by multiple args in an AggFunc. If not, it will be
# transpiled into a series of CASE-WHEN-ELSE, ultimately using a tuple consisting of the args
MULTI_ARG_DISTINCT = True
# Whether the JSON extraction operators expect a value of type JSON
JSON_TYPE_REQUIRED_FOR_EXTRACTION = False
# Whether bracketed keys like ["foo"] are supported in JSON paths
JSON_PATH_BRACKETED_KEY_SUPPORTED = True
# Whether to escape keys using single quotes in JSON paths
JSON_PATH_SINGLE_QUOTE_ESCAPE = False
# The JSONPathPart expressions supported by this dialect
SUPPORTED_JSON_PATH_PARTS = ALL_JSON_PATH_PARTS.copy()
# Whether any(f(x) for x in array) can be implemented by this dialect
CAN_IMPLEMENT_ARRAY_ANY = False
# Whether the function TO_NUMBER is supported
SUPPORTS_TO_NUMBER = True
# How exp.DataType types are spelled in this dialect's generated SQL.
TYPE_MAPPING = {
    exp.DataType.Type.NCHAR: "CHAR",
    exp.DataType.Type.NVARCHAR: "VARCHAR",
    exp.DataType.Type.MEDIUMTEXT: "TEXT",
    exp.DataType.Type.LONGTEXT: "TEXT",
    exp.DataType.Type.TINYTEXT: "TEXT",
    exp.DataType.Type.MEDIUMBLOB: "BLOB",
    exp.DataType.Type.LONGBLOB: "BLOB",
    exp.DataType.Type.TINYBLOB: "BLOB",
    exp.DataType.Type.INET: "INET",
}
# Keywords used for star modifiers, e.g. SELECT * EXCEPT (...) / REPLACE (...).
STAR_MAPPING = {
    "except": "EXCEPT",
    "replace": "REPLACE",
}
# Plural date/time part names normalized to their singular keyword.
TIME_PART_SINGULARS = {
    "MICROSECONDS": "MICROSECOND",
    "SECONDS": "SECOND",
    "MINUTES": "MINUTE",
    "HOURS": "HOUR",
    "DAYS": "DAY",
    "WEEKS": "WEEK",
    "MONTHS": "MONTH",
    "QUARTERS": "QUARTER",
    "YEARS": "YEAR",
}
# Renderers for query modifiers that are emitted after the HAVING clause.
AFTER_HAVING_MODIFIER_TRANSFORMS = {
    "cluster": lambda self, e: self.sql(e, "cluster"),
    "distribute": lambda self, e: self.sql(e, "distribute"),
    "qualify": lambda self, e: self.sql(e, "qualify"),
    "sort": lambda self, e: self.sql(e, "sort"),
    "windows": lambda self, e: (
        self.seg("WINDOW ") + self.expressions(e, key="windows", flat=True)
        if e.args.get("windows")
        else ""
    ),
}
# Dialect-specific spellings for individual tokens (empty in the base generator).
TOKEN_MAPPING: t.Dict[TokenType, str] = {}
# Delimiters wrapping nested type parameters, e.g. STRUCT<...>.
STRUCT_DELIMITER = ("<", ">")
# Prefix used when rendering parameters (e.g. "@name").
PARAMETER_TOKEN = "@"
# Prefix used when rendering named placeholders (e.g. ":name").
NAMED_PLACEHOLDER_TOKEN = ":"
# Where each property class is rendered within a CREATE statement; consumed
# by locate_properties/create_sql to slot properties into the right position.
PROPERTIES_LOCATION = {
    exp.AlgorithmProperty: exp.Properties.Location.POST_CREATE,
    exp.AutoIncrementProperty: exp.Properties.Location.POST_SCHEMA,
    exp.AutoRefreshProperty: exp.Properties.Location.POST_SCHEMA,
    exp.BackupProperty: exp.Properties.Location.POST_SCHEMA,
    exp.BlockCompressionProperty: exp.Properties.Location.POST_NAME,
    exp.CharacterSetProperty: exp.Properties.Location.POST_SCHEMA,
    exp.ChecksumProperty: exp.Properties.Location.POST_NAME,
    exp.CollateProperty: exp.Properties.Location.POST_SCHEMA,
    exp.CopyGrantsProperty: exp.Properties.Location.POST_SCHEMA,
    exp.Cluster: exp.Properties.Location.POST_SCHEMA,
    exp.ClusteredByProperty: exp.Properties.Location.POST_SCHEMA,
    exp.DataBlocksizeProperty: exp.Properties.Location.POST_NAME,
    exp.DefinerProperty: exp.Properties.Location.POST_CREATE,
    exp.DictRange: exp.Properties.Location.POST_SCHEMA,
    exp.DictProperty: exp.Properties.Location.POST_SCHEMA,
    exp.DistKeyProperty: exp.Properties.Location.POST_SCHEMA,
    exp.DistStyleProperty: exp.Properties.Location.POST_SCHEMA,
    exp.EngineProperty: exp.Properties.Location.POST_SCHEMA,
    exp.ExecuteAsProperty: exp.Properties.Location.POST_SCHEMA,
    exp.ExternalProperty: exp.Properties.Location.POST_CREATE,
    exp.FallbackProperty: exp.Properties.Location.POST_NAME,
    exp.FileFormatProperty: exp.Properties.Location.POST_WITH,
    exp.FreespaceProperty: exp.Properties.Location.POST_NAME,
    exp.GlobalProperty: exp.Properties.Location.POST_CREATE,
    exp.HeapProperty: exp.Properties.Location.POST_WITH,
    exp.InheritsProperty: exp.Properties.Location.POST_SCHEMA,
    exp.IcebergProperty: exp.Properties.Location.POST_CREATE,
    exp.InputModelProperty: exp.Properties.Location.POST_SCHEMA,
    exp.IsolatedLoadingProperty: exp.Properties.Location.POST_NAME,
    exp.JournalProperty: exp.Properties.Location.POST_NAME,
    exp.LanguageProperty: exp.Properties.Location.POST_SCHEMA,
    exp.LikeProperty: exp.Properties.Location.POST_SCHEMA,
    exp.LocationProperty: exp.Properties.Location.POST_SCHEMA,
    exp.LockProperty: exp.Properties.Location.POST_SCHEMA,
    exp.LockingProperty: exp.Properties.Location.POST_ALIAS,
    exp.LogProperty: exp.Properties.Location.POST_NAME,
    exp.MaterializedProperty: exp.Properties.Location.POST_CREATE,
    exp.MergeBlockRatioProperty: exp.Properties.Location.POST_NAME,
    exp.NoPrimaryIndexProperty: exp.Properties.Location.POST_EXPRESSION,
    exp.OnProperty: exp.Properties.Location.POST_SCHEMA,
    exp.OnCommitProperty: exp.Properties.Location.POST_EXPRESSION,
    exp.Order: exp.Properties.Location.POST_SCHEMA,
    exp.OutputModelProperty: exp.Properties.Location.POST_SCHEMA,
    exp.PartitionedByProperty: exp.Properties.Location.POST_WITH,
    exp.PartitionedOfProperty: exp.Properties.Location.POST_SCHEMA,
    exp.PrimaryKey: exp.Properties.Location.POST_SCHEMA,
    exp.Property: exp.Properties.Location.POST_WITH,
    exp.RemoteWithConnectionModelProperty: exp.Properties.Location.POST_SCHEMA,
    exp.ReturnsProperty: exp.Properties.Location.POST_SCHEMA,
    exp.RowFormatProperty: exp.Properties.Location.POST_SCHEMA,
    exp.RowFormatDelimitedProperty: exp.Properties.Location.POST_SCHEMA,
    exp.RowFormatSerdeProperty: exp.Properties.Location.POST_SCHEMA,
    exp.SampleProperty: exp.Properties.Location.POST_SCHEMA,
    exp.SchemaCommentProperty: exp.Properties.Location.POST_SCHEMA,
    exp.SerdeProperties: exp.Properties.Location.POST_SCHEMA,
    exp.Set: exp.Properties.Location.POST_SCHEMA,
    exp.SettingsProperty: exp.Properties.Location.POST_SCHEMA,
    exp.SetProperty: exp.Properties.Location.POST_CREATE,
    exp.SetConfigProperty: exp.Properties.Location.POST_SCHEMA,
    exp.SharingProperty: exp.Properties.Location.POST_EXPRESSION,
    exp.SequenceProperties: exp.Properties.Location.POST_EXPRESSION,
    exp.SortKeyProperty: exp.Properties.Location.POST_SCHEMA,
    exp.SqlReadWriteProperty: exp.Properties.Location.POST_SCHEMA,
    exp.SqlSecurityProperty: exp.Properties.Location.POST_CREATE,
    exp.StabilityProperty: exp.Properties.Location.POST_SCHEMA,
    exp.TemporaryProperty: exp.Properties.Location.POST_CREATE,
    exp.ToTableProperty: exp.Properties.Location.POST_SCHEMA,
    exp.TransientProperty: exp.Properties.Location.POST_CREATE,
    exp.TransformModelProperty: exp.Properties.Location.POST_SCHEMA,
    exp.MergeTreeTTL: exp.Properties.Location.POST_SCHEMA,
    exp.UnloggedProperty: exp.Properties.Location.POST_CREATE,
    exp.VolatileProperty: exp.Properties.Location.POST_CREATE,
    exp.WithDataProperty: exp.Properties.Location.POST_EXPRESSION,
    exp.WithJournalTableProperty: exp.Properties.Location.POST_NAME,
    exp.WithSystemVersioningProperty: exp.Properties.Location.POST_SCHEMA,
}
# Keywords that can't be used as unquoted identifier names
RESERVED_KEYWORDS: t.Set[str] = set()
# Expressions whose comments are separated from them for better formatting
WITH_SEPARATED_COMMENTS: t.Tuple[t.Type[exp.Expression], ...] = (
    exp.Create,
    exp.Delete,
    exp.Drop,
    exp.From,
    exp.Insert,
    exp.Join,
    exp.Select,
    exp.Union,
    exp.Update,
    exp.Where,
    exp.With,
)
# Expressions that should not have their comments generated in maybe_comment
EXCLUDE_COMMENTS: t.Tuple[t.Type[exp.Expression], ...] = (
    exp.Binary,
    exp.Union,
)
# Expressions that can remain unwrapped when appearing in the context of an INTERVAL
UNWRAPPED_INTERVAL_VALUES: t.Tuple[t.Type[exp.Expression], ...] = (
    exp.Column,
    exp.Literal,
    exp.Neg,
    exp.Paren,
)
# Text types that accept a length/size parameter, e.g. VARCHAR(10).
PARAMETERIZABLE_TEXT_TYPES = {
    exp.DataType.Type.NVARCHAR,
    exp.DataType.Type.VARCHAR,
    exp.DataType.Type.CHAR,
    exp.DataType.Type.NCHAR,
}
# Expressions that need to have all CTEs under them bubbled up to them
EXPRESSIONS_WITHOUT_NESTED_CTES: t.Set[t.Type[exp.Expression]] = set()
# Placeholder that generate() replaces with a real newline when pretty-printing.
SENTINEL_LINE_BREAK = "__SQLGLOT__LB__"
# Per-instance attributes; declaring __slots__ avoids a per-object __dict__.
__slots__ = (
    "pretty",
    "identify",
    "normalize",
    "pad",
    "_indent",
    "normalize_functions",
    "unsupported_level",
    "max_unsupported",
    "leading_comma",
    "max_text_width",
    "comments",
    "dialect",
    "unsupported_messages",
    "_escaped_quote_end",
    "_escaped_identifier_end",
)
def __init__(
    self,
    pretty: t.Optional[bool] = None,
    identify: str | bool = False,
    normalize: bool = False,
    pad: int = 2,
    indent: int = 2,
    normalize_functions: t.Optional[str | bool] = None,
    unsupported_level: ErrorLevel = ErrorLevel.WARN,
    max_unsupported: int = 3,
    leading_comma: bool = False,
    max_text_width: int = 80,
    comments: bool = True,
    dialect: DialectType = None,
):
    """Configure the generator.

    Args:
        pretty: Whether to pretty-print; defaults to the global `sqlglot.pretty`.
        identify: Controls identifier quoting (passed to `Dialect.can_identify`).
        normalize: Whether to lowercase unquoted identifiers.
        pad: Default indentation padding (spaces) used by `indent`.
        indent: Spaces per indentation level used by `indent`.
        normalize_functions: "upper"/"lower"/bool; falls back to the dialect's setting.
        unsupported_level: What to do with unsupported-syntax messages
            (IGNORE/WARN/RAISE/IMMEDIATE).
        max_unsupported: Max number of messages included in a raised error.
        leading_comma: Whether pretty-printed expression lists lead with commas.
        max_text_width: Wrap threshold for pretty-printed text.
        comments: Whether to emit comments attached to expressions.
        dialect: Dialect name/instance resolved via `Dialect.get_or_raise`.
    """
    import sqlglot
    from sqlglot.dialects import Dialect
    self.pretty = pretty if pretty is not None else sqlglot.pretty
    self.identify = identify
    self.normalize = normalize
    self.pad = pad
    self._indent = indent
    self.unsupported_level = unsupported_level
    self.max_unsupported = max_unsupported
    self.leading_comma = leading_comma
    self.max_text_width = max_text_width
    self.comments = comments
    self.dialect = Dialect.get_or_raise(dialect)
    # This is both a Dialect property and a Generator argument, so we prioritize the latter
    self.normalize_functions = (
        self.dialect.NORMALIZE_FUNCTIONS if normalize_functions is None else normalize_functions
    )
    self.unsupported_messages: t.List[str] = []
    # Precomputed escape sequences for embedded quote/identifier-end characters.
    self._escaped_quote_end: str = (
        self.dialect.tokenizer_class.STRING_ESCAPES[0] + self.dialect.QUOTE_END
    )
    self._escaped_identifier_end: str = (
        self.dialect.tokenizer_class.IDENTIFIER_ESCAPES[0] + self.dialect.IDENTIFIER_END
    )
def generate(self, expression: exp.Expression, copy: bool = True) -> str:
    """
    Generates the SQL string corresponding to the given syntax tree.
    Args:
        expression: The syntax tree.
        copy: Whether to copy the expression. The generator performs mutations so
            it is safer to copy.
    Returns:
        The SQL string corresponding to `expression`.
    Raises:
        UnsupportedError: If `unsupported_level` is RAISE and unsupported syntax
            was encountered during generation.
    """
    if copy:
        expression = expression.copy()
    expression = self.preprocess(expression)
    # Reset so each generate() call reports only its own unsupported messages.
    self.unsupported_messages = []
    sql = self.sql(expression).strip()
    if self.pretty:
        # Replace sentinel markers injected during generation with real newlines.
        sql = sql.replace(self.SENTINEL_LINE_BREAK, "\n")
    if self.unsupported_level == ErrorLevel.IGNORE:
        return sql
    if self.unsupported_level == ErrorLevel.WARN:
        for msg in self.unsupported_messages:
            logger.warning(msg)
    elif self.unsupported_level == ErrorLevel.RAISE and self.unsupported_messages:
        raise UnsupportedError(concat_messages(self.unsupported_messages, self.max_unsupported))
    return sql
def preprocess(self, expression: exp.Expression) -> exp.Expression:
    """Apply generic preprocessing transformations to a given expression."""
    if (
        not expression.parent
        and type(expression) in self.EXPRESSIONS_WITHOUT_NESTED_CTES
        and any(node.parent is not expression for node in expression.find_all(exp.With))
    ):
        from sqlglot.transforms import move_ctes_to_top_level
        # Only root expressions of the listed types get nested WITHs hoisted.
        expression = move_ctes_to_top_level(expression)
    if self.ENSURE_BOOLS:
        from sqlglot.transforms import ensure_bools
        # Dialects that require boolean conditions (WHERE x = 0 vs WHERE x).
        expression = ensure_bools(expression)
    return expression
def unsupported(self, message: str) -> None:
    # In IMMEDIATE mode fail fast; otherwise collect the message so generate()
    # can warn or raise once generation completes.
    if self.unsupported_level == ErrorLevel.IMMEDIATE:
        raise UnsupportedError(message)
    self.unsupported_messages.append(message)
def sep(self, sep: str = " ") -> str:
    """Return *sep* unchanged, or stripped-plus-newline when pretty-printing."""
    if self.pretty:
        return sep.strip() + "\n"
    return sep
def seg(self, sql: str, sep: str = " ") -> str:
    """Prefix *sql* with a separator produced by `self.sep`."""
    prefix = self.sep(sep)
    return f"{prefix}{sql}"
def pad_comment(self, comment: str) -> str:
    """Pad *comment* with single spaces so it renders nicely as `/* comment */`.

    A space is added on a side only when the comment does not already
    start/end with whitespace. Empty comments are returned unchanged
    (the previous version raised IndexError on "").
    """
    if not comment:
        return comment
    if comment[0].strip():
        comment = " " + comment
    if comment[-1].strip():
        comment = comment + " "
    return comment
def maybe_comment(
    self,
    sql: str,
    expression: t.Optional[exp.Expression] = None,
    comments: t.Optional[t.List[str]] = None,
) -> str:
    """Attach `/* ... */` comments to *sql*, honoring the generator's comment rules."""
    # Explicit `comments` wins; otherwise use the expression's attached comments.
    comments = (
        ((expression and expression.comments) if comments is None else comments)  # type: ignore
        if self.comments
        else None
    )
    if not comments or isinstance(expression, self.EXCLUDE_COMMENTS):
        return sql
    comments_sql = " ".join(
        f"/*{self.pad_comment(comment)}*/" for comment in comments if comment
    )
    if not comments_sql:
        return sql
    if isinstance(expression, self.WITH_SEPARATED_COMMENTS):
        # These expressions get their comments on a separate segment, placed
        # before the SQL (after any leading whitespace the SQL starts with).
        return (
            f"{self.sep()}{comments_sql}{sql}"
            if sql[0].isspace()
            else f"{comments_sql}{self.sep()}{sql}"
        )
    return f"{sql} {comments_sql}"
def wrap(self, expression: exp.Expression | str) -> str:
    """Wrap an expression in parentheses, indenting its body when pretty-printing."""
    # Queries (SELECT/UNION/...) are rendered whole; other expressions render
    # their "this" argument.
    this_sql = self.indent(
        (
            self.sql(expression)
            if isinstance(expression, exp.UNWRAPPED_QUERIES)
            else self.sql(expression, "this")
        ),
        level=1,
        pad=0,
    )
    return f"({self.sep('')}{this_sql}{self.seg(')', sep='')}"
def no_identify(self, func: t.Callable[..., str], *args, **kwargs) -> str:
    """Call *func* with identifier quoting (`self.identify`) temporarily disabled.

    The previous setting is always restored, even when *func* raises — the
    original implementation leaked `identify=False` on exceptions.
    """
    original = self.identify
    self.identify = False
    try:
        return func(*args, **kwargs)
    finally:
        self.identify = original
def normalize_func(self, name: str) -> str:
    """Normalize a function name's casing according to `normalize_functions`."""
    mode = self.normalize_functions
    if mode == "upper" or mode is True:
        return name.upper()
    if mode == "lower":
        return name.lower()
    return name
def indent(
    self,
    sql: str,
    level: int = 0,
    pad: t.Optional[int] = None,
    skip_first: bool = False,
    skip_last: bool = False,
) -> str:
    """Indent every line of *sql* by ``level * self._indent + pad`` spaces.

    No-op unless pretty-printing. `skip_first`/`skip_last` leave the
    first/last line untouched.
    """
    if not self.pretty:
        return sql
    amount = level * self._indent + (self.pad if pad is None else pad)
    prefix = " " * amount
    lines = sql.split("\n")
    last = len(lines) - 1
    out = []
    for i, line in enumerate(lines):
        if (i == 0 and skip_first) or (i == last and skip_last):
            out.append(line)
        else:
            out.append(prefix + line)
    return "\n".join(out)
def sql(
    self,
    expression: t.Optional[str | exp.Expression],
    key: t.Optional[str] = None,
    comment: bool = True,
) -> str:
    """Render *expression* (or its argument *key*) to SQL.

    Dispatch order: TRANSFORMS override, then a `<key>_sql` handler method,
    then the generic function/property fallbacks.

    Raises:
        ValueError: If the expression type has no handler or is not an Expression.
    """
    if not expression:
        return ""
    if isinstance(expression, str):
        # Strings pass through untouched (already-rendered SQL fragments).
        return expression
    if key:
        value = expression.args.get(key)
        if value:
            return self.sql(value)
        return ""
    transform = self.TRANSFORMS.get(expression.__class__)
    if callable(transform):
        sql = transform(self, expression)
    elif isinstance(expression, exp.Expression):
        exp_handler_name = f"{expression.key}_sql"
        if hasattr(self, exp_handler_name):
            sql = getattr(self, exp_handler_name)(expression)
        elif isinstance(expression, exp.Func):
            sql = self.function_fallback_sql(expression)
        elif isinstance(expression, exp.Property):
            sql = self.property_sql(expression)
        else:
            raise ValueError(f"Unsupported expression type {expression.__class__.__name__}")
    else:
        raise ValueError(f"Expected an Expression. Received {type(expression)}: {expression}")
    return self.maybe_comment(sql, expression) if self.comments and comment else sql
def uncache_sql(self, expression: exp.Uncache) -> str:
    """Render `UNCACHE TABLE [IF EXISTS] <table>`."""
    exists = " IF EXISTS" if expression.args.get("exists") else ""
    target = self.sql(expression, "this")
    return f"UNCACHE TABLE{exists} {target}"
def cache_sql(self, expression: exp.Cache) -> str:
    """Render `CACHE [LAZY] TABLE <t> [OPTIONS(k = v)] [AS <query>]`."""
    lazy_kw = " LAZY" if expression.args.get("lazy") else ""
    target = self.sql(expression, "this")
    opts = expression.args.get("options")
    opts_sql = f" OPTIONS({self.sql(opts[0])} = {self.sql(opts[1])})" if opts else ""
    query = self.sql(expression, "expression")
    if query:
        query = f" AS{self.sep()}{query}"
    statement = f"CACHE{lazy_kw} TABLE {target}{opts_sql}{query}"
    return self.prepend_ctes(expression, statement)
def characterset_sql(self, expression: exp.CharacterSet) -> str:
    """Render a character set; CAST targets use `CHAR CHARACTER SET x` syntax."""
    if isinstance(expression.parent, exp.Cast):
        return f"CHAR CHARACTER SET {self.sql(expression, 'this')}"
    prefix = "DEFAULT " if expression.args.get("default") else ""
    return f"{prefix}CHARACTER SET={self.sql(expression, 'this')}"
def column_sql(self, expression: exp.Column) -> str:
    """Render a (possibly qualified) column reference, handling the Oracle (+) join mark."""
    join_mark = " (+)" if expression.args.get("join_mark") else ""
    if join_mark and not self.COLUMN_JOIN_MARKS_SUPPORTED:
        self.unsupported("Outer join syntax using the (+) operator is not supported.")
        join_mark = ""
    parts = (
        expression.args.get("catalog"),
        expression.args.get("db"),
        expression.args.get("table"),
        expression.args.get("this"),
    )
    qualified = ".".join(self.sql(part) for part in parts if part)
    return f"{qualified}{join_mark}"
def columnposition_sql(self, expression: exp.ColumnPosition) -> str:
    """Render a column position clause, e.g. `AFTER other_col` or `FIRST`."""
    target = self.sql(expression, "this")
    position = self.sql(expression, "position")
    suffix = f" {target}" if target else ""
    return f"{position}{suffix}"
def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str:
    """Render a column definition: name, type, constraints, and position."""
    column = self.sql(expression, "this")
    kind = self.sql(expression, "kind")
    constraints = self.expressions(expression, key="constraints", sep=" ", flat=True)
    exists = "IF NOT EXISTS " if expression.args.get("exists") else ""
    kind = f"{sep}{kind}" if kind else ""
    constraints = f" {constraints}" if constraints else ""
    position = self.sql(expression, "position")
    position = f" {position}" if position else ""
    # Some dialects omit the data type for computed columns.
    if expression.find(exp.ComputedColumnConstraint) and not self.COMPUTED_COLUMN_WITH_TYPE:
        kind = ""
    return f"{exists}{column}{kind}{constraints}{position}"
def columnconstraint_sql(self, expression: exp.ColumnConstraint) -> str:
    """Render a column constraint, prefixing `CONSTRAINT <name>` when named."""
    name = self.sql(expression, "this")
    kind = self.sql(expression, "kind").strip()
    if name:
        return f"CONSTRAINT {name} {kind}"
    return kind
def computedcolumnconstraint_sql(self, expression: exp.ComputedColumnConstraint) -> str:
    """Render `AS <expr>` with an optional PERSISTED [NOT NULL] suffix."""
    suffix = (
        " PERSISTED NOT NULL"
        if expression.args.get("not_null")
        else " PERSISTED" if expression.args.get("persisted") else ""
    )
    return f"AS {self.sql(expression, 'this')}{suffix}"
def autoincrementcolumnconstraint_sql(self, _) -> str:
    # Delegate to the token renderer so dialects can override the
    # AUTO_INCREMENT spelling via TOKEN_MAPPING.
    return self.token_sql(TokenType.AUTO_INCREMENT)
def compresscolumnconstraint_sql(self, expression: exp.CompressColumnConstraint) -> str:
    """Render `COMPRESS <target>`, wrapping a list of targets in parentheses."""
    target = (
        self.wrap(self.expressions(expression, key="this", flat=True))
        if isinstance(expression.this, list)
        else self.sql(expression, "this")
    )
    return f"COMPRESS {target}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render `GENERATED [ALWAYS|BY DEFAULT [ON NULL]] AS IDENTITY|(expr) (options)`."""
    this = ""
    if expression.this is not None:
        # this=True -> ALWAYS, this=False -> BY DEFAULT; None omits the clause.
        on_null = " ON NULL" if expression.args.get("on_null") else ""
        this = " ALWAYS" if expression.this else f" BY DEFAULT{on_null}"
    start = expression.args.get("start")
    start = f"START WITH {start}" if start else ""
    increment = expression.args.get("increment")
    increment = f" INCREMENT BY {increment}" if increment else ""
    minvalue = expression.args.get("minvalue")
    minvalue = f" MINVALUE {minvalue}" if minvalue else ""
    maxvalue = expression.args.get("maxvalue")
    maxvalue = f" MAXVALUE {maxvalue}" if maxvalue else ""
    cycle = expression.args.get("cycle")
    cycle_sql = ""
    if cycle is not None:
        cycle_sql = f"{' NO' if not cycle else ''} CYCLE"
        # Drop the leading space when CYCLE is the only sequence option.
        cycle_sql = cycle_sql.strip() if not start and not increment else cycle_sql
    sequence_opts = ""
    if start or increment or cycle_sql:
        sequence_opts = f"{start}{increment}{minvalue}{maxvalue}{cycle_sql}"
        sequence_opts = f" ({sequence_opts.strip()})"
    expr = self.sql(expression, "expression")
    expr = f"({expr})" if expr else "IDENTITY"
    return f"GENERATED{this} AS {expr}{sequence_opts}"
def generatedasrowcolumnconstraint_sql(
    self, expression: exp.GeneratedAsRowColumnConstraint
) -> str:
    """Render `GENERATED ALWAYS AS ROW START|END [HIDDEN]`."""
    boundary = "START" if expression.args.get("start") else "END"
    hidden = " HIDDEN" if expression.args.get("hidden") else ""
    return f"GENERATED ALWAYS AS ROW {boundary}{hidden}"
def periodforsystemtimeconstraint_sql(
    self, expression: exp.PeriodForSystemTimeConstraint
) -> str:
    """Render `PERIOD FOR SYSTEM_TIME (<start>, <end>)`."""
    start = self.sql(expression, "this")
    end = self.sql(expression, "expression")
    return f"PERIOD FOR SYSTEM_TIME ({start}, {end})"
def notnullcolumnconstraint_sql(self, expression: exp.NotNullColumnConstraint) -> str:
    """Render NULL or NOT NULL depending on the `allow_null` flag."""
    if expression.args.get("allow_null"):
        return "NULL"
    return "NOT NULL"
def transformcolumnconstraint_sql(self, expression: exp.TransformColumnConstraint) -> str:
    """Render a transform constraint as `AS <expr>`."""
    return "AS " + self.sql(expression, "this")
def primarykeycolumnconstraint_sql(self, expression: exp.PrimaryKeyColumnConstraint) -> str:
    """Render PRIMARY KEY, with ASC/DESC when a sort direction is specified."""
    desc = expression.args.get("desc")
    if desc is None:
        return "PRIMARY KEY"
    direction = " DESC" if desc else " ASC"
    return f"PRIMARY KEY{direction}"
def uniquecolumnconstraint_sql(self, expression: exp.UniqueColumnConstraint) -> str:
    """Render `UNIQUE [<cols>] [USING <index_type>] [<on_conflict>]`."""
    cols = self.sql(expression, "this")
    index_type = expression.args.get("index_type")
    on_conflict = self.sql(expression, "on_conflict")
    parts = ["UNIQUE"]
    if cols:
        parts.append(f" {cols}")
    if index_type:
        parts.append(f" USING {index_type}")
    if on_conflict:
        parts.append(f" {on_conflict}")
    return "".join(parts)
def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
    # Hook for dialects to customize rendering of the created object's name;
    # the base implementation ignores `locations` and renders the target as-is.
    return self.sql(expression, "this")
def create_sql(self, expression: exp.Create) -> str:
    """Render a CREATE statement, slotting properties into their dialect-specific
    positions (see PROPERTIES_LOCATION) around the schema, alias, and body."""
    kind = self.sql(expression, "kind")
    properties = expression.args.get("properties")
    properties_locs = self.locate_properties(properties) if properties else defaultdict()
    this = self.createable_sql(expression, properties_locs)
    properties_sql = ""
    # POST_SCHEMA and POST_WITH properties are rendered together right after the schema.
    if properties_locs.get(exp.Properties.Location.POST_SCHEMA) or properties_locs.get(
        exp.Properties.Location.POST_WITH
    ):
        properties_sql = self.sql(
            exp.Properties(
                expressions=[
                    *properties_locs[exp.Properties.Location.POST_SCHEMA],
                    *properties_locs[exp.Properties.Location.POST_WITH],
                ]
            )
        )
    begin = " BEGIN" if expression.args.get("begin") else ""
    end = " END" if expression.args.get("end") else ""
    expression_sql = self.sql(expression, "expression")
    if expression_sql:
        expression_sql = f"{begin}{self.sep()}{expression_sql}{end}"
        # Functions returning via RETURN may omit the AS keyword in some dialects.
        if self.CREATE_FUNCTION_RETURN_AS or not isinstance(expression.expression, exp.Return):
            if properties_locs.get(exp.Properties.Location.POST_ALIAS):
                postalias_props_sql = self.properties(
                    exp.Properties(
                        expressions=properties_locs[exp.Properties.Location.POST_ALIAS]
                    ),
                    wrapped=False,
                )
                expression_sql = f" AS {postalias_props_sql}{expression_sql}"
            else:
                expression_sql = f" AS{expression_sql}"
    postindex_props_sql = ""
    if properties_locs.get(exp.Properties.Location.POST_INDEX):
        postindex_props_sql = self.properties(
            exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_INDEX]),
            wrapped=False,
            prefix=" ",
        )
    indexes = self.expressions(expression, key="indexes", indent=False, sep=" ")
    indexes = f" {indexes}" if indexes else ""
    index_sql = indexes + postindex_props_sql
    replace = " OR REPLACE" if expression.args.get("replace") else ""
    unique = " UNIQUE" if expression.args.get("unique") else ""
    postcreate_props_sql = ""
    if properties_locs.get(exp.Properties.Location.POST_CREATE):
        postcreate_props_sql = self.properties(
            exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_CREATE]),
            sep=" ",
            prefix=" ",
            wrapped=False,
        )
    # Modifiers appear between CREATE and the object kind.
    modifiers = "".join((replace, unique, postcreate_props_sql))
    postexpression_props_sql = ""
    if properties_locs.get(exp.Properties.Location.POST_EXPRESSION):
        postexpression_props_sql = self.properties(
            exp.Properties(
                expressions=properties_locs[exp.Properties.Location.POST_EXPRESSION]
            ),
            sep=" ",
            prefix=" ",
            wrapped=False,
        )
    exists_sql = " IF NOT EXISTS" if expression.args.get("exists") else ""
    no_schema_binding = (
        " WITH NO SCHEMA BINDING" if expression.args.get("no_schema_binding") else ""
    )
    clone = self.sql(expression, "clone")
    clone = f" {clone}" if clone else ""
    expression_sql = f"CREATE{modifiers} {kind}{exists_sql} {this}{properties_sql}{expression_sql}{postexpression_props_sql}{index_sql}{no_schema_binding}{clone}"
    return self.prepend_ctes(expression, expression_sql)
def sequenceproperties_sql(self, expression: exp.SequenceProperties) -> str:
    """Render sequence options (START WITH, INCREMENT BY, CACHE, OWNED BY, ...)."""
    parts = []
    start = self.sql(expression, "start")
    parts.append(f"START WITH {start}" if start else "")
    increment = self.sql(expression, "increment")
    parts.append(f" INCREMENT BY {increment}" if increment else "")
    minvalue = self.sql(expression, "minvalue")
    parts.append(f" MINVALUE {minvalue}" if minvalue else "")
    maxvalue = self.sql(expression, "maxvalue")
    parts.append(f" MAXVALUE {maxvalue}" if maxvalue else "")
    cache = expression.args.get("cache")
    # cache can be None (absent), True (bare CACHE), or a size.
    if cache is None:
        parts.append("")
    elif cache is True:
        parts.append(" CACHE")
    else:
        parts.append(f" CACHE {cache}")
    options = self.expressions(expression, key="options", flat=True, sep=" ")
    parts.append(f" {options}" if options else "")
    owned = self.sql(expression, "owned")
    parts.append(f" OWNED BY {owned}" if owned else "")
    return "".join(parts).lstrip()
def clone_sql(self, expression: exp.Clone) -> str:
    """Render `[SHALLOW ]COPY|CLONE <table>` (COPY only when the dialect supports it)."""
    target = self.sql(expression, "this")
    prefix = "SHALLOW " if expression.args.get("shallow") else ""
    use_copy = expression.args.get("copy") and self.SUPPORTS_TABLE_COPY
    keyword = "COPY" if use_copy else "CLONE"
    return f"{prefix}{keyword} {target}"
def describe_sql(self, expression: exp.Describe) -> str:
    """Render `DESCRIBE [EXTENDED] <target>`."""
    extended = " EXTENDED" if expression.args.get("extended") else ""
    target = self.sql(expression, "this")
    return f"DESCRIBE{extended} {target}"
def heredoc_sql(self, expression: exp.Heredoc) -> str:
    """Render a dollar-quoted (heredoc) string: `$tag$body$tag$`."""
    tag = self.sql(expression, "tag")
    body = self.sql(expression, "this")
    delimiter = f"${tag}$"
    return f"{delimiter}{body}{delimiter}"
def prepend_ctes(self, expression: exp.Expression, sql: str) -> str:
    """Prefix *sql* with the expression's WITH clause, if it has one."""
    with_sql = self.sql(expression, "with")
    if not with_sql:
        return sql
    return f"{with_sql}{self.sep()}{sql}"
def with_sql(self, expression: exp.With) -> str:
    """Render a WITH clause, emitting RECURSIVE only when the dialect requires it."""
    ctes = self.expressions(expression, flat=True)
    recursive = ""
    if self.CTE_RECURSIVE_KEYWORD_REQUIRED and expression.args.get("recursive"):
        recursive = "RECURSIVE "
    return f"WITH {recursive}{ctes}"
def cte_sql(self, expression: exp.CTE) -> str:
    """Render `<alias> AS [NOT MATERIALIZED |MATERIALIZED ](<query>)`."""
    alias = self.sql(expression, "alias")
    materialized = expression.args.get("materialized")
    if materialized is False:
        hint = "NOT MATERIALIZED "
    elif materialized:
        hint = "MATERIALIZED "
    else:
        hint = ""
    return f"{alias} AS {hint}{self.wrap(expression)}"
def tablealias_sql(self, expression: exp.TableAlias) -> str:
    """Render a table alias with optional column list; falls back to `_t`."""
    alias = self.sql(expression, "this")
    columns_sql = self.expressions(expression, key="columns", flat=True)
    if columns_sql:
        if self.SUPPORTS_TABLE_ALIAS_COLUMNS:
            columns_sql = f"({columns_sql})"
        else:
            self.unsupported("Named columns are not supported in table alias.")
            columns_sql = ""
    if not alias and not self.dialect.UNNEST_COLUMN_ONLY:
        alias = "_t"
    return f"{alias}{columns_sql}"
def bitstring_sql(self, expression: exp.BitString) -> str:
    """Render a bit literal, or its integer value when the dialect has no bit syntax."""
    value = self.sql(expression, "this")
    start = self.dialect.BIT_START
    if start:
        return f"{start}{value}{self.dialect.BIT_END}"
    return str(int(value, 2))
def hexstring_sql(self, expression: exp.HexString) -> str:
    """Render a hex literal, or its integer value when the dialect has no hex syntax."""
    value = self.sql(expression, "this")
    start = self.dialect.HEX_START
    if start:
        return f"{start}{value}{self.dialect.HEX_END}"
    return str(int(value, 16))
def bytestring_sql(self, expression: exp.ByteString) -> str:
    """Render a byte-string literal, or the bare value when the dialect has no syntax for it."""
    value = self.sql(expression, "this")
    start = self.dialect.BYTE_START
    if start:
        return f"{start}{value}{self.dialect.BYTE_END}"
    return value
def unicodestring_sql(self, expression: exp.UnicodeString) -> str:
    """Render a Unicode string literal, translating escape syntax between dialects."""
    this = self.sql(expression, "this")
    escape = expression.args.get("escape")
    if self.dialect.UNICODE_START:
        # Dialect has native U&'...' syntax; keep the custom UESCAPE if present.
        escape = f" UESCAPE {self.sql(escape)}" if escape else ""
        return f"{self.dialect.UNICODE_START}{this}{self.dialect.UNICODE_END}{escape}"
    # No native syntax: rewrite escapes to \uXXXX and emit a regular quoted string.
    if escape:
        pattern = re.compile(rf"{escape.name}(\d+)")
    else:
        pattern = ESCAPED_UNICODE_RE
    this = pattern.sub(r"\\u\1", this)
    return f"{self.dialect.QUOTE_START}{this}{self.dialect.QUOTE_END}"
def rawstring_sql(self, expression: exp.RawString) -> str:
    """Render a raw string by doubling backslashes, escaping quotes, and quoting it."""
    escaped = expression.this.replace("\\", "\\\\")
    body = self.escape_str(escaped)
    return f"{self.dialect.QUOTE_START}{body}{self.dialect.QUOTE_END}"
def datatypeparam_sql(self, expression: exp.DataTypeParam) -> str:
    """Render a type parameter, appending its specifier when the dialect allows one."""
    value = self.sql(expression, "this")
    specifier = self.sql(expression, "expression")
    if specifier and self.DATA_TYPE_SPECIFIERS_ALLOWED:
        return f"{value} {specifier}"
    return value
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type, including nested/struct params, sizes, and time zones."""
    type_value = expression.this
    # User-defined types render their declared "kind" verbatim.
    if type_value == exp.DataType.Type.USERDEFINED and expression.args.get("kind"):
        type_sql = self.sql(expression, "kind")
    else:
        type_sql = (
            self.TYPE_MAPPING.get(type_value, type_value.value)
            if isinstance(type_value, exp.DataType.Type)
            else type_value
        )
    nested = ""
    interior = self.expressions(expression, flat=True)
    values = ""
    if interior:
        if expression.args.get("nested"):
            # e.g. STRUCT<...> / ARRAY<...> using the dialect's struct delimiters.
            nested = f"{self.STRUCT_DELIMITER[0]}{interior}{self.STRUCT_DELIMITER[1]}"
            if expression.args.get("values") is not None:
                # Inline literal values, e.g. ARRAY[...] or ROW(...).
                delimiters = ("[", "]") if type_value == exp.DataType.Type.ARRAY else ("(", ")")
                values = self.expressions(expression, key="values", flat=True)
                values = f"{delimiters[0]}{values}{delimiters[1]}"
        elif type_value == exp.DataType.Type.INTERVAL:
            nested = f" {interior}"
        else:
            # Plain parameterized types, e.g. VARCHAR(10).
            nested = f"({interior})"
    type_sql = f"{type_sql}{nested}{values}"
    if self.TZ_TO_WITH_TIME_ZONE and type_value in (
        exp.DataType.Type.TIMETZ,
        exp.DataType.Type.TIMESTAMPTZ,
    ):
        type_sql = f"{type_sql} WITH TIME ZONE"
    return type_sql
def directory_sql(self, expression: exp.Directory) -> str:
    """Render `[LOCAL ]DIRECTORY <path>[ <row_format>]`."""
    local = "LOCAL " if expression.args.get("local") else ""
    row_format = self.sql(expression, "row_format")
    suffix = f" {row_format}" if row_format else ""
    path = self.sql(expression, "this")
    return f"{local}DIRECTORY {path}{suffix}"
def delete_sql(self, expression: exp.Delete) -> str:
    """Render a DELETE statement; RETURNING placement depends on the dialect."""
    target = self.sql(expression, "this")
    target = f" FROM {target}" if target else ""
    using = self.sql(expression, "using")
    using = f" USING {using}" if using else ""
    where = self.sql(expression, "where")
    returning = self.sql(expression, "returning")
    limit = self.sql(expression, "limit")
    tables = self.expressions(expression, key="tables")
    tables = f" {tables}" if tables else ""
    # Some dialects (e.g. Postgres) put RETURNING at the end, others up front.
    if self.RETURNING_END:
        body = f"{target}{using}{where}{returning}{limit}"
    else:
        body = f"{returning}{target}{using}{where}{limit}"
    return self.prepend_ctes(expression, f"DELETE{tables}{body}")
def drop_sql(self, expression: exp.Drop) -> str:
    """Render a DROP statement with its optional modifiers."""
    args = expression.args
    target = self.sql(expression, "this")
    cols = self.expressions(expression, flat=True)
    cols = f" ({cols})" if cols else ""
    kind = args["kind"]
    exists = " IF EXISTS " if args.get("exists") else " "
    temporary = " TEMPORARY" if args.get("temporary") else ""
    materialized = " MATERIALIZED" if args.get("materialized") else ""
    cascade = " CASCADE" if args.get("cascade") else ""
    constraints = " CONSTRAINTS" if args.get("constraints") else ""
    purge = " PURGE" if args.get("purge") else ""
    return (
        f"DROP{temporary}{materialized} {kind}{exists}"
        f"{target}{cols}{cascade}{constraints}{purge}"
    )
def except_sql(self, expression: exp.Except) -> str:
    # EXCEPT shares the generic set-operation renderer with UNION/INTERSECT.
    return self.set_operations(expression)
def except_op(self, expression: exp.Except) -> str:
    """Return the EXCEPT operator keyword: EXCEPT for distinct, EXCEPT ALL otherwise."""
    if expression.args.get("distinct"):
        return "EXCEPT"
    return "EXCEPT ALL"
def fetch_sql(self, expression: exp.Fetch) -> str:
    """Render `FETCH <dir> <n> [PERCENT] ROWS ONLY|WITH TIES`."""
    direction = expression.args.get("direction")
    direction = f" {direction}" if direction else ""
    count = expression.args.get("count")
    count = f" {count}" if count else ""
    if expression.args.get("percent"):
        count += " PERCENT"
    suffix = "WITH TIES" if expression.args.get("with_ties") else "ONLY"
    return f"{self.seg('FETCH')}{direction}{count} ROWS {suffix}"
def filter_sql(self, expression: exp.Filter) -> str:
    """Render aggregate filtering, rewriting to IF(...) when FILTER is unsupported."""
    if self.AGGREGATE_FILTER_SUPPORTED:
        agg_sql = self.sql(expression, "this")
        cond_sql = self.sql(expression, "expression").strip()
        return f"{agg_sql} FILTER({cond_sql})"
    # Fallback: push the condition into the aggregate's argument via IF.
    agg = expression.this
    agg_arg = agg.this
    cond = expression.expression.this
    agg_arg.replace(exp.If(this=cond.copy(), true=agg_arg.copy()))
    return self.sql(agg)
def hint_sql(self, expression: exp.Hint) -> str:
    """Render an optimizer hint comment, or nothing if hints are unsupported."""
    if not self.QUERY_HINTS:
        self.unsupported("Hints are not supported")
        return ""
    hints = self.expressions(expression, sep=self.QUERY_HINT_SEP).strip()
    return f" /*+ {hints} */"
def indexparameters_sql(self, expression: exp.IndexParameters) -> str:
    """Render index creation parameters (USING, columns, INCLUDE, WITH, tablespace, ...)."""
    using = self.sql(expression, "using")
    using = f" USING {using}" if using else ""
    cols = self.expressions(expression, key="columns", flat=True)
    cols = f"({cols})" if cols else ""
    partition_by = self.expressions(expression, key="partition_by", flat=True)
    partition_by = f" PARTITION BY {partition_by}" if partition_by else ""
    where = self.sql(expression, "where")
    include = self.expressions(expression, key="include", flat=True)
    include = f" INCLUDE ({include})" if include else include
    with_storage = self.expressions(expression, key="with_storage", flat=True)
    with_storage = f" WITH ({with_storage})" if with_storage else ""
    tablespace = self.sql(expression, "tablespace")
    tablespace = f" USING INDEX TABLESPACE {tablespace}" if tablespace else ""
    return f"{using}{cols}{include}{with_storage}{tablespace}{partition_by}{where}"
def index_sql(self, expression: exp.Index) -> str:
    """Render an index definition: [UNIQUE] [PRIMARY] [AMP] [INDEX] name
    [ON table] followed by the index parameters."""
    unique = "UNIQUE " if expression.args.get("unique") else ""
    primary = "PRIMARY " if expression.args.get("primary") else ""
    amp = "AMP " if expression.args.get("amp") else ""
    name = self.sql(expression, "this")
    name = f"{name} " if name else ""
    table = self.sql(expression, "table")
    table = f"{self.INDEX_ON} {table}" if table else ""
    # The bare INDEX keyword is only emitted when there is no ON <table>.
    index = "INDEX " if not table else ""
    params = self.sql(expression, "params")
    return f"{unique}{primary}{amp}{index}{name}{table}{params}"
def identifier_sql(self, expression: exp.Identifier) -> str:
    """Render an identifier, normalizing case and adding quotes when needed.

    Quotes are emitted when the identifier was quoted in the input, when the
    dialect's `can_identify` check requires them, when the lowercased name is
    a reserved keyword, or when it starts with a digit in dialects that
    forbid unquoted leading digits.
    """
    text = expression.name
    lower = text.lower()
    text = lower if self.normalize and not expression.quoted else text
    # Escape any embedded closing-quote characters before wrapping.
    text = text.replace(self.dialect.IDENTIFIER_END, self._escaped_identifier_end)
    if (
        expression.quoted
        or self.dialect.can_identify(text, self.identify)
        or lower in self.RESERVED_KEYWORDS
        or (not self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT and text[:1].isdigit())
    ):
        text = f"{self.dialect.IDENTIFIER_START}{text}{self.dialect.IDENTIFIER_END}"
    return text
def inputoutputformat_sql(self, expression: exp.InputOutputFormat) -> str:
    """Render Hive-style INPUTFORMAT/OUTPUTFORMAT clauses."""
    clauses = []
    for keyword, arg_key in (("INPUTFORMAT", "input_format"), ("OUTPUTFORMAT", "output_format")):
        value = self.sql(expression, arg_key)
        clauses.append(f"{keyword} {value}" if value else "")
    return self.sep().join(clauses)
def national_sql(self, expression: exp.National, prefix: str = "N") -> str:
    """Render a national-character string literal, e.g. N'abc'."""
    literal = exp.Literal.string(expression.name)
    return f"{prefix}{self.sql(literal)}"
def partition_sql(self, expression: exp.Partition) -> str:
    """Render a PARTITION(...) specification."""
    inner = self.expressions(expression, flat=True)
    return f"PARTITION({inner})"
def properties_sql(self, expression: exp.Properties) -> str:
    """Render properties, routing each to its POST_SCHEMA (root) or
    POST_WITH slot; properties configured for any other location are
    silently skipped here."""
    root_properties = []
    with_properties = []
    for p in expression.expressions:
        p_loc = self.PROPERTIES_LOCATION[p.__class__]
        if p_loc == exp.Properties.Location.POST_WITH:
            with_properties.append(p)
        elif p_loc == exp.Properties.Location.POST_SCHEMA:
            root_properties.append(p)
    return self.root_properties(
        exp.Properties(expressions=root_properties)
    ) + self.with_properties(exp.Properties(expressions=with_properties))
def root_properties(self, properties: exp.Properties) -> str:
    """Render space-separated properties that directly follow the schema."""
    if not properties.expressions:
        return ""
    return self.sep() + self.expressions(properties, indent=False, sep=" ")
def properties(
    self,
    properties: exp.Properties,
    prefix: str = "",
    sep: str = ", ",
    suffix: str = "",
    wrapped: bool = True,
) -> str:
    """Render a property list.

    Args:
        properties: the Properties node to render.
        prefix: text emitted before the list (e.g. a WITH segment).
        sep: separator between individual properties.
        suffix: text appended after the list.
        wrapped: wrap the list in parentheses when True.
    """
    if properties.expressions:
        expressions = self.expressions(properties, sep=sep, indent=False)
        if expressions:
            expressions = self.wrap(expressions) if wrapped else expressions
            # A space is only inserted when the prefix has visible text.
            return f"{prefix}{' ' if prefix.strip() else ''}{expressions}{suffix}"
    return ""
def with_properties(self, properties: exp.Properties) -> str:
    """Render properties preceded by a WITH segment."""
    with_prefix = self.seg("WITH")
    return self.properties(properties, prefix=with_prefix)
def locate_properties(self, properties: exp.Properties) -> t.DefaultDict:
    """Group properties by their configured output location.

    Properties whose location is UNSUPPORTED trigger an `unsupported`
    warning instead of being grouped.
    """
    properties_locs = defaultdict(list)
    for p in properties.expressions:
        p_loc = self.PROPERTIES_LOCATION[p.__class__]
        if p_loc != exp.Properties.Location.UNSUPPORTED:
            properties_locs[p_loc].append(p)
        else:
            self.unsupported(f"Unsupported property {p.key}")
    return properties_locs
def property_name(self, expression: exp.Property, string_key: bool = False) -> str:
    """Render a property's key, optionally quoting bare names as strings."""
    if isinstance(expression.this, exp.Dot):
        return self.sql(expression, "this")
    name = expression.name
    return f"'{name}'" if string_key else name
def property_sql(self, expression: exp.Property) -> str:
    """Render a property as NAME=value.

    Plain exp.Property nodes use their own key; typed subclasses look up
    their canonical name in PROPERTY_TO_NAME and warn when none exists.
    """
    property_cls = expression.__class__
    if property_cls == exp.Property:
        return f"{self.property_name(expression)}={self.sql(expression, 'value')}"
    property_name = exp.Properties.PROPERTY_TO_NAME.get(property_cls)
    if not property_name:
        self.unsupported(f"Unsupported property {expression.key}")
    return f"{property_name}={self.sql(expression, 'this')}"
def likeproperty_sql(self, expression: exp.LikeProperty) -> str:
    """Render CREATE TABLE ... LIKE.

    Dialects without native LIKE support fall back to
    AS SELECT * FROM <src> LIMIT 0, which copies the schema but no rows;
    LIKE options cannot be transpiled in that case.
    """
    if self.SUPPORTS_CREATE_TABLE_LIKE:
        options = " ".join(f"{e.name} {self.sql(e, 'value')}" for e in expression.expressions)
        options = f" {options}" if options else ""
        like = f"LIKE {self.sql(expression, 'this')}{options}"
        if self.LIKE_PROPERTY_INSIDE_SCHEMA and not isinstance(expression.parent, exp.Schema):
            like = f"({like})"
        return like
    if expression.expressions:
        self.unsupported("Transpilation of LIKE property options is unsupported")
    # Zero-row select reproduces only the column definitions.
    select = exp.select("*").from_(expression.this).limit(0)
    return f"AS {self.sql(select)}"
def fallbackproperty_sql(self, expression: exp.FallbackProperty) -> str:
no = "NO " if expression.args.get("no") else ""
protection = " PROTECTION" if expression.args.get("protection") else ""
return f"{no}FALLBACK{protection}"
def journalproperty_sql(self, expression: exp.JournalProperty) -> str:
no = "NO " if expression.args.get("no") else ""
local = expression.args.get("local")
local = f"{local} " if local else ""
dual = "DUAL " if expression.args.get("dual") else ""
before = "BEFORE " if expression.args.get("before") else ""
after = "AFTER " if expression.args.get("after") else ""
return f"{no}{local}{dual}{before}{after}JOURNAL"
def freespaceproperty_sql(self, expression: exp.FreespaceProperty) -> str:
    """Render FREESPACE=<value>[ PERCENT]."""
    value = self.sql(expression, "this")
    suffix = " PERCENT" if expression.args.get("percent") else ""
    return f"FREESPACE={value}{suffix}"
def checksumproperty_sql(self, expression: exp.ChecksumProperty) -> str:
if expression.args.get("default"):
property = "DEFAULT"
elif expression.args.get("on"):
property = "ON"
else:
property = "OFF"
return f"CHECKSUM={property}"
def mergeblockratioproperty_sql(self, expression: exp.MergeBlockRatioProperty) -> str:
if expression.args.get("no"):
return "NO MERGEBLOCKRATIO"
if expression.args.get("default"):
return "DEFAULT MERGEBLOCKRATIO"
percent = " PERCENT" if expression.args.get("percent") else ""
return f"MERGEBLOCKRATIO={self.sql(expression, 'this')}{percent}"
def datablocksizeproperty_sql(self, expression: exp.DataBlocksizeProperty) -> str:
default = expression.args.get("default")
minimum = expression.args.get("minimum")
maximum = expression.args.get("maximum")
if default or minimum or maximum:
if default:
prop = "DEFAULT"
elif minimum:
prop = "MINIMUM"
else:
prop = "MAXIMUM"
return f"{prop} DATABLOCKSIZE"
units = expression.args.get("units")
units = f" {units}" if units else ""
return f"DATABLOCKSIZE={self.sql(expression, 'size')}{units}"
def blockcompressionproperty_sql(self, expression: exp.BlockCompressionProperty) -> str:
autotemp = expression.args.get("autotemp")
always = expression.args.get("always")
default = expression.args.get("default")
manual = expression.args.get("manual")
never = expression.args.get("never")
if autotemp is not None:
prop = f"AUTOTEMP({self.expressions(autotemp)})"
elif always:
prop = "ALWAYS"
elif default:
prop = "DEFAULT"
elif manual:
prop = "MANUAL"
elif never:
prop = "NEVER"
return f"BLOCKCOMPRESSION={prop}"
def isolatedloadingproperty_sql(self, expression: exp.IsolatedLoadingProperty) -> str:
no = expression.args.get("no")
no = " NO" if no else ""
concurrent = expression.args.get("concurrent")
concurrent = " CONCURRENT" if concurrent else ""
for_ = ""
if expression.args.get("for_all"):
for_ = " FOR ALL"
elif expression.args.get("for_insert"):
for_ = " FOR INSERT"
elif expression.args.get("for_none"):
for_ = " FOR NONE"
return f"WITH{no}{concurrent} ISOLATED LOADING{for_}"
def partitionboundspec_sql(self, expression: exp.PartitionBoundSpec) -> str:
    """Render a partition bound specification in one of three forms:
    IN (...) for list partitions, WITH (MODULUS ..., REMAINDER ...) for
    hash partitions, or FROM (...) TO (...) for range partitions."""
    if isinstance(expression.this, list):
        return f"IN ({self.expressions(expression, key='this', flat=True)})"
    if expression.this:
        modulus = self.sql(expression, "this")
        remainder = self.sql(expression, "expression")
        return f"WITH (MODULUS {modulus}, REMAINDER {remainder})"
    from_expressions = self.expressions(expression, key="from_expressions", flat=True)
    to_expressions = self.expressions(expression, key="to_expressions", flat=True)
    return f"FROM ({from_expressions}) TO ({to_expressions})"
def partitionedofproperty_sql(self, expression: exp.PartitionedOfProperty) -> str:
    """Render PARTITION OF <parent> FOR VALUES ... | DEFAULT."""
    parent = self.sql(expression, "this")
    spec = expression.expression
    if isinstance(spec, exp.PartitionBoundSpec):
        suffix = f" FOR VALUES {self.sql(spec)}"
    else:
        suffix = " DEFAULT"
    return f"PARTITION OF {parent}{suffix}"
def lockingproperty_sql(self, expression: exp.LockingProperty) -> str:
    """Render a Teradata LOCKING clause (kind, target, FOR/IN mode, OVERRIDE)."""
    args = expression.args
    kind = args.get("kind")
    target = f" {self.sql(expression, 'this')}" if expression.this else ""
    for_or_in = args.get("for_or_in")
    for_or_in = f" {for_or_in}" if for_or_in else ""
    lock_type = args.get("lock_type")
    override = " OVERRIDE" if args.get("override") else ""
    return f"LOCKING {kind}{target}{for_or_in} {lock_type}{override}"
def withdataproperty_sql(self, expression: exp.WithDataProperty) -> str:
data_sql = f"WITH {'NO ' if expression.args.get('no') else ''}DATA"
statistics = expression.args.get("statistics")
statistics_sql = ""
if statistics is not None:
statistics_sql = f" AND {'NO ' if not statistics else ''}STATISTICS"
return f"{data_sql}{statistics_sql}"
def withsystemversioningproperty_sql(self, expression: exp.WithSystemVersioningProperty) -> str:
    """Render WITH(SYSTEM_VERSIONING=ON[(HISTORY_TABLE=...,
    DATA_CONSISTENCY_CHECK=...)]) for system-versioned temporal tables."""
    sql = "WITH(SYSTEM_VERSIONING=ON"
    if expression.this:
        history_table = self.sql(expression, "this")
        sql = f"{sql}(HISTORY_TABLE={history_table}"
        if expression.expression:
            data_consistency_check = self.sql(expression, "expression")
            sql = f"{sql}, DATA_CONSISTENCY_CHECK={data_consistency_check}"
        # Close the inner option list opened for HISTORY_TABLE.
        sql = f"{sql})"
    return f"{sql})"
def insert_sql(self, expression: exp.Insert) -> str:
    """Render an INSERT statement, covering dialect-specific variants:
    INSERT OVERWRITE, OR <alternative>, IGNORE, FUNCTION targets, IF EXISTS,
    PARTITION, REPLACE WHERE, ON CONFLICT, BY NAME and RETURNING."""
    hint = self.sql(expression, "hint")
    overwrite = expression.args.get("overwrite")
    # Directory targets always use the plain OVERWRITE/INTO keywords.
    if isinstance(expression.this, exp.Directory):
        this = " OVERWRITE" if overwrite else " INTO"
    else:
        this = self.INSERT_OVERWRITE if overwrite else " INTO"
    alternative = expression.args.get("alternative")
    alternative = f" OR {alternative}" if alternative else ""
    ignore = " IGNORE" if expression.args.get("ignore") else ""
    is_function = expression.args.get("is_function")
    if is_function:
        this = f"{this} FUNCTION"
    this = f"{this} {self.sql(expression, 'this')}"
    exists = " IF EXISTS" if expression.args.get("exists") else ""
    partition_sql = (
        f" {self.sql(expression, 'partition')}" if expression.args.get("partition") else ""
    )
    where = self.sql(expression, "where")
    where = f"{self.sep()}REPLACE WHERE {where}" if where else ""
    expression_sql = f"{self.sep()}{self.sql(expression, 'expression')}"
    on_conflict = self.sql(expression, "conflict")
    on_conflict = f" {on_conflict}" if on_conflict else ""
    by_name = " BY NAME" if expression.args.get("by_name") else ""
    returning = self.sql(expression, "returning")
    # Dialects disagree on whether RETURNING comes after or before the values.
    if self.RETURNING_END:
        expression_sql = f"{expression_sql}{on_conflict}{returning}"
    else:
        expression_sql = f"{returning}{expression_sql}{on_conflict}"
    sql = f"INSERT{hint}{alternative}{ignore}{this}{by_name}{exists}{partition_sql}{where}{expression_sql}"
    return self.prepend_ctes(expression, sql)
def intersect_sql(self, expression: exp.Intersect) -> str:
    """Render an INTERSECT via the shared set-operation generator."""
    return self.set_operations(expression)
def intersect_op(self, expression: exp.Intersect) -> str:
return f"INTERSECT{'' if expression.args.get('distinct') else ' ALL'}"
def introducer_sql(self, expression: exp.Introducer) -> str:
    """Render a charset introducer followed by its literal, e.g. _utf8'x'."""
    return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
def kill_sql(self, expression: exp.Kill) -> str:
    """Render a KILL [kind] [id] statement."""
    pieces = ["KILL"]
    for key in ("kind", "this"):
        value = self.sql(expression, key)
        if value:
            pieces.append(value)
    return " ".join(pieces)
def pseudotype_sql(self, expression: exp.PseudoType) -> str:
    """Render a pseudo-type by its bare name."""
    return expression.name
def objectidentifier_sql(self, expression: exp.ObjectIdentifier) -> str:
    """Render an object identifier by its bare name."""
    return expression.name
def onconflict_sql(self, expression: exp.OnConflict) -> str:
    """Render ON CONFLICT / ON DUPLICATE KEY with its optional constraint,
    conflict-key list, action, and SET assignments."""
    conflict = "ON DUPLICATE KEY" if expression.args.get("duplicate") else "ON CONFLICT"
    constraint = self.sql(expression, "constraint")
    constraint = f" ON CONSTRAINT {constraint}" if constraint else ""
    conflict_keys = self.expressions(expression, key="conflict_keys", flat=True)
    conflict_keys = f"({conflict_keys}) " if conflict_keys else " "
    action = self.sql(expression, "action")
    expressions = self.expressions(expression, flat=True)
    if expressions:
        # MySQL-style dialects omit the SET keyword after DO UPDATE.
        set_keyword = "SET " if self.DUPLICATE_KEY_UPDATE_WITH_SET else ""
        expressions = f" {set_keyword}{expressions}"
    return f"{conflict}{constraint}{conflict_keys}{action}{expressions}"
def returning_sql(self, expression: exp.Returning) -> str:
    """Render a RETURNING clause with its projection list."""
    return f"{self.seg('RETURNING')} {self.expressions(expression, flat=True)}"
def rowformatdelimitedproperty_sql(self, expression: exp.RowFormatDelimitedProperty) -> str:
fields = expression.args.get("fields")
fields = f" FIELDS TERMINATED BY {fields}" if fields else ""
escaped = expression.args.get("escaped")
escaped = f" ESCAPED BY {escaped}" if escaped else ""
items = expression.args.get("collection_items")
items = f" COLLECTION ITEMS TERMINATED BY {items}" if items else ""
keys = expression.args.get("map_keys")
keys = f" MAP KEYS TERMINATED BY {keys}" if keys else ""
lines = expression.args.get("lines")
lines = f" LINES TERMINATED BY {lines}" if lines else ""
null = expression.args.get("null")
null = f" NULL DEFINED AS {null}" if null else ""
return f"ROW FORMAT DELIMITED{fields}{escaped}{items}{keys}{lines}{null}"
def withtablehint_sql(self, expression: exp.WithTableHint) -> str:
    """Render a T-SQL WITH (...) table hint list."""
    return f"WITH ({self.expressions(expression, flat=True)})"
def indextablehint_sql(self, expression: exp.IndexTableHint) -> str:
    """Render an index table hint: <this> INDEX [FOR <target>] (...)."""
    rendered = f"{self.sql(expression, 'this')} INDEX"
    target = self.sql(expression, "target")
    if target:
        rendered += f" FOR {target}"
    return f"{rendered} ({self.expressions(expression, flat=True)})"
def historicaldata_sql(self, expression: exp.HistoricalData) -> str:
    """Render time-travel syntax of the form <this> (<kind> => <expr>)."""
    this = self.sql(expression, "this")
    kind = self.sql(expression, "kind")
    expr = self.sql(expression, "expression")
    return f"{this} ({kind} => {expr})"
def table_parts(self, expression: exp.Table) -> str:
    """Render the dotted catalog.db.name path of a table, skipping absent parts."""
    parts = (expression.args.get(key) for key in ("catalog", "db", "this"))
    return ".".join(self.sql(part) for part in parts if part is not None)
def table_sql(self, expression: exp.Table, sep: str = " AS ") -> str:
    """Render a table reference with all its optional trimmings: ONLY,
    version (time travel), alias, hints, pivots, joins, laterals,
    Snowflake FILE_FORMAT/PATTERN options, WITH ORDINALITY and WHEN."""
    table = self.table_parts(expression)
    only = "ONLY " if expression.args.get("only") else ""
    version = self.sql(expression, "version")
    version = f" {version}" if version else ""
    alias = self.sql(expression, "alias")
    alias = f"{sep}{alias}" if alias else ""
    hints = self.expressions(expression, key="hints", sep=" ")
    hints = f" {hints}" if hints and self.TABLE_HINTS else ""
    pivots = self.expressions(expression, key="pivots", sep=" ", flat=True)
    pivots = f" {pivots}" if pivots else ""
    joins = self.expressions(expression, key="joins", sep="", skip_first=True)
    laterals = self.expressions(expression, key="laterals", sep="")
    file_format = self.sql(expression, "format")
    if file_format:
        pattern = self.sql(expression, "pattern")
        pattern = f", PATTERN => {pattern}" if pattern else ""
        file_format = f" (FILE_FORMAT => {file_format}{pattern})"
    ordinality = expression.args.get("ordinality") or ""
    if ordinality:
        # WITH ORDINALITY absorbs the alias so it is emitted after it.
        ordinality = f" WITH ORDINALITY{alias}"
        alias = ""
    when = self.sql(expression, "when")
    if when:
        table = f"{table} {when}"
    return f"{only}{table}{version}{file_format}{alias}{hints}{pivots}{joins}{laterals}{ordinality}"
def tablesample_sql(
    self,
    expression: exp.TableSample,
    sep: str = " AS ",
    tablesample_keyword: t.Optional[str] = None,
) -> str:
    """Render a TABLESAMPLE clause (method, bucket, size/percent, seed).

    When the dialect places the alias after the sample clause
    (ALIAS_POST_TABLESAMPLE), the alias is stripped from the sampled table
    and re-emitted at the end.
    """
    if self.dialect.ALIAS_POST_TABLESAMPLE and expression.this and expression.this.alias:
        table = expression.this.copy()
        table.set("alias", None)
        this = self.sql(table)
        alias = f"{sep}{self.sql(expression.this, 'alias')}"
    else:
        this = self.sql(expression, "this")
        alias = ""
    method = self.sql(expression, "method")
    method = f"{method} " if method and self.TABLESAMPLE_WITH_METHOD else ""
    numerator = self.sql(expression, "bucket_numerator")
    denominator = self.sql(expression, "bucket_denominator")
    field = self.sql(expression, "bucket_field")
    field = f" ON {field}" if field else ""
    bucket = f"BUCKET {numerator} OUT OF {denominator}{field}" if numerator else ""
    seed = self.sql(expression, "seed")
    seed = f" {self.TABLESAMPLE_SEED_KEYWORD} ({seed})" if seed else ""
    size = self.sql(expression, "size")
    if size and self.TABLESAMPLE_SIZE_IS_ROWS:
        size = f"{size} ROWS"
    percent = self.sql(expression, "percent")
    if percent and not self.dialect.TABLESAMPLE_SIZE_IS_PERCENT:
        percent = f"{percent} PERCENT"
    expr = f"{bucket}{percent}{size}"
    if self.TABLESAMPLE_REQUIRES_PARENS:
        expr = f"({expr})"
    return (
        f"{this} {tablesample_keyword or self.TABLESAMPLE_KEYWORDS} {method}{expr}{seed}{alias}"
    )
def pivot_sql(self, expression: exp.Pivot) -> str:
    """Render PIVOT/UNPIVOT.

    When `this` is set, the DuckDB-style simplified PIVOT/UNPIVOT form is
    emitted (ON/USING/GROUP BY); otherwise the standard
    PIVOT(... FOR ...) [AS alias] form is used.
    """
    expressions = self.expressions(expression, flat=True)
    if expression.this:
        this = self.sql(expression, "this")
        if not expressions:
            return f"UNPIVOT {this}"
        on = f"{self.seg('ON')} {expressions}"
        using = self.expressions(expression, key="using", flat=True)
        using = f"{self.seg('USING')} {using}" if using else ""
        group = self.sql(expression, "group")
        return f"PIVOT {this}{on}{using}{group}"
    alias = self.sql(expression, "alias")
    alias = f" AS {alias}" if alias else ""
    direction = "UNPIVOT" if expression.unpivot else "PIVOT"
    field = self.sql(expression, "field")
    include_nulls = expression.args.get("include_nulls")
    if include_nulls is not None:
        nulls = " INCLUDE NULLS " if include_nulls else " EXCLUDE NULLS "
    else:
        nulls = ""
    return f"{direction}{nulls}({expressions} FOR {field}){alias}"
def version_sql(self, expression: exp.Version) -> str:
    """Render a versioned (time-travel) clause: FOR <name> <kind> <expr>."""
    this = f"FOR {expression.name}"
    kind = expression.text("kind")
    expr = self.sql(expression, "expression")
    return f"{this} {kind} {expr}"
def tuple_sql(self, expression: exp.Tuple) -> str:
    """Render a parenthesized tuple of expressions."""
    return f"({self.expressions(expression, flat=True)})"
def update_sql(self, expression: exp.Update) -> str:
    """Render an UPDATE statement; RETURNING placement follows
    self.RETURNING_END (after FROM/WHERE vs. before them)."""
    this = self.sql(expression, "this")
    set_sql = self.expressions(expression, flat=True)
    from_sql = self.sql(expression, "from")
    where_sql = self.sql(expression, "where")
    returning = self.sql(expression, "returning")
    order = self.sql(expression, "order")
    limit = self.sql(expression, "limit")
    if self.RETURNING_END:
        expression_sql = f"{from_sql}{where_sql}{returning}"
    else:
        expression_sql = f"{returning}{from_sql}{where_sql}"
    sql = f"UPDATE {this} SET {set_sql}{expression_sql}{order}{limit}"
    return self.prepend_ctes(expression, sql)
def values_sql(self, expression: exp.Values) -> str:
    """Render a VALUES expression.

    When the dialect cannot use VALUES as a table in FROM/JOIN position,
    it is rewritten as a chain of SELECT ... UNION ALL statements, with
    the first SELECT carrying the column aliases.
    """
    # The VALUES clause is still valid in an `INSERT INTO ..` statement, for example
    if self.VALUES_AS_TABLE or not expression.find_ancestor(exp.From, exp.Join):
        args = self.expressions(expression)
        alias = self.sql(expression, "alias")
        values = f"VALUES{self.seg('')}{args}"
        values = (
            f"({values})"
            if self.WRAP_DERIVED_VALUES and (alias or isinstance(expression.parent, exp.From))
            else values
        )
        return f"{values} AS {alias}" if alias else values
    # Converts `VALUES...` expression into a series of select unions.
    alias_node = expression.args.get("alias")
    column_names = alias_node and alias_node.columns
    selects: t.List[exp.Query] = []
    for i, tup in enumerate(expression.expressions):
        row = tup.expressions
        # Only the first row needs aliases; the union inherits them.
        if i == 0 and column_names:
            row = [
                exp.alias_(value, column_name) for value, column_name in zip(row, column_names)
            ]
        selects.append(exp.Select(expressions=row))
    if self.pretty:
        # This may result in poor performance for large-cardinality `VALUES` tables, due to
        # the deep nesting of the resulting exp.Unions. If this is a problem, either increase
        # `sys.setrecursionlimit` to avoid RecursionErrors, or don't set `pretty`.
        query = reduce(lambda x, y: exp.union(x, y, distinct=False, copy=False), selects)
        return self.subquery_sql(query.subquery(alias_node and alias_node.this, copy=False))
    alias = f" AS {self.sql(alias_node, 'this')}" if alias_node else ""
    unions = " UNION ALL ".join(self.sql(select) for select in selects)
    return f"({unions}){alias}"
def var_sql(self, expression: exp.Var) -> str:
    """Render a variable/keyword token by its inner value."""
    return self.sql(expression, "this")
def into_sql(self, expression: exp.Into) -> str:
    """Render SELECT ... INTO [TEMPORARY|UNLOGGED] <table>."""
    if expression.args.get("temporary"):
        modifier = " TEMPORARY"
    elif expression.args.get("unlogged"):
        modifier = " UNLOGGED"
    else:
        modifier = ""
    return f"{self.seg('INTO')}{modifier} {self.sql(expression, 'this')}"
def from_sql(self, expression: exp.From) -> str:
    """Render a FROM clause with its source."""
    return f"{self.seg('FROM')} {self.sql(expression, 'this')}"
def group_sql(self, expression: exp.Group) -> str:
    """Render GROUP BY with its extensions: ALL, GROUPING SETS,
    CUBE / WITH CUBE, ROLLUP / WITH ROLLUP and WITH TOTALS."""
    group_by = self.op_expressions("GROUP BY", expression)
    if expression.args.get("all"):
        return f"{group_by} ALL"
    grouping_sets = self.expressions(expression, key="grouping_sets", indent=False)
    grouping_sets = (
        f"{self.seg('GROUPING SETS')} {self.wrap(grouping_sets)}" if grouping_sets else ""
    )
    cube = expression.args.get("cube", [])
    # A leading True marks the bare `WITH CUBE` form rather than CUBE(...).
    if seq_get(cube, 0) is True:
        return f"{group_by}{self.seg('WITH CUBE')}"
    else:
        cube_sql = self.expressions(expression, key="cube", indent=False)
        cube_sql = f"{self.seg('CUBE')} {self.wrap(cube_sql)}" if cube_sql else ""
    rollup = expression.args.get("rollup", [])
    # Same convention for `WITH ROLLUP` vs ROLLUP(...).
    if seq_get(rollup, 0) is True:
        return f"{group_by}{self.seg('WITH ROLLUP')}"
    else:
        rollup_sql = self.expressions(expression, key="rollup", indent=False)
        rollup_sql = f"{self.seg('ROLLUP')} {self.wrap(rollup_sql)}" if rollup_sql else ""
    groupings = csv(
        grouping_sets,
        cube_sql,
        rollup_sql,
        self.seg("WITH TOTALS") if expression.args.get("totals") else "",
        sep=self.GROUPINGS_SEP,
    )
    if expression.args.get("expressions") and groupings:
        group_by = f"{group_by}{self.GROUPINGS_SEP}"
    return f"{group_by}{groupings}"
def having_sql(self, expression: exp.Having) -> str:
    """Render a HAVING clause with an indented predicate."""
    predicate = self.indent(self.sql(expression, "this"))
    return f"{self.seg('HAVING')}{self.sep()}{predicate}"
def connect_sql(self, expression: exp.Connect) -> str:
    """Render a hierarchical-query clause: [START WITH ...] CONNECT BY ..."""
    start = self.sql(expression, "start")
    prefix = self.seg(f"START WITH {start}") if start else ""
    connect_by = self.seg(f"CONNECT BY {self.sql(expression, 'connect')}")
    return prefix + connect_by
def prior_sql(self, expression: exp.Prior) -> str:
    """Render a PRIOR operand inside a CONNECT BY condition."""
    return f"PRIOR {self.sql(expression, 'this')}"
def join_sql(self, expression: exp.Join) -> str:
    """Render a join: operator keywords (method/GLOBAL/side/kind/hint),
    the joined source, and an ON or USING condition.

    Joins with neither keywords nor a condition are rendered as a plain
    comma (or a bare APPLY-style lateral).
    """
    if not self.SEMI_ANTI_JOIN_WITH_SIDE and expression.kind in ("SEMI", "ANTI"):
        side = None
    else:
        side = expression.side
    op_sql = " ".join(
        op
        for op in (
            expression.method,
            "GLOBAL" if expression.args.get("global") else None,
            side,
            expression.kind,
            expression.hint if self.JOIN_HINTS else None,
        )
        if op
    )
    on_sql = self.sql(expression, "on")
    using = expression.args.get("using")
    if not on_sql and using:
        on_sql = csv(*(self.sql(column) for column in using))
    this = expression.this
    this_sql = self.sql(this)
    if on_sql:
        on_sql = self.indent(on_sql, skip_first=True)
        space = self.seg(" " * self.pad) if self.pretty else " "
        if using:
            on_sql = f"{space}USING ({on_sql})"
        else:
            on_sql = f"{space}ON {on_sql}"
    elif not op_sql:
        # CROSS/OUTER APPLY laterals carry their own join keyword.
        if isinstance(this, exp.Lateral) and this.args.get("cross_apply") is not None:
            return f" {this_sql}"
        return f", {this_sql}"
    op_sql = f"{op_sql} JOIN" if op_sql else "JOIN"
    return f"{self.seg(op_sql)} {this_sql}{on_sql}"
def lambda_sql(self, expression: exp.Lambda, arrow_sep: str = "->") -> str:
    """Render a lambda, parenthesizing multi-parameter argument lists."""
    params = self.expressions(expression, flat=True)
    if len(params.split(",")) > 1:
        params = f"({params})"
    return f"{params} {arrow_sep} {self.sql(expression, 'this')}"
def lateral_op(self, expression: exp.Lateral) -> str:
cross_apply = expression.args.get("cross_apply")
# https://www.mssqltips.com/sqlservertip/1958/sql-server-cross-apply-and-outer-apply/
if cross_apply is True:
op = "INNER JOIN "
elif cross_apply is False:
op = "LEFT JOIN "
else:
op = ""
return f"{op}LATERAL"
def lateral_sql(self, expression: exp.Lateral) -> str:
    """Render a lateral source: either Hive's LATERAL VIEW [OUTER] form or
    the standard [join-op] LATERAL <expr> [AS alias] form."""
    this = self.sql(expression, "this")
    if expression.args.get("view"):
        alias = expression.args["alias"]
        columns = self.expressions(alias, key="columns", flat=True)
        table = f" {alias.name}" if alias.name else ""
        columns = f" AS {columns}" if columns else ""
        op_sql = self.seg(f"LATERAL VIEW{' OUTER' if expression.args.get('outer') else ''}")
        return f"{op_sql}{self.sep()}{this}{table}{columns}"
    alias = self.sql(expression, "alias")
    alias = f" AS {alias}" if alias else ""
    return f"{self.lateral_op(expression)} {this}{alias}"
def limit_sql(self, expression: exp.Limit, top: bool = False) -> str:
    """Render LIMIT (or TOP when `top` is True) with optional offset and
    ClickHouse-style LIMIT ... BY expressions."""
    this = self.sql(expression, "this")
    args = [
        self._simplify_unless_literal(e) if self.LIMIT_ONLY_LITERALS else e
        for e in (expression.args.get(k) for k in ("offset", "expression"))
        if e
    ]
    args_sql = ", ".join(self.sql(e) for e in args)
    # TOP requires parentheses around non-numeric arguments.
    args_sql = f"({args_sql})" if top and any(not e.is_number for e in args) else args_sql
    expressions = self.expressions(expression, flat=True)
    expressions = f" BY {expressions}" if expressions else ""
    return f"{this}{self.seg('TOP' if top else 'LIMIT')} {args_sql}{expressions}"
def offset_sql(self, expression: exp.Offset) -> str:
    """Render an OFFSET clause, optionally simplifying the count to a
    literal and supporting OFFSET ... BY expressions."""
    this = self.sql(expression, "this")
    value = expression.expression
    value = self._simplify_unless_literal(value) if self.LIMIT_ONLY_LITERALS else value
    expressions = self.expressions(expression, flat=True)
    expressions = f" BY {expressions}" if expressions else ""
    return f"{this}{self.seg('OFFSET')} {self.sql(value)}{expressions}"
def setitem_sql(self, expression: exp.SetItem) -> str:
    """Render one item of a SET statement, including GLOBAL/kind/COLLATE."""
    kind = self.sql(expression, "kind")
    if kind:
        kind = f"{kind} "
    this = self.sql(expression, "this")
    rendered_exprs = self.expressions(expression)
    collate = self.sql(expression, "collate")
    if collate:
        collate = f" COLLATE {collate}"
    scope = "GLOBAL " if expression.args.get("global") else ""
    return f"{scope}{kind}{this}{rendered_exprs}{collate}"
def set_sql(self, expression: exp.Set) -> str:
    """Render a SET or UNSET statement, with Snowflake's optional TAG."""
    expressions = (
        f" {self.expressions(expression, flat=True)}" if expression.expressions else ""
    )
    tag = " TAG" if expression.args.get("tag") else ""
    return f"{'UNSET' if expression.args.get('unset') else 'SET'}{tag}{expressions}"
def pragma_sql(self, expression: exp.Pragma) -> str:
    """Render a PRAGMA statement."""
    return f"PRAGMA {self.sql(expression, 'this')}"
def lock_sql(self, expression: exp.Lock) -> str:
    """Render a locking-read clause: FOR UPDATE/SHARE [OF ...]
    [WAIT n | NOWAIT | SKIP LOCKED], or warn when unsupported."""
    if not self.LOCKING_READS_SUPPORTED:
        self.unsupported("Locking reads using 'FOR UPDATE/SHARE' are not supported")
        return ""
    lock_type = "FOR UPDATE" if expression.args["update"] else "FOR SHARE"
    expressions = self.expressions(expression, flat=True)
    expressions = f" OF {expressions}" if expressions else ""
    wait = expression.args.get("wait")
    if wait is not None:
        # A literal means WAIT <n>; otherwise truthy -> NOWAIT, falsy -> SKIP LOCKED.
        if isinstance(wait, exp.Literal):
            wait = f" WAIT {self.sql(wait)}"
        else:
            wait = " NOWAIT" if wait else " SKIP LOCKED"
    return f"{lock_type}{expressions}{wait or ''}"
def literal_sql(self, expression: exp.Literal) -> str:
    """Render a literal, quoting and escaping string literals with the
    dialect's quote characters."""
    text = expression.this or ""
    if expression.is_string:
        text = f"{self.dialect.QUOTE_START}{self.escape_str(text)}{self.dialect.QUOTE_END}"
    return text
def escape_str(self, text: str) -> str:
    """Escape a string literal's body: double up closing quotes, apply the
    dialect's inverse escape sequences, and in pretty mode protect raw
    newlines with a sentinel."""
    text = text.replace(self.dialect.QUOTE_END, self._escaped_quote_end)
    if self.dialect.INVERSE_ESCAPE_SEQUENCES:
        text = "".join(self.dialect.INVERSE_ESCAPE_SEQUENCES.get(ch, ch) for ch in text)
    elif self.pretty:
        text = text.replace("\n", self.SENTINEL_LINE_BREAK)
    return text
def loaddata_sql(self, expression: exp.LoadData) -> str:
    """Render a Hive LOAD DATA statement."""
    local = " LOCAL" if expression.args.get("local") else ""
    inpath = f" INPATH {self.sql(expression, 'inpath')}"
    overwrite = " OVERWRITE" if expression.args.get("overwrite") else ""
    target = f" INTO TABLE {self.sql(expression, 'this')}"
    partition = self.sql(expression, "partition")
    if partition:
        partition = f" {partition}"
    input_format = self.sql(expression, "input_format")
    if input_format:
        input_format = f" INPUTFORMAT {input_format}"
    serde = self.sql(expression, "serde")
    if serde:
        serde = f" SERDE {serde}"
    return f"LOAD DATA{local}{inpath}{overwrite}{target}{partition}{input_format}{serde}"
def null_sql(self, *_) -> str:
    """Render the NULL literal; all arguments are ignored."""
    return "NULL"
def boolean_sql(self, expression: exp.Boolean) -> str:
return "TRUE" if expression.this else "FALSE"
def order_sql(self, expression: exp.Order, flat: bool = False) -> str:
    """Render ORDER [SIBLINGS] BY with ClickHouse's optional
    INTERPOLATE (...) extension."""
    this = self.sql(expression, "this")
    this = f"{this} " if this else this
    siblings = "SIBLINGS " if expression.args.get("siblings") else ""
    order = self.op_expressions(f"{this}ORDER {siblings}BY", expression, flat=this or flat)  # type: ignore
    interpolated_values = [
        f"{self.sql(named_expression, 'alias')} AS {self.sql(named_expression, 'this')}"
        for named_expression in expression.args.get("interpolate") or []
    ]
    interpolate = (
        f" INTERPOLATE ({', '.join(interpolated_values)})" if interpolated_values else ""
    )
    return f"{order}{interpolate}"
def withfill_sql(self, expression: exp.WithFill) -> str:
    """Render a WITH FILL clause with optional FROM/TO/STEP parts."""
    pieces = ["WITH FILL"]
    for key, keyword in (("from", "FROM"), ("to", "TO"), ("step", "STEP")):
        value = self.sql(expression, key)
        if value:
            pieces.append(f"{keyword} {value}")
    return " ".join(pieces)
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Render a Hive CLUSTER BY clause."""
    return self.op_expressions("CLUSTER BY", expression)
def distribute_sql(self, expression: exp.Distribute) -> str:
    """Render a Hive DISTRIBUTE BY clause."""
    return self.op_expressions("DISTRIBUTE BY", expression)
def sort_sql(self, expression: exp.Sort) -> str:
    """Render a Hive SORT BY clause."""
    return self.op_expressions("SORT BY", expression)
def ordered_sql(self, expression: exp.Ordered) -> str:
    """Render one ORDER BY key with ASC/DESC and NULLS FIRST/LAST.

    NULLS FIRST/LAST is only emitted when it differs from the dialect's
    default null ordering; when the dialect cannot express it at all, an
    extra IS NULL sort key is synthesized instead (except inside window
    specs or with positional ordering, where a warning is raised).
    """
    desc = expression.args.get("desc")
    asc = not desc
    nulls_first = expression.args.get("nulls_first")
    nulls_last = not nulls_first
    nulls_are_large = self.dialect.NULL_ORDERING == "nulls_are_large"
    nulls_are_small = self.dialect.NULL_ORDERING == "nulls_are_small"
    nulls_are_last = self.dialect.NULL_ORDERING == "nulls_are_last"
    this = self.sql(expression, "this")
    sort_order = " DESC" if desc else (" ASC" if desc is False else "")
    nulls_sort_change = ""
    if nulls_first and (
        (asc and nulls_are_large) or (desc and nulls_are_small) or nulls_are_last
    ):
        nulls_sort_change = " NULLS FIRST"
    elif (
        nulls_last
        and ((asc and nulls_are_small) or (desc and nulls_are_large))
        and not nulls_are_last
    ):
        nulls_sort_change = " NULLS LAST"
    # If the NULLS FIRST/LAST clause is unsupported, we add another sort key to simulate it
    if nulls_sort_change and not self.NULL_ORDERING_SUPPORTED:
        window = expression.find_ancestor(exp.Window, exp.Select)
        if isinstance(window, exp.Window) and window.args.get("spec"):
            self.unsupported(
                f"'{nulls_sort_change.strip()}' translation not supported in window functions"
            )
            nulls_sort_change = ""
        elif self.NULL_ORDERING_SUPPORTED is None:
            if expression.this.is_int:
                self.unsupported(
                    f"'{nulls_sort_change.strip()}' translation not supported with positional ordering"
                )
            else:
                null_sort_order = " DESC" if nulls_sort_change == " NULLS FIRST" else ""
                this = f"CASE WHEN {this} IS NULL THEN 1 ELSE 0 END{null_sort_order}, {this}"
            nulls_sort_change = ""
    with_fill = self.sql(expression, "with_fill")
    with_fill = f" {with_fill}" if with_fill else ""
    return f"{this}{sort_order}{nulls_sort_change}{with_fill}"
def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str:
    """Render a MATCH_RECOGNIZE clause: PARTITION BY, ORDER BY, MEASURES,
    row/after-match options, PATTERN and DEFINE, followed by an optional
    alias."""
    partition = self.partition_by_sql(expression)
    order = self.sql(expression, "order")
    measures = self.expressions(expression, key="measures")
    measures = self.seg(f"MEASURES{self.seg(measures)}") if measures else ""
    rows = self.sql(expression, "rows")
    rows = self.seg(rows) if rows else ""
    after = self.sql(expression, "after")
    after = self.seg(after) if after else ""
    pattern = self.sql(expression, "pattern")
    pattern = self.seg(f"PATTERN ({pattern})") if pattern else ""
    definition_sqls = [
        f"{self.sql(definition, 'alias')} AS {self.sql(definition, 'this')}"
        for definition in expression.args.get("define", [])
    ]
    definitions = self.expressions(sqls=definition_sqls)
    define = self.seg(f"DEFINE{self.seg(definitions)}") if definitions else ""
    body = "".join(
        (
            partition,
            order,
            measures,
            rows,
            after,
            pattern,
            define,
        )
    )
    alias = self.sql(expression, "alias")
    alias = f" {alias}" if alias else ""
    return f"{self.seg('MATCH_RECOGNIZE')} {self.wrap(body)}{alias}"
def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
    """Append query modifiers (joins, WHERE, GROUP BY, ORDER BY, LIMIT, ...)
    to the already-rendered *sqls* fragments.

    The LIMIT/FETCH node may be converted to the dialect's preferred form
    (LIMIT_FETCH) before rendering. The relative order of the clauses below
    defines the canonical clause order for the generated statement.
    """
    limit = expression.args.get("limit")

    # Convert between LIMIT and FETCH depending on what the dialect supports.
    if self.LIMIT_FETCH == "LIMIT" and isinstance(limit, exp.Fetch):
        limit = exp.Limit(expression=exp.maybe_copy(limit.args.get("count")))
    elif self.LIMIT_FETCH == "FETCH" and isinstance(limit, exp.Limit):
        limit = exp.Fetch(direction="FIRST", count=exp.maybe_copy(limit.expression))

    options = self.expressions(expression, key="options")
    if options:
        options = f" OPTION{self.wrap(options)}"

    return csv(
        *sqls,
        *[self.sql(join) for join in expression.args.get("joins") or []],
        self.sql(expression, "connect"),
        self.sql(expression, "match"),
        *[self.sql(lateral) for lateral in expression.args.get("laterals") or []],
        self.sql(expression, "prewhere"),
        self.sql(expression, "where"),
        self.sql(expression, "group"),
        self.sql(expression, "having"),
        *[gen(self, expression) for gen in self.AFTER_HAVING_MODIFIER_TRANSFORMS.values()],
        self.sql(expression, "order"),
        *self.offset_limit_modifiers(expression, isinstance(limit, exp.Fetch), limit),
        *self.after_limit_modifiers(expression),
        options,
        sep="",
    )
def queryoption_sql(self, expression: exp.QueryOption) -> str:
    """Query options are dialect-specific; the base generator omits them."""
    return ""
def offset_limit_modifiers(
    self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit]
) -> t.List[str]:
    """Render OFFSET and LIMIT/FETCH in the correct relative order.

    OFFSET precedes a FETCH clause, while a LIMIT clause precedes OFFSET.
    """
    offset_sql = self.sql(expression, "offset")
    limit_sql = self.sql(limit)
    if fetch:
        return [offset_sql, limit_sql]
    return [limit_sql, offset_sql]
def after_limit_modifiers(self, expression: exp.Expression) -> t.List[str]:
    """Render clauses that follow LIMIT: row locks and the table sample."""
    locks = self.expressions(expression, key="locks", sep=" ")
    if locks:
        locks = f" {locks}"
    return [locks, self.sql(expression, "sample")]
def select_sql(self, expression: exp.Select) -> str:
    """Render a SELECT statement.

    Handles dialect variance around SELECT INTO (rewritten as CREATE TABLE AS
    when unsupported), TOP vs LIMIT, DISTINCT placement, and the STRUCT "kind"
    shorthand, then delegates clause rendering to query_modifiers.
    """
    into = expression.args.get("into")
    if not self.SUPPORTS_SELECT_INTO and into:
        # Detach INTO now; it is re-attached below as CREATE ... TABLE AS.
        into.pop()

    hint = self.sql(expression, "hint")
    distinct = self.sql(expression, "distinct")
    distinct = f" {distinct}" if distinct else ""
    kind = self.sql(expression, "kind")

    limit = expression.args.get("limit")
    if isinstance(limit, exp.Limit) and self.LIMIT_IS_TOP:
        # Dialects like T-SQL express LIMIT as SELECT TOP n.
        top = self.limit_sql(limit, top=True)
        limit.pop()
    else:
        top = ""

    expressions = self.expressions(expression)

    if kind:
        if kind in self.SELECT_KINDS:
            kind = f" AS {kind}"
        else:
            if kind == "STRUCT":
                # Fold the projection list into a single STRUCT(...) value,
                # turning aliases into named struct fields.
                expressions = self.expressions(
                    sqls=[
                        self.sql(
                            exp.Struct(
                                expressions=[
                                    exp.PropertyEQ(this=e.args.get("alias"), expression=e.this)
                                    if isinstance(e, exp.Alias)
                                    else e
                                    for e in expression.expressions
                                ]
                            )
                        )
                    ]
                )
            kind = ""

    # We use LIMIT_IS_TOP as a proxy for whether DISTINCT should go first because tsql and Teradata
    # are the only dialects that use LIMIT_IS_TOP and both place DISTINCT first.
    top_distinct = f"{distinct}{hint}{top}" if self.LIMIT_IS_TOP else f"{top}{hint}{distinct}"
    expressions = f"{self.sep()}{expressions}" if expressions else expressions
    sql = self.query_modifiers(
        expression,
        f"SELECT{top_distinct}{kind}{expressions}",
        self.sql(expression, "into", comment=False),
        self.sql(expression, "from", comment=False),
    )

    sql = self.prepend_ctes(expression, sql)

    if not self.SUPPORTS_SELECT_INTO and into:
        # Emulate SELECT INTO by wrapping the query in CREATE TABLE ... AS.
        if into.args.get("temporary"):
            table_kind = " TEMPORARY"
        elif self.SUPPORTS_UNLOGGED_TABLES and into.args.get("unlogged"):
            table_kind = " UNLOGGED"
        else:
            table_kind = ""
        sql = f"CREATE{table_kind} TABLE {self.sql(into.this)} AS {sql}"

    return sql
def schema_sql(self, expression: exp.Schema) -> str:
    """Render a schema: its name, its column list, or `name (columns)`."""
    name = self.sql(expression, "this")
    columns = self.schema_columns_sql(expression)
    if name and columns:
        return f"{name} {columns}"
    return name or columns
def schema_columns_sql(self, expression: exp.Schema) -> str:
    """Render the parenthesized column definitions, or "" when none exist."""
    if not expression.expressions:
        return ""
    return f"({self.sep('')}{self.expressions(expression)}{self.seg(')', sep='')}"
def star_sql(self, expression: exp.Star) -> str:
    """Render `*`, optionally followed by EXCEPT and REPLACE column lists."""
    sql = "*"
    # EXCEPT is rendered before REPLACE, matching the canonical clause order.
    for key in ("except", "replace"):
        columns = self.expressions(expression, key=key, flat=True)
        if columns:
            sql += f"{self.seg(self.STAR_MAPPING[key])} ({columns})"
    return sql
def parameter_sql(self, expression: exp.Parameter) -> str:
    """Render a parameter with the dialect's parameter token prefix."""
    return f"{self.PARAMETER_TOKEN}{self.sql(expression, 'this')}"
def sessionparameter_sql(self, expression: exp.SessionParameter) -> str:
    """Render a session parameter, e.g. `@@var` or `@@kind.var`."""
    kind = expression.text("kind")
    prefix = f"@@{kind}." if kind else "@@"
    return f"{prefix}{self.sql(expression, 'this')}"
def placeholder_sql(self, expression: exp.Placeholder) -> str:
    """Render a named placeholder when a name exists, else the anonymous `?`."""
    if expression.name:
        return f"{self.NAMED_PLACEHOLDER_TOKEN}{expression.name}"
    return "?"
def subquery_sql(self, expression: exp.Subquery, sep: str = " AS ") -> str:
    """Render a parenthesized subquery with optional alias and pivots."""
    alias_sql = self.sql(expression, "alias")
    if alias_sql:
        alias_sql = f"{sep}{alias_sql}"
    pivots_sql = self.expressions(expression, key="pivots", sep=" ", flat=True)
    if pivots_sql:
        pivots_sql = f" {pivots_sql}"
    rendered = self.query_modifiers(expression, self.wrap(expression), alias_sql, pivots_sql)
    return self.prepend_ctes(expression, rendered)
def qualify_sql(self, expression: exp.Qualify) -> str:
    """Render a QUALIFY clause with its predicate indented."""
    predicate = self.indent(self.sql(expression, "this"))
    return f"{self.seg('QUALIFY')}{self.sep()}{predicate}"
def set_operations(self, expression: exp.Union) -> str:
    """Render a chain of set operations (UNION/INTERSECT/EXCEPT) iteratively.

    A left-deep chain is flattened with an explicit stack rather than
    recursion, so deeply nested unions don't hit the recursion limit. The
    operator string (e.g. "UNION ALL") is interleaved between operands by
    pushing it onto the stack as a pre-rendered str.
    """
    sqls: t.List[str] = []
    stack: t.List[t.Union[str, exp.Expression]] = [expression]

    while stack:
        node = stack.pop()

        if isinstance(node, exp.Union):
            # Push right operand, then the operator, then the left operand,
            # so they pop back in left-to-right order.
            stack.append(node.expression)
            stack.append(
                self.maybe_comment(
                    getattr(self, f"{node.key}_op")(node),
                    expression=node.this,
                    comments=node.comments,
                )
            )
            stack.append(node.this)
        else:
            sqls.append(self.sql(node))

    this = self.sep().join(sqls)
    this = self.query_modifiers(expression, this)
    return self.prepend_ctes(expression, this)
def union_sql(self, expression: exp.Union) -> str:
    """Render a UNION by delegating to the shared set-operation logic."""
    return self.set_operations(expression)
def union_op(self, expression: exp.Union) -> str:
    """Render the UNION keyword with DISTINCT/ALL and optional BY NAME."""
    if expression.args.get("distinct"):
        # Only spell out DISTINCT when the dialect requires it explicitly.
        kind = " DISTINCT" if self.EXPLICIT_UNION else ""
    else:
        kind = " ALL"
    by_name = " BY NAME" if expression.args.get("by_name") else ""
    return f"UNION{kind}{by_name}"
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Render UNNEST(...), translating the offset between dialect styles.

    Dialects with WITH ORDINALITY fold the offset into the alias' column
    list; others (e.g. BigQuery-style) append WITH OFFSET instead.
    """
    args = self.expressions(expression, flat=True)

    alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    if self.UNNEST_WITH_ORDINALITY:
        if alias and isinstance(offset, exp.Expression):
            # The ordinality column is just another aliased output column.
            alias.append("columns", offset)

    if alias and self.dialect.UNNEST_COLUMN_ONLY:
        columns = alias.columns
        alias = self.sql(columns[0]) if columns else ""
    else:
        alias = self.sql(alias)

    alias = f" AS {alias}" if alias else alias
    if self.UNNEST_WITH_ORDINALITY:
        suffix = f" WITH ORDINALITY{alias}" if offset else alias
    else:
        if isinstance(offset, exp.Expression):
            suffix = f"{alias} WITH OFFSET AS {self.sql(offset)}"
        elif offset:
            suffix = f"{alias} WITH OFFSET"
        else:
            suffix = alias

    return f"UNNEST({args}){suffix}"
def prewhere_sql(self, expression: exp.PreWhere) -> str:
    """PREWHERE is ClickHouse-specific; the base generator drops it."""
    return ""
def where_sql(self, expression: exp.Where) -> str:
    """Render a WHERE clause with its predicate indented."""
    predicate = self.indent(self.sql(expression, "this"))
    return f"{self.seg('WHERE')}{self.sep()}{predicate}"
def window_sql(self, expression: exp.Window) -> str:
    """Render a window: either `func OVER (...)` or a named WINDOW definition.

    When the node lives under a SELECT's `windows` arg it is a named window
    (`name AS (...)`); otherwise it is an inline OVER clause.
    """
    this = self.sql(expression, "this")
    partition = self.partition_by_sql(expression)

    order = expression.args.get("order")
    order = self.order_sql(order, flat=True) if order else ""

    spec = self.sql(expression, "spec")
    alias = self.sql(expression, "alias")
    over = self.sql(expression, "over") or "OVER"

    this = f"{this} {'AS' if expression.arg_key == 'windows' else over}"

    first = expression.args.get("first")
    if first is None:
        first = ""
    else:
        first = "FIRST" if first else "LAST"

    if not partition and not order and not spec and alias:
        # A bare reference to a named window: `func OVER name`.
        return f"{this} {alias}"

    args = " ".join(arg for arg in (alias, first, partition, order, spec) if arg)
    return f"{this} ({args})"
def partition_by_sql(self, expression: exp.Window | exp.MatchRecognize) -> str:
    """Render a PARTITION BY clause, or "" when there are no partition keys."""
    keys = self.expressions(expression, key="partition_by", flat=True)
    if not keys:
        return ""
    return f"PARTITION BY {keys}"
def windowspec_sql(self, expression: exp.WindowSpec) -> str:
    """Render a window frame spec: `ROWS|RANGE BETWEEN <start> AND <end>`."""
    kind = self.sql(expression, "kind")
    start = csv(self.sql(expression, "start"), self.sql(expression, "start_side"), sep=" ")
    end = csv(self.sql(expression, "end"), self.sql(expression, "end_side"), sep=" ")
    if not end:
        # Default upper bound when no end was given.
        end = "CURRENT ROW"
    return f"{kind} BETWEEN {start} AND {end}"
def withingroup_sql(self, expression: exp.WithinGroup) -> str:
    """Render `<func> WITHIN GROUP (ORDER BY ...)`."""
    func = self.sql(expression, "this")
    # order_sql output carries a leading space; strip it inside the parens.
    order = self.sql(expression, "expression")[1:]
    return f"{func} WITHIN GROUP ({order})"
def between_sql(self, expression: exp.Between) -> str:
    """Render `x BETWEEN low AND high`."""
    subject, low, high = (self.sql(expression, key) for key in ("this", "low", "high"))
    return f"{subject} BETWEEN {low} AND {high}"
def bracket_sql(self, expression: exp.Bracket) -> str:
    """Render subscript access `expr[i, ...]`, adjusting index offsets.

    apply_index_offset shifts literal indices by the difference between the
    target dialect's INDEX_OFFSET and the offset recorded on the node.
    """
    expressions = apply_index_offset(
        expression.this,
        expression.expressions,
        self.dialect.INDEX_OFFSET - expression.args.get("offset", 0),
    )
    expressions_sql = ", ".join(self.sql(e) for e in expressions)
    return f"{self.sql(expression, 'this')}[{expressions_sql}]"
def all_sql(self, expression: exp.All) -> str:
    """Render an ALL quantifier with its subquery wrapped in parentheses."""
    return f"ALL {self.wrap(expression)}"
def any_sql(self, expression: exp.Any) -> str:
    """Render an ANY quantifier; queries attach without a space, wrapped."""
    node = expression.this
    this = self.sql(expression, "this")
    if not isinstance(node, (*exp.UNWRAPPED_QUERIES, exp.Paren)):
        return f"ANY {this}"
    if isinstance(node, exp.UNWRAPPED_QUERIES):
        this = self.wrap(this)
    return f"ANY{this}"
def exists_sql(self, expression: exp.Exists) -> str:
    """Render an EXISTS predicate with its subquery wrapped in parentheses."""
    return f"EXISTS{self.wrap(expression)}"
def case_sql(self, expression: exp.Case) -> str:
    """Render a CASE expression (simple or searched).

    When pretty-printing and the one-line form would exceed max_text_width,
    each WHEN/THEN/ELSE is placed on its own indented line instead.
    """
    this = self.sql(expression, "this")
    statements = [f"CASE {this}" if this else "CASE"]

    for e in expression.args["ifs"]:
        statements.append(f"WHEN {self.sql(e, 'this')}")
        statements.append(f"THEN {self.sql(e, 'true')}")

    default = self.sql(expression, "default")

    if default:
        statements.append(f"ELSE {default}")

    statements.append("END")

    if self.pretty and self.text_width(statements) > self.max_text_width:
        return self.indent("\n".join(statements), skip_first=True, skip_last=True)

    return " ".join(statements)
def constraint_sql(self, expression: exp.Constraint) -> str:
    """Render a named constraint: `CONSTRAINT <name> <body>`."""
    name = self.sql(expression, "this")
    body = self.expressions(expression, flat=True)
    return f"CONSTRAINT {name} {body}"
def nextvaluefor_sql(self, expression: exp.NextValueFor) -> str:
    """Render `NEXT VALUE FOR seq`, optionally with `OVER (ORDER BY ...)`."""
    order = expression.args.get("order")
    over = f" OVER ({self.order_sql(order, flat=True)})" if order else ""
    return f"NEXT VALUE FOR {self.sql(expression, 'this')}{over}"
def extract_sql(self, expression: exp.Extract) -> str:
    """Render `EXTRACT(part FROM source)`, unquoting the part if required."""
    if self.EXTRACT_ALLOWS_QUOTES:
        part = self.sql(expression, "this")
    else:
        # Some dialects reject a quoted date part, so use the bare name.
        part = expression.this.name
    source = self.sql(expression, "expression")
    return f"EXTRACT({part} FROM {source})"
def trim_sql(self, expression: exp.Trim) -> str:
    """Map LEADING/TRAILING trims to LTRIM/RTRIM; all else becomes TRIM."""
    position = self.sql(expression, "position")
    if position == "LEADING":
        return self.func("LTRIM", expression.this)
    if position == "TRAILING":
        return self.func("RTRIM", expression.this)
    return self.func("TRIM", expression.this, expression.expression)
def convert_concat_args(self, expression: exp.Concat | exp.ConcatWs) -> t.List[exp.Expression]:
    """Normalize CONCAT/CONCAT_WS arguments for the target dialect.

    Casts args to text for strict-concat dialects when a safe concat was
    requested, and wraps args in COALESCE for dialects whose CONCAT does
    not already treat NULL as ''.
    """
    args = expression.expressions
    if isinstance(expression, exp.ConcatWs):
        args = args[1:]  # Skip the delimiter

    if self.dialect.STRICT_STRING_CONCAT and expression.args.get("safe"):
        args = [exp.cast(e, "text") for e in args]

    if not self.dialect.CONCAT_COALESCE and expression.args.get("coalesce"):
        args = [exp.func("coalesce", e, exp.Literal.string("")) for e in args]

    return args
def concat_sql(self, expression: exp.Concat) -> str:
    """Render CONCAT, unwrapping a lone argument where one-arg calls fail."""
    args = self.convert_concat_args(expression)
    # Some dialects don't allow a single-argument CONCAT call
    if len(args) == 1 and not self.SUPPORTS_SINGLE_ARG_CONCAT:
        return self.sql(args[0])
    return self.func("CONCAT", *args)
def concatws_sql(self, expression: exp.ConcatWs) -> str:
    """Render CONCAT_WS(sep, ...) with dialect-normalized arguments."""
    return self.func(
        "CONCAT_WS", seq_get(expression.expressions, 0), *self.convert_concat_args(expression)
    )
def check_sql(self, expression: exp.Check) -> str:
    """Render a CHECK constraint."""
    condition = self.sql(expression, key="this")
    return f"CHECK ({condition})"
def foreignkey_sql(self, expression: exp.ForeignKey) -> str:
    """Render a FOREIGN KEY with REFERENCES and ON DELETE/UPDATE actions."""
    columns = self.expressions(expression, flat=True)
    parts = [f"FOREIGN KEY ({columns})"]
    reference = self.sql(expression, "reference")
    if reference:
        parts.append(f" {reference}")
    delete = self.sql(expression, "delete")
    if delete:
        parts.append(f" ON DELETE {delete}")
    update = self.sql(expression, "update")
    if update:
        parts.append(f" ON UPDATE {update}")
    return "".join(parts)
def primarykey_sql(self, expression: exp.PrimaryKey) -> str:
    """Render a PRIMARY KEY constraint with its columns and options.

    NOTE(review): the annotation previously said exp.ForeignKey, which looks
    like a copy-paste from foreignkey_sql — corrected to exp.PrimaryKey since
    this method renders PRIMARY KEY; confirm against callers.
    """
    expressions = self.expressions(expression, flat=True)
    options = self.expressions(expression, key="options", flat=True, sep=" ")
    options = f" {options}" if options else ""
    return f"PRIMARY KEY ({expressions}){options}"
def if_sql(self, expression: exp.If) -> str:
    """Render IF(cond, a, b) by lowering it to an equivalent CASE expression."""
    return self.case_sql(exp.Case(ifs=[expression], default=expression.args.get("false")))
def matchagainst_sql(self, expression: exp.MatchAgainst) -> str:
    """Render full-text search: `MATCH (cols) AGAINST(expr [modifier])`."""
    modifier = expression.args.get("modifier")
    modifier = f" {modifier}" if modifier else ""
    match = self.func("MATCH", *expression.expressions)
    against = self.sql(expression, "this")
    return f"{match} AGAINST({against}{modifier})"
def jsonkeyvalue_sql(self, expression: exp.JSONKeyValue) -> str:
    """Render a JSON key/value pair using the dialect's pair separator."""
    key = self.sql(expression, "this")
    value = self.sql(expression, "expression")
    return f"{key}{self.JSON_KEY_VALUE_PAIR_SEP} {value}"
def jsonpath_sql(self, expression: exp.JSONPath) -> str:
    """Render a JSON path as a quoted string, dropping any leading dot."""
    quote_start = self.dialect.QUOTE_START
    quote_end = self.dialect.QUOTE_END
    path = self.expressions(expression, sep="", flat=True)
    return f"{quote_start}{path.lstrip('.')}{quote_end}"
def json_path_part(self, expression: int | str | exp.JSONPathPart) -> str:
    """Render a single JSON path segment.

    exp.JSONPathPart nodes are dispatched through TRANSFORMS, ints are
    emitted verbatim, and strings are quoted using the dialect's escape
    style (single- or double-quoted).
    """
    if isinstance(expression, exp.JSONPathPart):
        transform = self.TRANSFORMS.get(expression.__class__)
        if not callable(transform):
            self.unsupported(f"Unsupported JSONPathPart type {expression.__class__.__name__}")
            return ""

        return transform(self, expression)

    if isinstance(expression, int):
        return str(expression)

    if self.JSON_PATH_SINGLE_QUOTE_ESCAPE:
        escaped = expression.replace("'", "\\'")
        # Bug fix: interpolate the *escaped* string, not the raw input —
        # previously the replace() result was discarded, so embedded single
        # quotes ended up unescaped inside the quoted segment.
        escaped = f"\\'{escaped}\\'"
    else:
        escaped = expression.replace('"', '\\"')
        escaped = f'"{escaped}"'

    return escaped
def formatjson_sql(self, expression: exp.FormatJson) -> str:
    """Render `expr FORMAT JSON` (SQL/JSON input clause)."""
    return f"{self.sql(expression, 'this')} FORMAT JSON"
def jsonobject_sql(self, expression: exp.JSONObject | exp.JSONObjectAgg) -> str:
    """Render JSON_OBJECT / JSON_OBJECTAGG with its SQL/JSON clauses.

    The trailing clauses (NULL handling, UNIQUE KEYS, RETURNING, ENCODING)
    are appended inside the closing paren via func's `suffix` hook.
    """
    null_handling = expression.args.get("null_handling")
    null_handling = f" {null_handling}" if null_handling else ""

    unique_keys = expression.args.get("unique_keys")
    if unique_keys is not None:
        unique_keys = f" {'WITH' if unique_keys else 'WITHOUT'} UNIQUE KEYS"
    else:
        unique_keys = ""

    return_type = self.sql(expression, "return_type")
    return_type = f" RETURNING {return_type}" if return_type else ""
    encoding = self.sql(expression, "encoding")
    encoding = f" ENCODING {encoding}" if encoding else ""

    return self.func(
        "JSON_OBJECT" if isinstance(expression, exp.JSONObject) else "JSON_OBJECTAGG",
        *expression.expressions,
        suffix=f"{null_handling}{unique_keys}{return_type}{encoding})",
    )
def jsonobjectagg_sql(self, expression: exp.JSONObjectAgg) -> str:
    """JSON_OBJECTAGG shares JSON_OBJECT's rendering logic."""
    return self.jsonobject_sql(expression)
def jsonarray_sql(self, expression: exp.JSONArray) -> str:
    """Render JSON_ARRAY(...) with NULL handling, RETURNING type and STRICT."""
    suffix_parts = []
    null_handling = expression.args.get("null_handling")
    if null_handling:
        suffix_parts.append(f" {null_handling}")
    return_type = self.sql(expression, "return_type")
    if return_type:
        suffix_parts.append(f" RETURNING {return_type}")
    if expression.args.get("strict"):
        suffix_parts.append(" STRICT")
    suffix = "".join(suffix_parts)
    return self.func("JSON_ARRAY", *expression.expressions, suffix=f"{suffix})")
def jsonarrayagg_sql(self, expression: exp.JSONArrayAgg) -> str:
    """Render JSON_ARRAYAGG with ORDER BY, NULL handling, RETURNING, STRICT."""
    this = self.sql(expression, "this")
    order = self.sql(expression, "order")
    null_handling = expression.args.get("null_handling")
    null_handling = f" {null_handling}" if null_handling else ""
    return_type = self.sql(expression, "return_type")
    return_type = f" RETURNING {return_type}" if return_type else ""
    strict = " STRICT" if expression.args.get("strict") else ""
    return self.func(
        "JSON_ARRAYAGG",
        this,
        suffix=f"{order}{null_handling}{return_type}{strict})",
    )
def jsoncolumndef_sql(self, expression: exp.JSONColumnDef) -> str:
    """Render a JSON_TABLE column definition, including NESTED columns."""
    path = self.sql(expression, "path")
    if path:
        path = f" PATH {path}"

    nested = self.sql(expression, "nested_schema")
    if nested:
        return f"NESTED{path} {nested}"

    kind = self.sql(expression, "kind")
    if kind:
        kind = f" {kind}"
    return f"{self.sql(expression, 'this')}{kind}{path}"
def jsonschema_sql(self, expression: exp.JSONSchema) -> str:
    """Render the COLUMNS(...) schema of a JSON_TABLE call."""
    return self.func("COLUMNS", *expression.expressions)
def jsontable_sql(self, expression: exp.JSONTable) -> str:
    """Render JSON_TABLE(doc, path [error/empty handling] COLUMNS(...))."""
    this = self.sql(expression, "this")
    path = self.sql(expression, "path")
    path = f", {path}" if path else ""
    error_handling = expression.args.get("error_handling")
    error_handling = f" {error_handling}" if error_handling else ""
    empty_handling = expression.args.get("empty_handling")
    empty_handling = f" {empty_handling}" if empty_handling else ""
    schema = self.sql(expression, "schema")
    return self.func(
        "JSON_TABLE", this, suffix=f"{path}{error_handling}{empty_handling} {schema})"
    )
def openjsoncolumndef_sql(self, expression: exp.OpenJSONColumnDef) -> str:
    """Render an OPENJSON WITH-clause column: `name type [path] [AS JSON]`."""
    parts = [self.sql(expression, "this"), " ", self.sql(expression, "kind")]
    path = self.sql(expression, "path")
    if path:
        parts.append(f" {path}")
    if expression.args.get("as_json"):
        parts.append(" AS JSON")
    return "".join(parts)
def openjson_sql(self, expression: exp.OpenJSON) -> str:
    """Render OPENJSON(doc[, path]) with an optional WITH (columns) clause."""
    this = self.sql(expression, "this")
    path = self.sql(expression, "path")
    path = f", {path}" if path else ""
    expressions = self.expressions(expression)
    with_ = (
        f" WITH ({self.seg(self.indent(expressions), sep='')}{self.seg(')', sep='')}"
        if expressions
        else ""
    )
    return f"OPENJSON({this}{path}){with_}"
def in_sql(self, expression: exp.In) -> str:
    """Render an IN predicate over a subquery, unnest, field, or value tuple."""
    is_global = " GLOBAL" if expression.args.get("is_global") else ""
    query = expression.args.get("query")
    unnest = expression.args.get("unnest")
    field = expression.args.get("field")

    if query:
        rhs = self.wrap(self.sql(query))
    elif unnest:
        rhs = self.in_unnest_op(unnest)
    elif field:
        rhs = self.sql(field)
    else:
        rhs = f"({self.expressions(expression, flat=True)})"

    return f"{self.sql(expression, 'this')}{is_global} IN {rhs}"
def in_unnest_op(self, unnest: exp.Unnest) -> str:
    """Render the right side of `x IN UNNEST(...)` as a scalar subquery."""
    return f"(SELECT {self.sql(unnest)})"
def interval_sql(self, expression: exp.Interval) -> str:
    """Render an INTERVAL literal in the dialect's preferred shape.

    Either the single-string form `INTERVAL '5 DAY'`, or the two-token form
    `INTERVAL 5 DAY` / `INTERVAL (expr) DAY` for non-literal values.
    """
    unit = self.sql(expression, "unit")
    if not self.INTERVAL_ALLOWS_PLURAL_FORM:
        unit = self.TIME_PART_SINGULARS.get(unit, unit)
    unit = f" {unit}" if unit else ""

    if self.SINGLE_STRING_INTERVAL:
        this = expression.this.name if expression.this else ""
        return f"INTERVAL '{this}{unit}'" if this else f"INTERVAL{unit}"

    this = self.sql(expression, "this")
    if this:
        # Only known-safe value expressions may appear unparenthesized.
        unwrapped = isinstance(expression.this, self.UNWRAPPED_INTERVAL_VALUES)
        this = f" {this}" if unwrapped else f" ({this})"

    return f"INTERVAL{this}{unit}"
def return_sql(self, expression: exp.Return) -> str:
    """Render a RETURN statement."""
    return f"RETURN {self.sql(expression, 'this')}"
def reference_sql(self, expression: exp.Reference) -> str:
    """Render a REFERENCES clause with optional column list and options."""
    target = self.sql(expression, "this")
    columns = self.expressions(expression, flat=True)
    columns = f"({columns})" if columns else ""
    options = self.expressions(expression, key="options", flat=True, sep=" ")
    options = f" {options}" if options else ""
    return f"REFERENCES {target}{columns}{options}"
def anonymous_sql(self, expression: exp.Anonymous) -> str:
    """Render an unrecognized function call verbatim as `name(args)`."""
    return self.func(self.sql(expression, "this"), *expression.expressions)
def paren_sql(self, expression: exp.Paren) -> str:
    """Render a parenthesized expression, indenting the inner content."""
    sql = self.seg(self.indent(self.sql(expression, "this")), sep="")
    return f"({sql}{self.seg(')', sep='')}"
def neg_sql(self, expression: exp.Neg) -> str:
    """Render unary negation.

    A space is inserted before an operand that itself starts with `-` so
    "- -5" doesn't collapse into "--5" (a SQL line comment). Using
    startswith also avoids an IndexError when the operand renders empty.
    """
    this_sql = self.sql(expression, "this")
    sep = " " if this_sql.startswith("-") else ""
    return f"-{sep}{this_sql}"
def not_sql(self, expression: exp.Not) -> str:
    """Render logical negation as `NOT expr`."""
    return f"NOT {self.sql(expression, 'this')}"
def alias_sql(self, expression: exp.Alias) -> str:
    """Render `expr AS alias`, omitting the alias when absent."""
    this = self.sql(expression, "this")
    alias = self.sql(expression, "alias")
    if not alias:
        return this
    return f"{this} AS {alias}"
def pivotalias_sql(self, expression: exp.PivotAlias) -> str:
    """Render a pivot alias, converting between identifier and string forms.

    The alias node is mutated in place to match the dialect's expectation
    (UNPIVOT_ALIASES_ARE_IDENTIFIERS) before standard alias rendering.
    """
    alias = expression.args["alias"]
    identifier_alias = isinstance(alias, exp.Identifier)

    if identifier_alias and not self.UNPIVOT_ALIASES_ARE_IDENTIFIERS:
        alias.replace(exp.Literal.string(alias.output_name))
    elif not identifier_alias and self.UNPIVOT_ALIASES_ARE_IDENTIFIERS:
        alias.replace(exp.to_identifier(alias.output_name))

    return self.alias_sql(expression)
def aliases_sql(self, expression: exp.Aliases) -> str:
    """Render a multi-alias: `expr AS (a, b, ...)`."""
    return f"{self.sql(expression, 'this')} AS ({self.expressions(expression, flat=True)})"
def atindex_sql(self, expression: exp.AtTimeZone) -> str:
    """Render `expr AT index`.

    NOTE(review): the parameter is annotated exp.AtTimeZone although the
    method name suggests an AtIndex node — presumably the node shares the
    same arg layout ("this"/"expression"); confirm against the parser.
    """
    this = self.sql(expression, "this")
    index = self.sql(expression, "expression")
    return f"{this} AT {index}"
def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
    """Render `expr AT TIME ZONE zone`."""
    return f"{self.sql(expression, 'this')} AT TIME ZONE {self.sql(expression, 'zone')}"
def fromtimezone_sql(self, expression: exp.FromTimeZone) -> str:
    """Render a from-timezone conversion by chaining two AT TIME ZONE casts,
    reinterpreting in the source zone and then converting to UTC."""
    this = self.sql(expression, "this")
    zone = self.sql(expression, "zone")
    return f"{this} AT TIME ZONE {zone} AT TIME ZONE 'UTC'"
def add_sql(self, expression: exp.Add) -> str:
    """Render addition with the `+` operator."""
    return self.binary(expression, "+")
def and_sql(
    self, expression: exp.And, stack: t.Optional[t.List[str | exp.Expression]] = None
) -> str:
    """Render AND via the shared, stack-based connector logic."""
    return self.connector_sql(expression, "AND", stack)
def or_sql(
    self, expression: exp.Or, stack: t.Optional[t.List[str | exp.Expression]] = None
) -> str:
    """Render OR via the shared, stack-based connector logic."""
    return self.connector_sql(expression, "OR", stack)
def xor_sql(
    self, expression: exp.Xor, stack: t.Optional[t.List[str | exp.Expression]] = None
) -> str:
    """Render XOR via the shared, stack-based connector logic."""
    return self.connector_sql(expression, "XOR", stack)
def connector_sql(
    self,
    expression: exp.Connector,
    op: str,
    stack: t.Optional[t.List[str | exp.Expression]] = None,
) -> str:
    """Render a chain of logical connectors (AND/OR/XOR) without recursion.

    Called in two modes: with a *stack*, it pushes this node's operands and
    operator onto the caller's stack and returns the operator string; without
    one, it drives the traversal itself, collecting rendered operands into
    *sqls* and joining them with spaces or newlines when pretty-printing
    would overflow max_text_width.
    """
    if stack is not None:
        # Inner-node mode: contribute operands/operator to the caller's stack.
        if expression.expressions:
            stack.append(self.expressions(expression, sep=f" {op} "))
        else:
            stack.append(expression.right)
            if expression.comments:
                for comment in expression.comments:
                    op += f" /*{self.pad_comment(comment)}*/"
            stack.extend((op, expression.left))
        return op

    # Driver mode: iterative traversal of the connector tree.
    stack = [expression]
    sqls: t.List[str] = []
    ops = set()

    while stack:
        node = stack.pop()
        if isinstance(node, exp.Connector):
            ops.add(getattr(self, f"{node.key}_sql")(node, stack))
        else:
            sql = self.sql(node)
            if sqls and sqls[-1] in ops:
                # Attach the operand to the operator emitted just before it.
                sqls[-1] += f" {sql}"
            else:
                sqls.append(sql)

    sep = "\n" if self.pretty and self.text_width(sqls) > self.max_text_width else " "
    return sep.join(sqls)
def bitwiseand_sql(self, expression: exp.BitwiseAnd) -> str:
    """Render bitwise AND with the `&` operator."""
    return self.binary(expression, "&")
def bitwiseleftshift_sql(self, expression: exp.BitwiseLeftShift) -> str:
    """Render bitwise left shift with the `<<` operator."""
    return self.binary(expression, "<<")
def bitwisenot_sql(self, expression: exp.BitwiseNot) -> str:
    """Render bitwise complement with the unary `~` operator."""
    return f"~{self.sql(expression, 'this')}"
def bitwiseor_sql(self, expression: exp.BitwiseOr) -> str:
    """Render bitwise OR with the `|` operator."""
    return self.binary(expression, "|")
def bitwiserightshift_sql(self, expression: exp.BitwiseRightShift) -> str:
    """Render bitwise right shift with the `>>` operator."""
    return self.binary(expression, ">>")
def bitwisexor_sql(self, expression: exp.BitwiseXor) -> str:
    """Render bitwise XOR with the `^` operator."""
    return self.binary(expression, "^")
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render `CAST(expr AS type [FORMAT fmt] [action])`.

    *safe_prefix* lets subclasses reuse this for TRY_CAST-style variants.
    """
    format_sql = self.sql(expression, "format")
    format_sql = f" FORMAT {format_sql}" if format_sql else ""
    to_sql = self.sql(expression, "to")
    to_sql = f" {to_sql}" if to_sql else ""
    action = self.sql(expression, "action")
    action = f" {action}" if action else ""
    return f"{safe_prefix or ''}CAST({self.sql(expression, 'this')} AS{to_sql}{format_sql}{action})"
def currentdate_sql(self, expression: exp.CurrentDate) -> str:
    """Render CURRENT_DATE, with an optional timezone argument."""
    zone = self.sql(expression, "this")
    if zone:
        return f"CURRENT_DATE({zone})"
    return "CURRENT_DATE"
def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str:
    """Render CURRENT_TIMESTAMP, optionally with a precision/zone argument."""
    return self.func("CURRENT_TIMESTAMP", expression.this)
def collate_sql(self, expression: exp.Collate) -> str:
    """Render COLLATE as a function or a binary operator per dialect."""
    if not self.COLLATE_IS_FUNC:
        return self.binary(expression, "COLLATE")
    return self.function_fallback_sql(expression)
def command_sql(self, expression: exp.Command) -> str:
    """Render an opaque command: the keyword plus its raw trailing text."""
    return f"{self.sql(expression, 'this')} {expression.text('expression').strip()}"
def comment_sql(self, expression: exp.Comment) -> str:
    """Render `COMMENT [IF EXISTS] ON <kind> <target> IS <text>`."""
    target = self.sql(expression, "this")
    kind = expression.args["kind"]
    exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
    text = self.sql(expression, "expression")
    return f"COMMENT{exists_sql}ON {kind} {target} IS {text}"
def mergetreettlaction_sql(self, expression: exp.MergeTreeTTLAction) -> str:
    """Render a ClickHouse TTL action: DELETE, RECOMPRESS, TO DISK, TO VOLUME."""
    parts = [self.sql(expression, "this")]
    if expression.args.get("delete"):
        parts.append(" DELETE")
    # Keyword order matters: RECOMPRESS, then TO DISK, then TO VOLUME.
    for arg_key, keyword in (
        ("recompress", "RECOMPRESS"),
        ("to_disk", "TO DISK"),
        ("to_volume", "TO VOLUME"),
    ):
        value = self.sql(expression, arg_key)
        if value:
            parts.append(f" {keyword} {value}")
    return "".join(parts)
def mergetreettl_sql(self, expression: exp.MergeTreeTTL) -> str:
    """Render a ClickHouse TTL clause with optional WHERE/GROUP BY/SET parts.

    A single bare TTL expression stays on one line; otherwise each piece is
    placed on its own segment.
    """
    where = self.sql(expression, "where")
    group = self.sql(expression, "group")
    aggregates = self.expressions(expression, key="aggregates")
    aggregates = self.seg("SET") + self.seg(aggregates) if aggregates else ""

    if not (where or group or aggregates) and len(expression.expressions) == 1:
        return f"TTL {self.expressions(expression, flat=True)}"

    return f"TTL{self.seg(self.expressions(expression))}{where}{group}{aggregates}"
def transaction_sql(self, expression: exp.Transaction) -> str:
    """Render a transaction start; the base generator emits a bare BEGIN."""
    return "BEGIN"
def commit_sql(self, expression: exp.Commit) -> str:
    """Render COMMIT, adding AND [NO] CHAIN when the chain flag is present."""
    chain = expression.args.get("chain")
    suffix = ""
    if chain is not None:
        suffix = " AND CHAIN" if chain else " AND NO CHAIN"
    return f"COMMIT{suffix}"
def rollback_sql(self, expression: exp.Rollback) -> str:
    """Render ROLLBACK, optionally targeting a savepoint."""
    savepoint = expression.args.get("savepoint")
    if savepoint:
        return f"ROLLBACK TO {savepoint}"
    return "ROLLBACK"
def altercolumn_sql(self, expression: exp.AlterColumn) -> str:
    """Render an ALTER COLUMN action.

    Exactly one of: SET DATA TYPE (with optional COLLATE/USING), SET DEFAULT,
    COMMENT, or DROP DEFAULT. An unset/unknown combination falls through to
    DROP DEFAULT after flagging it as unsupported.
    """
    this = self.sql(expression, "this")

    dtype = self.sql(expression, "dtype")
    if dtype:
        collate = self.sql(expression, "collate")
        collate = f" COLLATE {collate}" if collate else ""
        using = self.sql(expression, "using")
        using = f" USING {using}" if using else ""
        return f"ALTER COLUMN {this} SET DATA TYPE {dtype}{collate}{using}"

    default = self.sql(expression, "default")
    if default:
        return f"ALTER COLUMN {this} SET DEFAULT {default}"

    comment = self.sql(expression, "comment")
    if comment:
        return f"ALTER COLUMN {this} COMMENT {comment}"

    if not expression.args.get("drop"):
        self.unsupported("Unsupported ALTER COLUMN syntax")

    return f"ALTER COLUMN {this} DROP DEFAULT"
def renametable_sql(self, expression: exp.RenameTable) -> str:
    """Render a `RENAME TO target` action, stripping db qualifiers when the
    dialect does not allow qualified rename targets."""
    if not self.RENAME_TABLE_WITH_DB:
        # Remove db from tables
        expression = expression.transform(
            lambda n: exp.table_(n.this) if isinstance(n, exp.Table) else n
        ).assert_is(exp.RenameTable)
    this = self.sql(expression, "this")
    return f"RENAME TO {this}"
def renamecolumn_sql(self, expression: exp.RenameColumn) -> str:
    """Render `RENAME COLUMN [IF EXISTS] old TO new`."""
    exists = " IF EXISTS" if expression.args.get("exists") else ""
    source = self.sql(expression, "this")
    target = self.sql(expression, "to")
    return f"RENAME COLUMN{exists} {source} TO {target}"
def altertable_sql(self, expression: exp.AlterTable) -> str:
    """Render an ALTER TABLE statement.

    The first action's type determines how the action list is rendered:
    column definitions become ADD COLUMN clauses, schemas become
    ADD COLUMNS, and everything else is a flat comma-separated list.
    """
    actions = expression.args["actions"]

    if isinstance(actions[0], exp.ColumnDef):
        actions = self.add_column_sql(expression)
    elif isinstance(actions[0], exp.Schema):
        actions = self.expressions(expression, key="actions", prefix="ADD COLUMNS ")
    else:
        # The former exp.Delete branch was byte-identical to this default,
        # so the redundant elif was folded away (no behavior change).
        actions = self.expressions(expression, key="actions", flat=True)

    exists = " IF EXISTS" if expression.args.get("exists") else ""
    only = " ONLY" if expression.args.get("only") else ""
    options = self.expressions(expression, key="options")
    options = f", {options}" if options else ""
    return f"ALTER TABLE{exists}{only} {self.sql(expression, 'this')} {actions}{options}"
def add_column_sql(self, expression: exp.AlterTable) -> str:
    """Render ADD COLUMN actions, with or without the COLUMN keyword."""
    if not self.ALTER_TABLE_INCLUDE_COLUMN_KEYWORD:
        return f"ADD {self.expressions(expression, key='actions', flat=True)}"
    return self.expressions(
        expression,
        key="actions",
        prefix="ADD COLUMN ",
    )
def droppartition_sql(self, expression: exp.DropPartition) -> str:
    """Render a `DROP [IF EXISTS] PARTITION ...` action."""
    exists = " IF EXISTS " if expression.args.get("exists") else " "
    return f"DROP{exists}{self.expressions(expression)}"
def addconstraint_sql(self, expression: exp.AddConstraint) -> str:
    """Render an ADD <constraint> action."""
    return f"ADD {self.expressions(expression)}"
def distinct_sql(self, expression: exp.Distinct) -> str:
    """Render DISTINCT, optionally with arguments and an ON clause.

    For dialects without multi-argument DISTINCT, several arguments are
    emulated with a CASE that yields NULL when any argument is NULL, else
    the tuple of arguments.
    """
    this = self.expressions(expression, flat=True)

    if not self.MULTI_ARG_DISTINCT and len(expression.expressions) > 1:
        case = exp.case()

        for arg in expression.expressions:
            case = case.when(arg.is_(exp.null()), exp.null())

        this = self.sql(case.else_(f"({this})"))

    this = f" {this}" if this else ""

    on = self.sql(expression, "on")
    on = f" ON {on}" if on else ""
    return f"DISTINCT{this}{on}"
def ignorenulls_sql(self, expression: exp.IgnoreNulls) -> str:
    """Render IGNORE NULLS, embedded inside the function when required."""
    return self._embed_ignore_nulls(expression, "IGNORE NULLS")
def respectnulls_sql(self, expression: exp.RespectNulls) -> str:
    """Render RESPECT NULLS, embedded inside the function when required."""
    return self._embed_ignore_nulls(expression, "RESPECT NULLS")
def havingmax_sql(self, expression: exp.HavingMax) -> str:
    """Render `expr HAVING MAX|MIN value`."""
    keyword = "MAX" if expression.args.get("max") else "MIN"
    subject = self.sql(expression, "this")
    value = self.sql(expression, "expression")
    return f"{subject} HAVING {keyword} {value}"
def _embed_ignore_nulls(self, expression: exp.IgnoreNulls | exp.RespectNulls, text: str) -> str:
    """Place IGNORE/RESPECT NULLS inside the aggregate call when the dialect
    requires it (IGNORE_NULLS_IN_FUNC), otherwise append it after.

    When modifiers like HAVING MAX / ORDER / LIMIT wrap the aggregate, the
    nulls node is re-parented inside the innermost modifier so it ends up
    adjacent to the aggregate's argument; the `inline` meta flag prevents
    re-processing on the recursive render.
    """
    if self.IGNORE_NULLS_IN_FUNC and not expression.meta.get("inline"):
        # The first modifier here will be the one closest to the AggFunc's arg
        mods = sorted(
            expression.find_all(exp.HavingMax, exp.Order, exp.Limit),
            key=lambda x: 0
            if isinstance(x, exp.HavingMax)
            else (1 if isinstance(x, exp.Order) else 2),
        )

        if mods:
            mod = mods[0]
            this = expression.__class__(this=mod.this.copy())
            this.meta["inline"] = True
            mod.this.replace(this)
            return self.sql(expression.this)

        agg_func = expression.find(exp.AggFunc)

        if agg_func:
            # Splice the text just before the aggregate's closing paren.
            return self.sql(agg_func)[:-1] + f" {text})"

    return f"{self.sql(expression, 'this')} {text}"
def intdiv_sql(self, expression: exp.IntDiv) -> str:
    """Render integer division by lowering it to CAST(a / b AS INT)."""
    quotient = exp.Div(this=expression.this, expression=expression.expression)
    cast = exp.Cast(this=quotient, to=exp.DataType(this=exp.DataType.Type.INT))
    return self.sql(cast)
def dpipe_sql(self, expression: exp.DPipe) -> str:
    """Render `||` concatenation; strict dialects get CONCAT with text casts
    when a NULL-safe concat was requested."""
    if self.dialect.STRICT_STRING_CONCAT and expression.args.get("safe"):
        return self.func("CONCAT", *(exp.cast(e, "text") for e in expression.flatten()))
    return self.binary(expression, "||")
def div_sql(self, expression: exp.Div) -> str:
    """Render division, reconciling typed vs. float semantics per dialect.

    - safe: NULL on division by zero, emulated via NULLIF when unsupported
    - typed: integer/integer yields an integer; emulated via casts when the
      source/target dialects disagree on the default behavior
    """
    l, r = expression.left, expression.right

    if not self.dialect.SAFE_DIVISION and expression.args.get("safe"):
        r.replace(exp.Nullif(this=r.copy(), expression=exp.Literal.number(0)))

    if self.dialect.TYPED_DIVISION and not expression.args.get("typed"):
        if not l.is_type(*exp.DataType.REAL_TYPES) and not r.is_type(*exp.DataType.REAL_TYPES):
            # Force float division by widening the left operand.
            l.replace(exp.cast(l.copy(), to=exp.DataType.Type.DOUBLE))

    elif not self.dialect.TYPED_DIVISION and expression.args.get("typed"):
        if l.is_type(*exp.DataType.INTEGER_TYPES) and r.is_type(*exp.DataType.INTEGER_TYPES):
            # Emulate integer division by casting the float result back down.
            return self.sql(
                exp.cast(
                    l / r,
                    to=exp.DataType.Type.BIGINT,
                )
            )

    return self.binary(expression, "/")
def overlaps_sql(self, expression: exp.Overlaps) -> str:
    """Render the OVERLAPS predicate."""
    return self.binary(expression, "OVERLAPS")
def distance_sql(self, expression: exp.Distance) -> str:
    """Render the `<->` distance operator."""
    return self.binary(expression, "<->")
def dot_sql(self, expression: exp.Dot) -> str:
    """Render dotted member/qualifier access `a.b`."""
    return f"{self.sql(expression, 'this')}.{self.sql(expression, 'expression')}"
def eq_sql(self, expression: exp.EQ) -> str:
    """Render equality with the `=` operator."""
    return self.binary(expression, "=")
def propertyeq_sql(self, expression: exp.PropertyEQ) -> str:
    """Render a property assignment with the `:=` operator."""
    return self.binary(expression, ":=")
def escape_sql(self, expression: exp.Escape) -> str:
    """Render a LIKE ... ESCAPE clause."""
    return self.binary(expression, "ESCAPE")
def glob_sql(self, expression: exp.Glob) -> str:
    """Render a GLOB pattern match."""
    return self.binary(expression, "GLOB")
def gt_sql(self, expression: exp.GT) -> str:
    """Render a `>` comparison."""
    return self.binary(expression, ">")
def gte_sql(self, expression: exp.GTE) -> str:
    """Render a `>=` comparison."""
    return self.binary(expression, ">=")
def ilike_sql(self, expression: exp.ILike) -> str:
    """Render a case-insensitive ILIKE match."""
    return self.binary(expression, "ILIKE")
def ilikeany_sql(self, expression: exp.ILikeAny) -> str:
    """Render an ILIKE ANY match."""
    return self.binary(expression, "ILIKE ANY")
def is_sql(self, expression: exp.Is) -> str:
    """Render IS; when the dialect disallows `IS TRUE/FALSE`, reduce a boolean
    comparison to the bare operand (or its negation)."""
    rhs = expression.expression
    if isinstance(rhs, exp.Boolean) and not self.IS_BOOL_ALLOWED:
        reduced = expression.this if rhs.this else exp.not_(expression.this)
        return self.sql(reduced)
    return self.binary(expression, "IS")
def like_sql(self, expression: exp.Like) -> str:
    """Render a LIKE pattern match."""
    return self.binary(expression, "LIKE")
def likeany_sql(self, expression: exp.LikeAny) -> str:
    """Render a LIKE ANY match."""
    return self.binary(expression, "LIKE ANY")
def similarto_sql(self, expression: exp.SimilarTo) -> str:
    """Render a SIMILAR TO regex match."""
    return self.binary(expression, "SIMILAR TO")
def lt_sql(self, expression: exp.LT) -> str:
    """Render a `<` comparison."""
    return self.binary(expression, "<")
def lte_sql(self, expression: exp.LTE) -> str:
    """Render a `<=` comparison."""
    return self.binary(expression, "<=")
def mod_sql(self, expression: exp.Mod) -> str:
    """Render a `%` modulo operation."""
    return self.binary(expression, "%")
def mul_sql(self, expression: exp.Mul) -> str:
    """Render a `*` multiplication."""
    return self.binary(expression, "*")
def neq_sql(self, expression: exp.NEQ) -> str:
    """Render inequality as `<>`."""
    return self.binary(expression, "<>")
def nullsafeeq_sql(self, expression: exp.NullSafeEQ) -> str:
    """Render null-safe equality as IS NOT DISTINCT FROM."""
    return self.binary(expression, "IS NOT DISTINCT FROM")
def nullsafeneq_sql(self, expression: exp.NullSafeNEQ) -> str:
    """Render null-safe inequality as IS DISTINCT FROM."""
    return self.binary(expression, "IS DISTINCT FROM")
def slice_sql(self, expression: exp.Slice) -> str:
    """Render a slice as `lo:hi`."""
    return self.binary(expression, ":")
def sub_sql(self, expression: exp.Sub) -> str:
    """Render a `-` subtraction."""
    return self.binary(expression, "-")
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST by delegating to cast_sql with a TRY_ prefix."""
    return self.cast_sql(expression, safe_prefix="TRY_")
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG, swapping base/argument order per the dialect; dialects with
    no two-argument LOG (LOG_BASE_FIRST is None) only support LOG2/LOG10."""
    this = expression.this
    expr = expression.expression
    if self.dialect.LOG_BASE_FIRST is False:
        this, expr = expr, this
    elif self.dialect.LOG_BASE_FIRST is None and expr:
        # Only bases 2 and 10 have dedicated single-argument functions here.
        if this.name in ("2", "10"):
            return self.func(f"LOG{this.name}", expr)
        self.unsupported(f"Unsupported logarithm with base {self.sql(this)}")
    return self.func("LOG", this, expr)
def use_sql(self, expression: exp.Use) -> str:
    """Render a USE statement with its optional kind and target."""
    parts = ["USE"]
    kind = self.sql(expression, "kind")
    if kind:
        parts.append(kind)
    target = self.sql(expression, "this")
    if target:
        parts.append(target)
    return " ".join(parts)
def binary(self, expression: exp.Binary, op: str) -> str:
    """Render a binary expression `lhs op rhs`, attaching comments to the operator."""
    op = self.maybe_comment(op, comments=expression.comments)
    return f"{self.sql(expression, 'this')} {op} {self.sql(expression, 'expression')}"
def function_fallback_sql(self, expression: exp.Func) -> str:
    """Generate SQL for a function that has no dedicated generator method.

    Collects the expression's args in declaration order (flattening
    list-valued args) and renders `NAME(arg, ...)` via `self.func`.
    """
    args: t.List[exp.Expression] = []
    for key in expression.arg_types:
        arg_value = expression.args.get(key)
        if isinstance(arg_value, list):
            # Idiomatic flattening instead of the manual inner append loop.
            args.extend(arg_value)
        elif arg_value is not None:
            args.append(arg_value)
    if self.normalize_functions:
        name = expression.sql_name()
    else:
        # Prefer the name as originally parsed when not normalizing.
        name = (expression._meta and expression.meta.get("name")) or expression.sql_name()
    return self.func(name, *args)
def func(
    self,
    name: str,
    *args: t.Optional[exp.Expression | str],
    prefix: str = "(",
    suffix: str = ")",
) -> str:
    """Render `name(arg1, arg2, ...)`; `None` args are skipped by `format_args`."""
    return f"{self.normalize_func(name)}{prefix}{self.format_args(*args)}{suffix}"
def format_args(self, *args: t.Optional[str | exp.Expression]) -> str:
    """Join rendered args with commas, switching to one arg per line in pretty
    mode when the single-line form would exceed the width limit."""
    rendered = [self.sql(arg) for arg in args if arg is not None]
    if self.pretty and self.text_width(rendered) > self.max_text_width:
        stacked = "\n" + ",\n".join(rendered) + "\n"
        return self.indent(stacked, skip_first=True, skip_last=True)
    return ", ".join(rendered)
def text_width(self, args: t.Iterable[str]) -> int:
    """Return the total number of characters across all strings in `args`."""
    # Typed the element as `str` (was a bare Iterable) — every call site sums
    # rendered SQL strings.
    return sum(len(arg) for arg in args)
def format_time(self, expression: exp.Expression) -> t.Optional[str]:
    """Translate the expression's `format` arg using this dialect's inverse
    time mapping (delegates to the module-level `format_time` helper)."""
    return format_time(
        self.sql(expression, "format"),
        self.dialect.INVERSE_TIME_MAPPING,
        self.dialect.INVERSE_TIME_TRIE,
    )
def expressions(
    self,
    expression: t.Optional[exp.Expression] = None,
    key: t.Optional[str] = None,
    sqls: t.Optional[t.Collection[str | exp.Expression]] = None,
    flat: bool = False,
    indent: bool = True,
    skip_first: bool = False,
    sep: str = ", ",
    prefix: str = "",
) -> str:
    """Render a list of child expressions.

    Args:
        expression: parent node whose `key` arg holds the list (default key "expressions").
        key: arg name to read from `expression`.
        sqls: explicit collection to render instead of reading from `expression`.
        flat: join everything on one line with `sep`, no indentation.
        indent: indent the (multi-line) result.
        skip_first: forwarded to `self.indent`.
        sep: separator between items.
        prefix: prepended to every item.
    """
    expressions = expression.args.get(key or "expressions") if expression else sqls
    if not expressions:
        return ""
    if flat:
        return sep.join(sql for sql in (self.sql(e) for e in expressions) if sql)
    num_sqls = len(expressions)
    # These are calculated once in case we have the leading_comma / pretty option set, correspondingly
    pad = " " * self.pad
    stripped_sep = sep.strip()
    result_sqls = []
    for i, e in enumerate(expressions):
        sql = self.sql(e, comment=False)
        if not sql:
            continue
        comments = self.maybe_comment("", e) if isinstance(e, exp.Expression) else ""
        if self.pretty:
            if self.leading_comma:
                result_sqls.append(f"{sep if i > 0 else pad}{prefix}{sql}{comments}")
            else:
                result_sqls.append(
                    f"{prefix}{sql}{stripped_sep if i + 1 < num_sqls else ''}{comments}"
                )
        else:
            result_sqls.append(f"{prefix}{sql}{comments}{sep if i + 1 < num_sqls else ''}")
    result_sql = "\n".join(result_sqls) if self.pretty else "".join(result_sqls)
    return self.indent(result_sql, skip_first=skip_first) if indent else result_sql
def op_expressions(self, op: str, expression: exp.Expression, flat: bool = False) -> str:
    """Render `op` followed by the expression's children, flat or segmented."""
    flatten = flat or isinstance(expression.parent, exp.Properties)
    rendered = self.expressions(expression, flat=flatten)
    if flatten:
        return f"{op} {rendered}"
    leading_sep = self.sep() if rendered else ""
    return f"{self.seg(op)}{leading_sep}{rendered}"
def naked_property(self, expression: exp.Property) -> str:
    """Render a property as `NAME value` (no parentheses)."""
    property_name = exp.Properties.PROPERTY_TO_NAME.get(expression.__class__)
    if not property_name:
        # NOTE(review): with a lenient error level this falls through and emits
        # "None <value>" — confirm unsupported() is expected to raise here.
        self.unsupported(f"Unsupported property {expression.__class__.__name__}")
    return f"{property_name} {self.sql(expression, 'this')}"
def tag_sql(self, expression: exp.Tag) -> str:
    """Render a tag: raw prefix + inner SQL + raw postfix."""
    return f"{expression.args.get('prefix')}{self.sql(expression.this)}{expression.args.get('postfix')}"
def token_sql(self, token_type: TokenType) -> str:
    """Map a token type to its SQL text, defaulting to the enum member name."""
    return self.TOKEN_MAPPING.get(token_type, token_type.name)
def userdefinedfunction_sql(self, expression: exp.UserDefinedFunction) -> str:
    """Render a UDF: its name plus parameters, parenthesized when wrapped."""
    name = self.sql(expression, "this")
    params = self.no_identify(self.expressions, expression)
    if expression.args.get("wrapped"):
        params = self.wrap(params)
    else:
        params = f" {params}"
    return f"{name}{params}"
def joinhint_sql(self, expression: exp.JoinHint) -> str:
    """Render a join hint as `NAME(arg, arg, ...)`."""
    hint_args = self.expressions(expression, flat=True)
    return f"{self.sql(expression, 'this')}({hint_args})"
def kwarg_sql(self, expression: exp.Kwarg) -> str:
    """Render a named argument as `name => value`."""
    return self.binary(expression, "=>")
def when_sql(self, expression: exp.When) -> str:
    """Render a MERGE WHEN clause: (NOT) MATCHED, optional BY SOURCE and AND
    condition, followed by the THEN action (INSERT / UPDATE / other)."""
    matched = "MATCHED" if expression.args["matched"] else "NOT MATCHED"
    source = " BY SOURCE" if self.MATCHED_BY_SOURCE and expression.args.get("source") else ""
    condition = self.sql(expression, "condition")
    condition = f" AND {condition}" if condition else ""
    then_expression = expression.args.get("then")
    if isinstance(then_expression, exp.Insert):
        then = f"INSERT {self.sql(then_expression, 'this')}"
        if "expression" in then_expression.args:
            then += f" VALUES {self.sql(then_expression, 'expression')}"
    elif isinstance(then_expression, exp.Update):
        if isinstance(then_expression.args.get("expressions"), exp.Star):
            # `UPDATE *` has no explicit SET list.
            then = f"UPDATE {self.sql(then_expression, 'expressions')}"
        else:
            then = f"UPDATE SET {self.expressions(then_expression, flat=True)}"
    else:
        then = self.sql(then_expression)
    return f"WHEN {matched}{source}{condition} THEN {then}"
def merge_sql(self, expression: exp.Merge) -> str:
    """Render a MERGE statement.

    NOTE: mutates the tree — when the target table has a T-SQL WITH hint, the
    alias is popped off the table so it can be re-emitted after the hint.
    """
    table = expression.this
    table_alias = ""
    hints = table.args.get("hints")
    if hints and table.alias and isinstance(hints[0], exp.WithTableHint):
        # T-SQL syntax is MERGE ... <target_table> [WITH (<merge_hint>)] [[AS] table_alias]
        table_alias = f" AS {self.sql(table.args['alias'].pop())}"
    this = self.sql(table)
    using = f"USING {self.sql(expression, 'using')}"
    on = f"ON {self.sql(expression, 'on')}"
    expressions = self.expressions(expression, sep=" ")
    return self.prepend_ctes(
        expression, f"MERGE INTO {this}{table_alias} {using} {on} {expressions}"
    )
def tochar_sql(self, expression: exp.ToChar) -> str:
    """Render TO_CHAR as a text cast; format arguments are not supported here."""
    if expression.args.get("format"):
        self.unsupported("Format argument unsupported for TO_CHAR/TO_VARCHAR function")
    as_text = exp.cast(expression.this, "text")
    return self.sql(as_text)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER, degrading to a DOUBLE cast when the dialect lacks the
    function or no conversion format was supplied."""
    fmt = expression.args.get("format")
    if not self.SUPPORTS_TO_NUMBER:
        self.unsupported("Unsupported TO_NUMBER function")
        return self.sql(exp.cast(expression.this, "double"))
    if not fmt:
        self.unsupported("Conversion format is required for TO_NUMBER")
        return self.sql(exp.cast(expression.this, "double"))
    return self.func("TO_NUMBER", expression.this, fmt)
def dictproperty_sql(self, expression: exp.DictProperty) -> str:
    """Render `name(KIND(settings...))`; settings are space-separated and get
    pretty-printer segment breaks when present."""
    this = self.sql(expression, "this")
    kind = self.sql(expression, "kind")
    settings_sql = self.expressions(expression, key="settings", sep=" ")
    args = f"({self.sep('')}{settings_sql}{self.seg(')', sep='')}" if settings_sql else "()"
    return f"{this}({kind}{args})"
def dictrange_sql(self, expression: exp.DictRange) -> str:
    """Render a dictionary range as `name(MIN lo MAX hi)`."""
    this = self.sql(expression, "this")
    # Renamed locals from `min`/`max`, which shadowed the builtins.
    max_sql = self.sql(expression, "max")
    min_sql = self.sql(expression, "min")
    return f"{this}(MIN {min_sql} MAX {max_sql})"
def dictsubproperty_sql(self, expression: exp.DictSubProperty) -> str:
    """Render a dictionary sub-property as `name value`."""
    return f"{self.sql(expression, 'this')} {self.sql(expression, 'value')}"
def oncluster_sql(self, expression: exp.OnCluster) -> str:
    """Emit nothing for ON CLUSTER by default.

    NOTE(review): presumably dialects that support it override this — confirm.
    """
    return ""
def clusteredbyproperty_sql(self, expression: exp.ClusteredByProperty) -> str:
    """Render `CLUSTERED BY (...) [SORTED BY (...)] INTO n BUCKETS`."""
    columns = self.expressions(expression, key="expressions", flat=True)
    buckets = self.sql(expression, "buckets")
    sorted_by = self.expressions(expression, key="sorted_by", flat=True)
    if sorted_by:
        sorted_by = f" SORTED BY ({sorted_by})"
    return f"CLUSTERED BY ({columns}){sorted_by} INTO {buckets} BUCKETS"
def anyvalue_sql(self, expression: exp.AnyValue) -> str:
    """Render ANY_VALUE, including the optional `HAVING MIN/MAX ...` modifier."""
    target = self.sql(expression, "this")
    having = self.sql(expression, "having")
    if having:
        extreme = "MAX" if expression.args.get("max") else "MIN"
        target = f"{target} HAVING {extreme} {having}"
    return self.func("ANY_VALUE", target)
def querytransform_sql(self, expression: exp.QueryTransform) -> str:
    """Render a TRANSFORM(...) clause with optional row formats, record
    writer/reader, the USING script, and an output schema."""
    transform = self.func("TRANSFORM", *expression.expressions)
    row_format_before = self.sql(expression, "row_format_before")
    row_format_before = f" {row_format_before}" if row_format_before else ""
    record_writer = self.sql(expression, "record_writer")
    record_writer = f" RECORDWRITER {record_writer}" if record_writer else ""
    using = f" USING {self.sql(expression, 'command_script')}"
    schema = self.sql(expression, "schema")
    schema = f" AS {schema}" if schema else ""
    row_format_after = self.sql(expression, "row_format_after")
    row_format_after = f" {row_format_after}" if row_format_after else ""
    record_reader = self.sql(expression, "record_reader")
    record_reader = f" RECORDREADER {record_reader}" if record_reader else ""
    return f"{transform}{row_format_before}{record_writer}{using}{schema}{row_format_after}{record_reader}"
def indexconstraintoption_sql(self, expression: exp.IndexConstraintOption) -> str:
    """Render exactly one index option, checked in a fixed order; warns and
    returns "" when no option is set."""
    for arg, template in (
        ("key_block_size", "KEY_BLOCK_SIZE = {}"),
        ("using", "USING {}"),
        ("parser", "WITH PARSER {}"),
        ("comment", "COMMENT {}"),
    ):
        value = self.sql(expression, arg)
        if value:
            return template.format(value)
    visible = expression.args.get("visible")
    if visible is not None:
        return "VISIBLE" if visible else "INVISIBLE"
    for arg, template in (
        ("engine_attr", "ENGINE_ATTRIBUTE = {}"),
        ("secondary_engine_attr", "SECONDARY_ENGINE_ATTRIBUTE = {}"),
    ):
        value = self.sql(expression, arg)
        if value:
            return template.format(value)
    self.unsupported("Unsupported index constraint option.")
    return ""
def checkcolumnconstraint_sql(self, expression: exp.CheckColumnConstraint) -> str:
    """Render `CHECK (<condition>)`, optionally marked ENFORCED."""
    condition = self.sql(expression, "this")
    suffix = " ENFORCED" if expression.args.get("enforced") else ""
    return f"CHECK ({condition}){suffix}"
def indexcolumnconstraint_sql(self, expression: exp.IndexColumnConstraint) -> str:
    """Render `[KIND] INDEX [name] [USING type] [schema] [options]`."""
    kind = self.sql(expression, "kind")
    pieces = [f"{kind} INDEX" if kind else "INDEX"]
    name = self.sql(expression, "this")
    if name:
        pieces.append(name)
    index_type = self.sql(expression, "index_type")
    if index_type:
        pieces.append(f"USING {index_type}")
    schema = self.sql(expression, "schema")
    if schema:
        pieces.append(schema)
    options = self.expressions(expression, key="options", sep=" ")
    if options:
        pieces.append(options)
    return " ".join(pieces)
def nvl2_sql(self, expression: exp.Nvl2) -> str:
    """Render NVL2 directly when supported, otherwise as an equivalent CASE."""
    if self.NVL2_SUPPORTED:
        return self.function_fallback_sql(expression)
    not_null = expression.this.is_(exp.null()).not_(copy=False)
    case = exp.Case().when(not_null, expression.args["true"], copy=False)
    false_value = expression.args.get("false")
    if false_value:
        case.else_(false_value, copy=False)
    return self.sql(case)
def comprehension_sql(self, expression: exp.Comprehension) -> str:
    """Render a comprehension: `this FOR expr IN iterator [IF condition]`."""
    rendered = {
        part: self.sql(expression, part)
        for part in ("this", "expression", "iterator", "condition")
    }
    guard = f" IF {rendered['condition']}" if rendered["condition"] else ""
    return f"{rendered['this']} FOR {rendered['expression']} IN {rendered['iterator']}{guard}"
def columnprefix_sql(self, expression: exp.ColumnPrefix) -> str:
    """Render a column prefix as `this(expression)`."""
    column = self.sql(expression, "this")
    inner = self.sql(expression, "expression")
    return f"{column}({inner})"
def opclass_sql(self, expression: exp.Opclass) -> str:
    """Render an operator class as `this expression` (space-separated)."""
    return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
def predict_sql(self, expression: exp.Predict) -> str:
    """Render PREDICT(MODEL ..., TABLE ...|<subquery>[, params])."""
    model = f"MODEL {self.sql(expression, 'this')}"
    source = self.sql(expression, "expression")
    if not isinstance(expression.expression, exp.Subquery):
        source = f"TABLE {source}"
    params = self.sql(expression, "params_struct")
    return self.func("PREDICT", model, source, params or None)
def forin_sql(self, expression: exp.ForIn) -> str:
    """Render a `FOR ... DO ...` statement."""
    head = self.sql(expression, "this")
    body = self.sql(expression, "expression")
    return f"FOR {head} DO {body}"
def refresh_sql(self, expression: exp.Refresh) -> str:
    """Render REFRESH, adding TABLE unless the target is a string literal."""
    target = self.sql(expression, "this")
    keyword = "" if isinstance(expression.this, exp.Literal) else "TABLE "
    return f"REFRESH {keyword}{target}"
def operator_sql(self, expression: exp.Operator) -> str:
    """Render a custom operator invocation via `OPERATOR(op)`."""
    return self.binary(expression, f"OPERATOR({self.sql(expression, 'operator')})")
def toarray_sql(self, expression: exp.ToArray) -> str:
    """Render TO_ARRAY: pass arrays through unchanged, otherwise wrap the value
    in a one-element array while preserving NULL inputs."""
    value = expression.this
    if not value.type:
        # Type information is needed to tell whether the input is an array.
        from sqlglot.optimizer.annotate_types import annotate_types
        value = annotate_types(value)
    if value.is_type(exp.DataType.Type.ARRAY):
        return self.sql(value)
    is_null = value.is_(exp.null())
    return self.sql(exp.func("IF", is_null, exp.null(), exp.array(value, copy=False)))
def tsordstotime_sql(self, expression: exp.TsOrDsToTime) -> str:
    """Render TS_OR_DS_TO_TIME, skipping casts that would be redundant."""
    inner = expression.this
    if isinstance(inner, exp.TsOrDsToTime) or inner.is_type(exp.DataType.Type.TIME):
        return self.sql(inner)
    return self.sql(exp.cast(inner, "time"))
def tsordstodate_sql(self, expression: exp.TsOrDsToDate) -> str:
    """Render TS_OR_DS_TO_DATE: parse via STR_TO_TIME for non-default formats,
    otherwise cast to DATE (skipping redundant casts)."""
    this = expression.this
    time_format = self.format_time(expression)
    if time_format and time_format not in (self.dialect.TIME_FORMAT, self.dialect.DATE_FORMAT):
        return self.sql(
            exp.cast(exp.StrToTime(this=this, format=expression.args["format"]), "date")
        )
    if isinstance(this, exp.TsOrDsToDate) or this.is_type(exp.DataType.Type.DATE):
        return self.sql(this)
    return self.sql(exp.cast(this, "date"))
def unixdate_sql(self, expression: exp.UnixDate) -> str:
    """Render UNIX_DATE as a day difference from the 1970-01-01 epoch."""
    epoch = exp.cast(exp.Literal.string("1970-01-01"), "date")
    day_diff = exp.func("DATEDIFF", expression.this, epoch, "day")
    return self.sql(day_diff)
def lastday_sql(self, expression: exp.LastDay) -> str:
    """Render LAST_DAY; warns and drops non-MONTH date parts when the dialect
    doesn't support a date-part argument."""
    if self.LAST_DAY_SUPPORTS_DATE_PART:
        return self.function_fallback_sql(expression)
    unit = expression.text("unit")
    if unit and unit != "MONTH":
        self.unsupported("Date parts are not supported in LAST_DAY.")
    return self.func("LAST_DAY", expression.this)
def arrayany_sql(self, expression: exp.ArrayAny) -> str:
    """Render ARRAY_ANY, rewriting it via ARRAY_FILTER + ARRAY_SIZE when the
    dialect can't express it directly."""
    if self.CAN_IMPLEMENT_ARRAY_ANY:
        # (len(arr) = 0) OR (len(filter(arr, pred)) <> 0)
        filtered = exp.ArrayFilter(this=expression.this, expression=expression.expression)
        filtered_not_empty = exp.ArraySize(this=filtered).neq(0)
        original_is_empty = exp.ArraySize(this=expression.this).eq(0)
        return self.sql(exp.paren(original_is_empty.or_(filtered_not_empty)))
    from sqlglot.dialects import Dialect
    # SQLGlot's executor supports ARRAY_ANY, so we don't want to warn for the SQLGlot dialect
    if self.dialect.__class__ != Dialect:
        self.unsupported("ARRAY_ANY is unsupported")
    return self.function_fallback_sql(expression)
def _jsonpathkey_sql(self, expression: exp.JSONPathKey) -> str:
    """Render a JSONPath key segment as `.key`, `[key]`, or a wildcard part."""
    this = expression.this
    if isinstance(this, exp.JSONPathWildcard):
        this = self.json_path_part(this)
        return f".{this}" if this else ""
    # NOTE(review): assumes `this` is a str here — confirm upstream guarantees.
    if exp.SAFE_IDENTIFIER_RE.match(this):
        return f".{this}"
    this = self.json_path_part(this)
    return f"[{this}]" if self.JSON_PATH_BRACKETED_KEY_SUPPORTED else f".{this}"
def _jsonpathsubscript_sql(self, expression: exp.JSONPathSubscript) -> str:
    """Render a JSONPath subscript as `[part]`, or nothing for an empty part."""
    part = self.json_path_part(expression.this)
    if not part:
        return ""
    return f"[{part}]"
def _simplify_unless_literal(self, expression: E) -> E:
    """Constant-fold `expression` via the simplifier, leaving literals as-is."""
    if isinstance(expression, exp.Literal):
        return expression
    from sqlglot.optimizer.simplify import simplify
    return simplify(expression, dialect=self.dialect)
def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
    """Render GENERATE_SERIES after clearing the internal `is_end_exclusive`
    flag (mutates the tree) so it isn't rendered as an argument."""
    expression.set("is_end_exclusive", None)
    return self.function_fallback_sql(expression)
def struct_sql(self, expression: exp.Struct) -> str:
    """Render STRUCT, first rewriting `key := value` pairs into aliases
    (mutates the expression's child list)."""
    rewritten = []
    for child in expression.expressions:
        if isinstance(child, exp.PropertyEQ):
            rewritten.append(exp.alias_(child.expression, child.this))
        else:
            rewritten.append(child)
    expression.set("expressions", rewritten)
    return self.function_fallback_sql(expression)
def partitionrange_sql(self, expression: exp.PartitionRange) -> str:
    """Render a partition range as `low TO high`."""
    lower_bound = self.sql(expression, "this")
    upper_bound = self.sql(expression, "expression")
    return f"{lower_bound} TO {upper_bound}"
def truncatetable_sql(self, expression: exp.TruncateTable) -> str:
    """Render TRUNCATE TABLE/DATABASE with its optional clauses (IF EXISTS,
    ON CLUSTER, identity handling, option, partition)."""
    target = "DATABASE" if expression.args.get("is_database") else "TABLE"
    tables = f" {self.expressions(expression)}"
    exists = " IF EXISTS" if expression.args.get("exists") else ""
    on_cluster = self.sql(expression, "cluster")
    on_cluster = f" {on_cluster}" if on_cluster else ""
    identity = self.sql(expression, "identity")
    identity = f" {identity} IDENTITY" if identity else ""
    option = self.sql(expression, "option")
    option = f" {option}" if option else ""
    partition = self.sql(expression, "partition")
    partition = f" {partition}" if partition else ""
    return f"TRUNCATE {target}{exists}{tables}{on_cluster}{identity}{option}{partition}"
# This transpiles T-SQL's CONVERT function
# https://learn.microsoft.com/en-us/sql/t-sql/functions/cast-and-convert-transact-sql?view=sql-server-ver16
def convert_sql(self, expression: exp.Convert) -> str:
    """Transpile T-SQL CONVERT(type, value[, style]) into STR_TO_DATE /
    STR_TO_TIME / TIME_TO_STR or a plain (TRY_)CAST, depending on the style."""
    to = expression.this
    value = expression.expression
    style = expression.args.get("style")
    safe = expression.args.get("safe")
    strict = expression.args.get("strict")
    if not to or not value:
        return ""
    # Retrieve length of datatype and override to default if not specified
    if not seq_get(to.expressions, 0) and to.this in self.PARAMETERIZABLE_TEXT_TYPES:
        to = exp.DataType.build(to.this, expressions=[exp.Literal.number(30)], nested=False)
    transformed: t.Optional[exp.Expression] = None
    cast = exp.Cast if strict else exp.TryCast
    # Check whether a conversion with format (T-SQL calls this 'style') is applicable
    if isinstance(style, exp.Literal) and style.is_int:
        from sqlglot.dialects.tsql import TSQL
        style_value = style.name
        converted_style = TSQL.CONVERT_FORMAT_MAPPING.get(style_value)
        if not converted_style:
            self.unsupported(f"Unsupported T-SQL 'style' value: {style_value}")
        fmt = exp.Literal.string(converted_style)
        if to.this == exp.DataType.Type.DATE:
            transformed = exp.StrToDate(this=value, format=fmt)
        elif to.this == exp.DataType.Type.DATETIME:
            transformed = exp.StrToTime(this=value, format=fmt)
        elif to.this in self.PARAMETERIZABLE_TEXT_TYPES:
            transformed = cast(this=exp.TimeToStr(this=value, format=fmt), to=to, safe=safe)
        elif to.this == exp.DataType.Type.TEXT:
            transformed = exp.TimeToStr(this=value, format=fmt)
    if not transformed:
        # No style-specific rewrite applied: emit a plain (TRY_)CAST.
        transformed = cast(this=value, to=to, safe=safe)
    return self.sql(transformed)
The code above provides the dependencies needed to implement the `preprocess` function. Write a Python function `def preprocess(transforms: t.List[t.Callable[[exp.Expression], exp.Expression]]) -> t.Callable[[Generator, exp.Expression], str]` that solves the following problem:
Create a new transform by chaining a sequence of transformations, then convert the resulting expression to SQL using either the "_sql" method corresponding to the resulting expression, or the appropriate `Generator.TRANSFORMS` function (when applicable — see below). Args: transforms: a sequence of transform functions, called in order. Returns: a function that can be used as a generator transform.
Here is the function:
def preprocess(
    transforms: t.List[t.Callable[[exp.Expression], exp.Expression]],
) -> t.Callable[[Generator, exp.Expression], str]:
    """
    Creates a new transform by chaining a sequence of transformations and converts the resulting
    expression to SQL, using either the "_sql" method corresponding to the resulting expression,
    or the appropriate `Generator.TRANSFORMS` function (when applicable -- see below).

    Args:
        transforms: sequence of transform functions. These will be called in order.

    Returns:
        Function that can be used as a generator transform.
    """

    def _to_sql(self, expression: exp.Expression) -> str:
        original_type = type(expression)
        expression = transforms[0](expression)
        for transform in transforms[1:]:
            expression = transform(expression)
        # A dedicated "<key>_sql" method on the generator wins.
        sql_method = getattr(self, expression.key + "_sql", None)
        if sql_method:
            return sql_method(expression)
        mapped_handler = self.TRANSFORMS.get(type(expression))
        if not mapped_handler:
            raise ValueError(f"Unsupported expression type {expression.__class__.__name__}.")
        if original_type is not type(expression):
            return mapped_handler(self, expression)
        if isinstance(expression, exp.Func):
            return self.function_fallback_sql(expression)
        # Ensures we don't enter an infinite loop. This can happen when the original expression
        # has the same type as the final expression and there's no _sql method available for it,
        # because then it'd re-enter _to_sql.
        raise ValueError(
            f"Expression type {expression.__class__.__name__} requires a _sql method in order to be transformed."
        )

    return _to_sql
# --- dataset row 152,902 ---
from __future__ import annotations
import logging
import typing as t
from collections import defaultdict
from sqlglot import exp
from sqlglot.errors import ErrorLevel, ParseError, concat_messages, merge_errors
from sqlglot.helper import apply_index_offset, ensure_list, seq_get
from sqlglot.time import format_time
from sqlglot.tokens import Token, Tokenizer, TokenType
from sqlglot.trie import TrieResult, in_trie, new_trie
if t.TYPE_CHECKING:
from sqlglot._typing import E, Lit
from sqlglot.dialects.dialect import Dialect, DialectType
def seq_get(seq: t.Sequence[T], index: int) -> t.Optional[T]:
    """Returns the value in `seq` at position `index`, or `None` if `index` is out of bounds."""
    try:
        value = seq[index]
    except IndexError:
        return None
    return value
def build_like(args: t.List) -> exp.Escape | exp.Like:
    """Build a LIKE node (operands in reversed argument order), wrapping it in
    an ESCAPE node when a third argument is present."""
    like = exp.Like(this=seq_get(args, 1), expression=seq_get(args, 0))
    if len(args) > 2:
        return exp.Escape(this=like, expression=seq_get(args, 2))
    return like
# --- dataset row 152,903 ---
from __future__ import annotations
import logging
import typing as t
from collections import defaultdict
from sqlglot import exp
from sqlglot.errors import ErrorLevel, ParseError, concat_messages, merge_errors
from sqlglot.helper import apply_index_offset, ensure_list, seq_get
from sqlglot.time import format_time
from sqlglot.tokens import Token, Tokenizer, TokenType
from sqlglot.trie import TrieResult, in_trie, new_trie
if t.TYPE_CHECKING:
from sqlglot._typing import E, Lit
from sqlglot.dialects.dialect import Dialect, DialectType
class Parser(metaclass=_Parser):
"""
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Args:
error_level: The desired error level.
Default: ErrorLevel.IMMEDIATE
error_message_context: The amount of context to capture from a query string when displaying
the error message (in number of characters).
Default: 100
max_errors: Maximum number of error messages to include in a raised ParseError.
This is only relevant if error_level is ErrorLevel.RAISE.
Default: 3
"""
FUNCTIONS: t.Dict[str, t.Callable] = {
**{name: func.from_arg_list for name, func in exp.FUNCTION_BY_NAME.items()},
"CONCAT": lambda args, dialect: exp.Concat(
expressions=args,
safe=not dialect.STRICT_STRING_CONCAT,
coalesce=dialect.CONCAT_COALESCE,
),
"CONCAT_WS": lambda args, dialect: exp.ConcatWs(
expressions=args,
safe=not dialect.STRICT_STRING_CONCAT,
coalesce=dialect.CONCAT_COALESCE,
),
"DATE_TO_DATE_STR": lambda args: exp.Cast(
this=seq_get(args, 0),
to=exp.DataType(this=exp.DataType.Type.TEXT),
),
"GLOB": lambda args: exp.Glob(this=seq_get(args, 1), expression=seq_get(args, 0)),
"JSON_EXTRACT": build_extract_json_with_path(exp.JSONExtract),
"JSON_EXTRACT_SCALAR": build_extract_json_with_path(exp.JSONExtractScalar),
"JSON_EXTRACT_PATH_TEXT": build_extract_json_with_path(exp.JSONExtractScalar),
"LIKE": build_like,
"LOG": build_logarithm,
"LOG2": lambda args: exp.Log(this=exp.Literal.number(2), expression=seq_get(args, 0)),
"LOG10": lambda args: exp.Log(this=exp.Literal.number(10), expression=seq_get(args, 0)),
"MOD": lambda args: exp.Mod(this=seq_get(args, 0), expression=seq_get(args, 1)),
"TIME_TO_TIME_STR": lambda args: exp.Cast(
this=seq_get(args, 0),
to=exp.DataType(this=exp.DataType.Type.TEXT),
),
"TS_OR_DS_TO_DATE_STR": lambda args: exp.Substring(
this=exp.Cast(
this=seq_get(args, 0),
to=exp.DataType(this=exp.DataType.Type.TEXT),
),
start=exp.Literal.number(1),
length=exp.Literal.number(10),
),
"VAR_MAP": build_var_map,
}
NO_PAREN_FUNCTIONS = {
TokenType.CURRENT_DATE: exp.CurrentDate,
TokenType.CURRENT_DATETIME: exp.CurrentDate,
TokenType.CURRENT_TIME: exp.CurrentTime,
TokenType.CURRENT_TIMESTAMP: exp.CurrentTimestamp,
TokenType.CURRENT_USER: exp.CurrentUser,
}
STRUCT_TYPE_TOKENS = {
TokenType.NESTED,
TokenType.OBJECT,
TokenType.STRUCT,
}
NESTED_TYPE_TOKENS = {
TokenType.ARRAY,
TokenType.LOWCARDINALITY,
TokenType.MAP,
TokenType.NULLABLE,
*STRUCT_TYPE_TOKENS,
}
ENUM_TYPE_TOKENS = {
TokenType.ENUM,
TokenType.ENUM8,
TokenType.ENUM16,
}
AGGREGATE_TYPE_TOKENS = {
TokenType.AGGREGATEFUNCTION,
TokenType.SIMPLEAGGREGATEFUNCTION,
}
TYPE_TOKENS = {
TokenType.BIT,
TokenType.BOOLEAN,
TokenType.TINYINT,
TokenType.UTINYINT,
TokenType.SMALLINT,
TokenType.USMALLINT,
TokenType.INT,
TokenType.UINT,
TokenType.BIGINT,
TokenType.UBIGINT,
TokenType.INT128,
TokenType.UINT128,
TokenType.INT256,
TokenType.UINT256,
TokenType.MEDIUMINT,
TokenType.UMEDIUMINT,
TokenType.FIXEDSTRING,
TokenType.FLOAT,
TokenType.DOUBLE,
TokenType.CHAR,
TokenType.NCHAR,
TokenType.VARCHAR,
TokenType.NVARCHAR,
TokenType.BPCHAR,
TokenType.TEXT,
TokenType.MEDIUMTEXT,
TokenType.LONGTEXT,
TokenType.MEDIUMBLOB,
TokenType.LONGBLOB,
TokenType.BINARY,
TokenType.VARBINARY,
TokenType.JSON,
TokenType.JSONB,
TokenType.INTERVAL,
TokenType.TINYBLOB,
TokenType.TINYTEXT,
TokenType.TIME,
TokenType.TIMETZ,
TokenType.TIMESTAMP,
TokenType.TIMESTAMP_S,
TokenType.TIMESTAMP_MS,
TokenType.TIMESTAMP_NS,
TokenType.TIMESTAMPTZ,
TokenType.TIMESTAMPLTZ,
TokenType.DATETIME,
TokenType.DATETIME64,
TokenType.DATE,
TokenType.DATE32,
TokenType.INT4RANGE,
TokenType.INT4MULTIRANGE,
TokenType.INT8RANGE,
TokenType.INT8MULTIRANGE,
TokenType.NUMRANGE,
TokenType.NUMMULTIRANGE,
TokenType.TSRANGE,
TokenType.TSMULTIRANGE,
TokenType.TSTZRANGE,
TokenType.TSTZMULTIRANGE,
TokenType.DATERANGE,
TokenType.DATEMULTIRANGE,
TokenType.DECIMAL,
TokenType.UDECIMAL,
TokenType.BIGDECIMAL,
TokenType.UUID,
TokenType.GEOGRAPHY,
TokenType.GEOMETRY,
TokenType.HLLSKETCH,
TokenType.HSTORE,
TokenType.PSEUDO_TYPE,
TokenType.SUPER,
TokenType.SERIAL,
TokenType.SMALLSERIAL,
TokenType.BIGSERIAL,
TokenType.XML,
TokenType.YEAR,
TokenType.UNIQUEIDENTIFIER,
TokenType.USERDEFINED,
TokenType.MONEY,
TokenType.SMALLMONEY,
TokenType.ROWVERSION,
TokenType.IMAGE,
TokenType.VARIANT,
TokenType.OBJECT,
TokenType.OBJECT_IDENTIFIER,
TokenType.INET,
TokenType.IPADDRESS,
TokenType.IPPREFIX,
TokenType.IPV4,
TokenType.IPV6,
TokenType.UNKNOWN,
TokenType.NULL,
TokenType.NAME,
*ENUM_TYPE_TOKENS,
*NESTED_TYPE_TOKENS,
*AGGREGATE_TYPE_TOKENS,
}
SIGNED_TO_UNSIGNED_TYPE_TOKEN = {
TokenType.BIGINT: TokenType.UBIGINT,
TokenType.INT: TokenType.UINT,
TokenType.MEDIUMINT: TokenType.UMEDIUMINT,
TokenType.SMALLINT: TokenType.USMALLINT,
TokenType.TINYINT: TokenType.UTINYINT,
TokenType.DECIMAL: TokenType.UDECIMAL,
}
SUBQUERY_PREDICATES = {
TokenType.ANY: exp.Any,
TokenType.ALL: exp.All,
TokenType.EXISTS: exp.Exists,
TokenType.SOME: exp.Any,
}
RESERVED_TOKENS = {
*Tokenizer.SINGLE_TOKENS.values(),
TokenType.SELECT,
}
DB_CREATABLES = {
TokenType.DATABASE,
TokenType.SCHEMA,
TokenType.TABLE,
TokenType.VIEW,
TokenType.MODEL,
TokenType.DICTIONARY,
TokenType.SEQUENCE,
TokenType.STORAGE_INTEGRATION,
}
CREATABLES = {
TokenType.COLUMN,
TokenType.CONSTRAINT,
TokenType.FUNCTION,
TokenType.INDEX,
TokenType.PROCEDURE,
TokenType.FOREIGN_KEY,
*DB_CREATABLES,
}
# Tokens that can represent identifiers
ID_VAR_TOKENS = {
TokenType.VAR,
TokenType.ANTI,
TokenType.APPLY,
TokenType.ASC,
TokenType.ASOF,
TokenType.AUTO_INCREMENT,
TokenType.BEGIN,
TokenType.BPCHAR,
TokenType.CACHE,
TokenType.CASE,
TokenType.COLLATE,
TokenType.COMMAND,
TokenType.COMMENT,
TokenType.COMMIT,
TokenType.CONSTRAINT,
TokenType.DEFAULT,
TokenType.DELETE,
TokenType.DESC,
TokenType.DESCRIBE,
TokenType.DICTIONARY,
TokenType.DIV,
TokenType.END,
TokenType.EXECUTE,
TokenType.ESCAPE,
TokenType.FALSE,
TokenType.FIRST,
TokenType.FILTER,
TokenType.FINAL,
TokenType.FORMAT,
TokenType.FULL,
TokenType.IS,
TokenType.ISNULL,
TokenType.INTERVAL,
TokenType.KEEP,
TokenType.KILL,
TokenType.LEFT,
TokenType.LOAD,
TokenType.MERGE,
TokenType.NATURAL,
TokenType.NEXT,
TokenType.OFFSET,
TokenType.OPERATOR,
TokenType.ORDINALITY,
TokenType.OVERLAPS,
TokenType.OVERWRITE,
TokenType.PARTITION,
TokenType.PERCENT,
TokenType.PIVOT,
TokenType.PRAGMA,
TokenType.RANGE,
TokenType.RECURSIVE,
TokenType.REFERENCES,
TokenType.REFRESH,
TokenType.REPLACE,
TokenType.RIGHT,
TokenType.ROW,
TokenType.ROWS,
TokenType.SEMI,
TokenType.SET,
TokenType.SETTINGS,
TokenType.SHOW,
TokenType.TEMPORARY,
TokenType.TOP,
TokenType.TRUE,
TokenType.TRUNCATE,
TokenType.UNIQUE,
TokenType.UNPIVOT,
TokenType.UPDATE,
TokenType.USE,
TokenType.VOLATILE,
TokenType.WINDOW,
*CREATABLES,
*SUBQUERY_PREDICATES,
*TYPE_TOKENS,
*NO_PAREN_FUNCTIONS,
}
INTERVAL_VARS = ID_VAR_TOKENS - {TokenType.END}
TABLE_ALIAS_TOKENS = ID_VAR_TOKENS - {
TokenType.ANTI,
TokenType.APPLY,
TokenType.ASOF,
TokenType.FULL,
TokenType.LEFT,
TokenType.LOCK,
TokenType.NATURAL,
TokenType.OFFSET,
TokenType.RIGHT,
TokenType.SEMI,
TokenType.WINDOW,
}
COMMENT_TABLE_ALIAS_TOKENS = TABLE_ALIAS_TOKENS - {TokenType.IS}
UPDATE_ALIAS_TOKENS = TABLE_ALIAS_TOKENS - {TokenType.SET}
TRIM_TYPES = {"LEADING", "TRAILING", "BOTH"}
# Keyword tokens that are also allowed to appear as function names.
FUNC_TOKENS = {
    TokenType.COLLATE,
    TokenType.COMMAND,
    TokenType.CURRENT_DATE,
    TokenType.CURRENT_DATETIME,
    TokenType.CURRENT_TIMESTAMP,
    TokenType.CURRENT_TIME,
    TokenType.CURRENT_USER,
    TokenType.FILTER,
    TokenType.FIRST,
    TokenType.FORMAT,
    TokenType.GLOB,
    TokenType.IDENTIFIER,
    TokenType.INDEX,
    TokenType.ISNULL,
    TokenType.ILIKE,
    TokenType.INSERT,
    TokenType.LIKE,
    TokenType.MERGE,
    TokenType.OFFSET,
    TokenType.PRIMARY_KEY,
    TokenType.RANGE,
    TokenType.REPLACE,
    TokenType.RLIKE,
    TokenType.ROW,
    TokenType.UNNEST,
    TokenType.VAR,
    TokenType.LEFT,
    TokenType.RIGHT,
    TokenType.SEQUENCE,
    TokenType.DATE,
    TokenType.DATETIME,
    TokenType.TABLE,
    TokenType.TIMESTAMP,
    TokenType.TIMESTAMPTZ,
    TokenType.TRUNCATE,
    TokenType.WINDOW,
    TokenType.XOR,
    *TYPE_TOKENS,
    *SUBQUERY_PREDICATES,
}

# Binary operator tables mapping token -> AST node, one table per precedence level.
CONJUNCTION = {
    TokenType.AND: exp.And,
    TokenType.OR: exp.Or,
}

EQUALITY = {
    TokenType.COLON_EQ: exp.PropertyEQ,
    TokenType.EQ: exp.EQ,
    TokenType.NEQ: exp.NEQ,
    TokenType.NULLSAFE_EQ: exp.NullSafeEQ,
}

COMPARISON = {
    TokenType.GT: exp.GT,
    TokenType.GTE: exp.GTE,
    TokenType.LT: exp.LT,
    TokenType.LTE: exp.LTE,
}

BITWISE = {
    TokenType.AMP: exp.BitwiseAnd,
    TokenType.CARET: exp.BitwiseXor,
    TokenType.PIPE: exp.BitwiseOr,
}

# Additive-level operators.
TERM = {
    TokenType.DASH: exp.Sub,
    TokenType.PLUS: exp.Add,
    TokenType.MOD: exp.Mod,
    TokenType.COLLATE: exp.Collate,
}

# Multiplicative-level operators.
FACTOR = {
    TokenType.DIV: exp.IntDiv,
    TokenType.LR_ARROW: exp.Distance,
    TokenType.SLASH: exp.Div,
    TokenType.STAR: exp.Mul,
}

# No exponentiation operator by default.
EXPONENT: t.Dict[TokenType, t.Type[exp.Expression]] = {}

# Time-only types; TIMESTAMPS is the superset that also includes timestamp variants.
TIMES = {
    TokenType.TIME,
    TokenType.TIMETZ,
}

TIMESTAMPS = {
    TokenType.TIMESTAMP,
    TokenType.TIMESTAMPTZ,
    TokenType.TIMESTAMPLTZ,
    *TIMES,
}

# Tokens that combine two queries into a set operation.
SET_OPERATIONS = {
    TokenType.UNION,
    TokenType.INTERSECT,
    TokenType.EXCEPT,
}

JOIN_METHODS = {
    TokenType.ASOF,
    TokenType.NATURAL,
    TokenType.POSITIONAL,
}

JOIN_SIDES = {
    TokenType.LEFT,
    TokenType.RIGHT,
    TokenType.FULL,
}

JOIN_KINDS = {
    TokenType.INNER,
    TokenType.OUTER,
    TokenType.CROSS,
    TokenType.SEMI,
    TokenType.ANTI,
}

# No join hints recognized by default.
JOIN_HINTS: t.Set[str] = set()
# Lambda syntax handlers: "args -> body" builds exp.Lambda (rewriting references to
# the bound names), "name => value" builds exp.Kwarg.
LAMBDAS = {
    TokenType.ARROW: lambda self, expressions: self.expression(
        exp.Lambda,
        this=self._replace_lambda(
            self._parse_conjunction(),
            {node.name for node in expressions},
        ),
        expressions=expressions,
    ),
    TokenType.FARROW: lambda self, expressions: self.expression(
        exp.Kwarg,
        this=exp.var(expressions[0].name),
        expression=self._parse_conjunction(),
    ),
}

# Postfix operators that can follow a column expression.
COLUMN_OPERATORS = {
    TokenType.DOT: None,  # no handler entry: dot access is treated separately
    # "::" cast; TryCast when the dialect's casts are not strict (see STRICT_CAST)
    TokenType.DCOLON: lambda self, this, to: self.expression(
        exp.Cast if self.STRICT_CAST else exp.TryCast,
        this=this,
        to=to,
    ),
    # "->" JSON extraction
    TokenType.ARROW: lambda self, this, path: self.expression(
        exp.JSONExtract,
        this=this,
        expression=self.dialect.to_json_path(path),
        only_json_types=self.JSON_ARROWS_REQUIRE_JSON_TYPE,
    ),
    # "->>" scalar JSON extraction
    TokenType.DARROW: lambda self, this, path: self.expression(
        exp.JSONExtractScalar,
        this=this,
        expression=self.dialect.to_json_path(path),
        only_json_types=self.JSON_ARROWS_REQUIRE_JSON_TYPE,
    ),
    # "#>" JSONB extraction
    TokenType.HASH_ARROW: lambda self, this, path: self.expression(
        exp.JSONBExtract,
        this=this,
        expression=path,
    ),
    # "#>>" scalar JSONB extraction
    TokenType.DHASH_ARROW: lambda self, this, path: self.expression(
        exp.JSONBExtractScalar,
        this=this,
        expression=path,
    ),
    # "?" JSONB containment
    TokenType.PLACEHOLDER: lambda self, this, key: self.expression(
        exp.JSONBContains,
        this=this,
        expression=key,
    ),
}
# Parsers used by parse_into, keyed by the target Expression type.
EXPRESSION_PARSERS = {
    exp.Cluster: lambda self: self._parse_sort(exp.Cluster, TokenType.CLUSTER_BY),
    exp.Column: lambda self: self._parse_column(),
    exp.Condition: lambda self: self._parse_conjunction(),
    exp.DataType: lambda self: self._parse_types(allow_identifiers=False),
    exp.Expression: lambda self: self._parse_expression(),
    exp.From: lambda self: self._parse_from(),
    exp.Group: lambda self: self._parse_group(),
    exp.Having: lambda self: self._parse_having(),
    exp.Identifier: lambda self: self._parse_id_var(),
    exp.Join: lambda self: self._parse_join(),
    exp.Lambda: lambda self: self._parse_lambda(),
    exp.Lateral: lambda self: self._parse_lateral(),
    exp.Limit: lambda self: self._parse_limit(),
    exp.Offset: lambda self: self._parse_offset(),
    exp.Order: lambda self: self._parse_order(),
    exp.Ordered: lambda self: self._parse_ordered(),
    exp.Properties: lambda self: self._parse_properties(),
    exp.Qualify: lambda self: self._parse_qualify(),
    exp.Returning: lambda self: self._parse_returning(),
    exp.Sort: lambda self: self._parse_sort(exp.Sort, TokenType.SORT_BY),
    exp.Table: lambda self: self._parse_table_parts(),
    exp.TableAlias: lambda self: self._parse_table_alias(),
    exp.When: lambda self: seq_get(self._parse_when_matched(), 0),
    exp.Where: lambda self: self._parse_where(),
    exp.Window: lambda self: self._parse_named_window(),
    exp.With: lambda self: self._parse_with(),
    "JOIN_TYPE": lambda self: self._parse_join_parts(),
}

# Statement dispatch table: leading keyword token -> parser (see _parse_statement).
STATEMENT_PARSERS = {
    TokenType.ALTER: lambda self: self._parse_alter(),
    TokenType.BEGIN: lambda self: self._parse_transaction(),
    TokenType.CACHE: lambda self: self._parse_cache(),
    TokenType.COMMENT: lambda self: self._parse_comment(),
    TokenType.COMMIT: lambda self: self._parse_commit_or_rollback(),
    TokenType.CREATE: lambda self: self._parse_create(),
    TokenType.DELETE: lambda self: self._parse_delete(),
    TokenType.DESC: lambda self: self._parse_describe(),
    TokenType.DESCRIBE: lambda self: self._parse_describe(),
    TokenType.DROP: lambda self: self._parse_drop(),
    TokenType.INSERT: lambda self: self._parse_insert(),
    TokenType.KILL: lambda self: self._parse_kill(),
    TokenType.LOAD: lambda self: self._parse_load(),
    TokenType.MERGE: lambda self: self._parse_merge(),
    TokenType.PIVOT: lambda self: self._parse_simplified_pivot(),
    TokenType.PRAGMA: lambda self: self.expression(exp.Pragma, this=self._parse_expression()),
    TokenType.REFRESH: lambda self: self._parse_refresh(),
    TokenType.ROLLBACK: lambda self: self._parse_commit_or_rollback(),
    TokenType.SET: lambda self: self._parse_set(),
    TokenType.TRUNCATE: lambda self: self._parse_truncate_table(),
    TokenType.UNCACHE: lambda self: self._parse_uncache(),
    TokenType.UPDATE: lambda self: self._parse_update(),
    TokenType.USE: lambda self: self.expression(
        exp.Use,
        kind=self._parse_var_from_options(self.USABLES, raise_unmatched=False),
        this=self._parse_table(schema=False),
    ),
}

# Prefix (unary) operator handlers.
UNARY_PARSERS = {
    TokenType.PLUS: lambda self: self._parse_unary(),  # Unary + is handled as a no-op
    TokenType.NOT: lambda self: self.expression(exp.Not, this=self._parse_equality()),
    TokenType.TILDA: lambda self: self.expression(exp.BitwiseNot, this=self._parse_unary()),
    TokenType.DASH: lambda self: self.expression(exp.Neg, this=self._parse_unary()),
    TokenType.PIPE_SLASH: lambda self: self.expression(exp.Sqrt, this=self._parse_unary()),
    TokenType.DPIPE_SLASH: lambda self: self.expression(exp.Cbrt, this=self._parse_unary()),
}
# Literal-token handlers for string-like tokens.
STRING_PARSERS = {
    TokenType.HEREDOC_STRING: lambda self, token: self.expression(
        exp.RawString, this=token.text
    ),
    TokenType.NATIONAL_STRING: lambda self, token: self.expression(
        exp.National, this=token.text
    ),
    TokenType.RAW_STRING: lambda self, token: self.expression(exp.RawString, this=token.text),
    TokenType.STRING: lambda self, token: self.expression(
        exp.Literal, this=token.text, is_string=True
    ),
    TokenType.UNICODE_STRING: lambda self, token: self.expression(
        exp.UnicodeString,
        this=token.text,
        # an optional UESCAPE '<char>' clause may follow the literal
        escape=self._match_text_seq("UESCAPE") and self._parse_string(),
    ),
}

# Literal-token handlers for numeric-like tokens.
NUMERIC_PARSERS = {
    TokenType.BIT_STRING: lambda self, token: self.expression(exp.BitString, this=token.text),
    TokenType.BYTE_STRING: lambda self, token: self.expression(exp.ByteString, this=token.text),
    TokenType.HEX_STRING: lambda self, token: self.expression(exp.HexString, this=token.text),
    TokenType.NUMBER: lambda self, token: self.expression(
        exp.Literal, this=token.text, is_string=False
    ),
}

# All primary (leaf) expression handlers: the literal tables above plus
# NULL / TRUE / FALSE / session parameters / star.
PRIMARY_PARSERS = {
    **STRING_PARSERS,
    **NUMERIC_PARSERS,
    TokenType.INTRODUCER: lambda self, token: self._parse_introducer(token),
    TokenType.NULL: lambda self, _: self.expression(exp.Null),
    TokenType.TRUE: lambda self, _: self.expression(exp.Boolean, this=True),
    TokenType.FALSE: lambda self, _: self.expression(exp.Boolean, this=False),
    TokenType.SESSION_PARAMETER: lambda self, _: self._parse_session_parameter(),
    TokenType.STAR: lambda self, _: self.expression(
        exp.Star, **{"except": self._parse_except(), "replace": self._parse_replace()}
    ),
}

# Placeholder/parameter syntaxes (e.g. "?", "@param", ":name" / ":1").
PLACEHOLDER_PARSERS = {
    TokenType.PLACEHOLDER: lambda self: self.expression(exp.Placeholder),
    TokenType.PARAMETER: lambda self: self._parse_parameter(),
    TokenType.COLON: lambda self: (
        self.expression(exp.Placeholder, this=self._prev.text)
        if self._match(TokenType.NUMBER) or self._match_set(self.ID_VAR_TOKENS)
        else None
    ),
}

# Postfix range/predicate operators (BETWEEN, IN, LIKE family, IS, ...).
RANGE_PARSERS = {
    TokenType.BETWEEN: lambda self, this: self._parse_between(this),
    TokenType.GLOB: binary_range_parser(exp.Glob),
    TokenType.ILIKE: binary_range_parser(exp.ILike),
    TokenType.IN: lambda self, this: self._parse_in(this),
    TokenType.IRLIKE: binary_range_parser(exp.RegexpILike),
    TokenType.IS: lambda self, this: self._parse_is(this),
    TokenType.LIKE: binary_range_parser(exp.Like),
    TokenType.OVERLAPS: binary_range_parser(exp.Overlaps),
    TokenType.RLIKE: binary_range_parser(exp.RegexpLike),
    TokenType.SIMILAR_TO: binary_range_parser(exp.SimilarTo),
    TokenType.FOR: lambda self, this: self._parse_comprehension(this),
}
# Keyword-keyed parsers for DDL properties (the clauses that may trail CREATE
# and similar statements).
PROPERTY_PARSERS: t.Dict[str, t.Callable] = {
    "ALGORITHM": lambda self: self._parse_property_assignment(exp.AlgorithmProperty),
    "AUTO": lambda self: self._parse_auto_property(),
    "AUTO_INCREMENT": lambda self: self._parse_property_assignment(exp.AutoIncrementProperty),
    "BACKUP": lambda self: self.expression(
        exp.BackupProperty, this=self._parse_var(any_token=True)
    ),
    "BLOCKCOMPRESSION": lambda self: self._parse_blockcompression(),
    "CHARSET": lambda self, **kwargs: self._parse_character_set(**kwargs),
    "CHARACTER SET": lambda self, **kwargs: self._parse_character_set(**kwargs),
    "CHECKSUM": lambda self: self._parse_checksum(),
    "CLUSTER BY": lambda self: self._parse_cluster(),
    "CLUSTERED": lambda self: self._parse_clustered_by(),
    "COLLATE": lambda self, **kwargs: self._parse_property_assignment(
        exp.CollateProperty, **kwargs
    ),
    "COMMENT": lambda self: self._parse_property_assignment(exp.SchemaCommentProperty),
    "CONTAINS": lambda self: self._parse_contains_property(),
    "COPY": lambda self: self._parse_copy_property(),
    "DATABLOCKSIZE": lambda self, **kwargs: self._parse_datablocksize(**kwargs),
    "DEFINER": lambda self: self._parse_definer(),
    # DETERMINISTIC is normalized to the IMMUTABLE stability level
    "DETERMINISTIC": lambda self: self.expression(
        exp.StabilityProperty, this=exp.Literal.string("IMMUTABLE")
    ),
    "DISTKEY": lambda self: self._parse_distkey(),
    "DISTSTYLE": lambda self: self._parse_property_assignment(exp.DistStyleProperty),
    "ENGINE": lambda self: self._parse_property_assignment(exp.EngineProperty),
    "EXECUTE": lambda self: self._parse_property_assignment(exp.ExecuteAsProperty),
    "EXTERNAL": lambda self: self.expression(exp.ExternalProperty),
    "FALLBACK": lambda self, **kwargs: self._parse_fallback(**kwargs),
    "FORMAT": lambda self: self._parse_property_assignment(exp.FileFormatProperty),
    "FREESPACE": lambda self: self._parse_freespace(),
    "GLOBAL": lambda self: self.expression(exp.GlobalProperty),
    "HEAP": lambda self: self.expression(exp.HeapProperty),
    "ICEBERG": lambda self: self.expression(exp.IcebergProperty),
    "IMMUTABLE": lambda self: self.expression(
        exp.StabilityProperty, this=exp.Literal.string("IMMUTABLE")
    ),
    "INHERITS": lambda self: self.expression(
        exp.InheritsProperty, expressions=self._parse_wrapped_csv(self._parse_table)
    ),
    "INPUT": lambda self: self.expression(exp.InputModelProperty, this=self._parse_schema()),
    "JOURNAL": lambda self, **kwargs: self._parse_journal(**kwargs),
    "LANGUAGE": lambda self: self._parse_property_assignment(exp.LanguageProperty),
    "LAYOUT": lambda self: self._parse_dict_property(this="LAYOUT"),
    "LIFETIME": lambda self: self._parse_dict_range(this="LIFETIME"),
    "LIKE": lambda self: self._parse_create_like(),
    "LOCATION": lambda self: self._parse_property_assignment(exp.LocationProperty),
    "LOCK": lambda self: self._parse_locking(),
    "LOCKING": lambda self: self._parse_locking(),
    "LOG": lambda self, **kwargs: self._parse_log(**kwargs),
    "MATERIALIZED": lambda self: self.expression(exp.MaterializedProperty),
    "MERGEBLOCKRATIO": lambda self, **kwargs: self._parse_mergeblockratio(**kwargs),
    "MODIFIES": lambda self: self._parse_modifies_property(),
    "MULTISET": lambda self: self.expression(exp.SetProperty, multi=True),
    "NO": lambda self: self._parse_no_property(),
    "ON": lambda self: self._parse_on_property(),
    "ORDER BY": lambda self: self._parse_order(skip_order_token=True),
    "OUTPUT": lambda self: self.expression(exp.OutputModelProperty, this=self._parse_schema()),
    "PARTITION": lambda self: self._parse_partitioned_of(),
    "PARTITION BY": lambda self: self._parse_partitioned_by(),
    "PARTITIONED BY": lambda self: self._parse_partitioned_by(),
    "PARTITIONED_BY": lambda self: self._parse_partitioned_by(),
    "PRIMARY KEY": lambda self: self._parse_primary_key(in_props=True),
    "RANGE": lambda self: self._parse_dict_range(this="RANGE"),
    "READS": lambda self: self._parse_reads_property(),
    "REMOTE": lambda self: self._parse_remote_with_connection(),
    "RETURNS": lambda self: self._parse_returns(),
    "ROW": lambda self: self._parse_row(),
    "ROW_FORMAT": lambda self: self._parse_property_assignment(exp.RowFormatProperty),
    "SAMPLE": lambda self: self.expression(
        exp.SampleProperty, this=self._match_text_seq("BY") and self._parse_bitwise()
    ),
    "SET": lambda self: self.expression(exp.SetProperty, multi=False),
    "SETTINGS": lambda self: self.expression(
        exp.SettingsProperty, expressions=self._parse_csv(self._parse_set_item)
    ),
    "SHARING": lambda self: self._parse_property_assignment(exp.SharingProperty),
    "SORTKEY": lambda self: self._parse_sortkey(),
    "SOURCE": lambda self: self._parse_dict_property(this="SOURCE"),
    "STABLE": lambda self: self.expression(
        exp.StabilityProperty, this=exp.Literal.string("STABLE")
    ),
    "STORED": lambda self: self._parse_stored(),
    "SYSTEM_VERSIONING": lambda self: self._parse_system_versioning_property(),
    "TBLPROPERTIES": lambda self: self._parse_wrapped_properties(),
    "TEMP": lambda self: self.expression(exp.TemporaryProperty),
    "TEMPORARY": lambda self: self.expression(exp.TemporaryProperty),
    "TO": lambda self: self._parse_to_table(),
    "TRANSIENT": lambda self: self.expression(exp.TransientProperty),
    "TRANSFORM": lambda self: self.expression(
        exp.TransformModelProperty, expressions=self._parse_wrapped_csv(self._parse_expression)
    ),
    "TTL": lambda self: self._parse_ttl(),
    "USING": lambda self: self._parse_property_assignment(exp.FileFormatProperty),
    "UNLOGGED": lambda self: self.expression(exp.UnloggedProperty),
    "VOLATILE": lambda self: self._parse_volatile_property(),
    "WITH": lambda self: self._parse_with_property(),
}
# Column/table constraint keyword -> parser.
CONSTRAINT_PARSERS = {
    "AUTOINCREMENT": lambda self: self._parse_auto_increment(),
    "AUTO_INCREMENT": lambda self: self._parse_auto_increment(),
    "CASESPECIFIC": lambda self: self.expression(exp.CaseSpecificColumnConstraint, not_=False),
    "CHARACTER SET": lambda self: self.expression(
        exp.CharacterSetColumnConstraint, this=self._parse_var_or_string()
    ),
    "CHECK": lambda self: self.expression(
        exp.CheckColumnConstraint,
        this=self._parse_wrapped(self._parse_conjunction),
        enforced=self._match_text_seq("ENFORCED"),
    ),
    "COLLATE": lambda self: self.expression(
        exp.CollateColumnConstraint, this=self._parse_var()
    ),
    "COMMENT": lambda self: self.expression(
        exp.CommentColumnConstraint, this=self._parse_string()
    ),
    "COMPRESS": lambda self: self._parse_compress(),
    "CLUSTERED": lambda self: self.expression(
        exp.ClusteredColumnConstraint, this=self._parse_wrapped_csv(self._parse_ordered)
    ),
    "NONCLUSTERED": lambda self: self.expression(
        exp.NonClusteredColumnConstraint, this=self._parse_wrapped_csv(self._parse_ordered)
    ),
    "DEFAULT": lambda self: self.expression(
        exp.DefaultColumnConstraint, this=self._parse_bitwise()
    ),
    "ENCODE": lambda self: self.expression(exp.EncodeColumnConstraint, this=self._parse_var()),
    "EXCLUDE": lambda self: self.expression(
        exp.ExcludeColumnConstraint, this=self._parse_index_params()
    ),
    "FOREIGN KEY": lambda self: self._parse_foreign_key(),
    "FORMAT": lambda self: self.expression(
        exp.DateFormatColumnConstraint, this=self._parse_var_or_string()
    ),
    "GENERATED": lambda self: self._parse_generated_as_identity(),
    "IDENTITY": lambda self: self._parse_auto_increment(),
    "INLINE": lambda self: self._parse_inline(),
    "LIKE": lambda self: self._parse_create_like(),
    "NOT": lambda self: self._parse_not_constraint(),
    "NULL": lambda self: self.expression(exp.NotNullColumnConstraint, allow_null=True),
    # ON UPDATE <fn> is a column constraint; a bare ON <id> becomes an OnProperty
    "ON": lambda self: (
        self._match(TokenType.UPDATE)
        and self.expression(exp.OnUpdateColumnConstraint, this=self._parse_function())
    )
    or self.expression(exp.OnProperty, this=self._parse_id_var()),
    "PATH": lambda self: self.expression(exp.PathColumnConstraint, this=self._parse_string()),
    "PERIOD": lambda self: self._parse_period_for_system_time(),
    "PRIMARY KEY": lambda self: self._parse_primary_key(),
    "REFERENCES": lambda self: self._parse_references(match=False),
    "TITLE": lambda self: self.expression(
        exp.TitleColumnConstraint, this=self._parse_var_or_string()
    ),
    "TTL": lambda self: self.expression(exp.MergeTreeTTL, expressions=[self._parse_bitwise()]),
    "UNIQUE": lambda self: self._parse_unique(),
    "UPPERCASE": lambda self: self.expression(exp.UppercaseColumnConstraint),
    "WITH": lambda self: self.expression(
        exp.Properties, expressions=self._parse_wrapped_properties()
    ),
}
# ALTER TABLE action keyword -> parser.
ALTER_PARSERS = {
    "ADD": lambda self: self._parse_alter_table_add(),
    "ALTER": lambda self: self._parse_alter_table_alter(),
    "CLUSTER BY": lambda self: self._parse_cluster(wrapped=True),
    "DELETE": lambda self: self.expression(exp.Delete, where=self._parse_where()),
    "DROP": lambda self: self._parse_alter_table_drop(),
    "RENAME": lambda self: self._parse_alter_table_rename(),
}

# Constraint keywords that may appear in a schema definition without a preceding name.
SCHEMA_UNNAMED_CONSTRAINTS = {
    "CHECK",
    "EXCLUDE",
    "FOREIGN KEY",
    "LIKE",
    "PERIOD",
    "PRIMARY KEY",
    "UNIQUE",
}

# Function-like constructs parsed without parentheses, e.g. CASE ... END.
NO_PAREN_FUNCTION_PARSERS = {
    "ANY": lambda self: self.expression(exp.Any, this=self._parse_bitwise()),
    "CASE": lambda self: self._parse_case(),
    "IF": lambda self: self._parse_if(),
    "NEXT": lambda self: self._parse_next_value_for(),
}

# Tokens that can never be used as a function name.
INVALID_FUNC_NAME_TOKENS = {
    TokenType.IDENTIFIER,
    TokenType.STRING,
}

# Functions whose arguments may carry aliases.
FUNCTIONS_WITH_ALIASED_ARGS = {"STRUCT"}

# Expression shapes treated as key/value definitions.
KEY_VALUE_DEFINITIONS = (exp.Alias, exp.EQ, exp.PropertyEQ, exp.Slice)

# Functions that need bespoke argument parsing (CAST, EXTRACT, TRIM, ...).
FUNCTION_PARSERS = {
    "CAST": lambda self: self._parse_cast(self.STRICT_CAST),
    "CONVERT": lambda self: self._parse_convert(self.STRICT_CAST),
    "DECODE": lambda self: self._parse_decode(),
    "EXTRACT": lambda self: self._parse_extract(),
    "JSON_OBJECT": lambda self: self._parse_json_object(),
    "JSON_OBJECTAGG": lambda self: self._parse_json_object(agg=True),
    "JSON_TABLE": lambda self: self._parse_json_table(),
    "MATCH": lambda self: self._parse_match_against(),
    "OPENJSON": lambda self: self._parse_open_json(),
    "POSITION": lambda self: self._parse_position(),
    "PREDICT": lambda self: self._parse_predict(),
    "SAFE_CAST": lambda self: self._parse_cast(False, safe=True),
    "STRING_AGG": lambda self: self._parse_string_agg(),
    "SUBSTRING": lambda self: self._parse_substring(),
    "TRIM": lambda self: self._parse_trim(),
    "TRY_CAST": lambda self: self._parse_cast(False, safe=True),
    "TRY_CONVERT": lambda self: self._parse_convert(False, safe=True),
}

# Token -> (modifier arg name, parser) for query modifier clauses.
QUERY_MODIFIER_PARSERS = {
    TokenType.MATCH_RECOGNIZE: lambda self: ("match", self._parse_match_recognize()),
    TokenType.PREWHERE: lambda self: ("prewhere", self._parse_prewhere()),
    TokenType.WHERE: lambda self: ("where", self._parse_where()),
    TokenType.GROUP_BY: lambda self: ("group", self._parse_group()),
    TokenType.HAVING: lambda self: ("having", self._parse_having()),
    TokenType.QUALIFY: lambda self: ("qualify", self._parse_qualify()),
    TokenType.WINDOW: lambda self: ("windows", self._parse_window_clause()),
    TokenType.ORDER_BY: lambda self: ("order", self._parse_order()),
    TokenType.LIMIT: lambda self: ("limit", self._parse_limit()),
    TokenType.FETCH: lambda self: ("limit", self._parse_limit()),
    TokenType.OFFSET: lambda self: ("offset", self._parse_offset()),
    TokenType.FOR: lambda self: ("locks", self._parse_locks()),
    TokenType.LOCK: lambda self: ("locks", self._parse_locks()),
    TokenType.TABLE_SAMPLE: lambda self: ("sample", self._parse_table_sample(as_modifier=True)),
    TokenType.USING: lambda self: ("sample", self._parse_table_sample(as_modifier=True)),
    TokenType.CLUSTER_BY: lambda self: (
        "cluster",
        self._parse_sort(exp.Cluster, TokenType.CLUSTER_BY),
    ),
    TokenType.DISTRIBUTE_BY: lambda self: (
        "distribute",
        self._parse_sort(exp.Distribute, TokenType.DISTRIBUTE_BY),
    ),
    TokenType.SORT_BY: lambda self: ("sort", self._parse_sort(exp.Sort, TokenType.SORT_BY)),
    TokenType.CONNECT_BY: lambda self: ("connect", self._parse_connect(skip_start_token=True)),
    TokenType.START_WITH: lambda self: ("connect", self._parse_connect()),
}

# SET statement scopes/variants.
SET_PARSERS = {
    "GLOBAL": lambda self: self._parse_set_item_assignment("GLOBAL"),
    "LOCAL": lambda self: self._parse_set_item_assignment("LOCAL"),
    "SESSION": lambda self: self._parse_set_item_assignment("SESSION"),
    "TRANSACTION": lambda self: self._parse_set_transaction(),
}

# Empty by default.
SHOW_PARSERS: t.Dict[str, t.Callable] = {}

# Handlers that turn a cast literal into a dedicated node, keyed by target type.
TYPE_LITERAL_PARSERS = {
    exp.DataType.Type.JSON: lambda self, this, _: self.expression(exp.ParseJSON, this=this),
}

# Tokens that may introduce the SELECT part of a DDL statement.
DDL_SELECT_TOKENS = {TokenType.SELECT, TokenType.WITH, TokenType.L_PAREN}

# Tokens that may precede VOLATILE in a CREATE statement.
PRE_VOLATILE_TOKENS = {TokenType.CREATE, TokenType.REPLACE, TokenType.UNIQUE}
# Transaction modes accepted after BEGIN, e.g. BEGIN DEFERRED.
TRANSACTION_KIND = {"DEFERRED", "IMMEDIATE", "EXCLUSIVE"}

# Word sequences accepted as SET TRANSACTION characteristics, matched
# word-by-word via _parse_var_from_options.
# Fix: the standard isolation level keyword is spelled UNCOMMITTED (double M),
# the previous "UNCOMITTED" typo made "READ UNCOMMITTED" unparseable.
TRANSACTION_CHARACTERISTICS: OPTIONS_TYPE = {
    "ISOLATION": (
        ("LEVEL", "REPEATABLE", "READ"),
        ("LEVEL", "READ", "COMMITTED"),
        ("LEVEL", "READ", "UNCOMMITTED"),
        ("LEVEL", "SERIALIZABLE"),
    ),
    "READ": ("WRITE", "ONLY"),
}
# Conflict resolution keywords; DO additionally accepts NOTHING / UPDATE.
CONFLICT_ACTIONS: OPTIONS_TYPE = dict.fromkeys(
    ("ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK", "UPDATE"), tuple()
)
CONFLICT_ACTIONS["DO"] = ("NOTHING", "UPDATE")

# CREATE SEQUENCE option keywords and the follow-up words each one accepts.
CREATE_SEQUENCE: OPTIONS_TYPE = {
    "SCALE": ("EXTEND", "NOEXTEND"),
    "SHARD": ("EXTEND", "NOEXTEND"),
    "NO": ("CYCLE", "CACHE", "MAXVALUE", "MINVALUE"),
    **dict.fromkeys(
        (
            "SESSION",
            "GLOBAL",
            "KEEP",
            "NOKEEP",
            "ORDER",
            "NOORDER",
            "NOCACHE",
            "CYCLE",
            "NOCYCLE",
            "NOMINVALUE",
            "NOMAXVALUE",
            "NOSCALE",
            "NOSHARD",
        ),
        tuple(),
    ),
}

# Object kinds accepted by USE, e.g. USE WAREHOUSE x (see STATEMENT_PARSERS).
USABLES: OPTIONS_TYPE = dict.fromkeys(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA"), tuple())

CAST_ACTIONS: OPTIONS_TYPE = dict.fromkeys(("RENAME", "ADD"), ("FIELDS",))

INSERT_ALTERNATIVES = {"ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"}

CLONE_KEYWORDS = {"CLONE", "COPY"}
HISTORICAL_DATA_KIND = {"TIMESTAMP", "OFFSET", "STATEMENT", "STREAM"}

OPCLASS_FOLLOW_KEYWORDS = {"ASC", "DESC", "NULLS", "WITH"}
OPTYPE_FOLLOW_TOKENS = {TokenType.COMMA, TokenType.R_PAREN}

TABLE_INDEX_HINT_TOKENS = {TokenType.FORCE, TokenType.IGNORE, TokenType.USE}

# Identifier subsets derived from ID_VAR_TOKENS for specific clause positions.
WINDOW_ALIAS_TOKENS = ID_VAR_TOKENS - {TokenType.ROWS}
WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER}
WINDOW_SIDES = {"FOLLOWING", "PRECEDING"}

JSON_KEY_VALUE_SEPARATOR_TOKENS = {TokenType.COLON, TokenType.COMMA, TokenType.IS}

FETCH_TOKENS = ID_VAR_TOKENS - {TokenType.ROW, TokenType.ROWS, TokenType.PERCENT}

ADD_CONSTRAINT_TOKENS = {TokenType.CONSTRAINT, TokenType.PRIMARY_KEY, TokenType.FOREIGN_KEY}

DISTINCT_TOKENS = {TokenType.DISTINCT}

NULL_TOKENS = {TokenType.NULL}

UNNEST_OFFSET_ALIAS_TOKENS = ID_VAR_TOKENS - SET_OPERATIONS

# Whether "::" produces exp.Cast (True) or exp.TryCast (False); see COLUMN_OPERATORS.
STRICT_CAST = True

PREFIXED_PIVOT_COLUMNS = False
IDENTIFY_PIVOT_STRINGS = False

LOG_DEFAULTS_TO_LN = False

# Whether ADD is present for each column added by ALTER TABLE
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = True

# Whether the table sample clause expects CSV syntax
TABLESAMPLE_CSV = False

# Whether the SET command needs a delimiter (e.g. "=") for assignments
SET_REQUIRES_ASSIGNMENT_DELIMITER = True

# Whether the TRIM function expects the characters to trim as its first argument
TRIM_PATTERN_FIRST = False

# Whether string aliases are supported `SELECT COUNT(*) 'count'`
STRING_ALIASES = False

# Whether query modifiers such as LIMIT are attached to the UNION node (vs its right operand)
MODIFIERS_ATTACHED_TO_UNION = True
UNION_MODIFIERS = {"order", "limit", "offset"}

# Whether to parse IF statements that aren't followed by a left parenthesis as commands
NO_PAREN_IF_COMMANDS = True

# Whether the -> and ->> operators expect documents of type JSON (e.g. Postgres)
JSON_ARROWS_REQUIRE_JSON_TYPE = False

# Whether or not a VALUES keyword needs to be followed by '(' to form a VALUES clause.
# If this is True and '(' is not found, the keyword will be treated as an identifier
VALUES_FOLLOWED_BY_PAREN = True

# Whether implicit unnesting is supported, e.g. SELECT 1 FROM y.z AS z, z.a (Redshift)
SUPPORTS_IMPLICIT_UNNEST = False
# Fixed attribute set for parser instances (no per-instance __dict__);
# mirrors exactly what __init__/reset assign.
__slots__ = (
    "error_level",
    "error_message_context",
    "max_errors",
    "dialect",
    "sql",
    "errors",
    "_tokens",
    "_index",
    "_curr",
    "_next",
    "_prev",
    "_prev_comments",
)

# Autofilled keyword tries (left empty here; filled in elsewhere).
SHOW_TRIE: t.Dict = {}
SET_TRIE: t.Dict = {}
def __init__(
    self,
    error_level: t.Optional[ErrorLevel] = None,
    error_message_context: int = 100,
    max_errors: int = 3,
    dialect: DialectType = None,
):
    """
    Args:
        error_level: How to report errors; defaults to ErrorLevel.IMMEDIATE.
        error_message_context: Number of characters of SQL context captured around errors.
        max_errors: Cap on the number of messages included in a raised ParseError.
        dialect: The dialect (name or instance) resolved via Dialect.get_or_raise.
    """
    # Imported locally — presumably to avoid a circular import at module load; confirm.
    from sqlglot.dialects import Dialect

    self.dialect = Dialect.get_or_raise(dialect)
    self.error_level = error_level or ErrorLevel.IMMEDIATE
    self.error_message_context = error_message_context
    self.max_errors = max_errors
    self.reset()
def reset(self):
    """Clear all accumulated parsing state so this instance can be reused."""
    self.sql = ""
    self.errors = []
    self._tokens = []
    self._index = 0
    # No token has been consumed yet, so every cursor view starts out empty.
    self._curr = self._next = self._prev = self._prev_comments = None
def parse(
    self, raw_tokens: t.List[Token], sql: t.Optional[str] = None
) -> t.List[t.Optional[exp.Expression]]:
    """
    Parse a token list into syntax trees, producing one tree per SQL statement.

    Args:
        raw_tokens: The list of tokens.
        sql: The original SQL string, used to produce helpful debug messages.

    Returns:
        The list of the produced syntax trees.
    """
    statement_parser = self.__class__._parse_statement
    return self._parse(parse_method=statement_parser, raw_tokens=raw_tokens, sql=sql)
def parse_into(
    self,
    expression_types: exp.IntoType,
    raw_tokens: t.List[Token],
    sql: t.Optional[str] = None,
) -> t.List[t.Optional[exp.Expression]]:
    """
    Parse a token list into one of the given Expression types, trying each type
    in order and returning the first successful result.

    Args:
        expression_types: The expression type(s) to try and parse the token list into.
        raw_tokens: The list of tokens.
        sql: The original SQL string, used to produce helpful debug messages.

    Returns:
        The target Expression.

    Raises:
        TypeError: If a requested type has no registered parser.
        ParseError: If every candidate type fails to parse.
    """
    collected = []

    for expression_type in ensure_list(expression_types):
        parser = self.EXPRESSION_PARSERS.get(expression_type)
        if not parser:
            raise TypeError(f"No parser registered for {expression_type}")

        try:
            return self._parse(parser, raw_tokens, sql)
        except ParseError as e:
            # Tag the failure with the type that was being attempted.
            e.errors[0]["into_expression"] = expression_type
            collected.append(e)

    raise ParseError(
        f"Failed to parse '{sql or raw_tokens}' into {expression_types}",
        errors=merge_errors(collected),
    ) from collected[-1]
def _parse(
    self,
    parse_method: t.Callable[[Parser], t.Optional[exp.Expression]],
    raw_tokens: t.List[Token],
    sql: t.Optional[str] = None,
) -> t.List[t.Optional[exp.Expression]]:
    """Split the token stream on semicolons and apply `parse_method` to each
    resulting chunk, returning one expression (or None) per chunk."""
    self.reset()
    self.sql = sql or ""
    total = len(raw_tokens)
    chunks: t.List[t.List[Token]] = [[]]
    for i, token in enumerate(raw_tokens):
        if token.token_type == TokenType.SEMICOLON:
            # Semicolons are separators and are never kept; a trailing
            # semicolon does not open a new (empty) chunk.
            if i < total - 1:
                chunks.append([])
        else:
            chunks[-1].append(token)
    expressions = []
    for tokens in chunks:
        # Start at -1 so the first _advance() lands on index 0.
        self._index = -1
        self._tokens = tokens
        self._advance()
        expressions.append(parse_method(self))
        # Leftover tokens mean the parser stopped before consuming the chunk.
        if self._index < len(self._tokens):
            self.raise_error("Invalid expression / Unexpected token")
        self.check_errors()
    return expressions
def check_errors(self) -> None:
    """Logs or raises any found errors, depending on the chosen error level setting."""
    level = self.error_level

    if level == ErrorLevel.WARN:
        for error in self.errors:
            logger.error(str(error))
        return

    if level == ErrorLevel.RAISE and self.errors:
        raise ParseError(
            concat_messages(self.errors, self.max_errors),
            errors=merge_errors(self.errors),
        )
def raise_error(self, message: str, token: t.Optional[Token] = None) -> None:
    """
    Appends an error in the list of recorded errors or raises it, depending on the chosen
    error level setting.
    """
    # Anchor the error on the given token, else the current/previous cursor token.
    token = token or self._curr or self._prev or Token.string("")
    start = token.start
    end = token.end + 1
    start_context = self.sql[max(start - self.error_message_context, 0) : start]
    highlight = self.sql[start:end]
    end_context = self.sql[end : end + self.error_message_context]
    error = ParseError.new(
        # \033[4m / \033[0m underline the offending span in ANSI terminals.
        f"{message}. Line {token.line}, Col: {token.col}.\n"
        f"  {start_context}\033[4m{highlight}\033[0m{end_context}",
        description=message,
        line=token.line,
        col=token.col,
        start_context=start_context,
        highlight=highlight,
        end_context=end_context,
    )
    if self.error_level == ErrorLevel.IMMEDIATE:
        raise error
    self.errors.append(error)
def expression(
    self, exp_class: t.Type[E], comments: t.Optional[t.List[str]] = None, **kwargs
) -> E:
    """
    Instantiate and validate a new Expression node.

    Args:
        exp_class: The expression class to instantiate.
        comments: An optional list of comments to attach to the expression.
        kwargs: The arguments to set for the expression along with their respective values.

    Returns:
        The target expression.
    """
    node = exp_class(**kwargs)

    if comments:
        node.add_comments(comments)
    else:
        # No explicit comments: attach any pending token comments instead.
        self._add_comments(node)

    return self.validate_expression(node)
def _add_comments(self, expression: t.Optional[exp.Expression]) -> None:
    """Transfer any pending token comments onto *expression*, consuming them."""
    pending = self._prev_comments
    if expression and pending:
        expression.add_comments(pending)
        self._prev_comments = None
def validate_expression(self, expression: E, args: t.Optional[t.List] = None) -> E:
    """
    Validate an Expression, recording an error for every missing mandatory argument.

    Args:
        expression: The expression to validate.
        args: An optional list of items that was used to instantiate the expression, if it's a Func.

    Returns:
        The validated expression.
    """
    if self.error_level == ErrorLevel.IGNORE:
        return expression

    for error_message in expression.error_messages(args):
        self.raise_error(error_message)

    return expression
def _find_sql(self, start: Token, end: Token) -> str:
    """Return the slice of the original SQL text spanning *start* through *end*."""
    lo = start.start
    hi = end.end + 1
    return self.sql[lo:hi]
def _is_connected(self) -> bool:
return self._prev and self._curr and self._prev.end + 1 == self._curr.start
def _advance(self, times: int = 1) -> None:
    """Move the cursor *times* tokens forward and refresh the _curr/_next/_prev views."""
    self._index += times
    index = self._index

    self._curr = seq_get(self._tokens, index)
    self._next = seq_get(self._tokens, index + 1)

    if index > 0:
        previous = self._tokens[index - 1]
        self._prev = previous
        self._prev_comments = previous.comments
    else:
        # At the very start there is no previous token.
        self._prev = None
        self._prev_comments = None
def _retreat(self, index: int) -> None:
if index != self._index:
self._advance(index - self._index)
def _warn_unsupported(self) -> None:
    """Warn that the current chunk will be parsed as a generic Command."""
    tokens = self._tokens
    if len(tokens) <= 1:
        return

    # We use _find_sql because self.sql may comprise multiple chunks, and we're only
    # interested in emitting a warning for the one being currently processed.
    snippet = self._find_sql(tokens[0], tokens[-1])[: self.error_message_context]

    logger.warning(
        f"'{snippet}' contains unsupported syntax. Falling back to parsing as a 'Command'."
    )
def _parse_command(self) -> exp.Command:
    """Wrap the statement in a generic Command node, warning about the fallback."""
    self._warn_unsupported()
    command_name = self._prev.text.upper()
    return self.expression(exp.Command, this=command_name, expression=self._parse_string())
def _parse_comment(self, allow_exists: bool = True) -> exp.Expression:
    """Parse a COMMENT [IF EXISTS] ON <kind> <target> IS <string> statement."""
    start = self._prev
    exists = self._parse_exists() if allow_exists else None
    self._match(TokenType.ON)
    # The target kind must be a creatable object (TABLE, COLUMN, FUNCTION, ...).
    kind = self._match_set(self.CREATABLES) and self._prev
    if not kind:
        # Unknown target kind: fall back to parsing the whole statement as a command.
        return self._parse_as_command(start)
    if kind.token_type in (TokenType.FUNCTION, TokenType.PROCEDURE):
        this = self._parse_user_defined_function(kind=kind.token_type)
    elif kind.token_type == TokenType.TABLE:
        this = self._parse_table(alias_tokens=self.COMMENT_TABLE_ALIAS_TOKENS)
    elif kind.token_type == TokenType.COLUMN:
        this = self._parse_column()
    else:
        this = self._parse_id_var()
    self._match(TokenType.IS)
    return self.expression(
        exp.Comment, this=this, kind=kind.text, expression=self._parse_string(), exists=exists
    )
def _parse_to_table(
    self,
) -> exp.ToTableProperty:
    """Parse the table reference of a TO clause into a ToTableProperty."""
    return self.expression(
        exp.ToTableProperty, this=self._parse_table_parts(schema=True)
    )
# https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#mergetree-table-ttl
def _parse_ttl(self) -> exp.Expression:
    """Parse a ClickHouse MergeTree TTL clause: a CSV list of TTL expressions with
    optional actions (DELETE / RECOMPRESS / TO DISK / TO VOLUME), followed by
    optional WHERE, GROUP BY and SET aggregation parts."""

    def _parse_ttl_action() -> t.Optional[exp.Expression]:
        # One TTL expression, optionally followed by an action keyword.
        this = self._parse_bitwise()
        if self._match_text_seq("DELETE"):
            return self.expression(exp.MergeTreeTTLAction, this=this, delete=True)
        if self._match_text_seq("RECOMPRESS"):
            return self.expression(
                exp.MergeTreeTTLAction, this=this, recompress=self._parse_bitwise()
            )
        if self._match_text_seq("TO", "DISK"):
            return self.expression(
                exp.MergeTreeTTLAction, this=this, to_disk=self._parse_string()
            )
        if self._match_text_seq("TO", "VOLUME"):
            return self.expression(
                exp.MergeTreeTTLAction, this=this, to_volume=self._parse_string()
            )
        # No action keyword: the bare expression is the TTL itself.
        return this

    expressions = self._parse_csv(_parse_ttl_action)
    where = self._parse_where()
    group = self._parse_group()
    aggregates = None
    # SET aggregations are only parsed when a GROUP BY was present.
    if group and self._match(TokenType.SET):
        aggregates = self._parse_csv(self._parse_set_item)
    return self.expression(
        exp.MergeTreeTTL,
        expressions=expressions,
        where=where,
        group=group,
        aggregates=aggregates,
    )
    def _parse_statement(self) -> t.Optional[exp.Expression]:
        """Parse one statement, or return None at end of input.

        Tries, in order: a registered statement parser, a raw command keyword,
        and finally a bare expression / SELECT with trailing query modifiers.
        """
        if self._curr is None:
            return None

        if self._match_set(self.STATEMENT_PARSERS):
            return self.STATEMENT_PARSERS[self._prev.token_type](self)

        if self._match_set(Tokenizer.COMMANDS):
            return self._parse_command()

        expression = self._parse_expression()
        expression = self._parse_set_operations(expression) if expression else self._parse_select()
        return self._parse_query_modifiers(expression)
    def _parse_drop(self, exists: bool = False) -> exp.Drop | exp.Command:
        """Parse a DROP statement; fall back to a Command for unknown kinds.

        Args:
            exists: whether the caller already consumed an IF EXISTS clause.
        """
        start = self._prev
        temporary = self._match(TokenType.TEMPORARY)
        materialized = self._match_text_seq("MATERIALIZED")

        kind = self._match_set(self.CREATABLES) and self._prev.text
        if not kind:
            return self._parse_as_command(start)

        if_exists = exists or self._parse_exists()
        table = self._parse_table_parts(
            schema=True, is_db_reference=self._prev.token_type == TokenType.SCHEMA
        )

        # Optional wrapped type list following the dropped object's name
        if self._match(TokenType.L_PAREN, advance=False):
            expressions = self._parse_wrapped_csv(self._parse_types)
        else:
            expressions = None

        # NOTE: the trailing kwargs consume CASCADE/CONSTRAINTS/PURGE in this order
        return self.expression(
            exp.Drop,
            comments=start.comments,
            exists=if_exists,
            this=table,
            expressions=expressions,
            kind=kind,
            temporary=temporary,
            materialized=materialized,
            cascade=self._match_text_seq("CASCADE"),
            constraints=self._match_text_seq("CONSTRAINTS"),
            purge=self._match_text_seq("PURGE"),
        )
def _parse_exists(self, not_: bool = False) -> t.Optional[bool]:
return (
self._match_text_seq("IF")
and (not not_ or self._match(TokenType.NOT))
and self._match(TokenType.EXISTS)
)
    def _parse_create(self) -> exp.Create | exp.Command:
        """Parse CREATE [OR REPLACE|OR ALTER] [UNIQUE] <kind> ... into exp.Create.

        Handles UDF/procedure bodies, indexes, and all DB creatables (tables,
        views, schemas, sequences, ...), collecting properties from every
        position they may appear in. Falls back to a raw Command when the
        creatable kind is unknown or trailing tokens remain unparsed.
        """
        # Note: this can't be None because we've matched a statement parser
        start = self._prev
        comments = self._prev_comments

        replace = (
            start.token_type == TokenType.REPLACE
            or self._match_pair(TokenType.OR, TokenType.REPLACE)
            or self._match_pair(TokenType.OR, TokenType.ALTER)
        )

        unique = self._match(TokenType.UNIQUE)

        # CREATE TABLE FUNCTION: skip the TABLE token so FUNCTION becomes the kind
        if self._match_pair(TokenType.TABLE, TokenType.FUNCTION, advance=False):
            self._advance()

        properties = None
        create_token = self._match_set(self.CREATABLES) and self._prev

        if not create_token:
            # exp.Properties.Location.POST_CREATE
            properties = self._parse_properties()
            create_token = self._match_set(self.CREATABLES) and self._prev

            if not properties or not create_token:
                return self._parse_as_command(start)

        exists = self._parse_exists(not_=True)
        this = None
        expression: t.Optional[exp.Expression] = None
        indexes = None
        no_schema_binding = None
        begin = None
        end = None
        clone = None

        def extend_props(temp_props: t.Optional[exp.Properties]) -> None:
            # Accumulate properties parsed at different clause positions.
            nonlocal properties
            if properties and temp_props:
                properties.expressions.extend(temp_props.expressions)
            elif temp_props:
                properties = temp_props

        if create_token.token_type in (TokenType.FUNCTION, TokenType.PROCEDURE):
            this = self._parse_user_defined_function(kind=create_token.token_type)

            # exp.Properties.Location.POST_SCHEMA ("schema" here is the UDF's type signature)
            extend_props(self._parse_properties())

            expression = self._match(TokenType.ALIAS) and self._parse_heredoc()

            if not expression:
                if self._match(TokenType.COMMAND):
                    expression = self._parse_as_command(self._prev)
                else:
                    begin = self._match(TokenType.BEGIN)
                    return_ = self._match_text_seq("RETURN")

                    if self._match(TokenType.STRING, advance=False):
                        # Takes care of BigQuery's JavaScript UDF definitions that end in an OPTIONS property
                        # # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_function_statement
                        expression = self._parse_string()
                        extend_props(self._parse_properties())
                    else:
                        expression = self._parse_statement()

                    end = self._match_text_seq("END")

                    if return_:
                        expression = self.expression(exp.Return, this=expression)
        elif create_token.token_type == TokenType.INDEX:
            this = self._parse_index(index=self._parse_id_var())
        elif create_token.token_type in self.DB_CREATABLES:
            table_parts = self._parse_table_parts(
                schema=True, is_db_reference=create_token.token_type == TokenType.SCHEMA
            )

            # exp.Properties.Location.POST_NAME
            self._match(TokenType.COMMA)
            extend_props(self._parse_properties(before=True))

            this = self._parse_schema(this=table_parts)

            # exp.Properties.Location.POST_SCHEMA and POST_WITH
            extend_props(self._parse_properties())

            self._match(TokenType.ALIAS)
            if not self._match_set(self.DDL_SELECT_TOKENS, advance=False):
                # exp.Properties.Location.POST_ALIAS
                extend_props(self._parse_properties())

            if create_token.token_type == TokenType.SEQUENCE:
                expression = self._parse_types()
                extend_props(self._parse_properties())
            else:
                expression = self._parse_ddl_select()

            if create_token.token_type == TokenType.TABLE:
                # exp.Properties.Location.POST_EXPRESSION
                extend_props(self._parse_properties())

                indexes = []
                while True:
                    index = self._parse_index()

                    # exp.Properties.Location.POST_INDEX
                    extend_props(self._parse_properties())

                    if not index:
                        break
                    else:
                        self._match(TokenType.COMMA)
                        indexes.append(index)
            elif create_token.token_type == TokenType.VIEW:
                if self._match_text_seq("WITH", "NO", "SCHEMA", "BINDING"):
                    no_schema_binding = True

            shallow = self._match_text_seq("SHALLOW")

            if self._match_texts(self.CLONE_KEYWORDS):
                copy = self._prev.text.lower() == "copy"
                clone = self.expression(
                    exp.Clone, this=self._parse_table(schema=True), shallow=shallow, copy=copy
                )

        # Anything left over means we couldn't fully parse the statement
        if self._curr:
            return self._parse_as_command(start)

        return self.expression(
            exp.Create,
            comments=comments,
            this=this,
            kind=create_token.text.upper(),
            replace=replace,
            unique=unique,
            expression=expression,
            exists=exists,
            properties=properties,
            indexes=indexes,
            no_schema_binding=no_schema_binding,
            begin=begin,
            end=end,
            clone=clone,
        )
    def _parse_sequence_properties(self) -> t.Optional[exp.SequenceProperties]:
        """Parse sequence options (INCREMENT BY, MINVALUE, CACHE, OWNED BY, ...).

        Returns None when no token was consumed, so callers can detect
        "no sequence properties present".
        """
        seq = exp.SequenceProperties()

        options = []
        index = self._index

        while self._curr:
            if self._match_text_seq("INCREMENT"):
                self._match_text_seq("BY")
                self._match_text_seq("=")
                seq.set("increment", self._parse_term())
            elif self._match_text_seq("MINVALUE"):
                seq.set("minvalue", self._parse_term())
            elif self._match_text_seq("MAXVALUE"):
                seq.set("maxvalue", self._parse_term())
            elif self._match(TokenType.START_WITH) or self._match_text_seq("START"):
                self._match_text_seq("=")
                seq.set("start", self._parse_term())
            elif self._match_text_seq("CACHE"):
                # T-SQL allows empty CACHE which is initialized dynamically
                seq.set("cache", self._parse_number() or True)
            elif self._match_text_seq("OWNED", "BY"):
                # "OWNED BY NONE" is the default
                seq.set("owned", None if self._match_text_seq("NONE") else self._parse_column())
            else:
                # Unrecognized keyword: try the generic option list, else stop
                opt = self._parse_var_from_options(self.CREATE_SEQUENCE, raise_unmatched=False)
                if opt:
                    options.append(opt)
                else:
                    break

        seq.set("options", options if options else None)
        return None if self._index == index else seq
    def _parse_property_before(self) -> t.Optional[exp.Expression]:
        """Parse a property that appears before the CREATE target's name.

        Collects leading modifier keywords (NO, DUAL, BEFORE, ...) and passes
        the truthy ones to the matched property parser as keyword arguments.
        """
        # only used for teradata currently
        self._match(TokenType.COMMA)

        kwargs = {
            "no": self._match_text_seq("NO"),
            "dual": self._match_text_seq("DUAL"),
            "before": self._match_text_seq("BEFORE"),
            "default": self._match_text_seq("DEFAULT"),
            "local": (self._match_text_seq("LOCAL") and "LOCAL")
            or (self._match_text_seq("NOT", "LOCAL") and "NOT LOCAL"),
            "after": self._match_text_seq("AFTER"),
            "minimum": self._match_texts(("MIN", "MINIMUM")),
            "maximum": self._match_texts(("MAX", "MAXIMUM")),
        }

        if self._match_texts(self.PROPERTY_PARSERS):
            parser = self.PROPERTY_PARSERS[self._prev.text.upper()]
            try:
                return parser(self, **{k: v for k, v in kwargs.items() if v})
            except TypeError:
                # Parser doesn't accept the collected modifiers for this property
                self.raise_error(f"Cannot parse property '{self._prev.text}'")

        return None
    def _parse_wrapped_properties(self) -> t.List[exp.Expression]:
        """Parse a parenthesized, comma-separated list of properties."""
        return self._parse_wrapped_csv(self._parse_property)
    def _parse_property(self) -> t.Optional[exp.Expression]:
        """Parse a single property, trying keyword parsers before key = value."""
        if self._match_texts(self.PROPERTY_PARSERS):
            return self.PROPERTY_PARSERS[self._prev.text.upper()](self)

        if self._match(TokenType.DEFAULT) and self._match_texts(self.PROPERTY_PARSERS):
            return self.PROPERTY_PARSERS[self._prev.text.upper()](self, default=True)

        if self._match_text_seq("COMPOUND", "SORTKEY"):
            return self._parse_sortkey(compound=True)

        if self._match_text_seq("SQL", "SECURITY"):
            return self.expression(exp.SqlSecurityProperty, definer=self._match_text_seq("DEFINER"))

        index = self._index
        key = self._parse_column()

        # Not a generic `key = value` pair: rewind and try sequence options instead
        if not self._match(TokenType.EQ):
            self._retreat(index)
            return self._parse_sequence_properties()

        return self.expression(
            exp.Property,
            this=key.to_dot() if isinstance(key, exp.Column) else key,
            value=self._parse_column() or self._parse_var(any_token=True),
        )
    def _parse_stored(self) -> exp.FileFormatProperty:
        """Parse STORED [AS] <format>, or the INPUTFORMAT/OUTPUTFORMAT pair form."""
        self._match(TokenType.ALIAS)

        input_format = self._parse_string() if self._match_text_seq("INPUTFORMAT") else None
        output_format = self._parse_string() if self._match_text_seq("OUTPUTFORMAT") else None

        return self.expression(
            exp.FileFormatProperty,
            this=(
                self.expression(
                    exp.InputOutputFormat, input_format=input_format, output_format=output_format
                )
                if input_format or output_format
                else self._parse_var_or_string() or self._parse_number() or self._parse_id_var()
            ),
        )
    def _parse_property_assignment(self, exp_class: t.Type[E], **kwargs: t.Any) -> E:
        """Parse an optional `=` / `AS` followed by a field, wrapped in `exp_class`."""
        self._match(TokenType.EQ)
        self._match(TokenType.ALIAS)

        return self.expression(exp_class, this=self._parse_field(), **kwargs)
def _parse_properties(self, before: t.Optional[bool] = None) -> t.Optional[exp.Properties]:
properties = []
while True:
if before:
prop = self._parse_property_before()
else:
prop = self._parse_property()
if not prop:
break
for p in ensure_list(prop):
properties.append(p)
if properties:
return self.expression(exp.Properties, expressions=properties)
return None
def _parse_fallback(self, no: bool = False) -> exp.FallbackProperty:
return self.expression(
exp.FallbackProperty, no=no, protection=self._match_text_seq("PROTECTION")
)
def _parse_volatile_property(self) -> exp.VolatileProperty | exp.StabilityProperty:
if self._index >= 2:
pre_volatile_token = self._tokens[self._index - 2]
else:
pre_volatile_token = None
if pre_volatile_token and pre_volatile_token.token_type in self.PRE_VOLATILE_TOKENS:
return exp.VolatileProperty()
return self.expression(exp.StabilityProperty, this=exp.Literal.string("VOLATILE"))
    def _parse_system_versioning_property(self) -> exp.WithSystemVersioningProperty:
        """Parse SYSTEM_VERSIONING = ON [(HISTORY_TABLE = t [, DATA_CONSISTENCY_CHECK = x])]."""
        self._match_pair(TokenType.EQ, TokenType.ON)
        prop = self.expression(exp.WithSystemVersioningProperty)
        if self._match(TokenType.L_PAREN):
            self._match_text_seq("HISTORY_TABLE", "=")
            prop.set("this", self._parse_table_parts())

            if self._match(TokenType.COMMA):
                self._match_text_seq("DATA_CONSISTENCY_CHECK", "=")
                prop.set("expression", self._advance_any() and self._prev.text.upper())

            self._match_r_paren()

        return prop
    def _parse_with_property(
        self,
    ) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
        """Dispatch the various WITH <...> property forms (wrapped list, JOURNAL, DATA, ...)."""
        if self._match(TokenType.L_PAREN, advance=False):
            return self._parse_wrapped_properties()

        if self._match_text_seq("JOURNAL"):
            return self._parse_withjournaltable()

        if self._match_text_seq("DATA"):
            return self._parse_withdata(no=False)
        elif self._match_text_seq("NO", "DATA"):
            return self._parse_withdata(no=True)

        if not self._next:
            return None

        return self._parse_withisolatedloading()
# https://dev.mysql.com/doc/refman/8.0/en/create-view.html
    def _parse_definer(self) -> t.Optional[exp.DefinerProperty]:
        """Parse DEFINER = user@host; returns None if either part is missing."""
        self._match(TokenType.EQ)

        user = self._parse_id_var()
        self._match(TokenType.PARAMETER)
        # Host may be an identifier, or a bare `%` token (MOD) meaning "any host"
        host = self._parse_id_var() or (self._match(TokenType.MOD) and self._prev.text)

        if not user or not host:
            return None

        return exp.DefinerProperty(this=f"{user}@{host}")
def _parse_withjournaltable(self) -> exp.WithJournalTableProperty:
self._match(TokenType.TABLE)
self._match(TokenType.EQ)
return self.expression(exp.WithJournalTableProperty, this=self._parse_table_parts())
    def _parse_log(self, no: bool = False) -> exp.LogProperty:
        """Build a [NO] LOG property; the keyword itself was already consumed."""
        return self.expression(exp.LogProperty, no=no)
    def _parse_journal(self, **kwargs) -> exp.JournalProperty:
        """Build a JOURNAL property from the modifiers collected by the caller."""
        return self.expression(exp.JournalProperty, **kwargs)
def _parse_checksum(self) -> exp.ChecksumProperty:
self._match(TokenType.EQ)
on = None
if self._match(TokenType.ON):
on = True
elif self._match_text_seq("OFF"):
on = False
return self.expression(exp.ChecksumProperty, on=on, default=self._match(TokenType.DEFAULT))
def _parse_cluster(self, wrapped: bool = False) -> exp.Cluster:
return self.expression(
exp.Cluster,
expressions=(
self._parse_wrapped_csv(self._parse_ordered)
if wrapped
else self._parse_csv(self._parse_ordered)
),
)
    def _parse_clustered_by(self) -> exp.ClusteredByProperty:
        """Parse CLUSTERED BY (cols) [SORTED BY (ordered cols)] INTO <n> BUCKETS."""
        self._match_text_seq("BY")

        self._match_l_paren()
        expressions = self._parse_csv(self._parse_column)
        self._match_r_paren()

        if self._match_text_seq("SORTED", "BY"):
            self._match_l_paren()
            sorted_by = self._parse_csv(self._parse_ordered)
            self._match_r_paren()
        else:
            sorted_by = None

        self._match(TokenType.INTO)
        buckets = self._parse_number()
        self._match_text_seq("BUCKETS")

        return self.expression(
            exp.ClusteredByProperty,
            expressions=expressions,
            sorted_by=sorted_by,
            buckets=buckets,
        )
def _parse_copy_property(self) -> t.Optional[exp.CopyGrantsProperty]:
if not self._match_text_seq("GRANTS"):
self._retreat(self._index - 1)
return None
return self.expression(exp.CopyGrantsProperty)
    def _parse_freespace(self) -> exp.FreespaceProperty:
        """Parse FREESPACE [=] <number> [PERCENT]."""
        self._match(TokenType.EQ)
        return self.expression(
            exp.FreespaceProperty, this=self._parse_number(), percent=self._match(TokenType.PERCENT)
        )
    def _parse_mergeblockratio(
        self, no: bool = False, default: bool = False
    ) -> exp.MergeBlockRatioProperty:
        """Parse MERGEBLOCKRATIO: `= <n> [PERCENT]` or the bare [NO|DEFAULT] form."""
        if self._match(TokenType.EQ):
            return self.expression(
                exp.MergeBlockRatioProperty,
                this=self._parse_number(),
                percent=self._match(TokenType.PERCENT),
            )

        return self.expression(exp.MergeBlockRatioProperty, no=no, default=default)
def _parse_datablocksize(
self,
default: t.Optional[bool] = None,
minimum: t.Optional[bool] = None,
maximum: t.Optional[bool] = None,
) -> exp.DataBlocksizeProperty:
self._match(TokenType.EQ)
size = self._parse_number()
units = None
if self._match_texts(("BYTES", "KBYTES", "KILOBYTES")):
units = self._prev.text
return self.expression(
exp.DataBlocksizeProperty,
size=size,
units=units,
default=default,
minimum=minimum,
maximum=maximum,
)
    def _parse_blockcompression(self) -> exp.BlockCompressionProperty:
        """Parse BLOCKCOMPRESSION [=] ALWAYS|MANUAL|NEVER|DEFAULT|AUTOTEMP(...)."""
        self._match(TokenType.EQ)
        always = self._match_text_seq("ALWAYS")
        manual = self._match_text_seq("MANUAL")
        never = self._match_text_seq("NEVER")
        default = self._match_text_seq("DEFAULT")

        autotemp = None
        if self._match_text_seq("AUTOTEMP"):
            autotemp = self._parse_schema()

        return self.expression(
            exp.BlockCompressionProperty,
            always=always,
            manual=manual,
            never=never,
            default=default,
            autotemp=autotemp,
        )
    def _parse_withisolatedloading(self) -> exp.IsolatedLoadingProperty:
        """Parse WITH [NO] [CONCURRENT] ISOLATED LOADING [FOR ALL|INSERT|NONE]."""
        no = self._match_text_seq("NO")
        concurrent = self._match_text_seq("CONCURRENT")
        self._match_text_seq("ISOLATED", "LOADING")
        for_all = self._match_text_seq("FOR", "ALL")
        for_insert = self._match_text_seq("FOR", "INSERT")
        for_none = self._match_text_seq("FOR", "NONE")
        return self.expression(
            exp.IsolatedLoadingProperty,
            no=no,
            concurrent=concurrent,
            for_all=for_all,
            for_insert=for_insert,
            for_none=for_none,
        )
    def _parse_locking(self) -> exp.LockingProperty:
        """Parse a LOCKING clause: kind, target, FOR/IN, lock type, and OVERRIDE."""
        if self._match(TokenType.TABLE):
            kind = "TABLE"
        elif self._match(TokenType.VIEW):
            kind = "VIEW"
        elif self._match(TokenType.ROW):
            kind = "ROW"
        elif self._match_text_seq("DATABASE"):
            kind = "DATABASE"
        else:
            kind = None

        # Only named objects (not ROW) carry a target
        if kind in ("DATABASE", "TABLE", "VIEW"):
            this = self._parse_table_parts()
        else:
            this = None

        if self._match(TokenType.FOR):
            for_or_in = "FOR"
        elif self._match(TokenType.IN):
            for_or_in = "IN"
        else:
            for_or_in = None

        if self._match_text_seq("ACCESS"):
            lock_type = "ACCESS"
        elif self._match_texts(("EXCL", "EXCLUSIVE")):
            lock_type = "EXCLUSIVE"
        elif self._match_text_seq("SHARE"):
            lock_type = "SHARE"
        elif self._match_text_seq("READ"):
            lock_type = "READ"
        elif self._match_text_seq("WRITE"):
            lock_type = "WRITE"
        elif self._match_text_seq("CHECKSUM"):
            lock_type = "CHECKSUM"
        else:
            lock_type = None

        override = self._match_text_seq("OVERRIDE")

        return self.expression(
            exp.LockingProperty,
            this=this,
            kind=kind,
            for_or_in=for_or_in,
            lock_type=lock_type,
            override=override,
        )
def _parse_partition_by(self) -> t.List[exp.Expression]:
if self._match(TokenType.PARTITION_BY):
return self._parse_csv(self._parse_conjunction)
return []
    def _parse_partition_bound_spec(self) -> exp.PartitionBoundSpec:
        """Parse a partition bound: IN (...), FROM (...) TO (...), or WITH (MODULUS m, REMAINDER r).

        Raises:
            ParseError (via raise_error) when none of the three forms matches.
        """

        def _parse_partition_bound_expr() -> t.Optional[exp.Expression]:
            # MINVALUE/MAXVALUE act as sentinels inside range bounds
            if self._match_text_seq("MINVALUE"):
                return exp.var("MINVALUE")
            if self._match_text_seq("MAXVALUE"):
                return exp.var("MAXVALUE")
            return self._parse_bitwise()

        this: t.Optional[exp.Expression | t.List[exp.Expression]] = None
        expression = None
        from_expressions = None
        to_expressions = None

        if self._match(TokenType.IN):
            this = self._parse_wrapped_csv(self._parse_bitwise)
        elif self._match(TokenType.FROM):
            from_expressions = self._parse_wrapped_csv(_parse_partition_bound_expr)
            self._match_text_seq("TO")
            to_expressions = self._parse_wrapped_csv(_parse_partition_bound_expr)
        elif self._match_text_seq("WITH", "(", "MODULUS"):
            this = self._parse_number()
            self._match_text_seq(",", "REMAINDER")
            expression = self._parse_number()
            self._match_r_paren()
        else:
            self.raise_error("Failed to parse partition bound spec.")

        return self.expression(
            exp.PartitionBoundSpec,
            this=this,
            expression=expression,
            from_expressions=from_expressions,
            to_expressions=to_expressions,
        )
# https://www.postgresql.org/docs/current/sql-createtable.html
    def _parse_partitioned_of(self) -> t.Optional[exp.PartitionedOfProperty]:
        """Parse PARTITION OF <table> { DEFAULT | FOR VALUES <bound spec> }.

        Rewinds and returns None when OF doesn't follow PARTITION.
        """
        if not self._match_text_seq("OF"):
            self._retreat(self._index - 1)
            return None

        this = self._parse_table(schema=True)

        if self._match(TokenType.DEFAULT):
            expression: exp.Var | exp.PartitionBoundSpec = exp.var("DEFAULT")
        elif self._match_text_seq("FOR", "VALUES"):
            expression = self._parse_partition_bound_spec()
        else:
            self.raise_error("Expecting either DEFAULT or FOR VALUES clause.")

        return self.expression(exp.PartitionedOfProperty, this=this, expression=expression)
    def _parse_partitioned_by(self) -> exp.PartitionedByProperty:
        """Parse PARTITIONED BY [=], accepting a schema or a bracketed field."""
        self._match(TokenType.EQ)
        return self.expression(
            exp.PartitionedByProperty,
            this=self._parse_schema() or self._parse_bracket(self._parse_field()),
        )
def _parse_withdata(self, no: bool = False) -> exp.WithDataProperty:
if self._match_text_seq("AND", "STATISTICS"):
statistics = True
elif self._match_text_seq("AND", "NO", "STATISTICS"):
statistics = False
else:
statistics = None
return self.expression(exp.WithDataProperty, no=no, statistics=statistics)
def _parse_contains_property(self) -> t.Optional[exp.SqlReadWriteProperty]:
if self._match_text_seq("SQL"):
return self.expression(exp.SqlReadWriteProperty, this="CONTAINS SQL")
return None
def _parse_modifies_property(self) -> t.Optional[exp.SqlReadWriteProperty]:
if self._match_text_seq("SQL", "DATA"):
return self.expression(exp.SqlReadWriteProperty, this="MODIFIES SQL DATA")
return None
    def _parse_no_property(self) -> t.Optional[exp.Expression]:
        """Parse properties introduced by NO: NO PRIMARY INDEX or NO SQL."""
        if self._match_text_seq("PRIMARY", "INDEX"):
            return exp.NoPrimaryIndexProperty()
        if self._match_text_seq("SQL"):
            return self.expression(exp.SqlReadWriteProperty, this="NO SQL")
        return None
    def _parse_on_property(self) -> t.Optional[exp.Expression]:
        """Parse properties introduced by ON: ON COMMIT ... ROWS, else a generic ON target."""
        if self._match_text_seq("COMMIT", "PRESERVE", "ROWS"):
            return exp.OnCommitProperty()
        if self._match_text_seq("COMMIT", "DELETE", "ROWS"):
            return exp.OnCommitProperty(delete=True)
        return self.expression(exp.OnProperty, this=self._parse_schema(self._parse_id_var()))
def _parse_reads_property(self) -> t.Optional[exp.SqlReadWriteProperty]:
if self._match_text_seq("SQL", "DATA"):
return self.expression(exp.SqlReadWriteProperty, this="READS SQL DATA")
return None
    def _parse_distkey(self) -> exp.DistKeyProperty:
        """Parse DISTKEY (<identifier>)."""
        return self.expression(exp.DistKeyProperty, this=self._parse_wrapped(self._parse_id_var))
    def _parse_create_like(self) -> t.Optional[exp.LikeProperty]:
        """Parse LIKE <table> [INCLUDING|EXCLUDING <option> ...].

        Returns None when an INCLUDING/EXCLUDING keyword isn't followed by an identifier.
        """
        table = self._parse_table(schema=True)

        options = []
        while self._match_texts(("INCLUDING", "EXCLUDING")):
            this = self._prev.text.upper()

            id_var = self._parse_id_var()
            if not id_var:
                return None

            options.append(
                self.expression(exp.Property, this=this, value=exp.var(id_var.this.upper()))
            )

        return self.expression(exp.LikeProperty, this=table, expressions=options)
    def _parse_sortkey(self, compound: bool = False) -> exp.SortKeyProperty:
        """Parse [COMPOUND] SORTKEY (<identifiers>)."""
        return self.expression(
            exp.SortKeyProperty, this=self._parse_wrapped_id_vars(), compound=compound
        )
    def _parse_character_set(self, default: bool = False) -> exp.CharacterSetProperty:
        """Parse [DEFAULT] CHARACTER SET [=] <name>."""
        self._match(TokenType.EQ)
        return self.expression(
            exp.CharacterSetProperty, this=self._parse_var_or_string(), default=default
        )
def _parse_remote_with_connection(self) -> exp.RemoteWithConnectionModelProperty:
self._match_text_seq("WITH", "CONNECTION")
return self.expression(
exp.RemoteWithConnectionModelProperty, this=self._parse_table_parts()
)
    def _parse_returns(self) -> exp.ReturnsProperty:
        """Parse a RETURNS clause: a scalar type, a TABLE schema, or TABLE<...>.

        Raises:
            ParseError (via raise_error) when a TABLE<...> form misses its closing '>'.
        """
        value: t.Optional[exp.Expression]
        is_table = self._match(TokenType.TABLE)

        if is_table:
            if self._match(TokenType.LT):
                # TABLE<col type, ...> form
                value = self.expression(
                    exp.Schema,
                    this="TABLE",
                    expressions=self._parse_csv(self._parse_struct_types),
                )
                if not self._match(TokenType.GT):
                    self.raise_error("Expecting >")
            else:
                value = self._parse_schema(exp.var("TABLE"))
        else:
            value = self._parse_types()

        return self.expression(exp.ReturnsProperty, this=value, is_table=is_table)
    def _parse_describe(self) -> exp.Describe:
        """Parse DESCRIBE [<kind>] [EXTENDED] <table> [<properties>]."""
        kind = self._match_set(self.CREATABLES) and self._prev.text
        extended = self._match_text_seq("EXTENDED")
        this = self._parse_table(schema=True)
        properties = self._parse_properties()
        # Flatten trailing properties into the Describe node's expression list
        expressions = properties.expressions if properties else None
        return self.expression(
            exp.Describe, this=this, extended=extended, kind=kind, expressions=expressions
        )
    def _parse_insert(self) -> exp.Insert:
        """Parse an INSERT statement, including INSERT ... DIRECTORY and INSERT OR <alt>."""
        comments = ensure_list(self._prev_comments)
        hint = self._parse_hint()
        overwrite = self._match(TokenType.OVERWRITE)
        ignore = self._match(TokenType.IGNORE)
        local = self._match_text_seq("LOCAL")
        alternative = None
        is_function = None

        if self._match_text_seq("DIRECTORY"):
            # INSERT [OVERWRITE] [LOCAL] DIRECTORY '<path>' [ROW FORMAT ...]
            this: t.Optional[exp.Expression] = self.expression(
                exp.Directory,
                this=self._parse_var_or_string(),
                local=local,
                row_format=self._parse_row_format(match_row=True),
            )
        else:
            if self._match(TokenType.OR):
                alternative = self._match_texts(self.INSERT_ALTERNATIVES) and self._prev.text

            self._match(TokenType.INTO)
            comments += ensure_list(self._prev_comments)
            self._match(TokenType.TABLE)
            is_function = self._match(TokenType.FUNCTION)

            this = self._parse_table(schema=True) if not is_function else self._parse_function()

        returning = self._parse_returning()

        # NOTE: kwarg evaluation order consumes BY NAME, IF EXISTS, PARTITION,
        # REPLACE WHERE, the values/select body, and ON CONFLICT in sequence
        return self.expression(
            exp.Insert,
            comments=comments,
            hint=hint,
            is_function=is_function,
            this=this,
            by_name=self._match_text_seq("BY", "NAME"),
            exists=self._parse_exists(),
            partition=self._parse_partition(),
            where=self._match_pair(TokenType.REPLACE, TokenType.WHERE)
            and self._parse_conjunction(),
            expression=self._parse_derived_table_values() or self._parse_ddl_select(),
            conflict=self._parse_on_conflict(),
            returning=returning or self._parse_returning(),
            overwrite=overwrite,
            alternative=alternative,
            ignore=ignore,
        )
def _parse_kill(self) -> exp.Kill:
kind = exp.var(self._prev.text) if self._match_texts(("CONNECTION", "QUERY")) else None
return self.expression(
exp.Kill,
this=self._parse_primary(),
kind=kind,
)
    def _parse_on_conflict(self) -> t.Optional[exp.OnConflict]:
        """Parse ON CONFLICT (Postgres style) or ON DUPLICATE KEY (MySQL style)."""
        conflict = self._match_text_seq("ON", "CONFLICT")
        duplicate = self._match_text_seq("ON", "DUPLICATE", "KEY")

        if not conflict and not duplicate:
            return None

        conflict_keys = None
        constraint = None

        if conflict:
            # Conflict target: ON CONSTRAINT <name> or a key-column list
            if self._match_text_seq("ON", "CONSTRAINT"):
                constraint = self._parse_id_var()
            elif self._match(TokenType.L_PAREN):
                conflict_keys = self._parse_csv(self._parse_id_var)
                self._match_r_paren()

        action = self._parse_var_from_options(self.CONFLICT_ACTIONS)
        # Only the UPDATE action carries a SET assignment list
        if self._prev.token_type == TokenType.UPDATE:
            self._match(TokenType.SET)
            expressions = self._parse_csv(self._parse_equality)
        else:
            expressions = None

        return self.expression(
            exp.OnConflict,
            duplicate=duplicate,
            expressions=expressions,
            action=action,
            conflict_keys=conflict_keys,
            constraint=constraint,
        )
    def _parse_returning(self) -> t.Optional[exp.Returning]:
        """Parse RETURNING <exprs> [INTO <table part>] if present."""
        if not self._match(TokenType.RETURNING):
            return None
        return self.expression(
            exp.Returning,
            expressions=self._parse_csv(self._parse_expression),
            into=self._match(TokenType.INTO) and self._parse_table_part(),
        )
def _parse_row(self) -> t.Optional[exp.RowFormatSerdeProperty | exp.RowFormatDelimitedProperty]:
if not self._match(TokenType.FORMAT):
return None
return self._parse_row_format()
    def _parse_row_format(
        self, match_row: bool = False
    ) -> t.Optional[exp.RowFormatSerdeProperty | exp.RowFormatDelimitedProperty]:
        """Parse ROW FORMAT SERDE '<class>' [WITH SERDEPROPERTIES (...)] or ROW FORMAT DELIMITED ....

        Args:
            match_row: require the leading ROW FORMAT pair; None if it's absent.
        """
        if match_row and not self._match_pair(TokenType.ROW, TokenType.FORMAT):
            return None

        if self._match_text_seq("SERDE"):
            this = self._parse_string()

            serde_properties = None
            if self._match(TokenType.SERDE_PROPERTIES):
                serde_properties = self.expression(
                    exp.SerdeProperties, expressions=self._parse_wrapped_properties()
                )

            return self.expression(
                exp.RowFormatSerdeProperty, this=this, serde_properties=serde_properties
            )

        self._match_text_seq("DELIMITED")

        # Each DELIMITED sub-clause is optional and order-sensitive
        kwargs = {}

        if self._match_text_seq("FIELDS", "TERMINATED", "BY"):
            kwargs["fields"] = self._parse_string()
            if self._match_text_seq("ESCAPED", "BY"):
                kwargs["escaped"] = self._parse_string()
        if self._match_text_seq("COLLECTION", "ITEMS", "TERMINATED", "BY"):
            kwargs["collection_items"] = self._parse_string()
        if self._match_text_seq("MAP", "KEYS", "TERMINATED", "BY"):
            kwargs["map_keys"] = self._parse_string()
        if self._match_text_seq("LINES", "TERMINATED", "BY"):
            kwargs["lines"] = self._parse_string()
        if self._match_text_seq("NULL", "DEFINED", "AS"):
            kwargs["null"] = self._parse_string()

        return self.expression(exp.RowFormatDelimitedProperty, **kwargs)  # type: ignore
    def _parse_load(self) -> exp.LoadData | exp.Command:
        """Parse LOAD DATA [LOCAL] INPATH ... INTO TABLE ...; otherwise fall back to Command."""
        if self._match_text_seq("DATA"):
            local = self._match_text_seq("LOCAL")
            self._match_text_seq("INPATH")
            inpath = self._parse_string()
            overwrite = self._match(TokenType.OVERWRITE)
            self._match_pair(TokenType.INTO, TokenType.TABLE)

            return self.expression(
                exp.LoadData,
                this=self._parse_table(schema=True),
                local=local,
                overwrite=overwrite,
                inpath=inpath,
                partition=self._parse_partition(),
                input_format=self._match_text_seq("INPUTFORMAT") and self._parse_string(),
                serde=self._match_text_seq("SERDE") and self._parse_string(),
            )
        return self._parse_as_command(self._prev)
    def _parse_delete(self) -> exp.Delete:
        """Parse a DELETE statement, including multi-table and USING forms."""
        # This handles MySQL's "Multiple-Table Syntax"
        # https://dev.mysql.com/doc/refman/8.0/en/delete.html
        tables = None
        comments = self._prev_comments
        if not self._match(TokenType.FROM, advance=False):
            tables = self._parse_csv(self._parse_table) or None

        returning = self._parse_returning()

        return self.expression(
            exp.Delete,
            comments=comments,
            tables=tables,
            this=self._match(TokenType.FROM) and self._parse_table(joins=True),
            using=self._match(TokenType.USING) and self._parse_table(joins=True),
            where=self._parse_where(),
            returning=returning or self._parse_returning(),
            limit=self._parse_limit(),
        )
    def _parse_update(self) -> exp.Update:
        """Parse UPDATE <table> SET ... [FROM ...] [WHERE ...] [RETURNING ...] [ORDER/LIMIT]."""
        comments = self._prev_comments
        this = self._parse_table(joins=True, alias_tokens=self.UPDATE_ALIAS_TOKENS)
        expressions = self._match(TokenType.SET) and self._parse_csv(self._parse_equality)
        returning = self._parse_returning()
        return self.expression(
            exp.Update,
            comments=comments,
            **{  # type: ignore
                "this": this,
                "expressions": expressions,
                "from": self._parse_from(joins=True),
                "where": self._parse_where(),
                "returning": returning or self._parse_returning(),
                "order": self._parse_order(),
                "limit": self._parse_limit(),
            },
        )
def _parse_uncache(self) -> exp.Uncache:
if not self._match(TokenType.TABLE):
self.raise_error("Expecting TABLE after UNCACHE")
return self.expression(
exp.Uncache, exists=self._parse_exists(), this=self._parse_table(schema=True)
)
    def _parse_cache(self) -> exp.Cache:
        """Parse CACHE [LAZY] TABLE <t> [OPTIONS('k' = 'v')] [AS <select>]."""
        lazy = self._match_text_seq("LAZY")
        self._match(TokenType.TABLE)
        table = self._parse_table(schema=True)

        options = []
        if self._match_text_seq("OPTIONS"):
            self._match_l_paren()
            k = self._parse_string()
            self._match(TokenType.EQ)
            v = self._parse_string()
            # A single key/value pair is kept as a flat [k, v] list
            options = [k, v]
            self._match_r_paren()

        self._match(TokenType.ALIAS)
        return self.expression(
            exp.Cache,
            this=table,
            lazy=lazy,
            options=options,
            expression=self._parse_select(nested=True),
        )
def _parse_partition(self) -> t.Optional[exp.Partition]:
if not self._match(TokenType.PARTITION):
return None
return self.expression(
exp.Partition, expressions=self._parse_wrapped_csv(self._parse_conjunction)
)
    def _parse_value(self) -> exp.Tuple:
        """Parse one VALUES row as a Tuple, with or without parentheses."""
        if self._match(TokenType.L_PAREN):
            expressions = self._parse_csv(self._parse_expression)
            self._match_r_paren()
            return self.expression(exp.Tuple, expressions=expressions)

        # In some dialects we can have VALUES 1, 2 which results in 1 column & 2 rows.
        return self.expression(exp.Tuple, expressions=[self._parse_expression()])
    def _parse_projections(self) -> t.List[exp.Expression]:
        """Parse the SELECT projection list."""
        return self._parse_expressions()
    def _parse_select(
        self,
        nested: bool = False,
        table: bool = False,
        parse_subquery_alias: bool = True,
        parse_set_operation: bool = True,
    ) -> t.Optional[exp.Expression]:
        """Parse a SELECT (or CTE-prefixed statement, parenthesized query, or VALUES).

        Args:
            nested: allow a parenthesized nested select at this position.
            table: allow a table expression inside the parentheses.
            parse_subquery_alias: parse an alias on the returned subquery.
            parse_set_operation: attach trailing UNION/INTERSECT/EXCEPT here.
        """
        cte = self._parse_with()

        if cte:
            this = self._parse_statement()

            if not this:
                self.raise_error("Failed to parse any statement following CTE")
                return cte

            if "with" in this.arg_types:
                this.set("with", cte)
            else:
                self.raise_error(f"{this.key} does not support CTE")
                this = cte

            return this

        # duckdb supports leading with FROM x
        from_ = self._parse_from() if self._match(TokenType.FROM, advance=False) else None

        if self._match(TokenType.SELECT):
            comments = self._prev_comments

            hint = self._parse_hint()
            all_ = self._match(TokenType.ALL)
            distinct = self._match_set(self.DISTINCT_TOKENS)

            # BigQuery: SELECT AS STRUCT / SELECT AS VALUE
            kind = (
                self._match(TokenType.ALIAS)
                and self._match_texts(("STRUCT", "VALUE"))
                and self._prev.text.upper()
            )

            if distinct:
                distinct = self.expression(
                    exp.Distinct,
                    on=self._parse_value() if self._match(TokenType.ON) else None,
                )

            if all_ and distinct:
                self.raise_error("Cannot specify both ALL and DISTINCT after SELECT")

            limit = self._parse_limit(top=True)
            projections = self._parse_projections()

            this = self.expression(
                exp.Select,
                kind=kind,
                hint=hint,
                distinct=distinct,
                expressions=projections,
                limit=limit,
            )
            this.comments = comments

            into = self._parse_into()
            if into:
                this.set("into", into)

            if not from_:
                from_ = self._parse_from()

            if from_:
                this.set("from", from_)

            this = self._parse_query_modifiers(this)
        elif (table or nested) and self._match(TokenType.L_PAREN):
            if self._match(TokenType.PIVOT):
                this = self._parse_simplified_pivot()
            elif self._match(TokenType.FROM):
                this = exp.select("*").from_(
                    t.cast(exp.From, self._parse_from(skip_from_token=True))
                )
            else:
                this = (
                    self._parse_table()
                    if table
                    else self._parse_select(nested=True, parse_set_operation=False)
                )
                this = self._parse_query_modifiers(self._parse_set_operations(this))

            self._match_r_paren()

            # We return early here so that the UNION isn't attached to the subquery by the
            # following call to _parse_set_operations, but instead becomes the parent node
            return self._parse_subquery(this, parse_alias=parse_subquery_alias)
        elif self._match(TokenType.VALUES, advance=False):
            this = self._parse_derived_table_values()
        elif from_:
            this = exp.select("*").from_(from_.this, copy=False)
        else:
            this = None

        if parse_set_operation:
            return self._parse_set_operations(this)

        return this
def _parse_with(self, skip_with_token: bool = False) -> t.Optional[exp.With]:
    """Parse a WITH (CTE) clause into an exp.With, or return None if absent.

    ``skip_with_token=True`` assumes the WITH keyword was already consumed.
    """
    if not skip_with_token and not self._match(TokenType.WITH):
        return None

    comments = self._prev_comments
    recursive = self._match(TokenType.RECURSIVE)

    expressions = []
    while True:
        expressions.append(self._parse_cte())

        # Stop when neither a comma nor another WITH separates the next CTE.
        if not self._match(TokenType.COMMA) and not self._match(TokenType.WITH):
            break
        else:
            # Tolerate a redundant WITH keyword after the separator.
            self._match(TokenType.WITH)

    return self.expression(
        exp.With, comments=comments, expressions=expressions, recursive=recursive
    )
def _parse_cte(self) -> exp.CTE:
    """Parse one CTE: ``name [(cols)] AS [NOT MATERIALIZED|MATERIALIZED] (stmt)``."""
    cte_alias = self._parse_table_alias(self.ID_VAR_TOKENS)
    if not (cte_alias and cte_alias.this):
        self.raise_error("Expected CTE to have alias")

    self._match(TokenType.ALIAS)

    # Probe "NOT MATERIALIZED" first so the bare keyword doesn't shadow it.
    materialized: t.Optional[bool] = None
    if self._match_text_seq("NOT", "MATERIALIZED"):
        materialized = False
    elif self._match_text_seq("MATERIALIZED"):
        materialized = True

    return self.expression(
        exp.CTE,
        this=self._parse_wrapped(self._parse_statement),
        alias=cte_alias,
        materialized=materialized,
    )
def _parse_table_alias(
    self, alias_tokens: t.Optional[t.Collection[TokenType]] = None
) -> t.Optional[exp.TableAlias]:
    """Parse an optional table alias with an optional column list; None if neither."""
    any_token = self._match(TokenType.ALIAS)
    candidates = alias_tokens or self.TABLE_ALIAS_TOKENS

    alias = self._parse_id_var(any_token=any_token, tokens=candidates)
    if not alias:
        alias = self._parse_string_as_identifier()

    checkpoint = self._index
    if self._match(TokenType.L_PAREN):
        columns = self._parse_csv(self._parse_function_parameter)
        if columns:
            self._match_r_paren()
        else:
            # Nothing inside the parens: rewind so "(" can be re-parsed by the caller.
            self._retreat(checkpoint)
    else:
        columns = None

    if alias or columns:
        return self.expression(exp.TableAlias, this=alias, columns=columns)

    return None
def _parse_subquery(
    self, this: t.Optional[exp.Expression], parse_alias: bool = True
) -> t.Optional[exp.Subquery]:
    """Wrap ``this`` in an exp.Subquery, attaching pivots and (optionally) an alias."""
    if not this:
        return None

    # Pivots must be consumed before the alias.
    pivots = self._parse_pivots()
    alias = self._parse_table_alias() if parse_alias else None
    return self.expression(exp.Subquery, this=this, pivots=pivots, alias=alias)
def _implicit_unnests_to_explicit(self, this: E) -> E:
    """Convert implicitly-unnested join items into explicit exp.Unnest nodes.

    A comma/cross-joined table whose first name part refers to an earlier
    source in the FROM/JOIN chain is really a column reference and is
    rewritten to ``UNNEST(<column>)``, preserving any alias.
    """
    from sqlglot.optimizer.normalize_identifiers import (
        normalize_identifiers as _norm,
    )

    # Names a later join item may implicitly refer to, starting with FROM.
    refs = {_norm(this.args["from"].this.copy(), dialect=self.dialect).alias_or_name}
    # Note: the index from enumerate was unused; iterate directly.
    for join in this.args.get("joins") or []:
        table = join.this
        normalized_table = table.copy()
        normalized_table.meta["maybe_column"] = True
        normalized_table = _norm(normalized_table, dialect=self.dialect)

        if isinstance(table, exp.Table) and not join.args.get("on"):
            if normalized_table.parts[0].name in refs:
                table_as_column = table.to_column()
                unnest = exp.Unnest(expressions=[table_as_column])

                # Table.to_column creates a parent Alias node that we want to convert to
                # a TableAlias and attach to the Unnest, so it matches the parser's output
                if isinstance(table.args.get("alias"), exp.TableAlias):
                    table_as_column.replace(table_as_column.this)
                    exp.alias_(unnest, None, table=[table.args["alias"].this], copy=False)

                table.replace(unnest)

        refs.add(normalized_table.alias_or_name)

    return this
def _parse_query_modifiers(
    self, this: t.Optional[exp.Expression]
) -> t.Optional[exp.Expression]:
    """Attach trailing query modifiers (joins, laterals, WHERE/GROUP/LIMIT, ...)
    to ``this``, dispatching through QUERY_MODIFIER_PARSERS until no modifier
    token is ahead.
    """
    if isinstance(this, (exp.Query, exp.Table)):
        for join in iter(self._parse_join, None):
            this.append("joins", join)
        for lateral in iter(self._parse_lateral, None):
            this.append("laterals", lateral)

        while True:
            if self._match_set(self.QUERY_MODIFIER_PARSERS, advance=False):
                parser = self.QUERY_MODIFIER_PARSERS[self._curr.token_type]
                key, expression = parser(self)

                if expression:
                    this.set(key, expression)

                    if key == "limit":
                        # A LIMIT may carry an inline offset (LIMIT x, y);
                        # hoist it into a dedicated exp.Offset node.
                        offset = expression.args.pop("offset", None)

                        if offset:
                            offset = exp.Offset(expression=offset)
                            this.set("offset", offset)

                            # Move LIMIT BY expressions onto the offset node.
                            limit_by_expressions = expression.expressions
                            expression.set("expressions", None)
                            offset.set("expressions", limit_by_expressions)

                    continue
            break

    if self.SUPPORTS_IMPLICIT_UNNEST and this and "from" in this.args:
        this = self._implicit_unnests_to_explicit(this)

    return this
def _parse_hint(self) -> t.Optional[exp.Hint]:
    """Parse a ``/*+ ... */`` optimizer hint, if one starts at the cursor."""
    if not self._match(TokenType.HINT):
        return None

    collected: t.List[exp.Expression] = []
    # _parse_csv yields batches of hint functions until it returns an empty list.
    for batch in iter(lambda: self._parse_csv(self._parse_function), []):
        collected.extend(batch)

    if not self._match_pair(TokenType.STAR, TokenType.SLASH):
        self.raise_error("Expected */ after HINT")

    return self.expression(exp.Hint, expressions=collected)
def _parse_into(self) -> t.Optional[exp.Into]:
    """Parse ``INTO [TEMPORARY|UNLOGGED] [TABLE] name`` following a SELECT."""
    if not self._match(TokenType.INTO):
        return None

    # Fixed keyword order: TEMPORARY/UNLOGGED first, then an optional TABLE.
    temporary = self._match(TokenType.TEMPORARY)
    unlogged = self._match_text_seq("UNLOGGED")
    self._match(TokenType.TABLE)

    return self.expression(
        exp.Into,
        this=self._parse_table(schema=True),
        temporary=temporary,
        unlogged=unlogged,
    )
def _parse_from(
    self, joins: bool = False, skip_from_token: bool = False
) -> t.Optional[exp.From]:
    """Parse a FROM clause; returns None when the FROM keyword is absent."""
    if not (skip_from_token or self._match(TokenType.FROM)):
        return None

    # Capture comments before parsing the table consumes more tokens.
    comments = self._prev_comments
    return self.expression(exp.From, comments=comments, this=self._parse_table(joins=joins))
def _parse_match_recognize(self) -> t.Optional[exp.MatchRecognize]:
    """Parse a MATCH_RECOGNIZE(...) row-pattern-matching clause, or None."""
    if not self._match(TokenType.MATCH_RECOGNIZE):
        return None

    self._match_l_paren()

    partition = self._parse_partition_by()
    order = self._parse_order()
    measures = self._parse_expressions() if self._match_text_seq("MEASURES") else None

    # ROWS PER MATCH variants are stored verbatim as variables.
    if self._match_text_seq("ONE", "ROW", "PER", "MATCH"):
        rows = exp.var("ONE ROW PER MATCH")
    elif self._match_text_seq("ALL", "ROWS", "PER", "MATCH"):
        text = "ALL ROWS PER MATCH"
        if self._match_text_seq("SHOW", "EMPTY", "MATCHES"):
            text += " SHOW EMPTY MATCHES"
        elif self._match_text_seq("OMIT", "EMPTY", "MATCHES"):
            text += " OMIT EMPTY MATCHES"
        elif self._match_text_seq("WITH", "UNMATCHED", "ROWS"):
            text += " WITH UNMATCHED ROWS"
        rows = exp.var(text)
    else:
        rows = None

    if self._match_text_seq("AFTER", "MATCH", "SKIP"):
        text = "AFTER MATCH SKIP"
        if self._match_text_seq("PAST", "LAST", "ROW"):
            text += " PAST LAST ROW"
        elif self._match_text_seq("TO", "NEXT", "ROW"):
            text += " TO NEXT ROW"
        elif self._match_text_seq("TO", "FIRST"):
            text += f" TO FIRST {self._advance_any().text}"  # type: ignore
        elif self._match_text_seq("TO", "LAST"):
            text += f" TO LAST {self._advance_any().text}"  # type: ignore
        after = exp.var(text)
    else:
        after = None

    if self._match_text_seq("PATTERN"):
        self._match_l_paren()

        if not self._curr:
            self.raise_error("Expecting )", self._curr)

        # The pattern is captured verbatim: scan raw tokens while tracking
        # parenthesis depth, then recover the SQL text between start and end.
        paren = 1
        start = self._curr

        while self._curr and paren > 0:
            if self._curr.token_type == TokenType.L_PAREN:
                paren += 1
            if self._curr.token_type == TokenType.R_PAREN:
                paren -= 1

            end = self._prev
            self._advance()

        if paren > 0:
            self.raise_error("Expecting )", self._curr)

        pattern = exp.var(self._find_sql(start, end))
    else:
        pattern = None

    define = (
        self._parse_csv(self._parse_name_as_expression)
        if self._match_text_seq("DEFINE")
        else None
    )

    self._match_r_paren()

    return self.expression(
        exp.MatchRecognize,
        partition_by=partition,
        order=order,
        measures=measures,
        rows=rows,
        after=after,
        pattern=pattern,
        define=define,
        alias=self._parse_table_alias(),
    )
def _parse_lateral(self) -> t.Optional[exp.Lateral]:
    """Parse LATERAL / CROSS APPLY / OUTER APPLY into an exp.Lateral.

    ``cross_apply`` is tri-state: True for CROSS APPLY, False for OUTER
    APPLY, and None when a plain LATERAL keyword was used.
    """
    cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY)
    if not cross_apply and self._match_pair(TokenType.OUTER, TokenType.APPLY):
        cross_apply = False

    if cross_apply is not None:
        this = self._parse_select(table=True)
        view = None
        outer = None
    elif self._match(TokenType.LATERAL):
        this = self._parse_select(table=True)
        view = self._match(TokenType.VIEW)
        outer = self._match(TokenType.OUTER)
    else:
        return None

    if not this:
        # Not a subquery: fall back to UNNEST, a function call, or a bare
        # identifier, possibly extended with dotted member access.
        this = (
            self._parse_unnest()
            or self._parse_function()
            or self._parse_id_var(any_token=False)
        )

        while self._match(TokenType.DOT):
            this = exp.Dot(
                this=this,
                expression=self._parse_function() or self._parse_id_var(any_token=False),
            )

    if view:
        table = self._parse_id_var(any_token=False)
        columns = self._parse_csv(self._parse_id_var) if self._match(TokenType.ALIAS) else []
        table_alias: t.Optional[exp.TableAlias] = self.expression(
            exp.TableAlias, this=table, columns=columns
        )
    elif isinstance(this, (exp.Subquery, exp.Unnest)) and this.alias:
        # We move the alias from the lateral's child node to the lateral itself
        table_alias = this.args["alias"].pop()
    else:
        table_alias = self._parse_table_alias()

    return self.expression(
        exp.Lateral,
        this=this,
        view=view,
        outer=outer,
        alias=table_alias,
        cross_apply=cross_apply,
    )
def _parse_join_parts(
    self,
) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]:
    """Consume optional join method/side/kind tokens, returning each matched token."""
    method = self._prev if self._match_set(self.JOIN_METHODS) else None
    side = self._prev if self._match_set(self.JOIN_SIDES) else None
    kind = self._prev if self._match_set(self.JOIN_KINDS) else None
    return method, side, kind
def _parse_join(
    self, skip_join_token: bool = False, parse_bracket: bool = False
) -> t.Optional[exp.Join]:
    """Parse one join item (comma join, [method/side/kind] JOIN, or APPLY)."""
    # A bare comma is an implicit cross join.
    if self._match(TokenType.COMMA):
        return self.expression(exp.Join, this=self._parse_table())

    index = self._index
    method, side, kind = self._parse_join_parts()
    hint = self._prev.text if self._match_texts(self.JOIN_HINTS) else None
    join = self._match(TokenType.JOIN)

    # Without an actual JOIN keyword, roll back anything speculatively consumed.
    if not skip_join_token and not join:
        self._retreat(index)
        kind = None
        method = None
        side = None

    outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY, False)
    cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY, False)

    if not skip_join_token and not join and not outer_apply and not cross_apply:
        return None

    kwargs: t.Dict[str, t.Any] = {"this": self._parse_table(parse_bracket=parse_bracket)}

    if method:
        kwargs["method"] = method.text
    if side:
        kwargs["side"] = side.text
    if kind:
        kwargs["kind"] = kind.text
    if hint:
        kwargs["hint"] = hint

    if self._match(TokenType.ON):
        kwargs["on"] = self._parse_conjunction()
    elif self._match(TokenType.USING):
        kwargs["using"] = self._parse_wrapped_id_vars()
    elif not (kind and kind.token_type == TokenType.CROSS):
        # No ON/USING yet: try a nested join whose condition follows it
        # (e.g. "a JOIN b JOIN c ON ... ON ..."); otherwise backtrack.
        index = self._index
        join = self._parse_join()

        if join and self._match(TokenType.ON):
            kwargs["on"] = self._parse_conjunction()
        elif join and self._match(TokenType.USING):
            kwargs["using"] = self._parse_wrapped_id_vars()
        else:
            join = None
            self._retreat(index)

        kwargs["this"].set("joins", [join] if join else None)

    comments = [c for token in (method, side, kind) if token for c in token.comments]
    return self.expression(exp.Join, comments=comments, **kwargs)
def _parse_opclass(self) -> t.Optional[exp.Expression]:
    """Parse an expression, optionally followed by an operator-class name."""
    target = self._parse_conjunction()

    # Both probes use advance=False, so no tokens are consumed by the checks.
    followed_by_keyword = self._match_texts(self.OPCLASS_FOLLOW_KEYWORDS, advance=False)
    if not followed_by_keyword and not self._match_set(self.OPTYPE_FOLLOW_TOKENS, advance=False):
        return self.expression(exp.Opclass, this=target, expression=self._parse_table_parts())

    return target
def _parse_index_params(self) -> exp.IndexParameters:
    """Parse the parameter tail of an index definition: USING, column list,
    INCLUDE, PARTITION BY, WITH (...) storage options, TABLESPACE and WHERE.
    """
    using = self._parse_var(any_token=True) if self._match(TokenType.USING) else None

    if self._match(TokenType.L_PAREN, advance=False):
        columns = self._parse_wrapped_csv(self._parse_with_operator)
    else:
        columns = None

    include = self._parse_wrapped_id_vars() if self._match_text_seq("INCLUDE") else None
    partition_by = self._parse_partition_by()
    with_storage = self._match(TokenType.WITH) and self._parse_wrapped_properties()
    tablespace = (
        self._parse_var(any_token=True)
        if self._match_text_seq("USING", "INDEX", "TABLESPACE")
        else None
    )
    where = self._parse_where()

    return self.expression(
        exp.IndexParameters,
        using=using,
        columns=columns,
        include=include,
        partition_by=partition_by,
        where=where,
        with_storage=with_storage,
        tablespace=tablespace,
    )
def _parse_index(
    self,
    index: t.Optional[exp.Expression] = None,
) -> t.Optional[exp.Index]:
    """Parse an index definition.

    When ``index`` is given, the index name was already parsed and only the
    ``ON <table>`` part plus parameters remain; otherwise the full
    ``[UNIQUE] [PRIMARY] [AMP] INDEX <name>`` prefix is parsed here.
    """
    if index:
        unique = None
        primary = None
        amp = None

        self._match(TokenType.ON)
        self._match(TokenType.TABLE)  # hive
        table = self._parse_table_parts(schema=True)
    else:
        unique = self._match(TokenType.UNIQUE)
        primary = self._match_text_seq("PRIMARY")
        amp = self._match_text_seq("AMP")

        if not self._match(TokenType.INDEX):
            return None

        index = self._parse_id_var()
        table = None

    params = self._parse_index_params()

    return self.expression(
        exp.Index,
        this=index,
        table=table,
        unique=unique,
        primary=primary,
        amp=amp,
        params=params,
    )
def _parse_table_hints(self) -> t.Optional[t.List[exp.Expression]]:
    """Parse table-level hints: T-SQL ``WITH (...)`` or MySQL index hints."""
    hints: t.List[exp.Expression] = []
    if self._match_pair(TokenType.WITH, TokenType.L_PAREN):
        # https://learn.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table?view=sql-server-ver16
        hints.append(
            self.expression(
                exp.WithTableHint,
                expressions=self._parse_csv(
                    lambda: self._parse_function() or self._parse_var(any_token=True)
                ),
            )
        )
        self._match_r_paren()
    else:
        # https://dev.mysql.com/doc/refman/8.0/en/index-hints.html
        while self._match_set(self.TABLE_INDEX_HINT_TOKENS):
            hint = exp.IndexTableHint(this=self._prev.text.upper())

            self._match_texts(("INDEX", "KEY"))
            if self._match(TokenType.FOR):
                hint.set("target", self._advance_any() and self._prev.text.upper())

            hint.set("expressions", self._parse_wrapped_id_vars())
            hints.append(hint)

    return hints or None
def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
    """Parse one dotted component of a table name."""
    # Function calls are only tried when we're not parsing a schema.
    if not schema:
        part = self._parse_function(optional_parens=False)
        if part:
            return part

    return (
        self._parse_id_var(any_token=False)
        or self._parse_string_as_identifier()
        or self._parse_placeholder()
    )
def _parse_table_parts(
    self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
) -> exp.Table:
    """Parse a possibly-qualified table name (catalog.db.table, with deeper
    nesting folded into Dot expressions), optionally with a trailing ``*``.
    """
    catalog = None
    db = None
    table: t.Optional[exp.Expression | str] = self._parse_table_part(schema=schema)

    while self._match(TokenType.DOT):
        if catalog:
            # This allows nesting the table in arbitrarily many dot expressions if needed
            table = self.expression(
                exp.Dot, this=table, expression=self._parse_table_part(schema=schema)
            )
        else:
            # Rotate parts one level: previous db becomes catalog, etc.
            catalog = db
            db = table
            # "" used for tsql FROM a..b case
            table = self._parse_table_part(schema=schema) or ""

    if (
        wildcard
        and self._is_connected()
        and (isinstance(table, exp.Identifier) or not table)
        and self._match(TokenType.STAR)
    ):
        # Merge a directly-adjacent "*" into the identifier (e.g. "tbl*").
        if isinstance(table, exp.Identifier):
            table.args["this"] += "*"
        else:
            table = exp.Identifier(this="*")

    if is_db_reference:
        # Only a database is expected: shift everything one level up.
        catalog = db
        db = table
        table = None

    if not table and not is_db_reference:
        self.raise_error(f"Expected table name but got {self._curr}")
    if not db and is_db_reference:
        self.raise_error(f"Expected database name but got {self._curr}")

    return self.expression(
        exp.Table, this=table, db=db, catalog=catalog, pivots=self._parse_pivots()
    )
def _parse_table(
    self,
    schema: bool = False,
    joins: bool = False,
    alias_tokens: t.Optional[t.Collection[TokenType]] = None,
    parse_bracket: bool = False,
    is_db_reference: bool = False,
) -> t.Optional[exp.Expression]:
    """Parse a table factor: lateral, UNNEST, VALUES, subquery, or a plain
    (possibly bracketed) table name with its version/alias/hints/pivots/
    sample/join decorations. The probe order below is significant.
    """
    lateral = self._parse_lateral()
    if lateral:
        return lateral

    unnest = self._parse_unnest()
    if unnest:
        return unnest

    values = self._parse_derived_table_values()
    if values:
        return values

    subquery = self._parse_select(table=True)
    if subquery:
        if not subquery.args.get("pivots"):
            subquery.set("pivots", self._parse_pivots())
        return subquery

    bracket = parse_bracket and self._parse_bracket(None)
    bracket = self.expression(exp.Table, this=bracket) if bracket else None

    only = self._match(TokenType.ONLY)

    this = t.cast(
        exp.Expression,
        bracket
        or self._parse_bracket(
            self._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
        ),
    )

    if only:
        this.set("only", only)

    # Postgres supports a wildcard (table) suffix operator, which is a no-op in this context
    self._match_text_seq("*")

    if schema:
        return self._parse_schema(this=this)

    version = self._parse_version()

    if version:
        this.set("version", version)

    # Some dialects place TABLESAMPLE before the alias, others after.
    if self.dialect.ALIAS_POST_TABLESAMPLE:
        table_sample = self._parse_table_sample()

    alias = self._parse_table_alias(alias_tokens=alias_tokens or self.TABLE_ALIAS_TOKENS)
    if alias:
        this.set("alias", alias)

    if isinstance(this, exp.Table) and self._match_text_seq("AT"):
        return self.expression(
            exp.AtIndex, this=this.to_column(copy=False), expression=self._parse_id_var()
        )

    this.set("hints", self._parse_table_hints())

    if not this.args.get("pivots"):
        this.set("pivots", self._parse_pivots())

    if not self.dialect.ALIAS_POST_TABLESAMPLE:
        table_sample = self._parse_table_sample()

    if table_sample:
        table_sample.set("this", this)
        this = table_sample

    if joins:
        for join in iter(self._parse_join, None):
            this.append("joins", join)

    if self._match_pair(TokenType.WITH, TokenType.ORDINALITY):
        this.set("ordinality", True)
        this.set("alias", self._parse_table_alias())

    return this
def _parse_version(self) -> t.Optional[exp.Version]:
    """Parse a table snapshot clause after a TIMESTAMP/VERSION snapshot token
    (FROM..TO, BETWEEN..AND, CONTAINED IN (...), ALL, or AS OF <expr>).
    """
    if self._match(TokenType.TIMESTAMP_SNAPSHOT):
        this = "TIMESTAMP"
    elif self._match(TokenType.VERSION_SNAPSHOT):
        this = "VERSION"
    else:
        return None

    if self._match_set((TokenType.FROM, TokenType.BETWEEN)):
        kind = self._prev.text.upper()
        start = self._parse_bitwise()
        self._match_texts(("TO", "AND"))
        end = self._parse_bitwise()
        expression: t.Optional[exp.Expression] = self.expression(
            exp.Tuple, expressions=[start, end]
        )
    elif self._match_text_seq("CONTAINED", "IN"):
        kind = "CONTAINED IN"
        expression = self.expression(
            exp.Tuple, expressions=self._parse_wrapped_csv(self._parse_bitwise)
        )
    elif self._match(TokenType.ALL):
        kind = "ALL"
        expression = None
    else:
        # Default branch: an (optional) "AS OF" prefix before a type expression.
        self._match_text_seq("AS", "OF")
        kind = "AS OF"
        expression = self._parse_type()

    return self.expression(exp.Version, this=this, expression=expression, kind=kind)
def _parse_unnest(self, with_alias: bool = True) -> t.Optional[exp.Unnest]:
    """Parse ``UNNEST(...)`` with optional WITH ORDINALITY / WITH OFFSET
    and an optional alias; returns None if UNNEST is not ahead.
    """
    if not self._match(TokenType.UNNEST):
        return None

    expressions = self._parse_wrapped_csv(self._parse_equality)
    offset = self._match_pair(TokenType.WITH, TokenType.ORDINALITY)

    alias = self._parse_table_alias() if with_alias else None

    if alias:
        if self.dialect.UNNEST_COLUMN_ONLY:
            # The alias names the produced column, not the relation.
            if alias.args.get("columns"):
                self.raise_error("Unexpected extra column alias in unnest.")

            alias.set("columns", [alias.this])
            alias.set("this", None)

        columns = alias.args.get("columns") or []
        # An extra column alias beyond the unnested expressions names the
        # ordinality/offset column.
        if offset and len(expressions) < len(columns):
            offset = columns.pop()

    if not offset and self._match_pair(TokenType.WITH, TokenType.OFFSET):
        self._match(TokenType.ALIAS)
        offset = self._parse_id_var(
            any_token=False, tokens=self.UNNEST_OFFSET_ALIAS_TOKENS
        ) or exp.to_identifier("offset")

    return self.expression(exp.Unnest, expressions=expressions, alias=alias, offset=offset)
def _parse_derived_table_values(self) -> t.Optional[exp.Values]:
    """Parse a VALUES list, optionally parenthesized as a derived table."""
    is_derived = self._match_pair(TokenType.L_PAREN, TokenType.VALUES)
    if not is_derived and not self._match_text_seq("VALUES"):
        return None

    expressions = self._parse_csv(self._parse_value)
    alias = self._parse_table_alias()

    if is_derived:
        self._match_r_paren()

    # The alias may appear inside or after the closing paren, hence the
    # second _parse_table_alias attempt when none was found above.
    return self.expression(
        exp.Values, expressions=expressions, alias=alias or self._parse_table_alias()
    )
def _parse_table_sample(self, as_modifier: bool = False) -> t.Optional[exp.TableSample]:
    """Parse a TABLESAMPLE clause (or ``USING SAMPLE`` when ``as_modifier``
    is True), covering bucket, percent, rows and seed variants.

    Returns None when no sampling clause is present.
    """
    if not self._match(TokenType.TABLE_SAMPLE) and not (
        as_modifier and self._match_text_seq("USING", "SAMPLE")
    ):
        return None

    bucket_numerator = None
    bucket_denominator = None
    bucket_field = None
    percent = None
    size = None
    seed = None

    method = self._parse_var(tokens=(TokenType.ROW,), upper=True)
    matched_l_paren = self._match(TokenType.L_PAREN)

    if self.TABLESAMPLE_CSV:
        num = None
        expressions = self._parse_csv(self._parse_primary)
    else:
        expressions = None
        num = (
            self._parse_factor()
            if self._match(TokenType.NUMBER, advance=False)
            else self._parse_primary() or self._parse_placeholder()
        )

    if self._match_text_seq("BUCKET"):
        bucket_numerator = self._parse_number()
        self._match_text_seq("OUT", "OF")
        # Fixed: this was a duplicated assignment (x = x = ...).
        bucket_denominator = self._parse_number()
        self._match(TokenType.ON)
        bucket_field = self._parse_field()
    elif self._match_set((TokenType.PERCENT, TokenType.MOD)):
        percent = num
    elif self._match(TokenType.ROWS) or not self.dialect.TABLESAMPLE_SIZE_IS_PERCENT:
        size = num
    else:
        percent = num

    if matched_l_paren:
        self._match_r_paren()

    if self._match(TokenType.L_PAREN):
        method = self._parse_var(upper=True)
        seed = self._match(TokenType.COMMA) and self._parse_number()
        self._match_r_paren()
    elif self._match_texts(("SEED", "REPEATABLE")):
        seed = self._parse_wrapped(self._parse_number)

    return self.expression(
        exp.TableSample,
        expressions=expressions,
        method=method,
        bucket_numerator=bucket_numerator,
        bucket_denominator=bucket_denominator,
        bucket_field=bucket_field,
        percent=percent,
        size=size,
        seed=seed,
    )
def _parse_pivots(self) -> t.Optional[exp.List]:
    """Collect consecutive PIVOT/UNPIVOT clauses; None when there are none."""
    pivots = list(iter(self._parse_pivot, None))
    return pivots or None
# https://duckdb.org/docs/sql/statements/pivot
def _parse_simplified_pivot(self) -> exp.Pivot:
    """Parse the simplified PIVOT statement form (see DuckDB link above)."""
    def _parse_on() -> t.Optional[exp.Expression]:
        # An ON item may be "expr" or "expr IN (...)".
        this = self._parse_bitwise()
        return self._parse_in(this) if self._match(TokenType.IN) else this

    this = self._parse_table()
    expressions = self._match(TokenType.ON) and self._parse_csv(_parse_on)
    using = self._match(TokenType.USING) and self._parse_csv(
        lambda: self._parse_alias(self._parse_function())
    )
    group = self._parse_group()

    return self.expression(
        exp.Pivot, this=this, expressions=expressions, using=using, group=group
    )
def _parse_pivot_in(self) -> exp.In:
    """Parse the ``FOR col IN (expr [AS alias], ...)`` part of a PIVOT clause."""
    def _parse_aliased_expression() -> t.Optional[exp.Expression]:
        this = self._parse_conjunction()

        self._match(TokenType.ALIAS)
        alias = self._parse_field()
        if alias:
            return self.expression(exp.PivotAlias, this=this, alias=alias)

        return this

    value = self._parse_column()

    if not self._match_pair(TokenType.IN, TokenType.L_PAREN):
        self.raise_error("Expecting IN (")

    aliased_expressions = self._parse_csv(_parse_aliased_expression)
    self._match_r_paren()

    return self.expression(exp.In, this=value, expressions=aliased_expressions)
def _parse_pivot(self) -> t.Optional[exp.Pivot]:
    """Parse a PIVOT/UNPIVOT clause, synthesizing output column names for PIVOT."""
    index = self._index
    include_nulls = None

    if self._match(TokenType.PIVOT):
        unpivot = False
    elif self._match(TokenType.UNPIVOT):
        unpivot = True

        # https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-qry-select-unpivot.html#syntax
        if self._match_text_seq("INCLUDE", "NULLS"):
            include_nulls = True
        elif self._match_text_seq("EXCLUDE", "NULLS"):
            include_nulls = False
    else:
        return None

    expressions = []

    if not self._match(TokenType.L_PAREN):
        # PIVOT/UNPIVOT keyword without "(": not a pivot clause after all.
        self._retreat(index)
        return None

    if unpivot:
        expressions = self._parse_csv(self._parse_column)
    else:
        expressions = self._parse_csv(lambda: self._parse_alias(self._parse_function()))

    if not expressions:
        self.raise_error("Failed to parse PIVOT's aggregation list")

    if not self._match(TokenType.FOR):
        self.raise_error("Expecting FOR")

    field = self._parse_pivot_in()

    self._match_r_paren()

    pivot = self.expression(
        exp.Pivot,
        expressions=expressions,
        field=field,
        unpivot=unpivot,
        include_nulls=include_nulls,
    )

    # Only parse an alias if another pivot clause doesn't follow immediately.
    if not self._match_set((TokenType.PIVOT, TokenType.UNPIVOT), advance=False):
        pivot.set("alias", self._parse_table_alias())

    if not unpivot:
        names = self._pivot_column_names(t.cast(t.List[exp.Expression], expressions))

        # Build the synthesized output column names: one per (IN value,
        # aggregation alias) pair, prefix/suffix order per dialect flag.
        columns: t.List[exp.Expression] = []
        for fld in pivot.args["field"].expressions:
            field_name = fld.sql() if self.IDENTIFY_PIVOT_STRINGS else fld.alias_or_name
            for name in names:
                if self.PREFIXED_PIVOT_COLUMNS:
                    name = f"{name}_{field_name}" if name else field_name
                else:
                    name = f"{field_name}_{name}" if name else field_name

                columns.append(exp.to_identifier(name))

        pivot.set("columns", columns)

    return pivot
def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]:
    """Return the alias of each pivot aggregation, used to name output columns."""
    return [agg.alias for agg in aggregations]
def _parse_prewhere(self, skip_where_token: bool = False) -> t.Optional[exp.PreWhere]:
    """Parse a PREWHERE clause; ``skip_where_token`` assumes it was consumed."""
    if not (skip_where_token or self._match(TokenType.PREWHERE)):
        return None

    comments = self._prev_comments
    return self.expression(exp.PreWhere, comments=comments, this=self._parse_conjunction())
def _parse_where(self, skip_where_token: bool = False) -> t.Optional[exp.Where]:
    """Parse a WHERE clause; ``skip_where_token`` assumes WHERE was consumed."""
    if not (skip_where_token or self._match(TokenType.WHERE)):
        return None

    comments = self._prev_comments
    return self.expression(exp.Where, comments=comments, this=self._parse_conjunction())
def _parse_group(self, skip_group_by_token: bool = False) -> t.Optional[exp.Group]:
    """Parse a GROUP BY clause, including ALL, GROUPING SETS, ROLLUP, CUBE
    and WITH TOTALS variants, accumulating everything into one exp.Group.
    """
    if not skip_group_by_token and not self._match(TokenType.GROUP_BY):
        return None

    elements = defaultdict(list)

    if self._match(TokenType.ALL):
        return self.expression(exp.Group, all=True)

    while True:
        expressions = self._parse_csv(self._parse_conjunction)
        if expressions:
            elements["expressions"].extend(expressions)

        grouping_sets = self._parse_grouping_sets()
        if grouping_sets:
            elements["grouping_sets"].extend(grouping_sets)

        rollup = None
        cube = None
        totals = None

        index = self._index
        with_ = self._match(TokenType.WITH)
        if self._match(TokenType.ROLLUP):
            # "WITH ROLLUP" stores True; "ROLLUP (...)" stores the column list.
            rollup = with_ or self._parse_wrapped_csv(self._parse_column)
            elements["rollup"].extend(ensure_list(rollup))

        if self._match(TokenType.CUBE):
            cube = with_ or self._parse_wrapped_csv(self._parse_column)
            elements["cube"].extend(ensure_list(cube))

        if self._match_text_seq("TOTALS"):
            totals = True
            elements["totals"] = True  # type: ignore

        if not (grouping_sets or rollup or cube or totals):
            if with_:
                # The WITH belonged to something else; give it back.
                self._retreat(index)
            break

    return self.expression(exp.Group, **elements)  # type: ignore
def _parse_grouping_sets(self) -> t.Optional[t.List[exp.Expression]]:
    """Parse ``GROUPING SETS (...)``; None when the keyword is absent."""
    if self._match(TokenType.GROUPING_SETS):
        return self._parse_wrapped_csv(self._parse_grouping_set)

    return None
def _parse_grouping_set(self) -> t.Optional[exp.Expression]:
    """Parse one grouping set: a parenthesized column tuple or a single column."""
    if not self._match(TokenType.L_PAREN):
        return self._parse_column()

    members = self._parse_csv(self._parse_column)
    self._match_r_paren()
    return self.expression(exp.Tuple, expressions=members)
def _parse_having(self, skip_having_token: bool = False) -> t.Optional[exp.Having]:
    """Parse a HAVING clause; ``skip_having_token`` assumes HAVING was consumed."""
    if skip_having_token or self._match(TokenType.HAVING):
        return self.expression(exp.Having, this=self._parse_conjunction())

    return None
def _parse_qualify(self) -> t.Optional[exp.Qualify]:
    """Parse a QUALIFY clause, if present."""
    if self._match(TokenType.QUALIFY):
        return self.expression(exp.Qualify, this=self._parse_conjunction())

    return None
def _parse_connect(self, skip_start_token: bool = False) -> t.Optional[exp.Connect]:
    """Parse a hierarchical query clause (START WITH ... CONNECT BY ...).

    START WITH may appear before or after CONNECT BY. The PRIOR unary
    operator is only valid inside the CONNECT BY condition.
    """
    if skip_start_token:
        start = None
    elif self._match(TokenType.START_WITH):
        start = self._parse_conjunction()
    else:
        return None

    self._match(TokenType.CONNECT_BY)
    # Register PRIOR only while the CONNECT BY condition is parsed, and
    # always deregister it — even if parsing raises — so the shared parser
    # table isn't left polluted for subsequent statements.
    self.NO_PAREN_FUNCTION_PARSERS["PRIOR"] = lambda self: self.expression(
        exp.Prior, this=self._parse_bitwise()
    )
    try:
        connect = self._parse_conjunction()
    finally:
        self.NO_PAREN_FUNCTION_PARSERS.pop("PRIOR")

    if not start and self._match(TokenType.START_WITH):
        start = self._parse_conjunction()

    return self.expression(exp.Connect, start=start, connect=connect)
def _parse_name_as_expression(self) -> exp.Alias:
    """Parse ``name AS expr`` into an exp.Alias (alias first, then expression)."""
    alias = self._parse_id_var(any_token=True)
    expression = self._match(TokenType.ALIAS) and self._parse_conjunction()
    return self.expression(exp.Alias, alias=alias, this=expression)
def _parse_interpolate(self) -> t.Optional[t.List[exp.Expression]]:
    """Parse an ``INTERPOLATE (...)`` ORDER BY modifier, if present."""
    if not self._match_text_seq("INTERPOLATE"):
        return None

    return self._parse_wrapped_csv(self._parse_name_as_expression)
def _parse_order(
    self, this: t.Optional[exp.Expression] = None, skip_order_token: bool = False
) -> t.Optional[exp.Expression]:
    """Parse ORDER BY (or ORDER SIBLINGS BY) into an exp.Order; when neither
    keyword is present, ``this`` is returned unchanged.
    """
    siblings = None
    if not skip_order_token and not self._match(TokenType.ORDER_BY):
        if not self._match(TokenType.ORDER_SIBLINGS_BY):
            return this

        siblings = True

    return self.expression(
        exp.Order,
        this=this,
        expressions=self._parse_csv(self._parse_ordered),
        interpolate=self._parse_interpolate(),
        siblings=siblings,
    )
def _parse_sort(self, exp_class: t.Type[E], token: TokenType) -> t.Optional[E]:
    """Parse a sort-style clause introduced by ``token`` into an ``exp_class`` node."""
    if self._match(token):
        return self.expression(exp_class, expressions=self._parse_csv(self._parse_ordered))

    return None
def _parse_ordered(
    self, parse_method: t.Optional[t.Callable] = None
) -> t.Optional[exp.Ordered]:
    """Parse one ORDER BY item with ASC/DESC, NULLS FIRST/LAST and WITH FILL."""
    this = parse_method() if parse_method else self._parse_conjunction()
    if not this:
        return None

    asc = self._match(TokenType.ASC)
    # desc is tri-state: True for DESC, False for an explicit ASC (the
    # "asc and False" makes it False rather than None), None if unspecified.
    desc = self._match(TokenType.DESC) or (asc and False)

    is_nulls_first = self._match_text_seq("NULLS", "FIRST")
    is_nulls_last = self._match_text_seq("NULLS", "LAST")

    nulls_first = is_nulls_first or False
    explicitly_null_ordered = is_nulls_first or is_nulls_last

    # Apply the dialect's implicit NULL ordering when none was given.
    if (
        not explicitly_null_ordered
        and (
            (not desc and self.dialect.NULL_ORDERING == "nulls_are_small")
            or (desc and self.dialect.NULL_ORDERING != "nulls_are_small")
        )
        and self.dialect.NULL_ORDERING != "nulls_are_last"
    ):
        nulls_first = True

    if self._match_text_seq("WITH", "FILL"):
        with_fill = self.expression(
            exp.WithFill,
            **{  # type: ignore
                "from": self._match(TokenType.FROM) and self._parse_bitwise(),
                "to": self._match_text_seq("TO") and self._parse_bitwise(),
                "step": self._match_text_seq("STEP") and self._parse_bitwise(),
            },
        )
    else:
        with_fill = None

    return self.expression(
        exp.Ordered, this=this, desc=desc, nulls_first=nulls_first, with_fill=with_fill
    )
def _parse_limit(
    self,
    this: t.Optional[exp.Expression] = None,
    top: bool = False,
    skip_limit_token: bool = False,
) -> t.Optional[exp.Expression]:
    """Parse LIMIT (or TOP when ``top`` is True), else a FETCH clause.

    Returns ``this`` unchanged when neither clause is present.
    """
    if skip_limit_token or self._match(TokenType.TOP if top else TokenType.LIMIT):
        comments = self._prev_comments
        if top:
            # TOP may optionally parenthesize its expression.
            limit_paren = self._match(TokenType.L_PAREN)
            expression = self._parse_term() if limit_paren else self._parse_number()

            if limit_paren:
                self._match_r_paren()
        else:
            expression = self._parse_term()

        if self._match(TokenType.COMMA):
            # "LIMIT <offset>, <count>" form: the first term is the offset.
            offset = expression
            expression = self._parse_term()
        else:
            offset = None

        limit_exp = self.expression(
            exp.Limit,
            this=this,
            expression=expression,
            offset=offset,
            comments=comments,
            expressions=self._parse_limit_by(),
        )

        return limit_exp

    if self._match(TokenType.FETCH):
        direction = self._match_set((TokenType.FIRST, TokenType.NEXT))
        direction = self._prev.text.upper() if direction else "FIRST"

        count = self._parse_field(tokens=self.FETCH_TOKENS)
        percent = self._match(TokenType.PERCENT)

        self._match_set((TokenType.ROW, TokenType.ROWS))

        only = self._match_text_seq("ONLY")
        with_ties = self._match_text_seq("WITH", "TIES")

        if only and with_ties:
            self.raise_error("Cannot specify both ONLY and WITH TIES in FETCH clause")

        return self.expression(
            exp.Fetch,
            direction=direction,
            count=count,
            percent=percent,
            with_ties=with_ties,
        )

    return this
def _parse_offset(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]:
    """Parse an OFFSET clause attached to ``this``; pass ``this`` through when absent."""
    if not self._match(TokenType.OFFSET):
        return this

    count = self._parse_term()
    # Optional ROW/ROWS noise word (e.g. "OFFSET 5 ROWS").
    self._match_set((TokenType.ROW, TokenType.ROWS))

    limit_by = self._parse_limit_by()
    return self.expression(exp.Offset, this=this, expression=count, expressions=limit_by)
def _parse_limit_by(self) -> t.Optional[t.List[exp.Expression]]:
    """Parse a trailing ``BY expr, ...`` suffix of LIMIT/OFFSET, if present."""
    if self._match_text_seq("BY"):
        return self._parse_csv(self._parse_bitwise)

    return None
def _parse_locks(self) -> t.List[exp.Lock]:
    """Parse trailing row-locking clauses (FOR UPDATE / FOR SHARE /
    LOCK IN SHARE MODE), each with optional OF list and wait policy.
    """
    locks = []
    while True:
        if self._match_text_seq("FOR", "UPDATE"):
            update = True
        elif self._match_text_seq("FOR", "SHARE") or self._match_text_seq(
            "LOCK", "IN", "SHARE", "MODE"
        ):
            update = False
        else:
            break

        expressions = None
        if self._match_text_seq("OF"):
            expressions = self._parse_csv(lambda: self._parse_table(schema=True))

        # wait: True=NOWAIT, False=SKIP LOCKED, expression=WAIT <n>, None=default.
        wait: t.Optional[bool | exp.Expression] = None
        if self._match_text_seq("NOWAIT"):
            wait = True
        elif self._match_text_seq("WAIT"):
            wait = self._parse_primary()
        elif self._match_text_seq("SKIP", "LOCKED"):
            wait = False

        locks.append(
            self.expression(exp.Lock, update=update, expressions=expressions, wait=wait)
        )

    return locks
def _parse_set_operations(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
    """Fold trailing UNION/EXCEPT/INTERSECT operators into a left-deep tree."""
    while this and self._match_set(self.SET_OPERATIONS):
        token_type = self._prev.token_type

        if token_type == TokenType.UNION:
            operation = exp.Union
        elif token_type == TokenType.EXCEPT:
            operation = exp.Except
        else:
            operation = exp.Intersect

        comments = self._prev.comments
        # DISTINCT is the default unless ALL was given explicitly.
        distinct = self._match(TokenType.DISTINCT) or not self._match(TokenType.ALL)
        by_name = self._match_text_seq("BY", "NAME")
        expression = self._parse_select(nested=True, parse_set_operation=False)

        this = self.expression(
            operation,
            comments=comments,
            this=this,
            distinct=distinct,
            by_name=by_name,
            expression=expression,
        )

    if isinstance(this, exp.Union) and self.MODIFIERS_ATTACHED_TO_UNION:
        expression = this.expression

        if expression:
            # Hoist trailing modifiers from the right arm onto the union node.
            for arg in self.UNION_MODIFIERS:
                expr = expression.args.get(arg)

                if expr:
                    this.set(arg, expr.pop())

    return this
def _parse_expression(self) -> t.Optional[exp.Expression]:
    """Parse a (possibly aliased) conjunction — one projection-level expression."""
    return self._parse_alias(self._parse_conjunction())
def _parse_conjunction(self) -> t.Optional[exp.Expression]:
    """Parse a left-associative chain of CONJUNCTION operators over equalities."""
    return self._parse_tokens(self._parse_equality, self.CONJUNCTION)
def _parse_equality(self) -> t.Optional[exp.Expression]:
    """Parse a left-associative chain of EQUALITY operators over comparisons."""
    return self._parse_tokens(self._parse_comparison, self.EQUALITY)
def _parse_comparison(self) -> t.Optional[exp.Expression]:
    """Parse a left-associative chain of COMPARISON operators over range exprs."""
    return self._parse_tokens(self._parse_range, self.COMPARISON)
    def _parse_range(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]:
        """Parse range-style predicates (BETWEEN, IN, LIKE, ...) along with optional
        leading NOT, the Postgres-style ISNULL/NOTNULL postfix operators, and IS."""
        this = this or self._parse_bitwise()
        negate = self._match(TokenType.NOT)
        if self._match_set(self.RANGE_PARSERS):
            expression = self.RANGE_PARSERS[self._prev.token_type](self, this)
            if not expression:
                # The range parser declined; keep the operand as-is
                return this
            this = expression
        elif self._match(TokenType.ISNULL):
            this = self.expression(exp.Is, this=this, expression=exp.Null())
        # Postgres supports ISNULL and NOTNULL for conditions.
        # https://blog.andreiavram.ro/postgresql-null-composite-type/
        if self._match(TokenType.NOTNULL):
            # NOTNULL desugars to NOT (x IS NULL)
            this = self.expression(exp.Is, this=this, expression=exp.Null())
            this = self.expression(exp.Not, this=this)
        if negate:
            this = self.expression(exp.Not, this=this)
        if self._match(TokenType.IS):
            this = self._parse_is(this)
        return this
    def _parse_is(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
        """Parse the tail of an IS predicate: [NOT] DISTINCT FROM <expr>, or NULL/boolean.

        Returns None (rewinding to before IS) when nothing IS-compatible follows,
        so the caller can treat the IS token differently.
        """
        index = self._index - 1
        negate = self._match(TokenType.NOT)
        if self._match_text_seq("DISTINCT", "FROM"):
            # IS NOT DISTINCT FROM == null-safe equality; IS DISTINCT FROM == null-safe inequality
            klass = exp.NullSafeEQ if negate else exp.NullSafeNEQ
            return self.expression(klass, this=this, expression=self._parse_bitwise())
        expression = self._parse_null() or self._parse_boolean()
        if not expression:
            self._retreat(index)
            return None
        this = self.expression(exp.Is, this=this, expression=expression)
        return self.expression(exp.Not, this=this) if negate else this
    def _parse_in(self, this: t.Optional[exp.Expression], alias: bool = False) -> exp.In:
        """Parse the right-hand side of an IN predicate.

        Supports IN UNNEST(...), a parenthesized/bracketed list or subquery,
        and a bare field (e.g. Hive's `x IN col`).
        """
        unnest = self._parse_unnest(with_alias=False)
        if unnest:
            this = self.expression(exp.In, this=this, unnest=unnest)
        elif self._match_set((TokenType.L_PAREN, TokenType.L_BRACKET)):
            matched_l_paren = self._prev.token_type == TokenType.L_PAREN
            expressions = self._parse_csv(lambda: self._parse_select_or_expression(alias=alias))
            if len(expressions) == 1 and isinstance(expressions[0], exp.Query):
                # A single query becomes `IN (subquery)` rather than a value list
                this = self.expression(exp.In, this=this, query=expressions[0])
            else:
                this = self.expression(exp.In, this=this, expressions=expressions)
            if matched_l_paren:
                self._match_r_paren(this)
            elif not self._match(TokenType.R_BRACKET, expression=this):
                self.raise_error("Expecting ]")
        else:
            this = self.expression(exp.In, this=this, field=self._parse_field())
        return this
def _parse_between(self, this: t.Optional[exp.Expression]) -> exp.Between:
low = self._parse_bitwise()
self._match(TokenType.AND)
high = self._parse_bitwise()
return self.expression(exp.Between, this=this, low=low, high=high)
def _parse_escape(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
if not self._match(TokenType.ESCAPE):
return this
return self.expression(exp.Escape, this=this, expression=self._parse_string())
    def _parse_interval(self, match_interval: bool = True) -> t.Optional[exp.Interval]:
        """Parse an INTERVAL expression, canonicalizing it to INTERVAL '<value>' <unit>.

        When `match_interval` is True the INTERVAL keyword itself is required.
        Returns None (rewinding) if what follows cannot be an interval.
        """
        index = self._index
        if not self._match(TokenType.INTERVAL) and match_interval:
            return None
        if self._match(TokenType.STRING, advance=False):
            this = self._parse_primary()
        else:
            this = self._parse_term()
        if not this or (
            isinstance(this, exp.Column)
            and not this.table
            and not this.this.quoted
            and this.name.upper() == "IS"
        ):
            # Bail out: e.g. `interval IS ...` where `interval` is a column name
            self._retreat(index)
            return None
        unit = self._parse_function() or (
            not self._match(TokenType.ALIAS, advance=False)
            and self._parse_var(any_token=True, upper=True)
        )
        # Most dialects support, e.g., the form INTERVAL '5' day, thus we try to parse
        # each INTERVAL expression into this canonical form so it's easy to transpile
        if this and this.is_number:
            this = exp.Literal.string(this.name)
        elif this and this.is_string:
            parts = this.name.split()
            if len(parts) == 2:
                if unit:
                    # This is not actually a unit, it's something else (e.g. a "window side")
                    unit = None
                    self._retreat(self._index - 1)
                # Split INTERVAL '5 day' into value '5' and unit DAY
                this = exp.Literal.string(parts[0])
                unit = self.expression(exp.Var, this=parts[1].upper())
        return self.expression(exp.Interval, this=this, unit=unit)
    def _parse_bitwise(self) -> t.Optional[exp.Expression]:
        """Parse bitwise-level binary operators, plus ||, ??, and << / >> shift pairs."""
        this = self._parse_term()
        while True:
            if self._match_set(self.BITWISE):
                this = self.expression(
                    self.BITWISE[self._prev.token_type],
                    this=this,
                    expression=self._parse_term(),
                )
            elif self.dialect.DPIPE_IS_STRING_CONCAT and self._match(TokenType.DPIPE):
                this = self.expression(
                    exp.DPipe,
                    this=this,
                    expression=self._parse_term(),
                    safe=not self.dialect.STRICT_STRING_CONCAT,
                )
            elif self._match(TokenType.DQMARK):
                # `a ?? b` coalesces a with b
                this = self.expression(exp.Coalesce, this=this, expressions=self._parse_term())
            elif self._match_pair(TokenType.LT, TokenType.LT):
                # `<<` is tokenized as two LT tokens
                this = self.expression(
                    exp.BitwiseLeftShift, this=this, expression=self._parse_term()
                )
            elif self._match_pair(TokenType.GT, TokenType.GT):
                # `>>` is tokenized as two GT tokens
                this = self.expression(
                    exp.BitwiseRightShift, this=this, expression=self._parse_term()
                )
            else:
                break
        return this
def _parse_term(self) -> t.Optional[exp.Expression]:
return self._parse_tokens(self._parse_factor, self.TERM)
    def _parse_factor(self) -> t.Optional[exp.Expression]:
        """Parse multiplicative-level (FACTOR) operators, tagging Div nodes with
        the dialect's typed/safe division semantics."""
        # Exponentiation sits between unary and factor only in dialects that define it
        parse_method = self._parse_exponent if self.EXPONENT else self._parse_unary
        this = parse_method()
        while self._match_set(self.FACTOR):
            this = self.expression(
                self.FACTOR[self._prev.token_type],
                this=this,
                comments=self._prev_comments,
                expression=parse_method(),
            )
            if isinstance(this, exp.Div):
                # Record how this dialect treats `/` so transpilation can adjust it
                this.args["typed"] = self.dialect.TYPED_DIVISION
                this.args["safe"] = self.dialect.SAFE_DIVISION
        return this
def _parse_exponent(self) -> t.Optional[exp.Expression]:
return self._parse_tokens(self._parse_unary, self.EXPONENT)
def _parse_unary(self) -> t.Optional[exp.Expression]:
if self._match_set(self.UNARY_PARSERS):
return self.UNARY_PARSERS[self._prev.token_type](self)
return self._parse_at_time_zone(self._parse_type())
    def _parse_type(self, parse_interval: bool = True) -> t.Optional[exp.Expression]:
        """Parse intervals (and sums of intervals), `type 'literal'` casts, or a column."""
        interval = parse_interval and self._parse_interval()
        if interval:
            # Convert INTERVAL 'val_1' unit_1 [+] ... [+] 'val_n' unit_n into a sum of intervals
            while True:
                index = self._index
                self._match(TokenType.PLUS)
                if not self._match_set((TokenType.STRING, TokenType.NUMBER), advance=False):
                    self._retreat(index)
                    break
                interval = self.expression(  # type: ignore
                    exp.Add, this=interval, expression=self._parse_interval(match_interval=False)
                )
            return interval
        index = self._index
        data_type = self._parse_types(check_func=True, allow_identifiers=False)
        this = self._parse_column()
        if data_type:
            if isinstance(this, exp.Literal):
                # e.g. DATE '2020-01-01' -> a cast (or a dialect-specific literal parser)
                parser = self.TYPE_LITERAL_PARSERS.get(data_type.this)
                if parser:
                    return parser(self, this, data_type)
                return self.expression(exp.Cast, this=this, to=data_type)
            if not data_type.expressions:
                # A bare type name with no literal was really a column reference
                self._retreat(index)
                return self._parse_column()
            return self._parse_column_ops(data_type)
        return this and self._parse_column_ops(this)
def _parse_type_size(self) -> t.Optional[exp.DataTypeParam]:
this = self._parse_type()
if not this:
return None
return self.expression(
exp.DataTypeParam, this=this, expression=self._parse_var(any_token=True)
)
    def _parse_types(
        self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True
    ) -> t.Optional[exp.Expression]:
        """Parse a data type, including nested/struct/enum/aggregate parameters,
        time-zone qualifiers, INTERVAL spans, UNSIGNED variants, and `[]` array suffixes.

        Args:
            check_func: when True, reject a parenthesized form that could be a
                function call unless it is followed by a string (e.g. DECIMAL(1)('x')).
            schema: propagated to nested type parses (column-def context).
            allow_identifiers: allow plain identifiers to be re-tokenized as types.

        Returns None (rewinding) when no type can be parsed here.
        """
        index = self._index
        # Teradata allows a SYSUDTLIB. prefix on type names
        prefix = self._match_text_seq("SYSUDTLIB", ".")
        if not self._match_set(self.TYPE_TOKENS):
            identifier = allow_identifiers and self._parse_id_var(
                any_token=False, tokens=(TokenType.VAR,)
            )
            if identifier:
                # Re-tokenize the identifier: it may be a type keyword in disguise
                tokens = self.dialect.tokenize(identifier.name)
                if len(tokens) != 1:
                    self.raise_error("Unexpected identifier", self._prev)
                if tokens[0].token_type in self.TYPE_TOKENS:
                    self._prev = tokens[0]
                elif self.dialect.SUPPORTS_USER_DEFINED_TYPES:
                    type_name = identifier.name
                    while self._match(TokenType.DOT):
                        type_name = f"{type_name}.{self._advance_any() and self._prev.text}"
                    return exp.DataType.build(type_name, udt=True)
                else:
                    self._retreat(self._index - 1)
                    return None
            else:
                return None
        type_token = self._prev.token_type
        if type_token == TokenType.PSEUDO_TYPE:
            return self.expression(exp.PseudoType, this=self._prev.text.upper())
        if type_token == TokenType.OBJECT_IDENTIFIER:
            return self.expression(exp.ObjectIdentifier, this=self._prev.text.upper())
        nested = type_token in self.NESTED_TYPE_TOKENS
        is_struct = type_token in self.STRUCT_TYPE_TOKENS
        is_aggregate = type_token in self.AGGREGATE_TYPE_TOKENS
        expressions = None
        maybe_func = False
        if self._match(TokenType.L_PAREN):
            if is_struct:
                expressions = self._parse_csv(self._parse_struct_types)
            elif nested:
                expressions = self._parse_csv(
                    lambda: self._parse_types(
                        check_func=check_func, schema=schema, allow_identifiers=allow_identifiers
                    )
                )
            elif type_token in self.ENUM_TYPE_TOKENS:
                expressions = self._parse_csv(self._parse_equality)
            elif is_aggregate:
                # e.g. AggregateFunction(avg, Float64): first arg is the function name
                func_or_ident = self._parse_function(anonymous=True) or self._parse_id_var(
                    any_token=False, tokens=(TokenType.VAR,)
                )
                if not func_or_ident or not self._match(TokenType.COMMA):
                    return None
                expressions = self._parse_csv(
                    lambda: self._parse_types(
                        check_func=check_func, schema=schema, allow_identifiers=allow_identifiers
                    )
                )
                expressions.insert(0, func_or_ident)
            else:
                expressions = self._parse_csv(self._parse_type_size)
            if not expressions or not self._match(TokenType.R_PAREN):
                self._retreat(index)
                return None
            # The parenthesized form could also be a function call, e.g. DECIMAL(1, 2)
            maybe_func = True
        this: t.Optional[exp.Expression] = None
        values: t.Optional[t.List[exp.Expression]] = None
        if nested and self._match(TokenType.LT):
            # Angle-bracket syntax: ARRAY<INT>, STRUCT<a INT, ...>
            if is_struct:
                expressions = self._parse_csv(lambda: self._parse_struct_types(type_required=True))
            else:
                expressions = self._parse_csv(
                    lambda: self._parse_types(
                        check_func=check_func, schema=schema, allow_identifiers=allow_identifiers
                    )
                )
            if not self._match(TokenType.GT):
                self.raise_error("Expecting >")
            if self._match_set((TokenType.L_BRACKET, TokenType.L_PAREN)):
                values = self._parse_csv(self._parse_conjunction)
                self._match_set((TokenType.R_BRACKET, TokenType.R_PAREN))
        if type_token in self.TIMESTAMPS:
            if self._match_text_seq("WITH", "TIME", "ZONE"):
                maybe_func = False
                tz_type = (
                    exp.DataType.Type.TIMETZ
                    if type_token in self.TIMES
                    else exp.DataType.Type.TIMESTAMPTZ
                )
                this = exp.DataType(this=tz_type, expressions=expressions)
            elif self._match_text_seq("WITH", "LOCAL", "TIME", "ZONE"):
                maybe_func = False
                this = exp.DataType(this=exp.DataType.Type.TIMESTAMPLTZ, expressions=expressions)
            elif self._match_text_seq("WITHOUT", "TIME", "ZONE"):
                maybe_func = False
        elif type_token == TokenType.INTERVAL:
            unit = self._parse_var()
            if self._match_text_seq("TO"):
                # e.g. INTERVAL DAY TO SECOND
                span = [exp.IntervalSpan(this=unit, expression=self._parse_var())]
            else:
                span = None
            if span or not unit:
                this = self.expression(
                    exp.DataType, this=exp.DataType.Type.INTERVAL, expressions=span
                )
            else:
                this = self.expression(exp.DataType, this=self.expression(exp.Interval, unit=unit))
        if maybe_func and check_func:
            index2 = self._index
            # Only a following string confirms this was a type, not a function call
            peek = self._parse_string()
            if not peek:
                self._retreat(index)
                return None
            self._retreat(index2)
        if not this:
            if self._match_text_seq("UNSIGNED"):
                unsigned_type_token = self.SIGNED_TO_UNSIGNED_TYPE_TOKEN.get(type_token)
                if not unsigned_type_token:
                    self.raise_error(f"Cannot convert {type_token.value} to unsigned.")
                type_token = unsigned_type_token or type_token
            this = exp.DataType(
                this=exp.DataType.Type[type_token.value],
                expressions=expressions,
                nested=nested,
                values=values,
                prefix=prefix,
            )
        while self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET):
            # Each trailing `[]` wraps the type in another ARRAY level
            this = exp.DataType(this=exp.DataType.Type.ARRAY, expressions=[this], nested=True)
        return this
    def _parse_struct_types(self, type_required: bool = False) -> t.Optional[exp.Expression]:
        """Parse one struct member: `<name>[:] <type>` or a bare type.

        With `type_required`, a member that parsed as just a name is re-parsed
        as a type (e.g. STRUCT<INT> has no field names).
        """
        index = self._index
        this = self._parse_type(parse_interval=False) or self._parse_id_var()
        # BigQuery-style `name: type` separator is optional
        self._match(TokenType.COLON)
        column_def = self._parse_column_def(this)
        if type_required and (
            (isinstance(this, exp.Column) and this.this is column_def) or this is column_def
        ):
            # Nothing beyond a bare name was parsed, so it must be a type instead
            self._retreat(index)
            return self._parse_types()
        return column_def
def _parse_at_time_zone(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
if not self._match_text_seq("AT", "TIME", "ZONE"):
return this
return self.expression(exp.AtTimeZone, this=this, zone=self._parse_unary())
def _parse_column(self) -> t.Optional[exp.Expression]:
this = self._parse_column_reference()
return self._parse_column_ops(this) if this else self._parse_bracket(this)
    def _parse_column_reference(self) -> t.Optional[exp.Expression]:
        """Parse a field and wrap a bare identifier into a Column node.

        Also treats the VALUES keyword as an identifier when the dialect expects
        VALUES to be followed by parens and none follow.
        """
        this = self._parse_field()
        if (
            not this
            and self._match(TokenType.VALUES, advance=False)
            and self.VALUES_FOLLOWED_BY_PAREN
            and (not self._next or self._next.token_type != TokenType.L_PAREN)
        ):
            this = self._parse_id_var()
        return self.expression(exp.Column, this=this) if isinstance(this, exp.Identifier) else this
    def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
        """Parse trailing column operators: `::` casts, dots, JSON operators, brackets.

        Dotted chains are re-shaped so the right-most part becomes the column and
        earlier parts shift into table/db/catalog positions.
        """
        this = self._parse_bracket(this)
        while self._match_set(self.COLUMN_OPERATORS):
            op_token = self._prev.token_type
            op = self.COLUMN_OPERATORS.get(op_token)
            if op_token == TokenType.DCOLON:
                # `expr::type` cast
                field = self._parse_types()
                if not field:
                    self.raise_error("Expected type")
            elif op and self._curr:
                field = self._parse_column_reference()
            else:
                field = self._parse_field(anonymous_func=True, any_token=True)
            if isinstance(field, exp.Func) and this:
                # bigquery allows function calls like x.y.count(...)
                # SAFE.SUBSTR(...)
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-reference#function_call_rules
                this = exp.replace_tree(
                    this,
                    lambda n: (
                        self.expression(exp.Dot, this=n.args.get("table"), expression=n.this)
                        if n.table
                        else n.this
                    )
                    if isinstance(n, exp.Column)
                    else n,
                )
            if op:
                this = op(self, this, field)
            elif isinstance(this, exp.Column) and not this.args.get("catalog"):
                # Shift qualifiers left: column -> table -> db -> catalog
                this = self.expression(
                    exp.Column,
                    this=field,
                    table=this.this,
                    db=this.args.get("table"),
                    catalog=this.args.get("db"),
                )
            else:
                this = self.expression(exp.Dot, this=this, expression=field)
            this = self._parse_bracket(this)
        return this
    def _parse_primary(self) -> t.Optional[exp.Expression]:
        """Parse a primary expression: literals (with implicit string concatenation),
        leading-dot decimals, and parenthesized expressions/subqueries/tuples."""
        if self._match_set(self.PRIMARY_PARSERS):
            token_type = self._prev.token_type
            primary = self.PRIMARY_PARSERS[token_type](self, self._prev)
            if token_type == TokenType.STRING:
                # Adjacent string literals concatenate ('a' 'b' -> CONCAT('a', 'b'))
                expressions = [primary]
                while self._match(TokenType.STRING):
                    expressions.append(exp.Literal.string(self._prev.text))
                if len(expressions) > 1:
                    return self.expression(exp.Concat, expressions=expressions)
            return primary
        if self._match_pair(TokenType.DOT, TokenType.NUMBER):
            # `.5` style decimal literal
            return exp.Literal.number(f"0.{self._prev.text}")
        if self._match(TokenType.L_PAREN):
            comments = self._prev_comments
            query = self._parse_select()
            if query:
                expressions = [query]
            else:
                expressions = self._parse_expressions()
            this = self._parse_query_modifiers(seq_get(expressions, 0))
            if isinstance(this, exp.UNWRAPPED_QUERIES):
                this = self._parse_set_operations(
                    self._parse_subquery(this=this, parse_alias=False)
                )
            elif isinstance(this, exp.Subquery):
                this = self._parse_subquery(
                    this=self._parse_set_operations(this), parse_alias=False
                )
            elif len(expressions) > 1:
                this = self.expression(exp.Tuple, expressions=expressions)
            else:
                this = self.expression(exp.Paren, this=this)
            if this:
                this.add_comments(comments)
            self._match_r_paren(expression=this)
            return this
        return None
def _parse_field(
self,
any_token: bool = False,
tokens: t.Optional[t.Collection[TokenType]] = None,
anonymous_func: bool = False,
) -> t.Optional[exp.Expression]:
return (
self._parse_primary()
or self._parse_function(anonymous=anonymous_func)
or self._parse_id_var(any_token=any_token, tokens=tokens)
)
    def _parse_function(
        self,
        functions: t.Optional[t.Dict[str, t.Callable]] = None,
        anonymous: bool = False,
        optional_parens: bool = True,
    ) -> t.Optional[exp.Expression]:
        """Parse a function call, also unwrapping the ODBC-style `{fn <function>}` escape.

        Args:
            functions: override for the name -> builder mapping (defaults to FUNCTIONS).
            anonymous: parse as an Anonymous node even if a known builder exists.
            optional_parens: allow parenless functions like CURRENT_DATE.
        """
        # This allows us to also parse {fn <function>} syntax (Snowflake, MySQL support this)
        # See: https://community.snowflake.com/s/article/SQL-Escape-Sequences
        fn_syntax = False
        if (
            self._match(TokenType.L_BRACE, advance=False)
            and self._next
            and self._next.text.upper() == "FN"
        ):
            self._advance(2)
            fn_syntax = True
        func = self._parse_function_call(
            functions=functions, anonymous=anonymous, optional_parens=optional_parens
        )
        if fn_syntax:
            self._match(TokenType.R_BRACE)
        return func
    def _parse_function_call(
        self,
        functions: t.Optional[t.Dict[str, t.Callable]] = None,
        anonymous: bool = False,
        optional_parens: bool = True,
    ) -> t.Optional[exp.Expression]:
        """Parse a function invocation into a typed Func node or an Anonymous node.

        Handles parenless functions, special-cased parsers (FUNCTION_PARSERS),
        subquery predicates like EXISTS(SELECT ...), and aliased kwargs.
        """
        if not self._curr:
            return None
        comments = self._curr.comments
        token_type = self._curr.token_type
        this = self._curr.text
        upper = this.upper()
        parser = self.NO_PAREN_FUNCTION_PARSERS.get(upper)
        if optional_parens and parser and token_type not in self.INVALID_FUNC_NAME_TOKENS:
            self._advance()
            return self._parse_window(parser(self))
        if not self._next or self._next.token_type != TokenType.L_PAREN:
            # No parens: only parenless built-ins (e.g. CURRENT_DATE) qualify
            if optional_parens and token_type in self.NO_PAREN_FUNCTIONS:
                self._advance()
                return self.expression(self.NO_PAREN_FUNCTIONS[token_type])
            return None
        if token_type not in self.FUNC_TOKENS:
            return None
        # Consume the function name and the opening paren
        self._advance(2)
        parser = self.FUNCTION_PARSERS.get(upper)
        if parser and not anonymous:
            this = parser(self)
        else:
            subquery_predicate = self.SUBQUERY_PREDICATES.get(token_type)
            if subquery_predicate and self._curr.token_type in (TokenType.SELECT, TokenType.WITH):
                # e.g. EXISTS(SELECT ...) / ANY(SELECT ...)
                this = self.expression(subquery_predicate, this=self._parse_select())
                self._match_r_paren()
                return this
            if functions is None:
                functions = self.FUNCTIONS
            function = functions.get(upper)
            alias = upper in self.FUNCTIONS_WITH_ALIASED_ARGS
            args = self._parse_csv(lambda: self._parse_lambda(alias=alias))
            if alias:
                args = self._kv_to_prop_eq(args)
            if function and not anonymous:
                if "dialect" in function.__code__.co_varnames:
                    func = function(args, dialect=self.dialect)
                else:
                    func = function(args)
                func = self.validate_expression(func, args)
                if not self.dialect.NORMALIZE_FUNCTIONS:
                    # Preserve the original spelling of the function name
                    func.meta["name"] = this
                this = func
            else:
                if token_type == TokenType.IDENTIFIER:
                    this = exp.Identifier(this=this, quoted=True)
                this = self.expression(exp.Anonymous, this=this, expressions=args)
        if isinstance(this, exp.Expression):
            this.add_comments(comments)
        self._match_r_paren(this)
        return self._parse_window(this)
    def _kv_to_prop_eq(self, expressions: t.List[exp.Expression]) -> t.List[exp.Expression]:
        """Normalize key-value style arguments (aliases, equalities) into PropertyEQ nodes."""
        transformed = []
        for e in expressions:
            if isinstance(e, self.KEY_VALUE_DEFINITIONS):
                if isinstance(e, exp.Alias):
                    # `value AS key` -> key := value
                    e = self.expression(exp.PropertyEQ, this=e.args.get("alias"), expression=e.this)
                if not isinstance(e, exp.PropertyEQ):
                    e = self.expression(
                        exp.PropertyEQ, this=exp.to_identifier(e.this.name), expression=e.expression
                    )
                if isinstance(e.this, exp.Column):
                    # Keys are identifiers, not column references
                    e.this.replace(e.this.this)
            transformed.append(e)
        return transformed
def _parse_function_parameter(self) -> t.Optional[exp.Expression]:
return self._parse_column_def(self._parse_id_var())
    def _parse_user_defined_function(
        self, kind: t.Optional[TokenType] = None
    ) -> t.Optional[exp.Expression]:
        """Parse a (possibly dotted) UDF name and its optional parameter list."""
        this = self._parse_id_var()
        while self._match(TokenType.DOT):
            # Qualified names: schema.func, db.schema.func, ...
            this = self.expression(exp.Dot, this=this, expression=self._parse_id_var())
        if not self._match(TokenType.L_PAREN):
            # Name only (e.g. DROP FUNCTION foo)
            return this
        expressions = self._parse_csv(self._parse_function_parameter)
        self._match_r_paren()
        return self.expression(
            exp.UserDefinedFunction, this=this, expressions=expressions, wrapped=True
        )
def _parse_introducer(self, token: Token) -> exp.Introducer | exp.Identifier:
literal = self._parse_primary()
if literal:
return self.expression(exp.Introducer, this=token.text, expression=literal)
return self.expression(exp.Identifier, this=token.text)
def _parse_session_parameter(self) -> exp.SessionParameter:
kind = None
this = self._parse_id_var() or self._parse_primary()
if this and self._match(TokenType.DOT):
kind = this.name
this = self._parse_var() or self._parse_primary()
return self.expression(exp.SessionParameter, this=this, kind=kind)
    def _parse_lambda(self, alias: bool = False) -> t.Optional[exp.Expression]:
        """Parse a lambda `(x, y) -> ...` / `x -> ...` if present; otherwise a
        DISTINCT list or a select-or-expression with trailing modifiers."""
        index = self._index
        if self._match(TokenType.L_PAREN):
            expressions = t.cast(
                t.List[t.Optional[exp.Expression]], self._parse_csv(self._parse_id_var)
            )
            if not self._match(TokenType.R_PAREN):
                # Not a parenthesized lambda parameter list after all
                self._retreat(index)
        else:
            expressions = [self._parse_id_var()]
        if self._match_set(self.LAMBDAS):
            return self.LAMBDAS[self._prev.token_type](self, expressions)
        # No lambda arrow followed; rewind and parse a normal argument
        self._retreat(index)
        this: t.Optional[exp.Expression]
        if self._match(TokenType.DISTINCT):
            this = self.expression(
                exp.Distinct, expressions=self._parse_csv(self._parse_conjunction)
            )
        else:
            this = self._parse_select_or_expression(alias=alias)
        # Function arguments may carry modifiers, e.g. ARRAY_AGG(x ORDER BY y LIMIT 1)
        return self._parse_limit(
            self._parse_order(self._parse_having_max(self._parse_respect_or_ignore_nulls(this)))
        )
    def _parse_schema(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]:
        """Parse an optional parenthesized schema (column/constraint list) for `this`.

        First speculatively tries a nested SELECT (so `(SELECT ...)` is not
        swallowed as a schema), rewinding and clearing errors either way.
        """
        index = self._index
        if not self.errors:
            try:
                if self._parse_select(nested=True):
                    return this
            except ParseError:
                pass
            finally:
                self.errors.clear()
                self._retreat(index)
        if not self._match(TokenType.L_PAREN):
            return this
        args = self._parse_csv(lambda: self._parse_constraint() or self._parse_field_def())
        self._match_r_paren()
        return self.expression(exp.Schema, this=this, expressions=args)
def _parse_field_def(self) -> t.Optional[exp.Expression]:
return self._parse_column_def(self._parse_field(any_token=True))
    def _parse_column_def(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
        """Parse a column definition: type, computed/transform expressions, constraints.

        Returns `this` unchanged when neither a type nor constraints follow.
        """
        # column defs are not really columns, they're identifiers
        if isinstance(this, exp.Column):
            this = this.this
        kind = self._parse_types(schema=True)
        if self._match_text_seq("FOR", "ORDINALITY"):
            return self.expression(exp.ColumnDef, this=this, ordinality=True)
        constraints: t.List[exp.Expression] = []
        if not kind and self._match(TokenType.ALIAS):
            # `col AS <expr>` computed column (no explicit type)
            constraints.append(
                self.expression(
                    exp.ComputedColumnConstraint,
                    this=self._parse_conjunction(),
                    persisted=self._match_text_seq("PERSISTED"),
                    not_null=self._match_pair(TokenType.NOT, TokenType.NULL),
                )
            )
        elif kind and self._match_pair(TokenType.ALIAS, TokenType.L_PAREN, advance=False):
            # `col <type> AS (<transform>)` (e.g. partition transforms)
            self._match(TokenType.ALIAS)
            constraints.append(
                self.expression(exp.TransformColumnConstraint, this=self._parse_field())
            )
        while True:
            constraint = self._parse_column_constraint()
            if not constraint:
                break
            constraints.append(constraint)
        if not kind and not constraints:
            return this
        return self.expression(exp.ColumnDef, this=this, kind=kind, constraints=constraints)
    def _parse_auto_increment(
        self,
    ) -> exp.GeneratedAsIdentityColumnConstraint | exp.AutoIncrementColumnConstraint:
        """Parse AUTO_INCREMENT, optionally with (start, increment) or
        START ... INCREMENT ... arguments."""
        start = None
        increment = None
        if self._match(TokenType.L_PAREN, advance=False):
            # AUTOINCREMENT(start, increment)
            args = self._parse_wrapped_csv(self._parse_bitwise)
            start = seq_get(args, 0)
            increment = seq_get(args, 1)
        elif self._match_text_seq("START"):
            start = self._parse_bitwise()
            self._match_text_seq("INCREMENT")
            increment = self._parse_bitwise()
        if start and increment:
            return exp.GeneratedAsIdentityColumnConstraint(start=start, increment=increment)
        return exp.AutoIncrementColumnConstraint()
def _parse_auto_property(self) -> t.Optional[exp.AutoRefreshProperty]:
if not self._match_text_seq("REFRESH"):
self._retreat(self._index - 1)
return None
return self.expression(exp.AutoRefreshProperty, this=self._parse_var(upper=True))
def _parse_compress(self) -> exp.CompressColumnConstraint:
if self._match(TokenType.L_PAREN, advance=False):
return self.expression(
exp.CompressColumnConstraint, this=self._parse_wrapped_csv(self._parse_bitwise)
)
return self.expression(exp.CompressColumnConstraint, this=self._parse_bitwise())
    def _parse_generated_as_identity(
        self,
    ) -> (
        exp.GeneratedAsIdentityColumnConstraint
        | exp.ComputedColumnConstraint
        | exp.GeneratedAsRowColumnConstraint
    ):
        """Parse GENERATED {ALWAYS | BY DEFAULT} AS {IDENTITY (...) | ROW ... | (<expr>)}."""
        if self._match_text_seq("BY", "DEFAULT"):
            on_null = self._match_pair(TokenType.ON, TokenType.NULL)
            this = self.expression(
                exp.GeneratedAsIdentityColumnConstraint, this=False, on_null=on_null
            )
        else:
            self._match_text_seq("ALWAYS")
            this = self.expression(exp.GeneratedAsIdentityColumnConstraint, this=True)
        self._match(TokenType.ALIAS)
        if self._match_text_seq("ROW"):
            # GENERATED ... AS ROW START|END [HIDDEN] (system-versioned tables)
            start = self._match_text_seq("START")
            if not start:
                self._match(TokenType.END)
            hidden = self._match_text_seq("HIDDEN")
            return self.expression(exp.GeneratedAsRowColumnConstraint, start=start, hidden=hidden)
        identity = self._match_text_seq("IDENTITY")
        if self._match(TokenType.L_PAREN):
            if self._match(TokenType.START_WITH):
                this.set("start", self._parse_bitwise())
            if self._match_text_seq("INCREMENT", "BY"):
                this.set("increment", self._parse_bitwise())
            if self._match_text_seq("MINVALUE"):
                this.set("minvalue", self._parse_bitwise())
            if self._match_text_seq("MAXVALUE"):
                this.set("maxvalue", self._parse_bitwise())
            if self._match_text_seq("CYCLE"):
                this.set("cycle", True)
            elif self._match_text_seq("NO", "CYCLE"):
                this.set("cycle", False)
            if not identity:
                # GENERATED ... AS (<expression>) computed column
                this.set("expression", self._parse_bitwise())
            elif not this.args.get("start") and self._match(TokenType.NUMBER, advance=False):
                # Bare numeric form: IDENTITY(start, increment)
                args = self._parse_csv(self._parse_bitwise)
                this.set("start", seq_get(args, 0))
                this.set("increment", seq_get(args, 1))
            self._match_r_paren()
        return this
def _parse_inline(self) -> exp.InlineLengthColumnConstraint:
self._match_text_seq("LENGTH")
return self.expression(exp.InlineLengthColumnConstraint, this=self._parse_bitwise())
    def _parse_not_constraint(self) -> t.Optional[exp.Expression]:
        """Parse the tail of a NOT-prefixed constraint:
        NOT NULL, NOT CASESPECIFIC, or NOT FOR REPLICATION."""
        if self._match_text_seq("NULL"):
            return self.expression(exp.NotNullColumnConstraint)
        if self._match_text_seq("CASESPECIFIC"):
            return self.expression(exp.CaseSpecificColumnConstraint, not_=True)
        if self._match_text_seq("FOR", "REPLICATION"):
            return self.expression(exp.NotForReplicationColumnConstraint)
        # Unrecognized NOT-constraint; the caller decides what to do with NOT
        return None
def _parse_column_constraint(self) -> t.Optional[exp.Expression]:
if self._match(TokenType.CONSTRAINT):
this = self._parse_id_var()
else:
this = None
if self._match_texts(self.CONSTRAINT_PARSERS):
return self.expression(
exp.ColumnConstraint,
this=this,
kind=self.CONSTRAINT_PARSERS[self._prev.text.upper()](self),
)
return this
def _parse_constraint(self) -> t.Optional[exp.Expression]:
if not self._match(TokenType.CONSTRAINT):
return self._parse_unnamed_constraint(constraints=self.SCHEMA_UNNAMED_CONSTRAINTS)
return self.expression(
exp.Constraint,
this=self._parse_id_var(),
expressions=self._parse_unnamed_constraints(),
)
def _parse_unnamed_constraints(self) -> t.List[exp.Expression]:
constraints = []
while True:
constraint = self._parse_unnamed_constraint() or self._parse_function()
if not constraint:
break
constraints.append(constraint)
return constraints
    def _parse_unnamed_constraint(
        self, constraints: t.Optional[t.Collection[str]] = None
    ) -> t.Optional[exp.Expression]:
        """Parse one unnamed constraint from the allowed keyword set.

        Quoted identifiers never start a constraint, so IDENTIFIER tokens are rejected.
        """
        if self._match(TokenType.IDENTIFIER, advance=False) or not self._match_texts(
            constraints or self.CONSTRAINT_PARSERS
        ):
            return None
        constraint = self._prev.text.upper()
        if constraint not in self.CONSTRAINT_PARSERS:
            self.raise_error(f"No parser found for schema constraint {constraint}.")
        return self.CONSTRAINT_PARSERS[constraint](self)
    def _parse_unique(self) -> exp.UniqueColumnConstraint:
        """Parse UNIQUE [KEY] [(cols)] [USING <index_type>] [ON CONFLICT ...]."""
        self._match_text_seq("KEY")
        return self.expression(
            exp.UniqueColumnConstraint,
            this=self._parse_schema(self._parse_id_var(any_token=False)),
            # e.g. MySQL's UNIQUE KEY ... USING BTREE
            index_type=self._match(TokenType.USING) and self._advance_any() and self._prev.text,
            on_conflict=self._parse_on_conflict(),
        )
    def _parse_key_constraint_options(self) -> t.List[str]:
        """Parse trailing key-constraint options (ON DELETE/UPDATE actions,
        deferrability, MATCH FULL, ...) into a list of raw strings."""
        options = []
        while True:
            if not self._curr:
                break
            if self._match(TokenType.ON):
                action = None
                # The token after ON names the event (e.g. DELETE or UPDATE)
                on = self._advance_any() and self._prev.text
                if self._match_text_seq("NO", "ACTION"):
                    action = "NO ACTION"
                elif self._match_text_seq("CASCADE"):
                    action = "CASCADE"
                elif self._match_text_seq("RESTRICT"):
                    action = "RESTRICT"
                elif self._match_pair(TokenType.SET, TokenType.NULL):
                    action = "SET NULL"
                elif self._match_pair(TokenType.SET, TokenType.DEFAULT):
                    action = "SET DEFAULT"
                else:
                    self.raise_error("Invalid key constraint")
                options.append(f"ON {on} {action}")
            elif self._match_text_seq("NOT", "ENFORCED"):
                options.append("NOT ENFORCED")
            elif self._match_text_seq("DEFERRABLE"):
                options.append("DEFERRABLE")
            elif self._match_text_seq("INITIALLY", "DEFERRED"):
                options.append("INITIALLY DEFERRED")
            elif self._match_text_seq("NORELY"):
                options.append("NORELY")
            elif self._match_text_seq("MATCH", "FULL"):
                options.append("MATCH FULL")
            else:
                break
        return options
    def _parse_references(self, match: bool = True) -> t.Optional[exp.Reference]:
        """Parse REFERENCES <table>[(cols)] plus any key-constraint options.

        The referenced columns are captured by `_parse_table(schema=True)`;
        `expressions` is kept as an explicit None placeholder for the node's arg.
        """
        if match and not self._match(TokenType.REFERENCES):
            return None
        expressions = None
        this = self._parse_table(schema=True)
        options = self._parse_key_constraint_options()
        return self.expression(exp.Reference, this=this, expressions=expressions, options=options)
    def _parse_foreign_key(self) -> exp.ForeignKey:
        """Parse FOREIGN KEY (cols) REFERENCES ... with ON DELETE/UPDATE actions."""
        expressions = self._parse_wrapped_id_vars()
        reference = self._parse_references()
        options = {}
        while self._match(TokenType.ON):
            if not self._match_set((TokenType.DELETE, TokenType.UPDATE)):
                self.raise_error("Expected DELETE or UPDATE")
            # `kind` becomes the ForeignKey arg name: "delete" or "update"
            kind = self._prev.text.lower()
            if self._match_text_seq("NO", "ACTION"):
                action = "NO ACTION"
            elif self._match(TokenType.SET):
                self._match_set((TokenType.NULL, TokenType.DEFAULT))
                action = "SET " + self._prev.text.upper()
            else:
                # Single-keyword actions, e.g. CASCADE / RESTRICT
                self._advance()
                action = self._prev.text.upper()
            options[kind] = action
        return self.expression(
            exp.ForeignKey,
            expressions=expressions,
            reference=reference,
            **options,  # type: ignore
        )
    def _parse_primary_key_part(self) -> t.Optional[exp.Expression]:
        """Parse a single element of a PRIMARY KEY column list (overridable hook)."""
        return self._parse_field()
    def _parse_period_for_system_time(self) -> t.Optional[exp.PeriodForSystemTimeConstraint]:
        """Parse PERIOD FOR SYSTEM_TIME (start_col, end_col); rewind when the
        SYSTEM_TIME token doesn't follow."""
        if not self._match(TokenType.TIMESTAMP_SNAPSHOT):
            self._retreat(self._index - 1)
            return None
        id_vars = self._parse_wrapped_id_vars()
        return self.expression(
            exp.PeriodForSystemTimeConstraint,
            this=seq_get(id_vars, 0),
            expression=seq_get(id_vars, 1),
        )
    def _parse_primary_key(
        self, wrapped_optional: bool = False, in_props: bool = False
    ) -> exp.PrimaryKeyColumnConstraint | exp.PrimaryKey:
        """Parse PRIMARY KEY, as either a column constraint (no column list) or a
        table-level key with a wrapped column list plus options."""
        desc = (
            self._match_set((TokenType.ASC, TokenType.DESC))
            and self._prev.token_type == TokenType.DESC
        )
        if not in_props and not self._match(TokenType.L_PAREN, advance=False):
            # No parenthesized column list: this is a per-column constraint
            return self.expression(exp.PrimaryKeyColumnConstraint, desc=desc)
        expressions = self._parse_wrapped_csv(
            self._parse_primary_key_part, optional=wrapped_optional
        )
        options = self._parse_key_constraint_options()
        return self.expression(exp.PrimaryKey, expressions=expressions, options=options)
def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
    def _parse_bracket(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
        """Parse `[...]` subscripts / array literals and `{...}` struct literals,
        recursing to handle chained brackets."""
        if not self._match_set((TokenType.L_BRACKET, TokenType.L_BRACE)):
            return this
        bracket_kind = self._prev.token_type
        expressions = self._parse_csv(
            lambda: self._parse_bracket_key_value(is_map=bracket_kind == TokenType.L_BRACE)
        )
        if bracket_kind == TokenType.L_BRACKET and not self._match(TokenType.R_BRACKET):
            self.raise_error("Expected ]")
        elif bracket_kind == TokenType.L_BRACE and not self._match(TokenType.R_BRACE):
            self.raise_error("Expected }")
        # https://duckdb.org/docs/sql/data_types/struct.html#creating-structs
        if bracket_kind == TokenType.L_BRACE:
            this = self.expression(exp.Struct, expressions=self._kv_to_prop_eq(expressions))
        elif not this or this.name.upper() == "ARRAY":
            this = self.expression(exp.Array, expressions=expressions)
        else:
            # Subscript access: normalize indices to the dialect's base offset
            expressions = apply_index_offset(this, expressions, -self.dialect.INDEX_OFFSET)
            this = self.expression(exp.Bracket, this=this, expressions=expressions)
        self._add_comments(this)
        return self._parse_bracket(this)
def _parse_slice(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
if self._match(TokenType.COLON):
return self.expression(exp.Slice, this=this, expression=self._parse_conjunction())
return this
    def _parse_case(self) -> t.Optional[exp.Expression]:
        """Parse a CASE [operand] WHEN ... THEN ... [ELSE ...] END expression."""
        ifs = []
        default = None
        comments = self._prev_comments
        # Optional operand for the "simple" CASE form (CASE x WHEN ...)
        expression = self._parse_conjunction()
        while self._match(TokenType.WHEN):
            this = self._parse_conjunction()
            self._match(TokenType.THEN)
            then = self._parse_conjunction()
            ifs.append(self.expression(exp.If, this=this, true=then))
        if self._match(TokenType.ELSE):
            default = self._parse_conjunction()
        if not self._match(TokenType.END):
            if isinstance(default, exp.Interval) and default.this.sql().upper() == "END":
                # `ELSE interval END` was misread as an interval with unit END;
                # undo that and treat `interval` as a column name
                default = exp.column("interval")
            else:
                self.raise_error("Expected END after CASE", self._prev)
        return self.expression(
            exp.Case, comments=comments, this=expression, ifs=ifs, default=default
        )
    def _parse_if(self) -> t.Optional[exp.Expression]:
        """Parse IF, either the function form IF(cond, true[, false]) or the
        keyword form IF cond THEN true [ELSE false] END."""
        if self._match(TokenType.L_PAREN):
            args = self._parse_csv(self._parse_conjunction)
            this = self.validate_expression(exp.If.from_arg_list(args), args)
            self._match_r_paren()
        else:
            index = self._index - 1
            if self.NO_PAREN_IF_COMMANDS and index == 0:
                # A statement-initial bare IF is treated as a command, not an expression.
                return self._parse_as_command(self._prev)
            condition = self._parse_conjunction()
            if not condition:
                # Not an IF expression after all; rewind to the IF token.
                self._retreat(index)
                return None
            self._match(TokenType.THEN)
            true = self._parse_conjunction()
            false = self._parse_conjunction() if self._match(TokenType.ELSE) else None
            self._match(TokenType.END)
            this = self.expression(exp.If, this=condition, true=true, false=false)
        return this
    def _parse_next_value_for(self) -> t.Optional[exp.Expression]:
        """Parse NEXT VALUE FOR <sequence> [OVER (ORDER BY ...)]; the NEXT token
        was already consumed by the caller."""
        if not self._match_text_seq("VALUE", "FOR"):
            # Not the sequence construct; give back the token consumed by the caller.
            self._retreat(self._index - 1)
            return None
        return self.expression(
            exp.NextValueFor,
            this=self._parse_column(),
            order=self._match(TokenType.OVER) and self._parse_wrapped(self._parse_order),
        )
def _parse_extract(self) -> exp.Extract:
this = self._parse_function() or self._parse_var() or self._parse_type()
if self._match(TokenType.FROM):
return self.expression(exp.Extract, this=this, expression=self._parse_bitwise())
if not self._match(TokenType.COMMA):
self.raise_error("Expected FROM or comma after EXTRACT", self._prev)
return self.expression(exp.Extract, this=this, expression=self._parse_bitwise())
    def _parse_cast(self, strict: bool, safe: t.Optional[bool] = None) -> exp.Expression:
        """Parse the interior of CAST(expr AS type [FORMAT fmt]) / TRY_CAST(...).

        `strict` selects exp.Cast vs exp.TryCast; `safe` is threaded through to the
        resulting node. A FORMAT clause on a temporal target type is rewritten into
        StrToDate / StrToTime with the dialect's format tokens translated.
        """
        this = self._parse_conjunction()
        if not self._match(TokenType.ALIAS):
            if self._match(TokenType.COMMA):
                # CAST(x, 'type string') variant.
                return self.expression(exp.CastToStrType, this=this, to=self._parse_string())
            self.raise_error("Expected AS after CAST")
        fmt = None
        to = self._parse_types()
        if self._match(TokenType.FORMAT):
            fmt_string = self._parse_string()
            fmt = self._parse_at_time_zone(fmt_string)
            if not to:
                to = exp.DataType.build(exp.DataType.Type.UNKNOWN)
            if to.this in exp.DataType.TEMPORAL_TYPES:
                # Translate the dialect's format tokens via its format/time mappings.
                this = self.expression(
                    exp.StrToDate if to.this == exp.DataType.Type.DATE else exp.StrToTime,
                    this=this,
                    format=exp.Literal.string(
                        format_time(
                            fmt_string.this if fmt_string else "",
                            self.dialect.FORMAT_MAPPING or self.dialect.TIME_MAPPING,
                            self.dialect.FORMAT_TRIE or self.dialect.TIME_TRIE,
                        )
                    ),
                )
                if isinstance(fmt, exp.AtTimeZone) and isinstance(this, exp.StrToTime):
                    this.set("zone", fmt.args["zone"])
                return this
        elif not to:
            self.raise_error("Expected TYPE after CAST")
        elif isinstance(to, exp.Identifier):
            # An identifier target is a user-defined type name.
            to = exp.DataType.build(to.name, udt=True)
        elif to.this == exp.DataType.Type.CHAR:
            if self._match(TokenType.CHARACTER_SET):
                to = self.expression(exp.CharacterSet, this=self._parse_var_or_string())
        return self.expression(
            exp.Cast if strict else exp.TryCast,
            this=this,
            to=to,
            format=fmt,
            safe=safe,
            action=self._parse_var_from_options(self.CAST_ACTIONS, raise_unmatched=False),
        )
    def _parse_string_agg(self) -> exp.Expression:
        """Parse STRING_AGG-style arguments into an exp.GroupConcat, supporting
        DISTINCT, trailing ORDER BY / LIMIT, and the WITHIN GROUP form."""
        if self._match(TokenType.DISTINCT):
            args: t.List[t.Optional[exp.Expression]] = [
                self.expression(exp.Distinct, expressions=[self._parse_conjunction()])
            ]
            if self._match(TokenType.COMMA):
                args.extend(self._parse_csv(self._parse_conjunction))
        else:
            args = self._parse_csv(self._parse_conjunction)  # type: ignore
        index = self._index
        if not self._match(TokenType.R_PAREN) and args:
            # postgres: STRING_AGG([DISTINCT] expression, separator [ORDER BY expression1 {ASC | DESC} [, ...]])
            # bigquery: STRING_AGG([DISTINCT] expression [, separator] [ORDER BY key [{ASC | DESC}] [, ... ]] [LIMIT n])
            args[-1] = self._parse_limit(this=self._parse_order(this=args[-1]))
            return self.expression(exp.GroupConcat, this=args[0], separator=seq_get(args, 1))
        # Checks if we can parse an order clause: WITHIN GROUP (ORDER BY <order_by_expression_list> [ASC | DESC]).
        # This is done "manually", instead of letting _parse_window parse it into an exp.WithinGroup node, so that
        # the STRING_AGG call is parsed like in MySQL / SQLite and can thus be transpiled more easily to them.
        if not self._match_text_seq("WITHIN", "GROUP"):
            self._retreat(index)
            return self.validate_expression(exp.GroupConcat.from_arg_list(args), args)
        self._match_l_paren()  # The corresponding match_r_paren will be called in parse_function (caller)
        order = self._parse_order(this=seq_get(args, 0))
        return self.expression(exp.GroupConcat, this=order, separator=seq_get(args, 1))
def _parse_convert(
self, strict: bool, safe: t.Optional[bool] = None
) -> t.Optional[exp.Expression]:
this = self._parse_bitwise()
if self._match(TokenType.USING):
to: t.Optional[exp.Expression] = self.expression(
exp.CharacterSet, this=self._parse_var()
)
elif self._match(TokenType.COMMA):
to = self._parse_types()
else:
to = None
return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to, safe=safe)
    def _parse_decode(self) -> t.Optional[exp.Decode | exp.Case]:
        """
        There are generally two variants of the DECODE function:
        - DECODE(bin, charset)
        - DECODE(expression, search, result [, search, result] ... [, default])

        The second variant will always be parsed into a CASE expression. Note that NULL
        needs special treatment, since we need to explicitly check for it with `IS NULL`,
        instead of relying on pattern matching.
        """
        args = self._parse_csv(self._parse_conjunction)
        if len(args) < 3:
            # Two or fewer arguments: the charset-decoding variant.
            return self.expression(exp.Decode, this=seq_get(args, 0), charset=seq_get(args, 1))
        expression, *expressions = args
        if not expression:
            return None
        ifs = []
        # Pair up (search, result) arguments; a trailing unpaired arg is the default.
        for search, result in zip(expressions[::2], expressions[1::2]):
            if not search or not result:
                return None
            if isinstance(search, exp.Literal):
                ifs.append(
                    exp.If(this=exp.EQ(this=expression.copy(), expression=search), true=result)
                )
            elif isinstance(search, exp.Null):
                ifs.append(
                    exp.If(this=exp.Is(this=expression.copy(), expression=exp.Null()), true=result)
                )
            else:
                # Non-literal search: also match when both sides are NULL, mirroring
                # the NULL-equals-NULL semantics described in the docstring.
                cond = exp.or_(
                    exp.EQ(this=expression.copy(), expression=search),
                    exp.and_(
                        exp.Is(this=expression.copy(), expression=exp.Null()),
                        exp.Is(this=search.copy(), expression=exp.Null()),
                        copy=False,
                    ),
                    copy=False,
                )
                ifs.append(exp.If(this=cond, true=result))
        return exp.Case(ifs=ifs, default=expressions[-1] if len(expressions) % 2 == 1 else None)
def _parse_json_key_value(self) -> t.Optional[exp.JSONKeyValue]:
self._match_text_seq("KEY")
key = self._parse_column()
self._match_set(self.JSON_KEY_VALUE_SEPARATOR_TOKENS)
self._match_text_seq("VALUE")
value = self._parse_bitwise()
if not key and not value:
return None
return self.expression(exp.JSONKeyValue, this=key, expression=value)
def _parse_format_json(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
if not this or not self._match_text_seq("FORMAT", "JSON"):
return this
return self.expression(exp.FormatJson, this=this)
def _parse_on_handling(self, on: str, *values: str) -> t.Optional[str]:
# Parses the "X ON Y" syntax, i.e. NULL ON NULL (Oracle, T-SQL)
for value in values:
if self._match_text_seq(value, "ON", on):
return f"{value} ON {on}"
return None
def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: ...
def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: ...
def _parse_json_object(self, agg=False):
star = self._parse_star()
expressions = (
[star]
if star
else self._parse_csv(lambda: self._parse_format_json(self._parse_json_key_value()))
)
null_handling = self._parse_on_handling("NULL", "NULL", "ABSENT")
unique_keys = None
if self._match_text_seq("WITH", "UNIQUE"):
unique_keys = True
elif self._match_text_seq("WITHOUT", "UNIQUE"):
unique_keys = False
self._match_text_seq("KEYS")
return_type = self._match_text_seq("RETURNING") and self._parse_format_json(
self._parse_type()
)
encoding = self._match_text_seq("ENCODING") and self._parse_var()
return self.expression(
exp.JSONObjectAgg if agg else exp.JSONObject,
expressions=expressions,
null_handling=null_handling,
unique_keys=unique_keys,
return_type=return_type,
encoding=encoding,
)
    # Note: this is currently incomplete; it only implements the "JSON_value_column" part
    def _parse_json_column_def(self) -> exp.JSONColumnDef:
        """Parse one column definition inside a JSON_TABLE COLUMNS(...) clause."""
        if not self._match_text_seq("NESTED"):
            this = self._parse_id_var()
            kind = self._parse_types(allow_identifiers=False)
            nested = None
        else:
            # NESTED [PATH 'path'] COLUMNS(...) — a nested column group.
            this = None
            kind = None
            nested = True
        path = self._match_text_seq("PATH") and self._parse_string()
        nested_schema = nested and self._parse_json_schema()
        return self.expression(
            exp.JSONColumnDef,
            this=this,
            kind=kind,
            path=path,
            nested_schema=nested_schema,
        )
def _parse_json_schema(self) -> exp.JSONSchema:
self._match_text_seq("COLUMNS")
return self.expression(
exp.JSONSchema,
expressions=self._parse_wrapped_csv(self._parse_json_column_def, optional=True),
)
    def _parse_json_table(self) -> exp.JSONTable:
        """Parse JSON_TABLE(<doc> [, 'path'] [{ERROR|NULL} ON ERROR/EMPTY] COLUMNS(...))."""
        this = self._parse_format_json(self._parse_bitwise())
        path = self._match(TokenType.COMMA) and self._parse_string()
        error_handling = self._parse_on_handling("ERROR", "ERROR", "NULL")
        empty_handling = self._parse_on_handling("EMPTY", "ERROR", "NULL")
        schema = self._parse_json_schema()
        return exp.JSONTable(
            this=this,
            schema=schema,
            path=path,
            error_handling=error_handling,
            empty_handling=empty_handling,
        )
    def _parse_match_against(self) -> exp.MatchAgainst:
        """Parse MATCH (col, ...) AGAINST ('expr' [search modifier]) full-text syntax."""
        expressions = self._parse_csv(self._parse_column)
        self._match_text_seq(")", "AGAINST", "(")
        this = self._parse_string()
        if self._match_text_seq("IN", "NATURAL", "LANGUAGE", "MODE"):
            modifier = "IN NATURAL LANGUAGE MODE"
            if self._match_text_seq("WITH", "QUERY", "EXPANSION"):
                modifier = f"{modifier} WITH QUERY EXPANSION"
        elif self._match_text_seq("IN", "BOOLEAN", "MODE"):
            modifier = "IN BOOLEAN MODE"
        elif self._match_text_seq("WITH", "QUERY", "EXPANSION"):
            modifier = "WITH QUERY EXPANSION"
        else:
            modifier = None
        return self.expression(
            exp.MatchAgainst, this=this, expressions=expressions, modifier=modifier
        )
    # https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql?view=sql-server-ver16
    def _parse_open_json(self) -> exp.OpenJSON:
        """Parse OPENJSON(<expr> [, path]) [WITH (<column defs>)]."""
        this = self._parse_bitwise()
        path = self._match(TokenType.COMMA) and self._parse_string()

        def _parse_open_json_column_def() -> exp.OpenJSONColumnDef:
            # One WITH-clause column: name type [path] [AS JSON].
            this = self._parse_field(any_token=True)
            kind = self._parse_types()
            path = self._parse_string()
            as_json = self._match_pair(TokenType.ALIAS, TokenType.JSON)
            return self.expression(
                exp.OpenJSONColumnDef, this=this, kind=kind, path=path, as_json=as_json
            )

        expressions = None
        if self._match_pair(TokenType.R_PAREN, TokenType.WITH):
            self._match_l_paren()
            expressions = self._parse_csv(_parse_open_json_column_def)
        return self.expression(exp.OpenJSON, this=this, path=path, expressions=expressions)
def _parse_position(self, haystack_first: bool = False) -> exp.StrPosition:
args = self._parse_csv(self._parse_bitwise)
if self._match(TokenType.IN):
return self.expression(
exp.StrPosition, this=self._parse_bitwise(), substr=seq_get(args, 0)
)
if haystack_first:
haystack = seq_get(args, 0)
needle = seq_get(args, 1)
else:
needle = seq_get(args, 0)
haystack = seq_get(args, 1)
return self.expression(
exp.StrPosition, this=haystack, substr=needle, position=seq_get(args, 2)
)
    def _parse_predict(self) -> exp.Predict:
        """Parse PREDICT(MODEL <model>, TABLE <table> [, <params struct>])."""
        self._match_text_seq("MODEL")
        this = self._parse_table()
        self._match(TokenType.COMMA)
        self._match_text_seq("TABLE")
        return self.expression(
            exp.Predict,
            this=this,
            expression=self._parse_table(),
            params_struct=self._match(TokenType.COMMA) and self._parse_bitwise(),
        )
def _parse_join_hint(self, func_name: str) -> exp.JoinHint:
args = self._parse_csv(self._parse_table)
return exp.JoinHint(this=func_name.upper(), expressions=args)
    def _parse_substring(self) -> exp.Substring:
        """Parse SUBSTRING arguments, including the FROM/FOR keyword form."""
        # Postgres supports the form: substring(string [from int] [for int])
        # https://www.postgresql.org/docs/9.1/functions-string.html @ Table 9-6
        args = t.cast(t.List[t.Optional[exp.Expression]], self._parse_csv(self._parse_bitwise))
        if self._match(TokenType.FROM):
            args.append(self._parse_bitwise())
            if self._match(TokenType.FOR):
                args.append(self._parse_bitwise())
        return self.validate_expression(exp.Substring.from_arg_list(args), args)
    def _parse_trim(self) -> exp.Trim:
        """Parse TRIM([LEADING|TRAILING|BOTH] [chars FROM] string [COLLATE ...])."""
        # https://www.w3resource.com/sql/character-functions/trim.php
        # https://docs.oracle.com/javadb/10.8.3.0/ref/rreftrimfunc.html
        position = None
        collation = None
        expression = None
        if self._match_texts(self.TRIM_TYPES):
            position = self._prev.text.upper()
        this = self._parse_bitwise()
        if self._match_set((TokenType.FROM, TokenType.COMMA)):
            # After FROM (or when the dialect puts the pattern first), the first
            # operand parsed was the character set, not the trim target — swap.
            invert_order = self._prev.token_type == TokenType.FROM or self.TRIM_PATTERN_FIRST
            expression = self._parse_bitwise()
            if invert_order:
                this, expression = expression, this
        if self._match(TokenType.COLLATE):
            collation = self._parse_bitwise()
        return self.expression(
            exp.Trim, this=this, position=position, expression=expression, collation=collation
        )
def _parse_window_clause(self) -> t.Optional[t.List[exp.Expression]]:
return self._match(TokenType.WINDOW) and self._parse_csv(self._parse_named_window)
def _parse_named_window(self) -> t.Optional[exp.Expression]:
return self._parse_window(self._parse_id_var(), alias=True)
def _parse_respect_or_ignore_nulls(
self, this: t.Optional[exp.Expression]
) -> t.Optional[exp.Expression]:
if self._match_text_seq("IGNORE", "NULLS"):
return self.expression(exp.IgnoreNulls, this=this)
if self._match_text_seq("RESPECT", "NULLS"):
return self.expression(exp.RespectNulls, this=this)
return this
def _parse_having_max(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
if self._match(TokenType.HAVING):
self._match_texts(("MAX", "MIN"))
max = self._prev.text.upper() != "MIN"
return self.expression(
exp.HavingMax, this=this, expression=self._parse_column(), max=max
)
return this
    def _parse_window(
        self, this: t.Optional[exp.Expression], alias: bool = False
    ) -> t.Optional[exp.Expression]:
        """Parse window-related suffixes of a function call: FILTER (...),
        WITHIN GROUP (...), IGNORE/RESPECT NULLS and OVER (...).

        With `alias=True`, parses a named window definition (WINDOW w AS (...))
        instead of requiring an OVER keyword.
        """
        if self._match_pair(TokenType.FILTER, TokenType.L_PAREN):
            self._match(TokenType.WHERE)
            this = self.expression(
                exp.Filter, this=this, expression=self._parse_where(skip_where_token=True)
            )
            self._match_r_paren()
        # T-SQL allows the OVER (...) syntax after WITHIN GROUP.
        # https://learn.microsoft.com/en-us/sql/t-sql/functions/percentile-disc-transact-sql?view=sql-server-ver16
        if self._match_text_seq("WITHIN", "GROUP"):
            order = self._parse_wrapped(self._parse_order)
            this = self.expression(exp.WithinGroup, this=this, expression=order)
        # SQL spec defines an optional [ { IGNORE | RESPECT } NULLS ] OVER
        # Some dialects choose to implement and some do not.
        # https://dev.mysql.com/doc/refman/8.0/en/window-function-descriptions.html
        # There is some code above in _parse_lambda that handles
        # SELECT FIRST_VALUE(TABLE.COLUMN IGNORE|RESPECT NULLS) OVER ...
        # The below changes handle
        # SELECT FIRST_VALUE(TABLE.COLUMN) IGNORE|RESPECT NULLS OVER ...
        # Oracle allows both formats
        # (https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/img_text/first_value.html)
        # and Snowflake chose to do the same for familiarity
        # https://docs.snowflake.com/en/sql-reference/functions/first_value.html#usage-notes
        if isinstance(this, exp.AggFunc):
            ignore_respect = this.find(exp.IgnoreNulls, exp.RespectNulls)
            if ignore_respect and ignore_respect is not this:
                # Hoist the IGNORE/RESPECT NULLS wrapper outside the aggregate call.
                ignore_respect.replace(ignore_respect.this)
                this = self.expression(ignore_respect.__class__, this=this)
        this = self._parse_respect_or_ignore_nulls(this)
        # bigquery select from window x AS (partition by ...)
        if alias:
            over = None
            self._match(TokenType.ALIAS)
        elif not self._match_set(self.WINDOW_BEFORE_PAREN_TOKENS):
            return this
        else:
            over = self._prev.text.upper()
        if not self._match(TokenType.L_PAREN):
            # OVER window_name — a reference to a named window.
            return self.expression(
                exp.Window, this=this, alias=self._parse_id_var(False), over=over
            )
        window_alias = self._parse_id_var(any_token=False, tokens=self.WINDOW_ALIAS_TOKENS)
        first = self._match(TokenType.FIRST)
        if self._match_text_seq("LAST"):
            first = False
        partition, order = self._parse_partition_and_order()
        kind = self._match_set((TokenType.ROWS, TokenType.RANGE)) and self._prev.text
        if kind:
            self._match(TokenType.BETWEEN)
            start = self._parse_window_spec()
            self._match(TokenType.AND)
            end = self._parse_window_spec()
            spec = self.expression(
                exp.WindowSpec,
                kind=kind,
                start=start["value"],
                start_side=start["side"],
                end=end["value"],
                end_side=end["side"],
            )
        else:
            spec = None
        self._match_r_paren()
        window = self.expression(
            exp.Window,
            this=this,
            partition_by=partition,
            order=order,
            spec=spec,
            alias=window_alias,
            over=over,
            first=first,
        )
        # This covers Oracle's FIRST/LAST syntax: aggregate KEEP (...) OVER (...)
        if self._match_set(self.WINDOW_BEFORE_PAREN_TOKENS, advance=False):
            return self._parse_window(window, alias=alias)
        return window
def _parse_partition_and_order(
self,
) -> t.Tuple[t.List[exp.Expression], t.Optional[exp.Expression]]:
return self._parse_partition_by(), self._parse_order()
def _parse_window_spec(self) -> t.Dict[str, t.Optional[str | exp.Expression]]:
self._match(TokenType.BETWEEN)
return {
"value": (
(self._match_text_seq("UNBOUNDED") and "UNBOUNDED")
or (self._match_text_seq("CURRENT", "ROW") and "CURRENT ROW")
or self._parse_bitwise()
),
"side": self._match_texts(self.WINDOW_SIDES) and self._prev.text,
}
    def _parse_alias(
        self, this: t.Optional[exp.Expression], explicit: bool = False
    ) -> t.Optional[exp.Expression]:
        """Parse an optional alias (or parenthesized alias list) after `this`.

        With `explicit=True`, an alias is only accepted when introduced by AS.
        """
        any_token = self._match(TokenType.ALIAS)
        comments = self._prev_comments
        if explicit and not any_token:
            return this
        if self._match(TokenType.L_PAREN):
            aliases = self.expression(
                exp.Aliases,
                comments=comments,
                this=this,
                expressions=self._parse_csv(lambda: self._parse_id_var(any_token)),
            )
            self._match_r_paren(aliases)
            return aliases
        alias = self._parse_id_var(any_token) or (
            self.STRING_ALIASES and self._parse_string_as_identifier()
        )
        if alias:
            this = self.expression(exp.Alias, comments=comments, this=this, alias=alias)
            column = this.this
            # Moves the comment next to the alias in `expr /* comment */ AS alias`
            if not this.comments and column and column.comments:
                this.comments = column.comments
                column.comments = None
        return this
def _parse_id_var(
self,
any_token: bool = True,
tokens: t.Optional[t.Collection[TokenType]] = None,
) -> t.Optional[exp.Expression]:
identifier = self._parse_identifier()
if identifier:
return identifier
if (any_token and self._advance_any()) or self._match_set(tokens or self.ID_VAR_TOKENS):
quoted = self._prev.token_type == TokenType.STRING
return exp.Identifier(this=self._prev.text, quoted=quoted)
return None
def _parse_string(self) -> t.Optional[exp.Expression]:
if self._match_set(self.STRING_PARSERS):
return self.STRING_PARSERS[self._prev.token_type](self, self._prev)
return self._parse_placeholder()
def _parse_string_as_identifier(self) -> t.Optional[exp.Identifier]:
return exp.to_identifier(self._match(TokenType.STRING) and self._prev.text, quoted=True)
def _parse_number(self) -> t.Optional[exp.Expression]:
if self._match_set(self.NUMERIC_PARSERS):
return self.NUMERIC_PARSERS[self._prev.token_type](self, self._prev)
return self._parse_placeholder()
def _parse_identifier(self) -> t.Optional[exp.Expression]:
if self._match(TokenType.IDENTIFIER):
return self.expression(exp.Identifier, this=self._prev.text, quoted=True)
return self._parse_placeholder()
def _parse_var(
self,
any_token: bool = False,
tokens: t.Optional[t.Collection[TokenType]] = None,
upper: bool = False,
) -> t.Optional[exp.Expression]:
if (
(any_token and self._advance_any())
or self._match(TokenType.VAR)
or (self._match_set(tokens) if tokens else False)
):
return self.expression(
exp.Var, this=self._prev.text.upper() if upper else self._prev.text
)
return self._parse_placeholder()
def _advance_any(self, ignore_reserved: bool = False) -> t.Optional[Token]:
if self._curr and (ignore_reserved or self._curr.token_type not in self.RESERVED_TOKENS):
self._advance()
return self._prev
return None
def _parse_var_or_string(self) -> t.Optional[exp.Expression]:
return self._parse_var() or self._parse_string()
def _parse_primary_or_var(self) -> t.Optional[exp.Expression]:
return self._parse_primary() or self._parse_var(any_token=True)
def _parse_null(self) -> t.Optional[exp.Expression]:
if self._match_set(self.NULL_TOKENS):
return self.PRIMARY_PARSERS[TokenType.NULL](self, self._prev)
return self._parse_placeholder()
def _parse_boolean(self) -> t.Optional[exp.Expression]:
if self._match(TokenType.TRUE):
return self.PRIMARY_PARSERS[TokenType.TRUE](self, self._prev)
if self._match(TokenType.FALSE):
return self.PRIMARY_PARSERS[TokenType.FALSE](self, self._prev)
return self._parse_placeholder()
def _parse_star(self) -> t.Optional[exp.Expression]:
if self._match(TokenType.STAR):
return self.PRIMARY_PARSERS[TokenType.STAR](self, self._prev)
return self._parse_placeholder()
def _parse_parameter(self) -> exp.Parameter:
self._match(TokenType.L_BRACE)
this = self._parse_identifier() or self._parse_primary_or_var()
expression = self._match(TokenType.COLON) and (
self._parse_identifier() or self._parse_primary_or_var()
)
self._match(TokenType.R_BRACE)
return self.expression(exp.Parameter, this=this, expression=expression)
    def _parse_placeholder(self) -> t.Optional[exp.Expression]:
        """Try to parse a placeholder/parameter token; rewinds on a declined match."""
        if self._match_set(self.PLACEHOLDER_PARSERS):
            placeholder = self.PLACEHOLDER_PARSERS[self._prev.token_type](self)
            if placeholder:
                return placeholder
            # The registered parser declined: give back the consumed token.
            self._advance(-1)
        return None
def _parse_except(self) -> t.Optional[t.List[exp.Expression]]:
if not self._match(TokenType.EXCEPT):
return None
if self._match(TokenType.L_PAREN, advance=False):
return self._parse_wrapped_csv(self._parse_column)
except_column = self._parse_column()
return [except_column] if except_column else None
def _parse_replace(self) -> t.Optional[t.List[exp.Expression]]:
if not self._match(TokenType.REPLACE):
return None
if self._match(TokenType.L_PAREN, advance=False):
return self._parse_wrapped_csv(self._parse_expression)
replace_expression = self._parse_expression()
return [replace_expression] if replace_expression else None
def _parse_csv(
self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA
) -> t.List[exp.Expression]:
parse_result = parse_method()
items = [parse_result] if parse_result is not None else []
while self._match(sep):
self._add_comments(parse_result)
parse_result = parse_method()
if parse_result is not None:
items.append(parse_result)
return items
def _parse_tokens(
self, parse_method: t.Callable, expressions: t.Dict
) -> t.Optional[exp.Expression]:
this = parse_method()
while self._match_set(expressions):
this = self.expression(
expressions[self._prev.token_type],
this=this,
comments=self._prev_comments,
expression=parse_method(),
)
return this
    def _parse_wrapped_id_vars(self, optional: bool = False) -> t.List[exp.Expression]:
        """Parse a (possibly optional) parenthesized, comma-separated identifier list."""
        return self._parse_wrapped_csv(self._parse_id_var, optional=optional)
def _parse_wrapped_csv(
self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA, optional: bool = False
) -> t.List[exp.Expression]:
return self._parse_wrapped(
lambda: self._parse_csv(parse_method, sep=sep), optional=optional
)
def _parse_wrapped(self, parse_method: t.Callable, optional: bool = False) -> t.Any:
wrapped = self._match(TokenType.L_PAREN)
if not wrapped and not optional:
self.raise_error("Expecting (")
parse_result = parse_method()
if wrapped:
self._match_r_paren()
return parse_result
    def _parse_expressions(self) -> t.List[exp.Expression]:
        """Parse a comma-separated list of expressions."""
        return self._parse_csv(self._parse_expression)
def _parse_select_or_expression(self, alias: bool = False) -> t.Optional[exp.Expression]:
return self._parse_select() or self._parse_set_operations(
self._parse_expression() if alias else self._parse_conjunction()
)
def _parse_ddl_select(self) -> t.Optional[exp.Expression]:
return self._parse_query_modifiers(
self._parse_set_operations(self._parse_select(nested=True, parse_subquery_alias=False))
)
    def _parse_transaction(self) -> exp.Transaction | exp.Command:
        """Parse BEGIN/START [TRANSACTION|WORK] with optional transaction modes."""
        this = None
        if self._match_texts(self.TRANSACTION_KIND):
            this = self._prev.text
        self._match_texts(("TRANSACTION", "WORK"))
        modes = []
        while True:
            mode = []
            # A mode is a run of VAR tokens, joined into one phrase.
            while self._match(TokenType.VAR):
                mode.append(self._prev.text)
            if mode:
                modes.append(" ".join(mode))
            if not self._match(TokenType.COMMA):
                break
        return self.expression(exp.Transaction, this=this, modes=modes)
    def _parse_commit_or_rollback(self) -> exp.Commit | exp.Rollback:
        """Parse COMMIT/ROLLBACK, including TO [SAVEPOINT] and AND [NO] CHAIN."""
        chain = None
        savepoint = None
        # The COMMIT/ROLLBACK keyword itself was consumed by the caller.
        is_rollback = self._prev.token_type == TokenType.ROLLBACK
        self._match_texts(("TRANSACTION", "WORK"))
        if self._match_text_seq("TO"):
            self._match_text_seq("SAVEPOINT")
            savepoint = self._parse_id_var()
        if self._match(TokenType.AND):
            chain = not self._match_text_seq("NO")
            self._match_text_seq("CHAIN")
        if is_rollback:
            return self.expression(exp.Rollback, savepoint=savepoint)
        return self.expression(exp.Commit, chain=chain)
def _parse_refresh(self) -> exp.Refresh:
self._match(TokenType.TABLE)
return self.expression(exp.Refresh, this=self._parse_string() or self._parse_table())
    def _parse_add_column(self) -> t.Optional[exp.Expression]:
        """Parse one ADD [COLUMN] [IF NOT EXISTS] <column def> [FIRST | AFTER col]."""
        if not self._match_text_seq("ADD"):
            return None
        self._match(TokenType.COLUMN)
        exists_column = self._parse_exists(not_=True)
        expression = self._parse_field_def()
        if expression:
            expression.set("exists", exists_column)
            # https://docs.databricks.com/delta/update-schema.html#explicitly-update-schema-to-add-columns
            if self._match_texts(("FIRST", "AFTER")):
                position = self._prev.text
                column_position = self.expression(
                    exp.ColumnPosition, this=self._parse_column(), position=position
                )
                expression.set("position", column_position)
        return expression
def _parse_drop_column(self) -> t.Optional[exp.Drop | exp.Command]:
drop = self._match(TokenType.DROP) and self._parse_drop()
if drop and not isinstance(drop, exp.Command):
drop.set("kind", drop.args.get("kind", "COLUMN"))
return drop
# https://docs.aws.amazon.com/athena/latest/ug/alter-table-drop-partition.html
def _parse_drop_partition(self, exists: t.Optional[bool] = None) -> exp.DropPartition:
return self.expression(
exp.DropPartition, expressions=self._parse_csv(self._parse_partition), exists=exists
)
    def _parse_alter_table_add(self) -> t.List[exp.Expression]:
        """Parse the payload of ALTER TABLE ... ADD: constraints or column defs."""
        index = self._index - 1
        if self._match_set(self.ADD_CONSTRAINT_TOKENS, advance=False):
            return self._parse_csv(
                lambda: self.expression(
                    exp.AddConstraint, expressions=self._parse_csv(self._parse_constraint)
                )
            )
        # Not a constraint: rewind to the ADD token and parse columns instead.
        self._retreat(index)
        if not self.ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN and self._match_text_seq("ADD"):
            # A single leading ADD followed by a (possibly wrapped) column list.
            return self._parse_wrapped_csv(self._parse_field_def, optional=True)
        return self._parse_wrapped_csv(self._parse_add_column, optional=True)
    def _parse_alter_table_alter(self) -> exp.AlterColumn:
        """Parse ALTER [COLUMN] <col> with DROP DEFAULT, SET DEFAULT, COMMENT,
        or [SET DATA] TYPE ... [COLLATE ...] [USING ...]."""
        self._match(TokenType.COLUMN)
        column = self._parse_field(any_token=True)
        if self._match_pair(TokenType.DROP, TokenType.DEFAULT):
            return self.expression(exp.AlterColumn, this=column, drop=True)
        if self._match_pair(TokenType.SET, TokenType.DEFAULT):
            return self.expression(exp.AlterColumn, this=column, default=self._parse_conjunction())
        if self._match(TokenType.COMMENT):
            return self.expression(exp.AlterColumn, this=column, comment=self._parse_string())
        self._match_text_seq("SET", "DATA")
        return self.expression(
            exp.AlterColumn,
            this=column,
            dtype=self._match_text_seq("TYPE") and self._parse_types(),
            collate=self._match(TokenType.COLLATE) and self._parse_term(),
            using=self._match(TokenType.USING) and self._parse_conjunction(),
        )
    def _parse_alter_table_drop(self) -> t.List[exp.Expression]:
        """Parse the payload of ALTER TABLE ... DROP: partitions or columns."""
        index = self._index - 1
        partition_exists = self._parse_exists()
        if self._match(TokenType.PARTITION, advance=False):
            return self._parse_csv(lambda: self._parse_drop_partition(exists=partition_exists))
        # Not a partition drop: rewind and parse as column drops.
        self._retreat(index)
        return self._parse_csv(self._parse_drop_column)
    def _parse_alter_table_rename(self) -> t.Optional[exp.RenameTable | exp.RenameColumn]:
        """Parse ALTER TABLE ... RENAME {COLUMN old TO new | TO new_table}."""
        if self._match(TokenType.COLUMN):
            exists = self._parse_exists()
            old_column = self._parse_column()
            to = self._match_text_seq("TO")
            new_column = self._parse_column()
            # All three parts are required for a well-formed column rename.
            if old_column is None or to is None or new_column is None:
                return None
            return self.expression(exp.RenameColumn, this=old_column, to=new_column, exists=exists)
        self._match_text_seq("TO")
        return self.expression(exp.RenameTable, this=self._parse_table(schema=True))
    def _parse_alter(self) -> exp.AlterTable | exp.Command:
        """Parse an ALTER TABLE statement; unparseable forms fall back to exp.Command."""
        start = self._prev
        if not self._match(TokenType.TABLE):
            return self._parse_as_command(start)
        exists = self._parse_exists()
        only = self._match_text_seq("ONLY")
        this = self._parse_table(schema=True)
        if self._next:
            self._advance()
        # Dispatch on the action keyword (ADD, DROP, ALTER, RENAME, ...).
        parser = self.ALTER_PARSERS.get(self._prev.text.upper()) if self._prev else None
        if parser:
            actions = ensure_list(parser(self))
            options = self._parse_csv(self._parse_property)
            # Only build an AlterTable when every token was consumed.
            if not self._curr and actions:
                return self.expression(
                    exp.AlterTable,
                    this=this,
                    exists=exists,
                    actions=actions,
                    only=only,
                    options=options,
                )
        return self._parse_as_command(start)
    def _parse_merge(self) -> exp.Merge:
        """Parse MERGE INTO <target> USING <source> ON <condition> WHEN ... clauses."""
        self._match(TokenType.INTO)
        target = self._parse_table()
        if target and self._match(TokenType.ALIAS, advance=False):
            target.set("alias", self._parse_table_alias())
        self._match(TokenType.USING)
        using = self._parse_table()
        self._match(TokenType.ON)
        on = self._parse_conjunction()
        return self.expression(
            exp.Merge,
            this=target,
            using=using,
            on=on,
            expressions=self._parse_when_matched(),
        )
    def _parse_when_matched(self) -> t.List[exp.When]:
        """Parse MERGE's WHEN [NOT] MATCHED [BY TARGET|SOURCE] [AND cond] THEN <action>
        clauses into a list of exp.When nodes."""
        whens = []
        while self._match(TokenType.WHEN):
            matched = not self._match(TokenType.NOT)
            self._match_text_seq("MATCHED")
            # source: False for BY TARGET, True for BY SOURCE, None when absent.
            source = (
                False
                if self._match_text_seq("BY", "TARGET")
                else self._match_text_seq("BY", "SOURCE")
            )
            condition = self._parse_conjunction() if self._match(TokenType.AND) else None
            self._match(TokenType.THEN)
            if self._match(TokenType.INSERT):
                _this = self._parse_star()
                if _this:
                    # INSERT * form.
                    then: t.Optional[exp.Expression] = self.expression(exp.Insert, this=_this)
                else:
                    then = self.expression(
                        exp.Insert,
                        this=self._parse_value(),
                        expression=self._match_text_seq("VALUES") and self._parse_value(),
                    )
            elif self._match(TokenType.UPDATE):
                expressions = self._parse_star()
                if expressions:
                    # UPDATE * form.
                    then = self.expression(exp.Update, expressions=expressions)
                else:
                    then = self.expression(
                        exp.Update,
                        expressions=self._match(TokenType.SET)
                        and self._parse_csv(self._parse_equality),
                    )
            elif self._match(TokenType.DELETE):
                then = self.expression(exp.Var, this=self._prev.text)
            else:
                then = None
            whens.append(
                self.expression(
                    exp.When,
                    matched=matched,
                    source=source,
                    condition=condition,
                    then=then,
                )
            )
        return whens
def _parse_show(self) -> t.Optional[exp.Expression]:
parser = self._find_parser(self.SHOW_PARSERS, self.SHOW_TRIE)
if parser:
return parser(self)
return self._parse_as_command(self._prev)
    def _parse_set_item_assignment(
        self, kind: t.Optional[str] = None
    ) -> t.Optional[exp.Expression]:
        """Parse one SET item of the form `name = value` or `name TO value`."""
        index = self._index
        if kind in ("GLOBAL", "SESSION") and self._match_text_seq("TRANSACTION"):
            return self._parse_set_transaction(global_=kind == "GLOBAL")
        left = self._parse_primary() or self._parse_id_var()
        assignment_delimiter = self._match_texts(("=", "TO"))
        if not left or (self.SET_REQUIRES_ASSIGNMENT_DELIMITER and not assignment_delimiter):
            # Not an assignment: rewind so the caller can try something else.
            self._retreat(index)
            return None
        right = self._parse_statement() or self._parse_id_var()
        this = self.expression(exp.EQ, this=left, expression=right)
        return self.expression(exp.SetItem, this=this, kind=kind)
    def _parse_set_transaction(self, global_: bool = False) -> exp.Expression:
        """Parse SET [GLOBAL|SESSION] TRANSACTION <characteristic, ...>."""
        self._match_text_seq("TRANSACTION")
        characteristics = self._parse_csv(
            lambda: self._parse_var_from_options(self.TRANSACTION_CHARACTERISTICS)
        )
        return self.expression(
            exp.SetItem,
            expressions=characteristics,
            kind="TRANSACTION",
            # `global` is a Python keyword, so it must be passed via **kwargs.
            **{"global": global_},  # type: ignore
        )
def _parse_set_item(self) -> t.Optional[exp.Expression]:
parser = self._find_parser(self.SET_PARSERS, self.SET_TRIE)
return parser(self) if parser else self._parse_set_item_assignment(kind=None)
    def _parse_set(self, unset: bool = False, tag: bool = False) -> exp.Set | exp.Command:
        """Parse a SET/UNSET statement; leftover tokens mean the items could not be
        fully parsed, so fall back to a raw Command."""
        index = self._index
        set_ = self.expression(
            exp.Set, expressions=self._parse_csv(self._parse_set_item), unset=unset, tag=tag
        )
        if self._curr:
            self._retreat(index)
            return self._parse_as_command(self._prev)
        return set_
def _parse_var_from_options(
    self, options: OPTIONS_TYPE, raise_unmatched: bool = True
) -> t.Optional[exp.Var]:
    """Parse a Var from a map of option keywords to their allowed continuations.

    Args:
        options: Mapping of a leading keyword to the keyword sequences that may follow it.
            An empty continuation list means the leading keyword stands alone.
        raise_unmatched: Whether to raise when the leading keyword requires a continuation
            (or is unknown) and none matches; if False, parser state is restored instead.

    Returns:
        An `exp.Var` holding the full matched option text, or None.
    """
    start = self._curr
    if not start:
        return None

    option = start.text.upper()
    continuations = options.get(option)

    index = self._index
    self._advance()

    for keywords in continuations or []:
        if isinstance(keywords, str):
            keywords = (keywords,)

        if self._match_text_seq(*keywords):
            option = f"{option} {' '.join(keywords)}"
            break
    else:
        # No continuation matched: `continuations is None` means the leading keyword
        # itself was unknown; a non-empty list means a continuation was required.
        if continuations or continuations is None:
            if raise_unmatched:
                self.raise_error(f"Unknown option {option}")

            self._retreat(index)
            return None

    return exp.var(option)
def _parse_as_command(self, start: Token) -> exp.Command:
    """Consume every remaining token and wrap the raw SQL in an opaque Command."""
    while self._curr:
        self._advance()

    text = self._find_sql(start, self._prev)
    keyword_size = len(start.text)
    self._warn_unsupported()

    # Split the leading keyword from the rest of the statement text.
    return exp.Command(this=text[:keyword_size], expression=text[keyword_size:])
def _parse_dict_property(self, this: str) -> exp.DictProperty:
    """Parse a dictionary property of the form `NAME(KIND(key value, ...))`."""
    sub_properties: t.List[exp.Expression] = []

    self._match_l_paren()
    kind = self._parse_id_var()

    if self._match(TokenType.L_PAREN):
        # Collect key/value pairs until neither a key nor a value parses.
        while True:
            key = self._parse_id_var()
            value = self._parse_primary()
            if not key and value is None:
                break
            sub_properties.append(
                self.expression(exp.DictSubProperty, this=key, value=value)
            )
        self._match(TokenType.R_PAREN)

    self._match_r_paren()

    return self.expression(
        exp.DictProperty,
        this=this,
        kind=kind.this if kind else None,
        settings=sub_properties,
    )
def _parse_dict_range(self, this: str) -> exp.DictRange:
    """Parse a dictionary layout range: `NAME(MIN lo MAX hi)` or `NAME(MAX hi)` (min defaults to 0)."""
    self._match_l_paren()

    if self._match_text_seq("MIN"):
        lower = self._parse_var() or self._parse_primary()
        self._match_text_seq("MAX")
        upper = self._parse_var() or self._parse_primary()
    else:
        upper = self._parse_var() or self._parse_primary()
        lower = exp.Literal.number(0)

    self._match_r_paren()

    return self.expression(exp.DictRange, this=this, min=lower, max=upper)
def _parse_comprehension(
    self, this: t.Optional[exp.Expression]
) -> t.Optional[exp.Comprehension]:
    """Parse a comprehension tail: `<this> FOR <expr> IN <iterator> [IF <condition>]`.

    Returns None (with parser state restored) if no `IN` follows the first column.
    """
    index = self._index
    expression = self._parse_column()
    if not self._match(TokenType.IN):
        # NOTE(review): retreats one token *before* `index`, presumably to also give
        # back the token consumed by the caller before entering — confirm at call site.
        self._retreat(index - 1)
        return None
    iterator = self._parse_column()
    condition = self._parse_conjunction() if self._match_text_seq("IF") else None
    return self.expression(
        exp.Comprehension,
        this=this,
        expression=expression,
        iterator=iterator,
        condition=condition,
    )
def _parse_heredoc(self) -> t.Optional[exp.Heredoc]:
    """Parse a dollar-quoted (heredoc) string such as `$$text$$` or `$tag$text$tag$`.

    Returns None if the current position does not start a heredoc; raises a parse
    error if an opening delimiter is found but never closed.
    """
    if self._match(TokenType.HEREDOC_STRING):
        # The tokenizer already produced a complete heredoc token.
        return self.expression(exp.Heredoc, this=self._prev.text)

    if not self._match_text_seq("$"):
        return None

    tags = ["$"]
    tag_text = None

    # The tag (or the closing `$` of an anonymous `$$`) must be adjacent to the
    # opening `$` with no intervening whitespace.
    if self._is_connected():
        self._advance()
        tags.append(self._prev.text.upper())
    else:
        self.raise_error("No closing $ found")

    if tags[-1] != "$":
        # A named tag like `$tag$` must be closed by a second adjacent `$`.
        if self._is_connected() and self._match_text_seq("$"):
            tag_text = tags[-1]
            tags.append("$")
        else:
            self.raise_error("No closing $ found")

    heredoc_start = self._curr

    # Scan forward until the full opening delimiter sequence reappears.
    while self._curr:
        if self._match_text_seq(*tags, advance=False):
            this = self._find_sql(heredoc_start, self._prev)
            self._advance(len(tags))
            return self.expression(exp.Heredoc, this=this, tag=tag_text)

        self._advance()

    self.raise_error(f"No closing {''.join(tags)} found")
    return None
def _find_parser(
    self, parsers: t.Dict[str, t.Callable], trie: t.Dict
) -> t.Optional[t.Callable]:
    """Find the sub-parser for a (possibly multi-word) keyword using `trie`.

    Advances past the matched keyword tokens on success; restores the parser
    position and returns None if no entry in the trie matches.
    """
    if not self._curr:
        return None

    index = self._index
    this = []
    while True:
        # The current token might be multiple words
        curr = self._curr.text.upper()
        key = curr.split(" ")
        this.append(curr)

        self._advance()
        # `trie` is rebound to the matched subtrie so the next token continues the walk.
        result, trie = in_trie(trie, key)

        if result == TrieResult.FAILED:
            break

        if result == TrieResult.EXISTS:
            subparser = parsers[" ".join(this)]
            return subparser

    self._retreat(index)
    return None
def _match(self, token_type, advance=True, expression=None):
    """Return True if the current token has `token_type`, consuming it when `advance`.

    Any comments on the consumed token are attached to `expression`, if given.
    """
    curr = self._curr
    if curr is None or curr.token_type != token_type:
        return None

    if advance:
        self._advance()

    self._add_comments(expression)
    return True
def _match_set(self, types, advance=True):
    """Return True if the current token's type is in `types`, consuming it when `advance`."""
    curr = self._curr
    if curr is None or curr.token_type not in types:
        return None

    if advance:
        self._advance()

    return True
def _match_pair(self, token_type_a, token_type_b, advance=True):
    """Return True if the next two tokens have the given types, consuming both when `advance`."""
    first, second = self._curr, self._next
    if not first or not second:
        return None

    if first.token_type != token_type_a or second.token_type != token_type_b:
        return None

    if advance:
        self._advance(2)

    return True
def _match_l_paren(self, expression: t.Optional[exp.Expression] = None) -> None:
    """Consume a `(` token, raising a parse error if it is absent."""
    matched = self._match(TokenType.L_PAREN, expression=expression)
    if not matched:
        self.raise_error("Expecting (")
def _match_r_paren(self, expression: t.Optional[exp.Expression] = None) -> None:
    """Consume a `)` token, raising a parse error if it is absent."""
    matched = self._match(TokenType.R_PAREN, expression=expression)
    if not matched:
        self.raise_error("Expecting )")
def _match_texts(self, texts, advance=True):
    """Return True if the current token's uppercased text is in `texts`, consuming it when `advance`."""
    curr = self._curr
    if not curr or curr.text.upper() not in texts:
        return None

    if advance:
        self._advance()

    return True
def _match_text_seq(self, *texts, advance=True):
    """All-or-nothing match of consecutive token texts (case-insensitive).

    On any mismatch the parser is restored to its starting position and None is
    returned. When `advance` is False the position is restored even on success.
    """
    checkpoint = self._index

    for expected in texts:
        if not (self._curr and self._curr.text.upper() == expected):
            self._retreat(checkpoint)
            return None
        self._advance()

    if not advance:
        self._retreat(checkpoint)

    return True
def _replace_lambda(
    self, node: t.Optional[exp.Expression], lambda_variables: t.Set[str]
) -> t.Optional[exp.Expression]:
    """Rewrite Column references to lambda parameters as plain identifiers (or Dot chains).

    Args:
        node: The expression tree to rewrite in place.
        lambda_variables: Names of the lambda's parameters.

    Returns:
        The rewritten tree (a new root only if the root itself was replaced).
    """
    if not node:
        return node

    for column in node.find_all(exp.Column):
        if column.parts[0].name in lambda_variables:
            # Qualified references keep their dotted path; bare ones become identifiers.
            dot_or_id = column.to_dot() if column.table else column.this
            parent = column.parent

            while isinstance(parent, exp.Dot):
                if not isinstance(parent.parent, exp.Dot):
                    # Replace the outermost Dot of the chain containing this column.
                    parent.replace(dot_or_id)
                    break
                parent = parent.parent
            else:
                # loop-else: the column was not nested inside a Dot chain.
                if column is node:
                    node = dot_or_id
                else:
                    column.replace(dot_or_id)

    return node
def _parse_truncate_table(self) -> t.Optional[exp.TruncateTable] | exp.Expression:
    """Parse a TRUNCATE [DATABASE|TABLE] statement.

    Returns a TruncateTable expression, a function call when the TRUNCATE keyword
    is immediately followed by `(`, or a raw Command when tokens remain unparsed.
    """
    start = self._prev

    # Not to be confused with TRUNCATE(number, decimals) function call
    if self._match(TokenType.L_PAREN):
        self._retreat(self._index - 2)
        return self._parse_function()

    # Clickhouse supports TRUNCATE DATABASE as well
    is_database = self._match(TokenType.DATABASE)

    self._match(TokenType.TABLE)

    exists = self._parse_exists(not_=False)

    expressions = self._parse_csv(
        lambda: self._parse_table(schema=True, is_db_reference=is_database)
    )

    cluster = self._parse_on_property() if self._match(TokenType.ON) else None

    # Optional identity handling, as in PostgreSQL's TRUNCATE.
    if self._match_text_seq("RESTART", "IDENTITY"):
        identity = "RESTART"
    elif self._match_text_seq("CONTINUE", "IDENTITY"):
        identity = "CONTINUE"
    else:
        identity = None

    if self._match_text_seq("CASCADE") or self._match_text_seq("RESTRICT"):
        option = self._prev.text
    else:
        option = None

    partition = self._parse_partition()

    # Fallback case
    if self._curr:
        return self._parse_as_command(start)

    return self.expression(
        exp.TruncateTable,
        expressions=expressions,
        is_database=is_database,
        exists=exists,
        cluster=cluster,
        identity=identity,
        option=option,
        partition=partition,
    )
def _parse_with_operator(self) -> t.Optional[exp.Expression]:
    """Parse an ordered opclass expression, optionally followed by `WITH <operator>`."""
    ordered = self._parse_ordered(self._parse_opclass)

    if not self._match(TokenType.WITH):
        return ordered

    operator = self._parse_var(any_token=True)
    return self.expression(exp.WithOperator, this=ordered, op=operator)
def binary_range_parser(
    expr_type: t.Type[exp.Expression],
) -> t.Callable[[Parser, t.Optional[exp.Expression]], t.Optional[exp.Expression]]:
    """Build a range-operator parser producing `expr_type(this, <bitwise>)` with escape handling."""

    def _parse(self: Parser, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
        node = self.expression(expr_type, this=this, expression=self._parse_bitwise())
        return self._parse_escape(node)

    return _parse
152,904 | from __future__ import annotations
import logging
import typing as t
from collections import defaultdict
from sqlglot import exp
from sqlglot.errors import ErrorLevel, ParseError, concat_messages, merge_errors
from sqlglot.helper import apply_index_offset, ensure_list, seq_get
from sqlglot.time import format_time
from sqlglot.tokens import Token, Tokenizer, TokenType
from sqlglot.trie import TrieResult, in_trie, new_trie
if t.TYPE_CHECKING:
from sqlglot._typing import E, Lit
from sqlglot.dialects.dialect import Dialect, DialectType
def seq_get(seq: t.Sequence[T], index: int) -> t.Optional[T]:
class Dialect(metaclass=_Dialect):
def get_or_raise(cls, dialect: DialectType) -> Dialect:
def format_time(
cls, expression: t.Optional[str | exp.Expression]
) -> t.Optional[exp.Expression]:
def __init__(self, **kwargs) -> None:
def __eq__(self, other: t.Any) -> bool:
def __hash__(self) -> int:
def normalize_identifier(self, expression: E) -> E:
def case_sensitive(self, text: str) -> bool:
def can_identify(self, text: str, identify: str | bool = "safe") -> bool:
def quote_identifier(self, expression: E, identify: bool = True) -> E:
def to_json_path(self, path: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]:
def parse_into(
self, expression_type: exp.IntoType, sql: str, **opts
) -> t.List[t.Optional[exp.Expression]]:
def generate(self, expression: exp.Expression, copy: bool = True, **opts) -> str:
def transpile(self, sql: str, **opts) -> t.List[str]:
def tokenize(self, sql: str) -> t.List[Token]:
def tokenizer(self) -> Tokenizer:
def parser(self, **opts) -> Parser:
def generator(self, **opts) -> Generator:
def build_logarithm(args: t.List, dialect: Dialect) -> exp.Func:
    """Build a LOG (or LN) expression, honoring the dialect's argument order and defaults."""
    # Default argument order is base, expression
    first = seq_get(args, 0)
    second = seq_get(args, 1)

    if second:
        # Swap when the dialect does not put the base first (False or None).
        if dialect.LOG_BASE_FIRST:
            return exp.Log(this=first, expression=second)
        return exp.Log(this=second, expression=first)

    single_arg_type = exp.Ln if dialect.parser_class.LOG_DEFAULTS_TO_LN else exp.Log
    return single_arg_type(this=first)
152,905 | from __future__ import annotations
import logging
import typing as t
from collections import defaultdict
from sqlglot import exp
from sqlglot.errors import ErrorLevel, ParseError, concat_messages, merge_errors
from sqlglot.helper import apply_index_offset, ensure_list, seq_get
from sqlglot.time import format_time
from sqlglot.tokens import Token, Tokenizer, TokenType
from sqlglot.trie import TrieResult, in_trie, new_trie
if t.TYPE_CHECKING:
from sqlglot._typing import E, Lit
from sqlglot.dialects.dialect import Dialect, DialectType
def seq_get(seq: t.Sequence[T], index: int) -> t.Optional[T]:
    """Return `seq[index]`, or `None` when `index` is out of bounds (negative indices supported)."""
    length = len(seq)
    if -length <= index < length:
        return seq[index]
    return None
E = t.TypeVar("E", bound="sqlglot.exp.Expression")
class Dialect(metaclass=_Dialect):
INDEX_OFFSET = 0
"""The base index offset for arrays."""
WEEK_OFFSET = 0
"""First day of the week in DATE_TRUNC(week). Defaults to 0 (Monday). -1 would be Sunday."""
UNNEST_COLUMN_ONLY = False
"""Whether `UNNEST` table aliases are treated as column aliases."""
ALIAS_POST_TABLESAMPLE = False
"""Whether the table alias comes after tablesample."""
TABLESAMPLE_SIZE_IS_PERCENT = False
"""Whether a size in the table sample clause represents percentage."""
NORMALIZATION_STRATEGY = NormalizationStrategy.LOWERCASE
"""Specifies the strategy according to which identifiers should be normalized."""
IDENTIFIERS_CAN_START_WITH_DIGIT = False
"""Whether an unquoted identifier can start with a digit."""
DPIPE_IS_STRING_CONCAT = True
"""Whether the DPIPE token (`||`) is a string concatenation operator."""
STRICT_STRING_CONCAT = False
"""Whether `CONCAT`'s arguments must be strings."""
SUPPORTS_USER_DEFINED_TYPES = True
"""Whether user-defined data types are supported."""
SUPPORTS_SEMI_ANTI_JOIN = True
"""Whether `SEMI` or `ANTI` joins are supported."""
NORMALIZE_FUNCTIONS: bool | str = "upper"
"""
Determines how function names are going to be normalized.
Possible values:
"upper" or True: Convert names to uppercase.
"lower": Convert names to lowercase.
False: Disables function name normalization.
"""
LOG_BASE_FIRST: t.Optional[bool] = True
"""
Whether the base comes first in the `LOG` function.
Possible values: `True`, `False`, `None` (two arguments are not supported by `LOG`)
"""
NULL_ORDERING = "nulls_are_small"
"""
Default `NULL` ordering method to use if not explicitly set.
Possible values: `"nulls_are_small"`, `"nulls_are_large"`, `"nulls_are_last"`
"""
TYPED_DIVISION = False
"""
Whether the behavior of `a / b` depends on the types of `a` and `b`.
False means `a / b` is always float division.
True means `a / b` is integer division if both `a` and `b` are integers.
"""
SAFE_DIVISION = False
"""Whether division by zero throws an error (`False`) or returns NULL (`True`)."""
CONCAT_COALESCE = False
"""A `NULL` arg in `CONCAT` yields `NULL` by default, but in some dialects it yields an empty string."""
DATE_FORMAT = "'%Y-%m-%d'"
DATEINT_FORMAT = "'%Y%m%d'"
TIME_FORMAT = "'%Y-%m-%d %H:%M:%S'"
TIME_MAPPING: t.Dict[str, str] = {}
"""Associates this dialect's time formats with their equivalent Python `strftime` formats."""
# https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_model_rules_date_time
# https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Data-Type-Conversions/Character-to-DATE-Conversion/Forcing-a-FORMAT-on-CAST-for-Converting-Character-to-DATE
FORMAT_MAPPING: t.Dict[str, str] = {}
"""
Helper which is used for parsing the special syntax `CAST(x AS DATE FORMAT 'yyyy')`.
If empty, the corresponding trie will be constructed off of `TIME_MAPPING`.
"""
ESCAPE_SEQUENCES: t.Dict[str, str] = {}
"""Mapping of an unescaped escape sequence to the corresponding character."""
PSEUDOCOLUMNS: t.Set[str] = set()
"""
Columns that are auto-generated by the engine corresponding to this dialect.
For example, such columns may be excluded from `SELECT *` queries.
"""
PREFER_CTE_ALIAS_COLUMN = False
"""
Some dialects, such as Snowflake, allow you to reference a CTE column alias in the
HAVING clause of the CTE. This flag will cause the CTE alias columns to override
any projection aliases in the subquery.
For example,
WITH y(c) AS (
SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0
) SELECT c FROM y;
will be rewritten as
WITH y(c) AS (
SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
"""
# --- Autofilled ---
tokenizer_class = Tokenizer
parser_class = Parser
generator_class = Generator
# A trie of the time_mapping keys
TIME_TRIE: t.Dict = {}
FORMAT_TRIE: t.Dict = {}
INVERSE_TIME_MAPPING: t.Dict[str, str] = {}
INVERSE_TIME_TRIE: t.Dict = {}
INVERSE_ESCAPE_SEQUENCES: t.Dict[str, str] = {}
# Delimiters for string literals and identifiers
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
# Delimiters for bit, hex, byte and unicode literals
BIT_START: t.Optional[str] = None
BIT_END: t.Optional[str] = None
HEX_START: t.Optional[str] = None
HEX_END: t.Optional[str] = None
BYTE_START: t.Optional[str] = None
BYTE_END: t.Optional[str] = None
UNICODE_START: t.Optional[str] = None
UNICODE_END: t.Optional[str] = None
def get_or_raise(cls, dialect: DialectType) -> Dialect:
"""
Look up a dialect in the global dialect registry and return it if it exists.
Args:
dialect: The target dialect. If this is a string, it can be optionally followed by
additional key-value pairs that are separated by commas and are used to specify
dialect settings, such as whether the dialect's identifiers are case-sensitive.
Example:
>>> dialect = dialect_class = get_or_raise("duckdb")
>>> dialect = get_or_raise("mysql, normalization_strategy = case_sensitive")
Returns:
The corresponding Dialect instance.
"""
if not dialect:
return cls()
if isinstance(dialect, _Dialect):
return dialect()
if isinstance(dialect, Dialect):
return dialect
if isinstance(dialect, str):
try:
dialect_name, *kv_pairs = dialect.split(",")
kwargs = {k.strip(): v.strip() for k, v in (kv.split("=") for kv in kv_pairs)}
except ValueError:
raise ValueError(
f"Invalid dialect format: '{dialect}'. "
"Please use the correct format: 'dialect [, k1 = v2 [, ...]]'."
)
result = cls.get(dialect_name.strip())
if not result:
from difflib import get_close_matches
similar = seq_get(get_close_matches(dialect_name, cls.classes, n=1), 0) or ""
if similar:
similar = f" Did you mean {similar}?"
raise ValueError(f"Unknown dialect '{dialect_name}'.{similar}")
return result(**kwargs)
raise ValueError(f"Invalid dialect type for '{dialect}': '{type(dialect)}'.")
def format_time(
cls, expression: t.Optional[str | exp.Expression]
) -> t.Optional[exp.Expression]:
"""Converts a time format in this dialect to its equivalent Python `strftime` format."""
if isinstance(expression, str):
return exp.Literal.string(
# the time formats are quoted
format_time(expression[1:-1], cls.TIME_MAPPING, cls.TIME_TRIE)
)
if expression and expression.is_string:
return exp.Literal.string(format_time(expression.this, cls.TIME_MAPPING, cls.TIME_TRIE))
return expression
def __init__(self, **kwargs) -> None:
normalization_strategy = kwargs.get("normalization_strategy")
if normalization_strategy is None:
self.normalization_strategy = self.NORMALIZATION_STRATEGY
else:
self.normalization_strategy = NormalizationStrategy(normalization_strategy.upper())
def __eq__(self, other: t.Any) -> bool:
# Does not currently take dialect state into account
return type(self) == other
def __hash__(self) -> int:
# Does not currently take dialect state into account
return hash(type(self))
def normalize_identifier(self, expression: E) -> E:
"""
Transforms an identifier in a way that resembles how it'd be resolved by this dialect.
For example, an identifier like `FoO` would be resolved as `foo` in Postgres, because it
lowercases all unquoted identifiers. On the other hand, Snowflake uppercases them, so
it would resolve it as `FOO`. If it was quoted, it'd need to be treated as case-sensitive,
and so any normalization would be prohibited in order to avoid "breaking" the identifier.
There are also dialects like Spark, which are case-insensitive even when quotes are
present, and dialects like MySQL, whose resolution rules match those employed by the
underlying operating system, for example they may always be case-sensitive in Linux.
Finally, the normalization behavior of some engines can even be controlled through flags,
like in Redshift's case, where users can explicitly set enable_case_sensitive_identifier.
SQLGlot aims to understand and handle all of these different behaviors gracefully, so
that it can analyze queries in the optimizer and successfully capture their semantics.
"""
if (
isinstance(expression, exp.Identifier)
and self.normalization_strategy is not NormalizationStrategy.CASE_SENSITIVE
and (
not expression.quoted
or self.normalization_strategy is NormalizationStrategy.CASE_INSENSITIVE
)
):
expression.set(
"this",
(
expression.this.upper()
if self.normalization_strategy is NormalizationStrategy.UPPERCASE
else expression.this.lower()
),
)
return expression
def case_sensitive(self, text: str) -> bool:
"""Checks if text contains any case sensitive characters, based on the dialect's rules."""
if self.normalization_strategy is NormalizationStrategy.CASE_INSENSITIVE:
return False
unsafe = (
str.islower
if self.normalization_strategy is NormalizationStrategy.UPPERCASE
else str.isupper
)
return any(unsafe(char) for char in text)
def can_identify(self, text: str, identify: str | bool = "safe") -> bool:
"""Checks if text can be identified given an identify option.
Args:
text: The text to check.
identify:
`"always"` or `True`: Always returns `True`.
`"safe"`: Only returns `True` if the identifier is case-insensitive.
Returns:
Whether the given text can be identified.
"""
if identify is True or identify == "always":
return True
if identify == "safe":
return not self.case_sensitive(text)
return False
def quote_identifier(self, expression: E, identify: bool = True) -> E:
"""
Adds quotes to a given identifier.
Args:
expression: The expression of interest. If it's not an `Identifier`, this method is a no-op.
identify: If set to `False`, the quotes will only be added if the identifier is deemed
"unsafe", with respect to its characters and this dialect's normalization strategy.
"""
if isinstance(expression, exp.Identifier) and not isinstance(expression.parent, exp.Func):
name = expression.this
expression.set(
"quoted",
identify or self.case_sensitive(name) or not exp.SAFE_IDENTIFIER_RE.match(name),
)
return expression
def to_json_path(self, path: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
if isinstance(path, exp.Literal):
path_text = path.name
if path.is_number:
path_text = f"[{path_text}]"
try:
return parse_json_path(path_text)
except ParseError as e:
logger.warning(f"Invalid JSON path syntax. {str(e)}")
return path
def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]:
return self.parser(**opts).parse(self.tokenize(sql), sql)
def parse_into(
self, expression_type: exp.IntoType, sql: str, **opts
) -> t.List[t.Optional[exp.Expression]]:
return self.parser(**opts).parse_into(expression_type, self.tokenize(sql), sql)
def generate(self, expression: exp.Expression, copy: bool = True, **opts) -> str:
return self.generator(**opts).generate(expression, copy=copy)
def transpile(self, sql: str, **opts) -> t.List[str]:
return [
self.generate(expression, copy=False, **opts) if expression else ""
for expression in self.parse(sql)
]
def tokenize(self, sql: str) -> t.List[Token]:
return self.tokenizer.tokenize(sql)
def tokenizer(self) -> Tokenizer:
if not hasattr(self, "_tokenizer"):
self._tokenizer = self.tokenizer_class(dialect=self)
return self._tokenizer
def parser(self, **opts) -> Parser:
return self.parser_class(dialect=self, **opts)
def generator(self, **opts) -> Generator:
return self.generator_class(dialect=self, **opts)
def build_extract_json_with_path(expr_type: t.Type[E]) -> t.Callable[[t.List, Dialect], E]:
    """Create a builder that constructs `expr_type` from `(json, path, *extra)` args."""

    def _builder(args: t.List, dialect: Dialect) -> E:
        node = expr_type(
            this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
        )
        # Only plain JSONExtract accepts trailing variadic arguments.
        if len(args) > 2 and expr_type is exp.JSONExtract:
            node.set("expressions", args[2:])
        return node

    return _builder
152,906 | from __future__ import annotations
import typing as t
from enum import auto
from sqlglot.helper import AutoName
def concat_messages(errors: t.Sequence[t.Any], maximum: int) -> str:
    """Join up to `maximum` error messages with blank lines, noting how many were omitted."""
    shown = [str(error) for error in errors[:maximum]]
    omitted = len(errors) - maximum
    if omitted > 0:
        shown.append(f"... and {omitted} more")
    return "\n\n".join(shown)
152,907 | from __future__ import annotations
import typing as t
from enum import auto
from sqlglot.helper import AutoName
class ParseError(SqlglotError):
def __init__(
self,
message: str,
errors: t.Optional[t.List[t.Dict[str, t.Any]]] = None,
):
super().__init__(message)
self.errors = errors or []
def new(
cls,
message: str,
description: t.Optional[str] = None,
line: t.Optional[int] = None,
col: t.Optional[int] = None,
start_context: t.Optional[str] = None,
highlight: t.Optional[str] = None,
end_context: t.Optional[str] = None,
into_expression: t.Optional[str] = None,
) -> ParseError:
return cls(
message,
[
{
"description": description,
"line": line,
"col": col,
"start_context": start_context,
"highlight": highlight,
"end_context": end_context,
"into_expression": into_expression,
}
],
)
def merge_errors(errors: t.Sequence[ParseError]) -> t.List[t.Dict[str, t.Any]]:
    """Flatten the `errors` payloads of multiple ParseError instances into a single list."""
    merged: t.List[t.Dict[str, t.Any]] = []
    for error in errors:
        merged.extend(error.errors)
    return merged
152,908 | from __future__ import annotations
import typing as t
import sqlglot.expressions as exp
from sqlglot.errors import ParseError
from sqlglot.tokens import Token, Tokenizer, TokenType
if t.TYPE_CHECKING:
from sqlglot._typing import Lit
class JSONPathTokenizer(Tokenizer):
SINGLE_TOKENS = {
"(": TokenType.L_PAREN,
")": TokenType.R_PAREN,
"[": TokenType.L_BRACKET,
"]": TokenType.R_BRACKET,
":": TokenType.COLON,
",": TokenType.COMMA,
"-": TokenType.DASH,
".": TokenType.DOT,
"?": TokenType.PLACEHOLDER,
"@": TokenType.PARAMETER,
"'": TokenType.QUOTE,
'"': TokenType.QUOTE,
"$": TokenType.DOLLAR,
"*": TokenType.STAR,
}
KEYWORDS = {
"..": TokenType.DOT,
}
IDENTIFIER_ESCAPES = ["\\"]
STRING_ESCAPES = ["\\"]
class ParseError(SqlglotError):
def __init__(
self,
message: str,
errors: t.Optional[t.List[t.Dict[str, t.Any]]] = None,
):
super().__init__(message)
self.errors = errors or []
def new(
cls,
message: str,
description: t.Optional[str] = None,
line: t.Optional[int] = None,
col: t.Optional[int] = None,
start_context: t.Optional[str] = None,
highlight: t.Optional[str] = None,
end_context: t.Optional[str] = None,
into_expression: t.Optional[str] = None,
) -> ParseError:
return cls(
message,
[
{
"description": description,
"line": line,
"col": col,
"start_context": start_context,
"highlight": highlight,
"end_context": end_context,
"into_expression": into_expression,
}
],
)
class TokenType(AutoName):
L_PAREN = auto()
R_PAREN = auto()
L_BRACKET = auto()
R_BRACKET = auto()
L_BRACE = auto()
R_BRACE = auto()
COMMA = auto()
DOT = auto()
DASH = auto()
PLUS = auto()
COLON = auto()
DCOLON = auto()
DQMARK = auto()
SEMICOLON = auto()
STAR = auto()
BACKSLASH = auto()
SLASH = auto()
LT = auto()
LTE = auto()
GT = auto()
GTE = auto()
NOT = auto()
EQ = auto()
NEQ = auto()
NULLSAFE_EQ = auto()
COLON_EQ = auto()
AND = auto()
OR = auto()
AMP = auto()
DPIPE = auto()
PIPE = auto()
PIPE_SLASH = auto()
DPIPE_SLASH = auto()
CARET = auto()
TILDA = auto()
ARROW = auto()
DARROW = auto()
FARROW = auto()
HASH = auto()
HASH_ARROW = auto()
DHASH_ARROW = auto()
LR_ARROW = auto()
DAT = auto()
LT_AT = auto()
AT_GT = auto()
DOLLAR = auto()
PARAMETER = auto()
SESSION_PARAMETER = auto()
DAMP = auto()
XOR = auto()
DSTAR = auto()
BLOCK_START = auto()
BLOCK_END = auto()
SPACE = auto()
BREAK = auto()
STRING = auto()
NUMBER = auto()
IDENTIFIER = auto()
DATABASE = auto()
COLUMN = auto()
COLUMN_DEF = auto()
SCHEMA = auto()
TABLE = auto()
VAR = auto()
BIT_STRING = auto()
HEX_STRING = auto()
BYTE_STRING = auto()
NATIONAL_STRING = auto()
RAW_STRING = auto()
HEREDOC_STRING = auto()
UNICODE_STRING = auto()
# types
BIT = auto()
BOOLEAN = auto()
TINYINT = auto()
UTINYINT = auto()
SMALLINT = auto()
USMALLINT = auto()
MEDIUMINT = auto()
UMEDIUMINT = auto()
INT = auto()
UINT = auto()
BIGINT = auto()
UBIGINT = auto()
INT128 = auto()
UINT128 = auto()
INT256 = auto()
UINT256 = auto()
FLOAT = auto()
DOUBLE = auto()
DECIMAL = auto()
UDECIMAL = auto()
BIGDECIMAL = auto()
CHAR = auto()
NCHAR = auto()
VARCHAR = auto()
NVARCHAR = auto()
BPCHAR = auto()
TEXT = auto()
MEDIUMTEXT = auto()
LONGTEXT = auto()
MEDIUMBLOB = auto()
LONGBLOB = auto()
TINYBLOB = auto()
TINYTEXT = auto()
NAME = auto()
BINARY = auto()
VARBINARY = auto()
JSON = auto()
JSONB = auto()
TIME = auto()
TIMETZ = auto()
TIMESTAMP = auto()
TIMESTAMPTZ = auto()
TIMESTAMPLTZ = auto()
TIMESTAMP_S = auto()
TIMESTAMP_MS = auto()
TIMESTAMP_NS = auto()
DATETIME = auto()
DATETIME64 = auto()
DATE = auto()
DATE32 = auto()
INT4RANGE = auto()
INT4MULTIRANGE = auto()
INT8RANGE = auto()
INT8MULTIRANGE = auto()
NUMRANGE = auto()
NUMMULTIRANGE = auto()
TSRANGE = auto()
TSMULTIRANGE = auto()
TSTZRANGE = auto()
TSTZMULTIRANGE = auto()
DATERANGE = auto()
DATEMULTIRANGE = auto()
UUID = auto()
GEOGRAPHY = auto()
NULLABLE = auto()
GEOMETRY = auto()
HLLSKETCH = auto()
HSTORE = auto()
SUPER = auto()
SERIAL = auto()
SMALLSERIAL = auto()
BIGSERIAL = auto()
XML = auto()
YEAR = auto()
UNIQUEIDENTIFIER = auto()
USERDEFINED = auto()
MONEY = auto()
SMALLMONEY = auto()
ROWVERSION = auto()
IMAGE = auto()
VARIANT = auto()
OBJECT = auto()
INET = auto()
IPADDRESS = auto()
IPPREFIX = auto()
IPV4 = auto()
IPV6 = auto()
ENUM = auto()
ENUM8 = auto()
ENUM16 = auto()
FIXEDSTRING = auto()
LOWCARDINALITY = auto()
NESTED = auto()
AGGREGATEFUNCTION = auto()
SIMPLEAGGREGATEFUNCTION = auto()
UNKNOWN = auto()
# keywords
ALIAS = auto()
ALTER = auto()
ALWAYS = auto()
ALL = auto()
ANTI = auto()
ANY = auto()
APPLY = auto()
ARRAY = auto()
ASC = auto()
ASOF = auto()
AUTO_INCREMENT = auto()
BEGIN = auto()
BETWEEN = auto()
CACHE = auto()
CASE = auto()
CHARACTER_SET = auto()
CLUSTER_BY = auto()
COLLATE = auto()
COMMAND = auto()
COMMENT = auto()
COMMIT = auto()
CONNECT_BY = auto()
CONSTRAINT = auto()
CREATE = auto()
CROSS = auto()
CUBE = auto()
CURRENT_DATE = auto()
CURRENT_DATETIME = auto()
CURRENT_TIME = auto()
CURRENT_TIMESTAMP = auto()
CURRENT_USER = auto()
DEFAULT = auto()
DELETE = auto()
DESC = auto()
DESCRIBE = auto()
DICTIONARY = auto()
DISTINCT = auto()
DISTRIBUTE_BY = auto()
DIV = auto()
DROP = auto()
ELSE = auto()
END = auto()
ESCAPE = auto()
EXCEPT = auto()
EXECUTE = auto()
EXISTS = auto()
FALSE = auto()
FETCH = auto()
FILTER = auto()
FINAL = auto()
FIRST = auto()
FOR = auto()
FORCE = auto()
FOREIGN_KEY = auto()
FORMAT = auto()
FROM = auto()
FULL = auto()
FUNCTION = auto()
GLOB = auto()
GLOBAL = auto()
GROUP_BY = auto()
GROUPING_SETS = auto()
HAVING = auto()
HINT = auto()
IGNORE = auto()
ILIKE = auto()
ILIKE_ANY = auto()
IN = auto()
INDEX = auto()
INNER = auto()
INSERT = auto()
INTERSECT = auto()
INTERVAL = auto()
INTO = auto()
INTRODUCER = auto()
IRLIKE = auto()
IS = auto()
ISNULL = auto()
JOIN = auto()
JOIN_MARKER = auto()
KEEP = auto()
KILL = auto()
LANGUAGE = auto()
LATERAL = auto()
LEFT = auto()
LIKE = auto()
LIKE_ANY = auto()
LIMIT = auto()
LOAD = auto()
LOCK = auto()
MAP = auto()
MATCH_RECOGNIZE = auto()
MEMBER_OF = auto()
MERGE = auto()
MOD = auto()
MODEL = auto()
NATURAL = auto()
NEXT = auto()
NOTNULL = auto()
NULL = auto()
OBJECT_IDENTIFIER = auto()
OFFSET = auto()
ON = auto()
ONLY = auto()
OPERATOR = auto()
ORDER_BY = auto()
ORDER_SIBLINGS_BY = auto()
ORDERED = auto()
ORDINALITY = auto()
OUTER = auto()
OVER = auto()
OVERLAPS = auto()
OVERWRITE = auto()
PARTITION = auto()
PARTITION_BY = auto()
PERCENT = auto()
PIVOT = auto()
PLACEHOLDER = auto()
POSITIONAL = auto()
PRAGMA = auto()
PREWHERE = auto()
PRIMARY_KEY = auto()
PROCEDURE = auto()
PROPERTIES = auto()
PSEUDO_TYPE = auto()
QUALIFY = auto()
QUOTE = auto()
RANGE = auto()
RECURSIVE = auto()
REFRESH = auto()
REPLACE = auto()
RETURNING = auto()
REFERENCES = auto()
RIGHT = auto()
RLIKE = auto()
ROLLBACK = auto()
ROLLUP = auto()
ROW = auto()
ROWS = auto()
SELECT = auto()
SEMI = auto()
SEPARATOR = auto()
SEQUENCE = auto()
SERDE_PROPERTIES = auto()
SET = auto()
SETTINGS = auto()
SHOW = auto()
SIMILAR_TO = auto()
SOME = auto()
SORT_BY = auto()
START_WITH = auto()
STORAGE_INTEGRATION = auto()
STRUCT = auto()
TABLE_SAMPLE = auto()
TEMPORARY = auto()
TOP = auto()
THEN = auto()
TRUE = auto()
TRUNCATE = auto()
UNCACHE = auto()
UNION = auto()
UNNEST = auto()
UNPIVOT = auto()
UPDATE = auto()
USE = auto()
USING = auto()
VALUES = auto()
VIEW = auto()
VOLATILE = auto()
WHEN = auto()
WHERE = auto()
WINDOW = auto()
WITH = auto()
UNIQUE = auto()
VERSION_SNAPSHOT = auto()
TIMESTAMP_SNAPSHOT = auto()
OPTION = auto()
class Token:
__slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")
def number(cls, number: int) -> Token:
"""Returns a NUMBER token with `number` as its text."""
return cls(TokenType.NUMBER, str(number))
def string(cls, string: str) -> Token:
"""Returns a STRING token with `string` as its text."""
return cls(TokenType.STRING, string)
def identifier(cls, identifier: str) -> Token:
"""Returns an IDENTIFIER token with `identifier` as its text."""
return cls(TokenType.IDENTIFIER, identifier)
def var(cls, var: str) -> Token:
"""Returns an VAR token with `var` as its text."""
return cls(TokenType.VAR, var)
def __init__(
self,
token_type: TokenType,
text: str,
line: int = 1,
col: int = 1,
start: int = 0,
end: int = 0,
comments: t.Optional[t.List[str]] = None,
) -> None:
"""Token initializer.
Args:
token_type: The TokenType Enum.
text: The text of the token.
line: The line that the token ends on.
col: The column that the token ends on.
start: The start index of the token.
end: The ending index of the token.
comments: The comments to attach to the token.
"""
self.token_type = token_type
self.text = text
self.line = line
self.col = col
self.start = start
self.end = end
self.comments = [] if comments is None else comments
def __repr__(self) -> str:
attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
return f"<Token {attributes}>"
The code snippet above provides the dependencies needed to implement the `parse` function. Write a Python function `def parse(path: str) -> exp.JSONPath` that solves the following problem:
take a JSON path string and parse it into a JSONPath expression.
Here is the function:
def parse(path: str) -> exp.JSONPath:
    """Takes in a JSON path string and parses it into a JSONPath expression.

    The parser is a small recursive-descent parser built from closures that
    share the token list ``tokens`` and the cursor ``i``. The resulting AST is
    always canonicalized to start with a JSONPathRoot node.
    """
    tokens = JSONPathTokenizer().tokenize(path)
    size = len(tokens)

    # Cursor into `tokens`; mutated by _advance via `nonlocal`.
    i = 0

    def _curr() -> t.Optional[TokenType]:
        # Type of the token under the cursor, or None once input is exhausted.
        return tokens[i].token_type if i < size else None

    def _prev() -> Token:
        # The most recently consumed token.
        return tokens[i - 1]

    def _advance() -> Token:
        # Consume the current token and return it.
        nonlocal i
        i += 1
        return _prev()

    def _error(msg: str) -> str:
        # Error text including the cursor position and the original path.
        return f"{msg} at index {i}: {path}"

    @t.overload
    def _match(token_type: TokenType, raise_unmatched: Lit[True] = True) -> Token:
        pass

    @t.overload
    def _match(token_type: TokenType, raise_unmatched: Lit[False] = False) -> t.Optional[Token]:
        pass

    def _match(token_type, raise_unmatched=False):
        # Consume and return the current token iff it has the given type;
        # otherwise return None (or raise, when raise_unmatched is set).
        if _curr() == token_type:
            return _advance()

        if raise_unmatched:
            raise ParseError(_error(f"Expected {token_type}"))

        return None

    def _parse_literal() -> t.Any:
        # Parses one bracket operand: a quoted name, `*`, a filter `?(...)` /
        # script `(...)` body, or a (possibly negative) integer index.
        # Returns False (not None) when nothing matched, so callers can tell
        # "no literal" apart from a literal `None`-like value.
        token = _match(TokenType.STRING) or _match(TokenType.IDENTIFIER)
        if token:
            return token.text
        if _match(TokenType.STAR):
            return exp.JSONPathWildcard()
        if _match(TokenType.PLACEHOLDER) or _match(TokenType.L_PAREN):
            script = _prev().text == "("
            start = i

            while True:
                if _match(TokenType.L_BRACKET):
                    _parse_bracket()  # nested call which we can throw away
                if _curr() in (TokenType.R_BRACKET, None):
                    break
                _advance()

            expr_type = exp.JSONPathScript if script else exp.JSONPathFilter
            # Slice the raw source text of the filter/script body back out of `path`.
            return expr_type(this=path[tokens[start].start : tokens[i].end])

        number = "-" if _match(TokenType.DASH) else ""
        token = _match(TokenType.NUMBER)
        if token:
            number += token.text

        if number:
            return int(number)

        return False

    def _parse_slice() -> t.Any:
        # Parses `start[:end[:step]]`; collapses to the bare literal when no
        # colon was present (end and step both None).
        start = _parse_literal()
        end = _parse_literal() if _match(TokenType.COLON) else None
        step = _parse_literal() if _match(TokenType.COLON) else None

        if end is None and step is None:
            return start

        return exp.JSONPathSlice(start=start, end=end, step=step)

    def _parse_bracket() -> exp.JSONPathPart:
        # Parses the contents of `[...]`: a key, subscript, selector, slice,
        # or a comma-separated union of those.
        literal = _parse_slice()

        if isinstance(literal, str) or literal is not False:
            indexes = [literal]
            while _match(TokenType.COMMA):
                literal = _parse_slice()
                if literal:
                    indexes.append(literal)

            if len(indexes) == 1:
                if isinstance(literal, str):
                    node: exp.JSONPathPart = exp.JSONPathKey(this=indexes[0])
                elif isinstance(literal, exp.JSONPathPart) and isinstance(
                    literal, (exp.JSONPathScript, exp.JSONPathFilter)
                ):
                    node = exp.JSONPathSelector(this=indexes[0])
                else:
                    node = exp.JSONPathSubscript(this=indexes[0])
            else:
                node = exp.JSONPathUnion(expressions=indexes)
        else:
            raise ParseError(_error("Cannot have empty segment"))

        _match(TokenType.R_BRACKET, raise_unmatched=True)

        return node

    # We canonicalize the JSON path AST so that it always starts with a
    # "root" element, so paths like "field" will be generated as "$.field"
    _match(TokenType.DOLLAR)
    expressions: t.List[exp.JSONPathPart] = [exp.JSONPathRoot()]

    while _curr():
        if _match(TokenType.DOT) or _match(TokenType.COLON):
            # `..` lexes as a DOT whose text is ".." — that marks recursive descent.
            recursive = _prev().text == ".."

            if _match(TokenType.VAR) or _match(TokenType.IDENTIFIER):
                value: t.Optional[str | exp.JSONPathWildcard] = _prev().text
            elif _match(TokenType.STAR):
                value = exp.JSONPathWildcard()
            else:
                value = None

            if recursive:
                expressions.append(exp.JSONPathRecursive(this=value))
            elif value:
                expressions.append(exp.JSONPathKey(this=value))
            else:
                raise ParseError(_error("Expected key name or * after DOT"))
        elif _match(TokenType.L_BRACKET):
            expressions.append(_parse_bracket())
        elif _match(TokenType.VAR) or _match(TokenType.IDENTIFIER):
            expressions.append(exp.JSONPathKey(this=_prev().text))
        elif _match(TokenType.STAR):
            expressions.append(exp.JSONPathWildcard())
        else:
            raise ParseError(_error(f"Unexpected {tokens[i].token_type}"))

    return exp.JSONPath(expressions=expressions)
152,909 | from __future__ import annotations
import functools
import typing as t
from sqlglot import exp
from sqlglot.helper import (
ensure_list,
is_date_unit,
is_iso_date,
is_iso_datetime,
seq_get,
subclasses,
)
from sqlglot.optimizer.scope import Scope, traverse_scope
from sqlglot.schema import Schema, ensure_schema
if t.TYPE_CHECKING:
from sqlglot._typing import B, E
BinaryCoercionFunc = t.Callable[[exp.Expression, exp.Expression], exp.DataType.Type]
BinaryCoercions = t.Dict[
t.Tuple[exp.DataType.Type, exp.DataType.Type],
BinaryCoercionFunc,
]
class TypeAnnotator(metaclass=_TypeAnnotator):
    """Infers and assigns a ``DataType`` to every node of an expression tree.

    Traversal is scope-aware: column references are first resolved against the
    schema (for tables) or against the projections of their source scopes, and
    the remaining expressions are then annotated bottom-up via ``ANNOTATORS``.
    """

    # Expression classes whose result type is fixed, grouped by that type.
    # This table is inverted into per-class ANNOTATORS entries below.
    TYPE_TO_EXPRESSIONS: t.Dict[exp.DataType.Type, t.Set[t.Type[exp.Expression]]] = {
        exp.DataType.Type.BIGINT: {
            exp.ApproxDistinct,
            exp.ArraySize,
            exp.Count,
            exp.Length,
        },
        exp.DataType.Type.BOOLEAN: {
            exp.Between,
            exp.Boolean,
            exp.In,
            exp.RegexpLike,
        },
        exp.DataType.Type.DATE: {
            exp.CurrentDate,
            exp.Date,
            exp.DateFromParts,
            exp.DateStrToDate,
            exp.DiToDate,
            exp.StrToDate,
            exp.TimeStrToDate,
            exp.TsOrDsToDate,
        },
        exp.DataType.Type.DATETIME: {
            exp.CurrentDatetime,
            exp.DatetimeAdd,
            exp.DatetimeSub,
        },
        exp.DataType.Type.DOUBLE: {
            exp.ApproxQuantile,
            exp.Avg,
            exp.Div,
            exp.Exp,
            exp.Ln,
            exp.Log,
            exp.Pow,
            exp.Quantile,
            exp.Round,
            exp.SafeDivide,
            exp.Sqrt,
            exp.Stddev,
            exp.StddevPop,
            exp.StddevSamp,
            exp.Variance,
            exp.VariancePop,
        },
        exp.DataType.Type.INT: {
            exp.Ceil,
            exp.DatetimeDiff,
            exp.DateDiff,
            exp.Extract,
            exp.TimestampDiff,
            exp.TimeDiff,
            exp.DateToDi,
            exp.Floor,
            exp.Levenshtein,
            exp.Sign,
            exp.StrPosition,
            exp.TsOrDiToDi,
        },
        exp.DataType.Type.JSON: {
            exp.ParseJSON,
        },
        exp.DataType.Type.TIMESTAMP: {
            exp.CurrentTime,
            exp.CurrentTimestamp,
            exp.StrToTime,
            exp.TimeAdd,
            exp.TimeStrToTime,
            exp.TimeSub,
            exp.TimestampAdd,
            exp.TimestampSub,
            exp.UnixToTime,
        },
        exp.DataType.Type.TINYINT: {
            exp.Day,
            exp.Month,
            exp.Week,
            exp.Year,
        },
        exp.DataType.Type.VARCHAR: {
            exp.ArrayConcat,
            exp.Concat,
            exp.ConcatWs,
            exp.DateToDateStr,
            exp.GroupConcat,
            exp.Initcap,
            exp.Lower,
            exp.Substring,
            exp.TimeToStr,
            exp.TimeToTimeStr,
            exp.Trim,
            exp.TsOrDsToDateStr,
            exp.UnixToStr,
            exp.UnixToTimeStr,
            exp.Upper,
        },
    }

    # Dispatch table: expression class -> callable(self, node) that sets its
    # type. Unary/binary classes and the TYPE_TO_EXPRESSIONS groups are filled
    # in generically; explicit entries below override/extend them.
    ANNOTATORS: t.Dict = {
        **{
            expr_type: lambda self, e: self._annotate_unary(e)
            for expr_type in subclasses(exp.__name__, (exp.Unary, exp.Alias))
        },
        **{
            expr_type: lambda self, e: self._annotate_binary(e)
            for expr_type in subclasses(exp.__name__, exp.Binary)
        },
        **{
            expr_type: _annotate_with_type_lambda(data_type)
            for data_type, expressions in TYPE_TO_EXPRESSIONS.items()
            for expr_type in expressions
        },
        exp.Abs: lambda self, e: self._annotate_by_args(e, "this"),
        exp.Anonymous: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.UNKNOWN),
        exp.Array: lambda self, e: self._annotate_by_args(e, "expressions", array=True),
        exp.ArrayAgg: lambda self, e: self._annotate_by_args(e, "this", array=True),
        exp.ArrayConcat: lambda self, e: self._annotate_by_args(e, "this", "expressions"),
        exp.Bracket: lambda self, e: self._annotate_bracket(e),
        exp.Cast: lambda self, e: self._annotate_with_type(e, e.args["to"]),
        exp.Case: lambda self, e: self._annotate_by_args(e, "default", "ifs"),
        exp.Coalesce: lambda self, e: self._annotate_by_args(e, "this", "expressions"),
        exp.DataType: lambda self, e: self._annotate_with_type(e, e.copy()),
        exp.DateAdd: lambda self, e: self._annotate_timeunit(e),
        exp.DateSub: lambda self, e: self._annotate_timeunit(e),
        exp.DateTrunc: lambda self, e: self._annotate_timeunit(e),
        exp.Distinct: lambda self, e: self._annotate_by_args(e, "expressions"),
        exp.Div: lambda self, e: self._annotate_div(e),
        exp.Dot: lambda self, e: self._annotate_dot(e),
        exp.Explode: lambda self, e: self._annotate_explode(e),
        exp.Filter: lambda self, e: self._annotate_by_args(e, "this"),
        exp.GenerateDateArray: lambda self, e: self._annotate_with_type(
            e, exp.DataType.build("ARRAY<DATE>")
        ),
        exp.If: lambda self, e: self._annotate_by_args(e, "true", "false"),
        exp.Interval: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.INTERVAL),
        exp.Least: lambda self, e: self._annotate_by_args(e, "expressions"),
        exp.Literal: lambda self, e: self._annotate_literal(e),
        exp.Map: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.MAP),
        exp.Max: lambda self, e: self._annotate_by_args(e, "this", "expressions"),
        exp.Min: lambda self, e: self._annotate_by_args(e, "this", "expressions"),
        exp.Null: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.NULL),
        exp.Nullif: lambda self, e: self._annotate_by_args(e, "this", "expression"),
        exp.PropertyEQ: lambda self, e: self._annotate_by_args(e, "expression"),
        exp.Slice: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.UNKNOWN),
        exp.Struct: lambda self, e: self._annotate_by_args(e, "expressions", struct=True),
        exp.Sum: lambda self, e: self._annotate_by_args(e, "this", "expressions", promote=True),
        exp.Timestamp: lambda self, e: self._annotate_with_type(
            e,
            exp.DataType.Type.TIMESTAMPTZ if e.args.get("with_tz") else exp.DataType.Type.TIMESTAMP,
        ),
        exp.TryCast: lambda self, e: self._annotate_with_type(e, e.args["to"]),
        exp.Unnest: lambda self, e: self._annotate_unnest(e),
        exp.VarMap: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.MAP),
    }

    # Types that carry nested element types; _maybe_coerce returns them as-is.
    NESTED_TYPES = {
        exp.DataType.Type.ARRAY,
    }

    # Specifies what types a given type can be coerced into (autofilled)
    COERCES_TO: t.Dict[exp.DataType.Type, t.Set[exp.DataType.Type]] = {}

    # Coercion functions for binary operations.
    # Map of type pairs to a callable that takes both sides of the binary operation and returns the resulting type.
    BINARY_COERCIONS: BinaryCoercions = {
        **swap_all(
            {
                (t, exp.DataType.Type.INTERVAL): lambda l, r: _coerce_date_literal(
                    l, r.args.get("unit")
                )
                for t in exp.DataType.TEXT_TYPES
            }
        ),
        **swap_all(
            {
                # text + numeric will yield the numeric type to match most dialects' semantics
                (text, numeric): lambda l, r: t.cast(
                    exp.DataType.Type, l.type if l.type in exp.DataType.NUMERIC_TYPES else r.type
                )
                for text in exp.DataType.TEXT_TYPES
                for numeric in exp.DataType.NUMERIC_TYPES
            }
        ),
        **swap_all(
            {
                (exp.DataType.Type.DATE, exp.DataType.Type.INTERVAL): lambda l, r: _coerce_date(
                    l, r.args.get("unit")
                ),
            }
        ),
    }

    def __init__(
        self,
        schema: Schema,
        annotators: t.Optional[t.Dict[t.Type[E], t.Callable[[TypeAnnotator, E], E]]] = None,
        coerces_to: t.Optional[t.Dict[exp.DataType.Type, t.Set[exp.DataType.Type]]] = None,
        binary_coercions: t.Optional[BinaryCoercions] = None,
    ) -> None:
        """Create an annotator; class-level tables are used unless overridden."""
        self.schema = schema
        self.annotators = annotators or self.ANNOTATORS
        self.coerces_to = coerces_to or self.COERCES_TO
        self.binary_coercions = binary_coercions or self.BINARY_COERCIONS

        # Caches the ids of annotated sub-Expressions, to ensure we only visit them once
        self._visited: t.Set[int] = set()

    def _set_type(
        self, expression: exp.Expression, target_type: t.Optional[exp.DataType | exp.DataType.Type]
    ) -> None:
        """Assign `target_type` (UNKNOWN when falsy) and mark the node visited."""
        expression.type = target_type or exp.DataType.Type.UNKNOWN  # type: ignore
        self._visited.add(id(expression))

    def annotate(self, expression: E) -> E:
        """Annotate `expression` scope by scope, then the expression itself."""
        for scope in traverse_scope(expression):
            # Build a per-source map of selectable names -> projection nodes.
            selects = {}
            for name, source in scope.sources.items():
                if not isinstance(source, Scope):
                    continue
                if isinstance(source.expression, exp.UDTF):
                    values = []

                    if isinstance(source.expression, exp.Lateral):
                        if isinstance(source.expression.this, exp.Explode):
                            values = [source.expression.this.this]
                    elif isinstance(source.expression, exp.Unnest):
                        values = [source.expression]
                    else:
                        values = source.expression.expressions[0].expressions

                    if not values:
                        continue

                    selects[name] = {
                        alias: column
                        for alias, column in zip(
                            source.expression.alias_column_names,
                            values,
                        )
                    }
                else:
                    selects[name] = {
                        select.alias_or_name: select for select in source.expression.selects
                    }

            # First annotate the current scope's column references
            for col in scope.columns:
                if not col.table:
                    continue

                source = scope.sources.get(col.table)
                if isinstance(source, exp.Table):
                    # Physical table: ask the schema for the column type.
                    self._set_type(col, self.schema.get_column_type(source, col))
                elif source:
                    if col.table in selects and col.name in selects[col.table]:
                        self._set_type(col, selects[col.table][col.name].type)
                    elif isinstance(source.expression, exp.Unnest):
                        self._set_type(col, source.expression.type)

            # Then (possibly) annotate the remaining expressions in the scope
            self._maybe_annotate(scope.expression)

        return self._maybe_annotate(expression)  # This takes care of non-traversable expressions

    def _maybe_annotate(self, expression: E) -> E:
        """Annotate `expression` once, dispatching through `self.annotators`."""
        if id(expression) in self._visited:
            return expression  # We've already inferred the expression's type

        annotator = self.annotators.get(expression.__class__)

        return (
            annotator(self, expression)
            if annotator
            else self._annotate_with_type(expression, exp.DataType.Type.UNKNOWN)
        )

    def _annotate_args(self, expression: E) -> E:
        """Annotate all child expressions of `expression` (bottom-up step)."""
        for value in expression.iter_expressions():
            self._maybe_annotate(value)

        return expression

    def _maybe_coerce(
        self, type1: exp.DataType | exp.DataType.Type, type2: exp.DataType | exp.DataType.Type
    ) -> exp.DataType | exp.DataType.Type:
        """Pick the resulting type of combining `type1` and `type2` via `coerces_to`."""
        type1_value = type1.this if isinstance(type1, exp.DataType) else type1
        type2_value = type2.this if isinstance(type2, exp.DataType) else type2

        # We propagate the NULL / UNKNOWN types upwards if found
        if exp.DataType.Type.NULL in (type1_value, type2_value):
            return exp.DataType.Type.NULL
        if exp.DataType.Type.UNKNOWN in (type1_value, type2_value):
            return exp.DataType.Type.UNKNOWN

        # Nested types are returned whole to preserve their element types.
        if type1_value in self.NESTED_TYPES:
            return type1
        if type2_value in self.NESTED_TYPES:
            return type2

        return type2_value if type2_value in self.coerces_to.get(type1_value, {}) else type1_value  # type: ignore

    # Note: the following "no_type_check" decorators were added because mypy was yelling due
    # to assigning Type values to expression.type (since its getter returns Optional[DataType]).
    # This is a known mypy issue: https://github.com/python/mypy/issues/3004
    def _annotate_binary(self, expression: B) -> B:
        """Annotate a binary node: connectors/predicates -> BOOLEAN, else coerce operand types."""
        self._annotate_args(expression)

        left, right = expression.left, expression.right
        left_type, right_type = left.type.this, right.type.this

        if isinstance(expression, exp.Connector):
            if left_type == exp.DataType.Type.NULL and right_type == exp.DataType.Type.NULL:
                self._set_type(expression, exp.DataType.Type.NULL)
            elif exp.DataType.Type.NULL in (left_type, right_type):
                self._set_type(
                    expression,
                    exp.DataType.build("NULLABLE", expressions=exp.DataType.build("BOOLEAN")),
                )
            else:
                self._set_type(expression, exp.DataType.Type.BOOLEAN)
        elif isinstance(expression, exp.Predicate):
            self._set_type(expression, exp.DataType.Type.BOOLEAN)
        elif (left_type, right_type) in self.binary_coercions:
            self._set_type(expression, self.binary_coercions[(left_type, right_type)](left, right))
        else:
            self._set_type(expression, self._maybe_coerce(left_type, right_type))

        return expression

    def _annotate_unary(self, expression: E) -> E:
        """Annotate a unary node: conditions -> BOOLEAN, otherwise inherit the operand's type."""
        self._annotate_args(expression)

        if isinstance(expression, exp.Condition) and not isinstance(expression, exp.Paren):
            self._set_type(expression, exp.DataType.Type.BOOLEAN)
        else:
            self._set_type(expression, expression.this.type)

        return expression

    def _annotate_literal(self, expression: exp.Literal) -> exp.Literal:
        """Annotate literals: strings -> VARCHAR, integers -> INT, other numbers -> DOUBLE."""
        if expression.is_string:
            self._set_type(expression, exp.DataType.Type.VARCHAR)
        elif expression.is_int:
            self._set_type(expression, exp.DataType.Type.INT)
        else:
            self._set_type(expression, exp.DataType.Type.DOUBLE)

        return expression

    def _annotate_with_type(self, expression: E, target_type: exp.DataType.Type) -> E:
        """Assign a fixed `target_type` and still annotate the children."""
        self._set_type(expression, target_type)
        return self._annotate_args(expression)

    def _annotate_struct_value(
        self, expression: exp.Expression
    ) -> t.Optional[exp.DataType] | exp.ColumnDef:
        """Describe one STRUCT field: a ColumnDef for named fields, else the bare type."""
        alias = expression.args.get("alias")
        if alias:
            return exp.ColumnDef(this=alias.copy(), kind=expression.type)

        # Case: key = value or key := value
        if expression.expression:
            return exp.ColumnDef(this=expression.this.copy(), kind=expression.expression.type)

        return expression.type

    def _annotate_by_args(
        self,
        expression: E,
        *args: str,
        promote: bool = False,
        array: bool = False,
        struct: bool = False,
    ) -> E:
        """Infer the node's type by coercing the types of the named `args`.

        promote: widen integer results to BIGINT and float results to DOUBLE.
        array:   wrap the inferred type in ARRAY<...>.
        struct:  build a STRUCT type from the argument expressions instead.
        """
        self._annotate_args(expression)

        expressions: t.List[exp.Expression] = []
        for arg in args:
            arg_expr = expression.args.get(arg)
            expressions.extend(expr for expr in ensure_list(arg_expr) if expr)

        last_datatype = None
        for expr in expressions:
            expr_type = expr.type

            # Stop at the first nested data type found - we don't want to _maybe_coerce nested types
            if expr_type.args.get("nested"):
                last_datatype = expr_type
                break

            last_datatype = self._maybe_coerce(last_datatype or expr_type, expr_type)

        self._set_type(expression, last_datatype or exp.DataType.Type.UNKNOWN)

        if promote:
            if expression.type.this in exp.DataType.INTEGER_TYPES:
                self._set_type(expression, exp.DataType.Type.BIGINT)
            elif expression.type.this in exp.DataType.FLOAT_TYPES:
                self._set_type(expression, exp.DataType.Type.DOUBLE)

        if array:
            self._set_type(
                expression,
                exp.DataType(
                    this=exp.DataType.Type.ARRAY, expressions=[expression.type], nested=True
                ),
            )

        if struct:
            self._set_type(
                expression,
                exp.DataType(
                    this=exp.DataType.Type.STRUCT,
                    expressions=[self._annotate_struct_value(expr) for expr in expressions],
                    nested=True,
                ),
            )

        return expression

    def _annotate_timeunit(
        self, expression: exp.TimeUnit | exp.DateTrunc
    ) -> exp.TimeUnit | exp.DateTrunc:
        """Annotate date arithmetic: coerce the operand by the time unit involved."""
        self._annotate_args(expression)

        if expression.this.type.this in exp.DataType.TEXT_TYPES:
            datatype = _coerce_date_literal(expression.this, expression.unit)
        elif expression.this.type.this in exp.DataType.TEMPORAL_TYPES:
            datatype = _coerce_date(expression.this, expression.unit)
        else:
            datatype = exp.DataType.Type.UNKNOWN

        self._set_type(expression, datatype)
        return expression

    def _annotate_bracket(self, expression: exp.Bracket) -> exp.Bracket:
        """Annotate subscripting: slices keep the container type, arrays/maps yield element types."""
        self._annotate_args(expression)

        bracket_arg = expression.expressions[0]
        this = expression.this

        if isinstance(bracket_arg, exp.Slice):
            self._set_type(expression, this.type)
        elif this.type.is_type(exp.DataType.Type.ARRAY):
            self._set_type(expression, seq_get(this.type.expressions, 0))
        elif isinstance(this, (exp.Map, exp.VarMap)) and bracket_arg in this.keys:
            # Map lookup by a literal key: use the type of the matching value.
            index = this.keys.index(bracket_arg)
            value = seq_get(this.values, index)
            self._set_type(expression, value.type if value else None)
        else:
            self._set_type(expression, exp.DataType.Type.UNKNOWN)

        return expression

    def _annotate_div(self, expression: exp.Div) -> exp.Div:
        """Annotate division: typed int/int -> BIGINT, otherwise coerce to at least DOUBLE."""
        self._annotate_args(expression)

        left_type, right_type = expression.left.type.this, expression.right.type.this  # type: ignore

        if (
            expression.args.get("typed")
            and left_type in exp.DataType.INTEGER_TYPES
            and right_type in exp.DataType.INTEGER_TYPES
        ):
            self._set_type(expression, exp.DataType.Type.BIGINT)
        else:
            self._set_type(expression, self._maybe_coerce(left_type, right_type))
            if expression.type and expression.type.this not in exp.DataType.REAL_TYPES:
                self._set_type(
                    expression, self._maybe_coerce(expression.type, exp.DataType.Type.DOUBLE)
                )

        return expression

    def _annotate_dot(self, expression: exp.Dot) -> exp.Dot:
        """Annotate struct field access by looking the field up in the struct's definition."""
        self._annotate_args(expression)
        self._set_type(expression, None)
        this_type = expression.this.type

        if this_type and this_type.is_type(exp.DataType.Type.STRUCT):
            for e in this_type.expressions:
                if e.name == expression.expression.name:
                    self._set_type(expression, e.kind)
                    break

        return expression

    def _annotate_explode(self, expression: exp.Explode) -> exp.Explode:
        """EXPLODE yields the element type of its array operand."""
        self._annotate_args(expression)
        self._set_type(expression, seq_get(expression.this.type.expressions, 0))
        return expression

    def _annotate_unnest(self, expression: exp.Unnest) -> exp.Unnest:
        """UNNEST yields the element type of its first array argument."""
        self._annotate_args(expression)
        child = seq_get(expression.expressions, 0)
        self._set_type(expression, child and seq_get(child.type.expressions, 0))
        return expression
E = t.TypeVar("E", bound="sqlglot.exp.Expression")
def _annotate_with_type_lambda(data_type: exp.DataType.Type) -> t.Callable[[TypeAnnotator, E], E]:
return lambda self, e: self._annotate_with_type(e, data_type) | null |
152,910 | from __future__ import annotations
import functools
import typing as t
from sqlglot import exp
from sqlglot.helper import (
ensure_list,
is_date_unit,
is_iso_date,
is_iso_datetime,
seq_get,
subclasses,
)
from sqlglot.optimizer.scope import Scope, traverse_scope
from sqlglot.schema import Schema, ensure_schema
if t.TYPE_CHECKING:
from sqlglot._typing import B, E
BinaryCoercionFunc = t.Callable[[exp.Expression, exp.Expression], exp.DataType.Type]
BinaryCoercions = t.Dict[
t.Tuple[exp.DataType.Type, exp.DataType.Type],
BinaryCoercionFunc,
]
def is_iso_date(text: str) -> bool:
    """Whether *text* is a valid ISO-8601 calendar date (``YYYY-MM-DD``)."""
    try:
        datetime.date.fromisoformat(text)
    except ValueError:
        return False
    return True
def is_iso_datetime(text: str) -> bool:
    """Whether *text* parses as an ISO-8601 datetime (a bare date also qualifies)."""
    try:
        datetime.datetime.fromisoformat(text)
    except ValueError:
        return False
    return True
def is_date_unit(expression: t.Optional[exp.Expression]) -> bool:
    """Whether *expression* names a date-granularity unit (membership in DATE_UNITS)."""
    if expression is None:
        return False
    return expression.name.lower() in DATE_UNITS
def _coerce_date_literal(l: exp.Expression, unit: t.Optional[exp.Expression]) -> exp.DataType.Type:
    """Infer the temporal type of a text literal used in date arithmetic.

    An ISO date combined with a date-granularity unit stays a DATE; any other
    ISO date or ISO datetime becomes a DATETIME; everything else is UNKNOWN.
    """
    date_text = l.name

    if is_iso_date(date_text):
        if is_date_unit(unit):
            return exp.DataType.Type.DATE
        # An ISO date is also an ISO datetime, but not vice versa
        return exp.DataType.Type.DATETIME

    if is_iso_datetime(date_text):
        return exp.DataType.Type.DATETIME

    return exp.DataType.Type.UNKNOWN
152,911 | from __future__ import annotations
import functools
import typing as t
from sqlglot import exp
from sqlglot.helper import (
ensure_list,
is_date_unit,
is_iso_date,
is_iso_datetime,
seq_get,
subclasses,
)
from sqlglot.optimizer.scope import Scope, traverse_scope
from sqlglot.schema import Schema, ensure_schema
def swap_args(func: BinaryCoercionFunc) -> BinaryCoercionFunc:
    """Wrap *func* so that it receives its two operands in reverse order."""

    def _reversed(l, r):
        return func(r, l)

    return _reversed


def swap_all(coercions: BinaryCoercions) -> BinaryCoercions:
    """Extend *coercions* with every mirrored key pair mapped to a swapped function."""
    mirrored = {}
    for (a, b), func in coercions.items():
        mirrored[(b, a)] = swap_args(func)
    return {**coercions, **mirrored}
152,912 | from __future__ import annotations
import logging
from sqlglot import exp
from sqlglot.errors import OptimizeError
from sqlglot.helper import while_changing
from sqlglot.optimizer.scope import find_all_in_scope
from sqlglot.optimizer.simplify import flatten, rewrite_between, uniq_sort
logger = logging.getLogger("sqlglot")
def normalized(expression: exp.Expression, dnf: bool = False) -> bool:
    """
    Checks whether a given expression is in a normal form of interest.

    Example:
        >>> from sqlglot import parse_one
        >>> normalized(parse_one("(a AND b) OR c OR (d AND e)"), dnf=True)
        True
        >>> normalized(parse_one("(a OR b) AND c"))  # Checks CNF by default
        True
        >>> normalized(parse_one("a AND (b OR c)"), dnf=True)
        False

    Args:
        expression: The expression to check if it's normalized.
        dnf: Whether to check if the expression is in Disjunctive Normal Form (DNF).
            Default: False, i.e. we check if it's in Conjunctive Normal Form (CNF).
    """
    # The form is violated exactly when the "inner" connector appears above
    # an "outer" one: an AND above an OR for DNF, an OR above an AND for CNF.
    if dnf:
        ancestor, root = exp.And, exp.Or
    else:
        ancestor, root = exp.Or, exp.And

    for connector in find_all_in_scope(expression, root):
        if connector.find_ancestor(ancestor):
            return False
    return True
def normalization_distance(expression: exp.Expression, dnf: bool = False) -> int:
    """
    The difference in the number of predicates between a given expression and its normalized form.

    This is used as an estimate of the cost of the conversion which is exponential in complexity.

    Example:
        >>> import sqlglot
        >>> expression = sqlglot.parse_one("(a AND b) OR (c AND d)")
        >>> normalization_distance(expression)
        4

    Args:
        expression: The expression to compute the normalization distance for.
        dnf: Whether to check if the expression is in Disjunctive Normal Form (DNF).
            Default: False, i.e. we check if it's in Conjunctive Normal Form (CNF).

    Returns:
        The normalization distance.
    """
    predicates_after = sum(_predicate_lengths(expression, dnf))
    predicates_before = sum(1 for _ in expression.find_all(exp.Connector)) + 1
    return predicates_after - predicates_before
def distributive_law(expression, dnf, max_distance):
    """
    x OR (y AND z) -> (x OR y) AND (x OR z)
    (x AND y) OR (y AND z) -> (x OR y) AND (x OR z) AND (y OR y) AND (y OR z)

    Recursively distributes the "from" connector over the "to" connector
    (OR over AND for CNF, AND over OR for DNF), raising OptimizeError if the
    estimated expansion exceeds max_distance.
    """
    if normalized(expression, dnf=dnf):
        return expression

    distance = normalization_distance(expression, dnf=dnf)

    if distance > max_distance:
        raise OptimizeError(f"Normalization distance {distance} exceeds max {max_distance}")

    # Normalize the children first so distribution only has to handle one level.
    exp.replace_children(expression, lambda e: distributive_law(e, dnf, max_distance))
    to_exp, from_exp = (exp.Or, exp.And) if dnf else (exp.And, exp.Or)

    if isinstance(expression, from_exp):
        a, b = expression.unnest_operands()

        from_func = exp.and_ if from_exp == exp.And else exp.or_
        to_func = exp.and_ if to_exp == exp.And else exp.or_

        if isinstance(a, to_exp) and isinstance(b, to_exp):
            # Distribute the smaller subtree over the larger one to limit growth.
            if len(tuple(a.find_all(exp.Connector))) > len(tuple(b.find_all(exp.Connector))):
                return _distribute(a, b, from_func, to_func)
            return _distribute(b, a, from_func, to_func)

        if isinstance(a, to_exp):
            return _distribute(b, a, from_func, to_func)
        if isinstance(b, to_exp):
            return _distribute(a, b, from_func, to_func)

    return expression
class OptimizeError(SqlglotError):
    """Raised when an optimizer rule cannot be applied, e.g. when a
    normalization would expand past its allowed distance."""

    pass
def while_changing(expression: Expression, func: t.Callable[[Expression], E]) -> E:
    """
    Applies a transformation to a given expression until a fix point is reached.

    Args:
        expression: The expression to be transformed.
        func: The transformation to be applied.

    Returns:
        The transformed expression.
    """
    while True:
        # Populate each node's _hash cache before hashing the root; the walk
        # is reversed so descendants are processed before their ancestors.
        # NOTE(review): assumes Expression.__hash__ consults _hash — confirm.
        for n in reversed(tuple(expression.walk())):
            n._hash = hash(n)

        start = hash(expression)
        expression = func(expression)

        # Invalidate the caches so future mutations are reflected in hashes again.
        for n in expression.walk():
            n._hash = None

        # Fix point: the transformation produced a tree with an unchanged hash.
        if start == hash(expression):
            break

    return expression
def rewrite_between(expression: exp.Expression) -> exp.Expression:
    """Rewrite x between y and z to x >= y AND x <= z.

    This is done because comparison simplification is only done on lt/lte/gt/gte.
    """
    if not isinstance(expression, exp.Between):
        return expression

    # A NOT parent needs explicit parentheses so the negation still applies
    # to the whole conjunction.
    wrap = isinstance(expression.parent, exp.Not)

    lower_bound = exp.GTE(this=expression.this.copy(), expression=expression.args["low"])
    upper_bound = exp.LTE(this=expression.this.copy(), expression=expression.args["high"])
    rewritten = exp.and_(lower_bound, upper_bound, copy=False)

    return exp.paren(rewritten, copy=False) if wrap else rewritten
The provided code snippet includes necessary dependencies for implementing the `normalize` function. Write a Python function `def normalize(expression: exp.Expression, dnf: bool = False, max_distance: int = 128)` to solve the following problem:
Rewrite sqlglot AST into conjunctive normal form or disjunctive normal form. Example: >>> import sqlglot >>> expression = sqlglot.parse_one("(x AND y) OR z") >>> normalize(expression, dnf=False).sql() '(x OR z) AND (y OR z)' Args: expression: expression to normalize dnf: rewrite in disjunctive normal form instead. max_distance (int): the maximal estimated distance from cnf/dnf to attempt conversion Returns: sqlglot.Expression: normalized expression
Here is the function:
def normalize(expression: exp.Expression, dnf: bool = False, max_distance: int = 128):
    """
    Rewrite sqlglot AST into conjunctive normal form or disjunctive normal form.

    Example:
        >>> import sqlglot
        >>> expression = sqlglot.parse_one("(x AND y) OR z")
        >>> normalize(expression, dnf=False).sql()
        '(x OR z) AND (y OR z)'

    Args:
        expression: expression to normalize
        dnf: rewrite in disjunctive normal form instead.
        max_distance (int): the maximal estimated distance from cnf/dnf to attempt conversion
    Returns:
        sqlglot.Expression: normalized expression
    """
    # Pruning at exp.Connector stops the walk from descending into nested
    # AND/ORs — each topmost connector subtree is normalized as a whole.
    for node in tuple(expression.walk(prune=lambda e: isinstance(e, exp.Connector))):
        if isinstance(node, exp.Connector):
            if normalized(node, dnf=dnf):
                continue

            # Remember whether this subtree IS the root, and keep a copy so we
            # can restore the original on failure.
            root = node is expression
            original = node.copy()

            node.transform(rewrite_between, copy=False)
            distance = normalization_distance(node, dnf=dnf)

            if distance > max_distance:
                logger.info(
                    f"Skipping normalization because distance {distance} exceeds max {max_distance}"
                )
                return expression

            try:
                node = node.replace(
                    while_changing(node, lambda e: distributive_law(e, dnf, max_distance))
                )
            except OptimizeError as e:
                # Roll back this subtree and give up on the whole expression.
                logger.info(e)
                node.replace(original)
                if root:
                    return original
                return expression

            if root:
                # The root node object may have been replaced; track the new one.
                expression = node

    return expression
152,913 | import itertools
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name
from sqlglot.optimizer.scope import build_scope
def _eliminate(scope, existing_ctes, taken):
if scope.is_derived_table:
return _eliminate_derived_table(scope, existing_ctes, taken)
if scope.is_cte:
return _eliminate_cte(scope, existing_ctes, taken)
def build_scope(expression: exp.Expression) -> t.Optional[Scope]:
    """
    Build a scope tree.

    Args:
        expression: Expression to build the scope tree for.
    Returns:
        The root scope, i.e. the last scope produced by traversal, or None
        when there are no scopes.
    """
    scopes = traverse_scope(expression)
    try:
        return scopes[-1]
    except IndexError:
        return None
The provided code snippet includes necessary dependencies for implementing the `eliminate_subqueries` function. Write a Python function `def eliminate_subqueries(expression)` to solve the following problem:
Rewrite derived tables as CTES, deduplicating if possible. Example: >>> import sqlglot >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y") >>> eliminate_subqueries(expression).sql() 'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y' This also deduplicates common subqueries: >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y CROSS JOIN (SELECT * FROM x) AS z") >>> eliminate_subqueries(expression).sql() 'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y CROSS JOIN y AS z' Args: expression (sqlglot.Expression): expression Returns: sqlglot.Expression: expression
Here is the function:
def eliminate_subqueries(expression):
    """
    Rewrite derived tables as CTES, deduplicating if possible.

    Example:
        >>> import sqlglot
        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y")
        >>> eliminate_subqueries(expression).sql()
        'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y'

    This also deduplicates common subqueries:
        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y CROSS JOIN (SELECT * FROM x) AS z")
        >>> eliminate_subqueries(expression).sql()
        'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y CROSS JOIN y AS z'

    Args:
        expression (sqlglot.Expression): expression
    Returns:
        sqlglot.Expression: expression
    """
    if isinstance(expression, exp.Subquery):
        # It's possible to have subqueries at the root, e.g. (SELECT * FROM x) LIMIT 1
        eliminate_subqueries(expression.this)
        return expression

    root = build_scope(expression)
    if not root:
        return expression

    # Map of alias->Scope|Table
    # These are all aliases that are already used in the expression.
    # We don't want to create new CTEs that conflict with these names.
    taken = {}

    # All CTE aliases in the root scope are taken
    for scope in root.cte_scopes:
        taken[scope.expression.parent.alias] = scope

    # All table names are taken
    for scope in root.traverse():
        taken.update(
            {
                source.name: source
                for _, source in scope.sources.items()
                if isinstance(source, exp.Table)
            }
        )

    # Map of Expression->alias
    # Existing CTES in the root expression. We'll use this for deduplication.
    existing_ctes = {}

    with_ = root.expression.args.get("with")
    # Preserve the RECURSIVE flag of a pre-existing WITH clause.
    recursive = False
    if with_:
        recursive = with_.args.get("recursive")
        for cte in with_.expressions:
            existing_ctes[cte.this] = cte.alias
    new_ctes = []

    # We're adding more CTEs, but we want to maintain the DAG order.
    # Derived tables within an existing CTE need to come before the existing CTE.
    for cte_scope in root.cte_scopes:
        # Append all the new CTEs from this existing CTE
        for scope in cte_scope.traverse():
            if scope is cte_scope:
                # Don't try to eliminate this CTE itself
                continue
            new_cte = _eliminate(scope, existing_ctes, taken)
            if new_cte:
                new_ctes.append(new_cte)

        # Append the existing CTE itself
        new_ctes.append(cte_scope.expression.parent)

    # Now append the rest
    for scope in itertools.chain(root.union_scopes, root.subquery_scopes, root.table_scopes):
        for child_scope in scope.traverse():
            new_cte = _eliminate(child_scope, existing_ctes, taken)
            if new_cte:
                new_ctes.append(new_cte)

    if new_ctes:
        expression.set("with", exp.With(expressions=new_ctes, recursive=recursive))

    return expression
152,914 | from sqlglot import expressions as exp
from sqlglot.optimizer.normalize import normalized
from sqlglot.optimizer.scope import Scope, traverse_scope
def _should_eliminate_join(scope, join, alias):
    """Return True if `join` (aliased `alias` in `scope`) can be dropped safely.

    A join is removable only when its source is a derived-table Scope, nothing
    in the outer query reads from it, and either it is a LEFT join keyed on all
    of the inner query's unique outputs, or it has no ON clause and the inner
    query produces at most one row.
    """
    source = scope.sources.get(alias)

    if not isinstance(source, Scope):
        return False
    if _join_is_used(scope, join, alias):
        return False

    if join.side == "LEFT" and _is_joined_on_all_unique_outputs(source, join):
        return True

    return not join.args.get("on") and _has_single_output_row(source)
def traverse_scope(expression: exp.Expression) -> t.List[Scope]:
    """
    Traverse an expression by its "scopes".

    "Scope" represents the current context of a Select statement.

    This is helpful for optimizing queries, where we need more information than
    the expression tree itself. For example, we might care about the source
    names within a subquery. Returns a list because a generator could result in
    incomplete properties which is confusing.

    Examples:
        >>> import sqlglot
        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT a FROM x) AS y")
        >>> scopes = traverse_scope(expression)
        >>> scopes[0].expression.sql(), list(scopes[0].sources)
        ('SELECT a FROM x', ['x'])
        >>> scopes[1].expression.sql(), list(scopes[1].sources)
        ('SELECT a FROM (SELECT a FROM x) AS y', ['y'])

    Args:
        expression: Expression to traverse

    Returns:
        A list of the created scope instances
    """
    if isinstance(expression, exp.DDL) and isinstance(expression.expression, exp.Query):
        # We ignore the DDL expression and build a scope for its query instead
        ddl_with = expression.args.get("with")
        expression = expression.expression

        # If the DDL has CTEs attached, we need to add them to the query, or
        # prepend them if the query itself already has CTEs attached to it
        if ddl_with:
            # Detach the WITH clause from the DDL node before grafting it onto the query.
            ddl_with.pop()
            query_ctes = expression.ctes
            if not query_ctes:
                expression.set("with", ddl_with)
            else:
                # DDL-level CTEs go first so the query's own CTEs may reference them.
                expression.args["with"].set("recursive", ddl_with.recursive)
                expression.args["with"].set("expressions", [*ddl_with.expressions, *query_ctes])

    if isinstance(expression, exp.Query):
        return list(_traverse_scope(Scope(expression)))

    # Non-query expressions (e.g. plain DDL without a query body) have no scopes.
    return []
The provided code snippet includes necessary dependencies for implementing the `eliminate_joins` function. Write a Python function `def eliminate_joins(expression)` to solve the following problem:
Remove unused joins from an expression. This only removes joins when we know that the join condition doesn't produce duplicate rows. Example: >>> import sqlglot >>> sql = "SELECT x.a FROM x LEFT JOIN (SELECT DISTINCT y.b FROM y) AS y ON x.b = y.b" >>> expression = sqlglot.parse_one(sql) >>> eliminate_joins(expression).sql() 'SELECT x.a FROM x' Args: expression (sqlglot.Expression): expression to optimize Returns: sqlglot.Expression: optimized expression
Here is the function:
def eliminate_joins(expression):
    """Drop joins that provably contribute nothing to the query result.

    A join is removed only when its columns are unused and the join condition
    cannot duplicate rows.

    Example:
        "SELECT x.a FROM x LEFT JOIN (SELECT DISTINCT y.b FROM y) AS y ON x.b = y.b"
        becomes "SELECT x.a FROM x".

    Args:
        expression (sqlglot.Expression): expression to optimize

    Returns:
        sqlglot.Expression: optimized expression
    """
    for current_scope in traverse_scope(expression):
        # With unqualified columns we can't reliably tell which source a column
        # comes from, so bail out of this scope entirely.
        if current_scope.unqualified_columns:
            continue

        join_list = current_scope.expression.args.get("joins", [])

        # Walk the joins last-to-first so that chains of unused joins collapse.
        for candidate in reversed(join_list):
            name = candidate.alias_or_name
            if _should_eliminate_join(current_scope, candidate, name):
                candidate.pop()
                current_scope.remove_source(name)

    return expression
152,915 | from sqlglot import exp
from sqlglot.optimizer.normalize import normalized
from sqlglot.optimizer.scope import build_scope, find_in_scope
from sqlglot.optimizer.simplify import simplify
def pushdown(condition, sources, scope_ref_count, dialect, join_index=None):
    """Push the predicate `condition` down into the given source scopes.

    The condition is simplified first, then split on its top-level connector
    and routed to the CNF or DNF pushdown strategy.
    """
    if not condition:
        return

    condition = condition.replace(simplify(condition, dialect=dialect))

    # Prefer the CNF strategy unless the condition is exclusively DNF-shaped.
    cnf_like = normalized(condition) or not normalized(condition, dnf=True)

    connector = exp.And if cnf_like else exp.Or
    if isinstance(condition, connector):
        predicates = list(condition.flatten())
    else:
        predicates = [condition]

    if cnf_like:
        pushdown_cnf(predicates, sources, scope_ref_count, join_index=join_index)
    else:
        pushdown_dnf(predicates, sources, scope_ref_count)
def build_scope(expression: exp.Expression) -> t.Optional[Scope]:
    """Build the scope tree for `expression` and return its root.

    Args:
        expression: Expression to build the scope tree for.

    Returns:
        The root scope, or None if the expression produces no scopes.
    """
    # The root scope is always the last one produced by the traversal.
    scopes = traverse_scope(expression)
    return seq_get(scopes, -1)
The provided code snippet includes necessary dependencies for implementing the `pushdown_predicates` function. Write a Python function `def pushdown_predicates(expression, dialect=None)` to solve the following problem:
Rewrite sqlglot AST to pushdown predicates in FROMS and JOINS Example: >>> import sqlglot >>> sql = "SELECT y.a AS a FROM (SELECT x.a AS a FROM x AS x) AS y WHERE y.a = 1" >>> expression = sqlglot.parse_one(sql) >>> pushdown_predicates(expression).sql() 'SELECT y.a AS a FROM (SELECT x.a AS a FROM x AS x WHERE x.a = 1) AS y WHERE TRUE' Args: expression (sqlglot.Expression): expression to optimize Returns: sqlglot.Expression: optimized expression
Here is the function:
def pushdown_predicates(expression, dialect=None):
    """
    Rewrite sqlglot AST to pushdown predicates in FROMS and JOINS

    Example:
        >>> import sqlglot
        >>> sql = "SELECT y.a AS a FROM (SELECT x.a AS a FROM x AS x) AS y WHERE y.a = 1"
        >>> expression = sqlglot.parse_one(sql)
        >>> pushdown_predicates(expression).sql()
        'SELECT y.a AS a FROM (SELECT x.a AS a FROM x AS x WHERE x.a = 1) AS y WHERE TRUE'

    Args:
        expression (sqlglot.Expression): expression to optimize
        dialect: dialect used when simplifying pushed-down predicates

    Returns:
        sqlglot.Expression: optimized expression
    """
    root = build_scope(expression)

    if root:
        scope_ref_count = root.ref_count()

        # Visit inner scopes first (reverse post-order) so predicates can
        # cascade down through multiple levels of nesting in a single pass.
        for scope in reversed(list(root.traverse())):
            select = scope.expression
            where = select.args.get("where")
            if where:
                selected_sources = scope.selected_sources
                # Join positions let the pushdown know how far a predicate may travel.
                join_index = {
                    join.alias_or_name: i for i, join in enumerate(select.args.get("joins") or [])
                }
                # a right join can only push down to itself and not the source FROM table
                for k, (node, source) in selected_sources.items():
                    parent = node.find_ancestor(exp.Join, exp.From)
                    if isinstance(parent, exp.Join) and parent.side == "RIGHT":
                        # Narrow pushdown targets to the RIGHT-joined source only.
                        selected_sources = {k: (node, source)}
                        break
                pushdown(where.this, selected_sources, scope_ref_count, dialect, join_index)

            # joins should only pushdown into itself, not to other joins
            # so we limit the selected sources to only itself
            for join in select.args.get("joins") or []:
                name = join.alias_or_name
                if name in scope.selected_sources:
                    pushdown(
                        join.args.get("on"),
                        {name: scope.selected_sources[name]},
                        scope_ref_count,
                        dialect,
                    )

    return expression
152,916 | from sqlglot.optimizer.scope import Scope, build_scope
class Scope:
    """
    Selection scope.

    Attributes:
        expression (exp.Select|exp.Union): Root expression of this scope
        sources (dict[str, exp.Table|Scope]): Mapping of source name to either
            a Table expression or another Scope instance. For example:
                SELECT * FROM x                     {"x": Table(this="x")}
                SELECT * FROM x AS y                {"y": Table(this="x")}
                SELECT * FROM (SELECT ...) AS y     {"y": Scope(...)}
        lateral_sources (dict[str, exp.Table|Scope]): Sources from laterals
            For example:
                SELECT c FROM x LATERAL VIEW EXPLODE (a) AS c;
            The LATERAL VIEW EXPLODE gets x as a source.
        cte_sources (dict[str, Scope]): Sources from CTES
        outer_columns (list[str]): If this is a derived table or CTE, and the outer query
            defines a column list for the alias of this scope, this is that list of columns.
            For example:
                SELECT * FROM (SELECT ...) AS y(col1, col2)
            The inner query would have `["col1", "col2"]` for its `outer_columns`
        parent (Scope): Parent scope
        scope_type (ScopeType): Type of this scope, relative to it's parent
        subquery_scopes (list[Scope]): List of all child scopes for subqueries
        cte_scopes (list[Scope]): List of all child scopes for CTEs
        derived_table_scopes (list[Scope]): List of all child scopes for derived_tables
        udtf_scopes (list[Scope]): List of all child scopes for user defined tabular functions
        table_scopes (list[Scope]): derived_table_scopes + udtf_scopes, in the order that they're defined
        union_scopes (list[Scope, Scope]): If this Scope is for a Union expression, this will be
            a list of the left and right child scopes.
    """

    def __init__(
        self,
        expression,
        sources=None,
        outer_columns=None,
        parent=None,
        scope_type=ScopeType.ROOT,
        lateral_sources=None,
        cte_sources=None,
    ):
        self.expression = expression
        self.sources = sources or {}
        self.lateral_sources = lateral_sources or {}
        self.cte_sources = cte_sources or {}
        # Laterals and CTEs are selectable too, so fold them into `sources`.
        self.sources.update(self.lateral_sources)
        self.sources.update(self.cte_sources)
        self.outer_columns = outer_columns or []
        self.parent = parent
        self.scope_type = scope_type
        self.subquery_scopes = []
        self.derived_table_scopes = []
        self.table_scopes = []
        self.cte_scopes = []
        self.union_scopes = []
        self.udtf_scopes = []
        self.clear_cache()

    def clear_cache(self):
        """Invalidate all lazily-computed attributes; call after mutating the tree."""
        self._collected = False
        self._raw_columns = None
        self._derived_tables = None
        self._udtfs = None
        self._tables = None
        self._ctes = None
        self._subqueries = None
        self._selected_sources = None
        self._columns = None
        self._external_columns = None
        self._join_hints = None
        self._pivots = None
        self._references = None

    def branch(
        self, expression, scope_type, sources=None, cte_sources=None, lateral_sources=None, **kwargs
    ):
        """Branch from the current scope to a new, inner scope"""
        return Scope(
            expression=expression.unnest(),
            sources=sources.copy() if sources else None,
            parent=self,
            scope_type=scope_type,
            # Child scopes inherit the parent's CTEs, shadowed by their own.
            cte_sources={**self.cte_sources, **(cte_sources or {})},
            lateral_sources=lateral_sources.copy() if lateral_sources else None,
            **kwargs,
        )

    def _collect(self):
        # One pass over the tree, bucketing nodes of interest by kind.
        self._tables = []
        self._ctes = []
        self._subqueries = []
        self._derived_tables = []
        self._udtfs = []
        self._raw_columns = []
        self._join_hints = []

        for node in self.walk(bfs=False):
            if node is self.expression:
                continue

            if isinstance(node, exp.Column) and not isinstance(node.this, exp.Star):
                self._raw_columns.append(node)
            elif isinstance(node, exp.Table) and not isinstance(node.parent, exp.JoinHint):
                self._tables.append(node)
            elif isinstance(node, exp.JoinHint):
                self._join_hints.append(node)
            elif isinstance(node, exp.UDTF):
                self._udtfs.append(node)
            elif isinstance(node, exp.CTE):
                self._ctes.append(node)
            elif _is_derived_table(node) and isinstance(
                node.parent, (exp.From, exp.Join, exp.Subquery)
            ):
                self._derived_tables.append(node)
            elif isinstance(node, exp.UNWRAPPED_QUERIES):
                self._subqueries.append(node)

        self._collected = True

    def _ensure_collected(self):
        if not self._collected:
            self._collect()

    def walk(self, bfs=True, prune=None):
        # Fix: forward the caller's `prune` callback (it was previously discarded
        # by passing the literal None through to walk_in_scope).
        return walk_in_scope(self.expression, bfs=bfs, prune=prune)

    def find(self, *expression_types, bfs=True):
        return find_in_scope(self.expression, expression_types, bfs=bfs)

    def find_all(self, *expression_types, bfs=True):
        return find_all_in_scope(self.expression, expression_types, bfs=bfs)

    def replace(self, old, new):
        """
        Replace `old` with `new`.

        This can be used instead of `exp.Expression.replace` to ensure the `Scope` is kept up-to-date.

        Args:
            old (exp.Expression): old node
            new (exp.Expression): new node
        """
        old.replace(new)
        self.clear_cache()

    @property
    def tables(self):
        """
        List of tables in this scope.

        Returns:
            list[exp.Table]: tables
        """
        self._ensure_collected()
        return self._tables

    @property
    def ctes(self):
        """
        List of CTEs in this scope.

        Returns:
            list[exp.CTE]: ctes
        """
        self._ensure_collected()
        return self._ctes

    @property
    def derived_tables(self):
        """
        List of derived tables in this scope.

        For example:
            SELECT * FROM (SELECT ...) <- that's a derived table

        Returns:
            list[exp.Subquery]: derived tables
        """
        self._ensure_collected()
        return self._derived_tables

    @property
    def udtfs(self):
        """
        List of "User Defined Tabular Functions" in this scope.

        Returns:
            list[exp.UDTF]: UDTFs
        """
        self._ensure_collected()
        return self._udtfs

    @property
    def subqueries(self):
        """
        List of subqueries in this scope.

        For example:
            SELECT * FROM x WHERE a IN (SELECT ...) <- that's a subquery

        Returns:
            list[exp.Select | exp.Union]: subqueries
        """
        self._ensure_collected()
        return self._subqueries

    @property
    def columns(self):
        """
        List of columns in this scope.

        Returns:
            list[exp.Column]: Column instances in this scope, plus any
                Columns that reference this scope from correlated subqueries.
        """
        if self._columns is None:
            self._ensure_collected()
            columns = self._raw_columns

            external_columns = [
                column
                for scope in itertools.chain(self.subquery_scopes, self.udtf_scopes)
                for column in scope.external_columns
            ]

            named_selects = set(self.expression.named_selects)

            self._columns = []
            for column in columns + external_columns:
                ancestor = column.find_ancestor(
                    exp.Select, exp.Qualify, exp.Order, exp.Having, exp.Hint, exp.Table, exp.Star
                )
                if (
                    not ancestor
                    or column.table
                    or isinstance(ancestor, exp.Select)
                    or (isinstance(ancestor, exp.Table) and not isinstance(ancestor.this, exp.Func))
                    or (
                        isinstance(ancestor, exp.Order)
                        and (
                            isinstance(ancestor.parent, exp.Window)
                            or column.name not in named_selects
                        )
                    )
                ):
                    self._columns.append(column)

        return self._columns

    @property
    def selected_sources(self):
        """
        Mapping of nodes and sources that are actually selected from in this scope.

        That is, all tables in a schema are selectable at any point. But a
        table only becomes a selected source if it's included in a FROM or JOIN clause.

        Returns:
            dict[str, (exp.Table|exp.Select, exp.Table|Scope)]: selected sources and nodes
        """
        if self._selected_sources is None:
            result = {}

            for name, node in self.references:
                if name in result:
                    raise OptimizeError(f"Alias already used: {name}")
                if name in self.sources:
                    result[name] = (node, self.sources[name])

            self._selected_sources = result
        return self._selected_sources

    @property
    def references(self) -> t.List[t.Tuple[str, exp.Expression]]:
        """(alias, node) pairs for every table, derived table and UDTF referenced here."""
        if self._references is None:
            self._references = []

            for table in self.tables:
                self._references.append((table.alias_or_name, table))
            for expression in itertools.chain(self.derived_tables, self.udtfs):
                self._references.append(
                    (
                        expression.alias,
                        expression if expression.args.get("pivots") else expression.unnest(),
                    )
                )

        return self._references

    @property
    def external_columns(self):
        """
        Columns that appear to reference sources in outer scopes.

        Returns:
            list[exp.Column]: Column instances that don't reference
                sources in the current scope.
        """
        if self._external_columns is None:
            if isinstance(self.expression, exp.Union):
                left, right = self.union_scopes
                self._external_columns = left.external_columns + right.external_columns
            else:
                self._external_columns = [
                    c for c in self.columns if c.table not in self.selected_sources
                ]

        return self._external_columns

    @property
    def unqualified_columns(self):
        """
        Unqualified columns in the current scope.

        Returns:
            list[exp.Column]: Unqualified columns
        """
        return [c for c in self.columns if not c.table]

    @property
    def join_hints(self):
        """
        Hints that exist in the scope that reference tables

        Returns:
            list[exp.JoinHint]: Join hints that are referenced within the scope
        """
        if self._join_hints is None:
            return []
        return self._join_hints

    @property
    def pivots(self):
        if not self._pivots:
            self._pivots = [
                pivot for _, node in self.references for pivot in node.args.get("pivots") or []
            ]

        return self._pivots

    def source_columns(self, source_name):
        """
        Get all columns in the current scope for a particular source.

        Args:
            source_name (str): Name of the source
        Returns:
            list[exp.Column]: Column instances that reference `source_name`
        """
        return [column for column in self.columns if column.table == source_name]

    @property
    def is_subquery(self):
        """Determine if this scope is a subquery"""
        return self.scope_type == ScopeType.SUBQUERY

    @property
    def is_derived_table(self):
        """Determine if this scope is a derived table"""
        return self.scope_type == ScopeType.DERIVED_TABLE

    @property
    def is_union(self):
        """Determine if this scope is a union"""
        return self.scope_type == ScopeType.UNION

    @property
    def is_cte(self):
        """Determine if this scope is a common table expression"""
        return self.scope_type == ScopeType.CTE

    @property
    def is_root(self):
        """Determine if this is the root scope"""
        return self.scope_type == ScopeType.ROOT

    @property
    def is_udtf(self):
        """Determine if this scope is a UDTF (User Defined Table Function)"""
        return self.scope_type == ScopeType.UDTF

    @property
    def is_correlated_subquery(self):
        """Determine if this scope is a correlated subquery"""
        return bool(
            (self.is_subquery or (self.parent and isinstance(self.parent.expression, exp.Lateral)))
            and self.external_columns
        )

    def rename_source(self, old_name, new_name):
        """Rename a source in this scope"""
        columns = self.sources.pop(old_name or "", [])
        self.sources[new_name] = columns

    def add_source(self, name, source):
        """Add a source to this scope"""
        self.sources[name] = source
        self.clear_cache()

    def remove_source(self, name):
        """Remove a source from this scope"""
        self.sources.pop(name, None)
        self.clear_cache()

    def __repr__(self):
        return f"Scope<{self.expression.sql()}>"

    def traverse(self):
        """
        Traverse the scope tree from this node.

        Yields:
            Scope: scope instances in depth-first-search post-order
        """
        stack = [self]
        result = []
        while stack:
            scope = stack.pop()
            result.append(scope)
            stack.extend(
                itertools.chain(
                    scope.cte_scopes,
                    scope.union_scopes,
                    scope.table_scopes,
                    scope.subquery_scopes,
                )
            )
        # Reversing the pre-order visit yields post-order.
        yield from reversed(result)

    def ref_count(self):
        """
        Count the number of times each scope in this tree is referenced.

        Returns:
            dict[int, int]: Mapping of Scope instance ID to reference count
        """
        scope_ref_count = defaultdict(lambda: 0)

        for scope in self.traverse():
            for _, source in scope.selected_sources.values():
                scope_ref_count[id(source)] += 1

        return scope_ref_count
def build_scope(expression: exp.Expression) -> t.Optional[Scope]:
    """Construct the scope tree and return its root scope (or None).

    Args:
        expression: Expression to build the scope tree for.

    Returns:
        The root scope
    """
    # traverse_scope emits scopes bottom-up, so the root comes last.
    return seq_get(traverse_scope(expression), -1)
The provided code snippet includes necessary dependencies for implementing the `eliminate_ctes` function. Write a Python function `def eliminate_ctes(expression)` to solve the following problem:
Remove unused CTEs from an expression. Example: >>> import sqlglot >>> sql = "WITH y AS (SELECT a FROM x) SELECT a FROM z" >>> expression = sqlglot.parse_one(sql) >>> eliminate_ctes(expression).sql() 'SELECT a FROM z' Args: expression (sqlglot.Expression): expression to optimize Returns: sqlglot.Expression: optimized expression
Here is the function:
def eliminate_ctes(expression):
    """Strip CTEs that nothing in the query references.

    Example:
        "WITH y AS (SELECT a FROM x) SELECT a FROM z"  ->  "SELECT a FROM z"

    Args:
        expression (sqlglot.Expression): expression to optimize

    Returns:
        sqlglot.Expression: optimized expression
    """
    root = build_scope(expression)
    if not root:
        return expression

    ref_count = root.ref_count()

    # Walk bottom-up so removing one CTE can expose further unused ones.
    for scope in reversed(list(root.traverse())):
        if not scope.is_cte:
            continue
        if ref_count[id(scope)] > 0:
            continue

        cte_node = scope.expression.parent
        with_node = cte_node.parent
        cte_node.pop()

        # Drop the WITH clause entirely once it holds no CTEs.
        if with_node and len(with_node.expressions) <= 0:
            with_node.pop()

        # Everything this CTE selected from now has one fewer consumer.
        for _, src in scope.selected_sources.values():
            if isinstance(src, Scope):
                ref_count[id(src)] -= 1

    return expression
152,917 | from collections import defaultdict
from sqlglot import expressions as exp
from sqlglot.helper import find_new_name
from sqlglot.optimizer.scope import Scope, traverse_scope
def merge_ctes(expression, leave_tables_isolated=False):
    """Inline CTEs that are selected from exactly once into their consuming query."""
    scopes = traverse_scope(expression)

    # All places where we select from CTEs.
    # We key on the CTE scope so we can detect CTES that are selected from multiple times.
    cte_selections = defaultdict(list)
    for outer_scope in scopes:
        for table, inner_scope in outer_scope.selected_sources.values():
            if isinstance(inner_scope, Scope) and inner_scope.is_cte:
                cte_selections[id(inner_scope)].append(
                    (
                        outer_scope,
                        inner_scope,
                        table,
                    )
                )

    # Only CTEs consumed exactly once are candidates for merging.
    singular_cte_selections = [v[0] for k, v in cte_selections.items() if len(v) == 1]
    for outer_scope, inner_scope, table in singular_cte_selections:
        from_or_join = table.find_ancestor(exp.From, exp.Join)
        if _mergeable(outer_scope, inner_scope, leave_tables_isolated, from_or_join):
            alias = table.alias_or_name

            # Rename inner sources first to avoid alias collisions, then splice
            # each clause of the inner query into the outer one.
            _rename_inner_sources(outer_scope, inner_scope, alias)
            _merge_from(outer_scope, inner_scope, table, alias)
            _merge_expressions(outer_scope, inner_scope, alias)
            _merge_joins(outer_scope, inner_scope, from_or_join)
            _merge_where(outer_scope, inner_scope, from_or_join)
            _merge_order(outer_scope, inner_scope)
            _merge_hints(outer_scope, inner_scope)
            _pop_cte(inner_scope)
            outer_scope.clear_cache()
    return expression
def merge_derived_tables(expression, leave_tables_isolated=False):
    """Inline mergeable derived tables (FROM/JOIN subqueries) into their outer query."""
    for outer_scope in traverse_scope(expression):
        for subquery in outer_scope.derived_tables:
            from_or_join = subquery.find_ancestor(exp.From, exp.Join)
            alias = subquery.alias_or_name
            inner_scope = outer_scope.sources[alias]

            if _mergeable(outer_scope, inner_scope, leave_tables_isolated, from_or_join):
                # Rename inner sources first to avoid alias collisions, then splice
                # each clause of the inner query into the outer one.
                _rename_inner_sources(outer_scope, inner_scope, alias)
                _merge_from(outer_scope, inner_scope, subquery, alias)
                _merge_expressions(outer_scope, inner_scope, alias)
                _merge_joins(outer_scope, inner_scope, from_or_join)
                _merge_where(outer_scope, inner_scope, from_or_join)
                _merge_order(outer_scope, inner_scope)
                _merge_hints(outer_scope, inner_scope)
                outer_scope.clear_cache()
    return expression
The provided code snippet includes necessary dependencies for implementing the `merge_subqueries` function. Write a Python function `def merge_subqueries(expression, leave_tables_isolated=False)` to solve the following problem:
Rewrite sqlglot AST to merge derived tables into the outer query. This also merges CTEs if they are selected from only once. Example: >>> import sqlglot >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT x.a FROM x) CROSS JOIN y") >>> merge_subqueries(expression).sql() 'SELECT x.a FROM x CROSS JOIN y' If `leave_tables_isolated` is True, this will not merge inner queries into outer queries if it would result in multiple table selects in a single query: >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT x.a FROM x) CROSS JOIN y") >>> merge_subqueries(expression, leave_tables_isolated=True).sql() 'SELECT a FROM (SELECT x.a FROM x) CROSS JOIN y' Inspired by https://dev.mysql.com/doc/refman/8.0/en/derived-table-optimization.html Args: expression (sqlglot.Expression): expression to optimize leave_tables_isolated (bool): Returns: sqlglot.Expression: optimized expression
Here is the function:
def merge_subqueries(expression, leave_tables_isolated=False):
    """Fold derived tables — and CTEs consumed only once — into the outer query.

    Example:
        "SELECT a FROM (SELECT x.a FROM x) CROSS JOIN y"
        becomes "SELECT x.a FROM x CROSS JOIN y".

    With `leave_tables_isolated=True`, a merge is skipped whenever it would put
    multiple table selects into a single query, so the example above is left
    unchanged.

    Inspired by https://dev.mysql.com/doc/refman/8.0/en/derived-table-optimization.html

    Args:
        expression (sqlglot.Expression): expression to optimize
        leave_tables_isolated (bool):
    Returns:
        sqlglot.Expression: optimized expression
    """
    # CTE merging runs first so freshly-inlined bodies are seen by the
    # derived-table pass.
    for merge_pass in (merge_ctes, merge_derived_tables):
        expression = merge_pass(expression, leave_tables_isolated)
    return expression
152,918 | from sqlglot import exp
from sqlglot.helper import name_sequence
from sqlglot.optimizer.scope import ScopeType, traverse_scope
def unnest(select, parent_select, next_alias_name):
    """Rewrite an uncorrelated scalar/IN/ANY subquery `select` inside `parent_select`
    as a CROSS or LEFT join, when it is safe to do so. No-op (plain return) otherwise."""
    # Only single-projection subqueries can be unnested this way.
    if len(select.selects) > 1:
        return

    predicate = select.find_ancestor(exp.Condition)
    alias = next_alias_name()

    if (
        not predicate
        or parent_select is not predicate.parent_select
        or not parent_select.args.get("from")
    ):
        return

    clause = predicate.find_ancestor(exp.Having, exp.Where, exp.Join)

    # This subquery returns a scalar and can just be converted to a cross join
    if not isinstance(predicate, (exp.In, exp.Any)):
        column = exp.column(select.selects[0].alias_or_name, alias)

        clause_parent_select = clause.parent_select if clause else None

        # Wrap in MAX when the outer query aggregates, so the scalar survives grouping.
        if (isinstance(clause, exp.Having) and clause_parent_select is parent_select) or (
            (not clause or clause_parent_select is not parent_select)
            and (
                parent_select.args.get("group")
                or any(projection.find(exp.AggFunc) for projection in parent_select.selects)
            )
        ):
            column = exp.Max(this=column)
        elif not isinstance(select.parent, exp.Subquery):
            return

        _replace(select.parent, column)
        parent_select.join(select, join_type="CROSS", join_alias=alias, copy=False)
        return

    # LIMIT/OFFSET change the row set, so the join rewrite would not be equivalent.
    if select.find(exp.Limit, exp.Offset):
        return

    if isinstance(predicate, exp.Any):
        predicate = predicate.find_ancestor(exp.EQ)

        if not predicate or parent_select is not predicate.parent_select:
            return

    column = _other_operand(predicate)
    value = select.selects[0]

    join_key = exp.column(value.alias, alias)
    join_key_not_null = join_key.is_(exp.null()).not_()

    # The original IN/ANY test becomes a null check on the LEFT-join key.
    if isinstance(clause, exp.Join):
        _replace(predicate, exp.true())
        parent_select.where(join_key_not_null, copy=False)
    else:
        _replace(predicate, join_key_not_null)

    group = select.args.get("group")

    # Ensure the join key is unique so the LEFT join cannot duplicate rows.
    if group:
        if {value.this} != set(group.expressions):
            select = (
                exp.select(exp.column(value.alias, "_q"))
                .from_(select.subquery("_q", copy=False), copy=False)
                .group_by(exp.column(value.alias, "_q"), copy=False)
            )
    else:
        select = select.group_by(value.this, copy=False)

    parent_select.join(
        select,
        on=column.eq(join_key),
        join_type="LEFT",
        join_alias=alias,
        copy=False,
    )
def decorrelate(select, parent_select, external_columns, next_alias_name):
    """Rewrite a correlated subquery `select` as a grouped LEFT join on `parent_select`.

    `external_columns` are the columns in `select` that reference the outer query.
    Returns early (no-op) whenever the correlation pattern is not one this
    rewrite can handle safely.
    """
    where = select.args.get("where")

    # OR'ed correlation predicates and LIMIT/OFFSET can't be turned into join keys.
    if not where or where.find(exp.Or) or select.find(exp.Limit, exp.Offset):
        return

    table_alias = next_alias_name()
    keys = []

    # for all external columns in the where statement, find the relevant predicate
    # keys to convert it into a join
    for column in external_columns:
        if column.find_ancestor(exp.Where) is not where:
            return

        predicate = column.find_ancestor(exp.Predicate)

        if not predicate or predicate.find_ancestor(exp.Where) is not where:
            return

        if isinstance(predicate, exp.Binary):
            # The join key is whichever side of the predicate is NOT the outer column.
            key = (
                predicate.right
                if any(node is column for node in predicate.left.walk())
                else predicate.left
            )
        else:
            return

        keys.append((key, column, predicate))

    # At least one equality is required to form a sensible join condition.
    if not any(isinstance(predicate, exp.EQ) for *_, predicate in keys):
        return

    is_subquery_projection = any(
        node is select.parent for node in parent_select.selects if isinstance(node, exp.Subquery)
    )

    value = select.selects[0]
    key_aliases = {}
    group_by = []

    for key, _, predicate in keys:
        # if we filter on the value of the subquery, it needs to be unique
        if key == value.this:
            key_aliases[key] = value.alias
            group_by.append(key)
        else:
            if key not in key_aliases:
                key_aliases[key] = next_alias_name()
            # all predicates that are equalities must also be in the unique
            # so that we don't do a many to many join
            if isinstance(predicate, exp.EQ) and key not in group_by:
                group_by.append(key)

    parent_predicate = select.find_ancestor(exp.Predicate)

    # if the value of the subquery is not an agg or a key, we need to collect it into an array
    # so that it can be grouped. For subquery projections, we use a MAX aggregation instead.
    agg_func = exp.Max if is_subquery_projection else exp.ArrayAgg
    if not value.find(exp.AggFunc) and value.this not in group_by:
        select.select(
            exp.alias_(agg_func(this=value.this), value.alias, quoted=False),
            append=False,
            copy=False,
        )

    # exists queries should not have any selects as it only checks if there are any rows
    # all selects will be added by the optimizer and only used for join keys
    if isinstance(parent_predicate, exp.Exists):
        select.args["expressions"] = []

    for key, alias in key_aliases.items():
        if key in group_by:
            # add all keys to the projections of the subquery
            # so that we can use it as a join key
            if isinstance(parent_predicate, exp.Exists) or key != value.this:
                select.select(f"{key} AS {alias}", copy=False)
        else:
            select.select(exp.alias_(agg_func(this=key.copy()), alias, quoted=False), copy=False)

    alias = exp.column(value.alias, table_alias)
    other = _other_operand(parent_predicate)

    # Rewrite the outer predicate in terms of the joined columns.
    if isinstance(parent_predicate, exp.Exists):
        alias = exp.column(list(key_aliases.values())[0], table_alias)
        parent_predicate = _replace(parent_predicate, f"NOT {alias} IS NULL")
    elif isinstance(parent_predicate, exp.All):
        parent_predicate = _replace(
            parent_predicate.parent, f"ARRAY_ALL({alias}, _x -> _x = {other})"
        )
    elif isinstance(parent_predicate, exp.Any):
        if value.this in group_by:
            parent_predicate = _replace(parent_predicate.parent, f"{other} = {alias}")
        else:
            parent_predicate = _replace(parent_predicate, f"ARRAY_ANY({alias}, _x -> _x = {other})")
    elif isinstance(parent_predicate, exp.In):
        if value.this in group_by:
            parent_predicate = _replace(parent_predicate, f"{other} = {alias}")
        else:
            parent_predicate = _replace(
                parent_predicate,
                f"ARRAY_ANY({alias}, _x -> _x = {parent_predicate.this})",
            )
    else:
        if is_subquery_projection:
            alias = exp.alias_(alias, select.parent.alias)

        # COUNT always returns 0 on empty datasets, so we need take that into consideration here
        # by transforming all counts into 0 and using that as the coalesced value
        if value.find(exp.Count):

            def remove_aggs(node):
                if isinstance(node, exp.Count):
                    return exp.Literal.number(0)
                elif isinstance(node, exp.AggFunc):
                    return exp.null()
                return node

            alias = exp.Coalesce(
                this=alias,
                expressions=[value.this.transform(remove_aggs)],
            )

        select.parent.replace(alias)

    # Neutralize the correlated predicates inside the subquery and repoint the
    # outer references at the new join columns.
    for key, column, predicate in keys:
        predicate.replace(exp.true())
        nested = exp.column(key_aliases[key], table_alias)

        if is_subquery_projection:
            key.replace(nested)
            continue

        if key in group_by:
            key.replace(nested)
        elif isinstance(predicate, exp.EQ):
            parent_predicate = _replace(
                parent_predicate,
                f"({parent_predicate} AND ARRAY_CONTAINS({nested}, {column}))",
            )
        else:
            key.replace(exp.to_identifier("_x"))
            parent_predicate = _replace(
                parent_predicate,
                f"({parent_predicate} AND ARRAY_ANY({nested}, _x -> {predicate}))",
            )

    parent_select.join(
        select.group_by(*group_by, copy=False),
        on=[predicate for *_, predicate in keys if isinstance(predicate, exp.EQ)],
        join_type="LEFT",
        join_alias=table_alias,
        copy=False,
    )
def name_sequence(prefix: str) -> t.Callable[[], str]:
    """Return a zero-argument callable yielding "<prefix>0", "<prefix>1", ... on successive calls."""
    counter = count()

    def _next_name() -> str:
        return f"{prefix}{next(counter)}"

    return _next_name
class ScopeType(Enum):
    """Kinds of scopes produced while traversing a query expression."""

    ROOT = 1
    SUBQUERY = 2
    DERIVED_TABLE = 3
    CTE = 4
    UNION = 5
    UDTF = 6
def traverse_scope(expression: exp.Expression) -> t.List[Scope]:
    """
    Build and return the list of `Scope` instances for *expression*.

    A "Scope" captures the context of a single SELECT statement — for example,
    which source names are visible inside it — information the optimizer needs
    beyond the raw expression tree. A list (not a generator) is returned so
    every scope is fully populated before callers inspect it.

    Examples:
        >>> import sqlglot
        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT a FROM x) AS y")
        >>> scopes = traverse_scope(expression)
        >>> scopes[0].expression.sql(), list(scopes[0].sources)
        ('SELECT a FROM x', ['x'])
        >>> scopes[1].expression.sql(), list(scopes[1].sources)
        ('SELECT a FROM (SELECT a FROM x) AS y', ['y'])

    Args:
        expression: Expression to traverse.

    Returns:
        A list of the created scope instances.
    """
    if isinstance(expression, exp.DDL) and isinstance(expression.expression, exp.Query):
        # Scope the DDL's inner query rather than the DDL node itself.
        ddl_with = expression.args.get("with")
        expression = expression.expression

        # CTEs attached to the DDL must move onto the query: install them
        # directly, or prepend them to the query's own CTE list.
        if ddl_with:
            ddl_with.pop()
            query_ctes = expression.ctes
            if query_ctes:
                existing = expression.args["with"]
                existing.set("recursive", ddl_with.recursive)
                existing.set("expressions", [*ddl_with.expressions, *query_ctes])
            else:
                expression.set("with", ddl_with)

    if not isinstance(expression, exp.Query):
        return []
    return list(_traverse_scope(Scope(expression)))
The provided code snippet includes necessary dependencies for implementing the `unnest_subqueries` function. Write a Python function `def unnest_subqueries(expression)` to solve the following problem:
Rewrite sqlglot AST to convert some predicates with subqueries into joins. Convert scalar subqueries into cross joins. Convert correlated or vectorized subqueries into a group by so it is not a many to many left join. Example: >>> import sqlglot >>> expression = sqlglot.parse_one("SELECT * FROM x AS x WHERE (SELECT y.a AS a FROM y AS y WHERE x.a = y.a) = 1 ") >>> unnest_subqueries(expression).sql() 'SELECT * FROM x AS x LEFT JOIN (SELECT y.a AS a FROM y AS y WHERE TRUE GROUP BY y.a) AS _u_0 ON x.a = _u_0.a WHERE _u_0.a = 1' Args: expression (sqlglot.Expression): expression to unnest Returns: sqlglot.Expression: unnested expression
Here is the function:
def unnest_subqueries(expression):
    """
    Rewrite sqlglot AST to convert some predicates with subqueries into joins.

    Scalar subqueries become cross joins, while correlated or vectorized
    subqueries are grouped so the resulting left join is not many-to-many.

    Example:
        >>> import sqlglot
        >>> expression = sqlglot.parse_one("SELECT * FROM x AS x WHERE (SELECT y.a AS a FROM y AS y WHERE x.a = y.a) = 1 ")
        >>> unnest_subqueries(expression).sql()
        'SELECT * FROM x AS x LEFT JOIN (SELECT y.a AS a FROM y AS y WHERE TRUE GROUP BY y.a) AS _u_0 ON x.a = _u_0.a WHERE _u_0.a = 1'

    Args:
        expression (sqlglot.Expression): expression to unnest
    Returns:
        sqlglot.Expression: unnested expression
    """
    next_alias_name = name_sequence("_u_")

    for scope in traverse_scope(expression):
        inner = scope.expression
        outer = inner.parent_select
        if not outer:
            continue

        if scope.external_columns:
            # Correlated subquery: rewrite into a grouped LEFT JOIN.
            decorrelate(inner, outer, scope.external_columns, next_alias_name)
        elif scope.scope_type == ScopeType.SUBQUERY:
            # Uncorrelated subquery: rewrite into a cross join.
            unnest(inner, outer, next_alias_name)

    return expression
from __future__ import annotations
import typing as t
import sqlglot
from sqlglot import Schema, exp
from sqlglot.dialects.dialect import DialectType
from sqlglot.optimizer.annotate_types import annotate_types
from sqlglot.optimizer.canonicalize import canonicalize
from sqlglot.optimizer.eliminate_ctes import eliminate_ctes
from sqlglot.optimizer.eliminate_joins import eliminate_joins
from sqlglot.optimizer.eliminate_subqueries import eliminate_subqueries
from sqlglot.optimizer.merge_subqueries import merge_subqueries
from sqlglot.optimizer.normalize import normalize
from sqlglot.optimizer.optimize_joins import optimize_joins
from sqlglot.optimizer.pushdown_predicates import pushdown_predicates
from sqlglot.optimizer.pushdown_projections import pushdown_projections
from sqlglot.optimizer.qualify import qualify
from sqlglot.optimizer.qualify_columns import quote_identifiers
from sqlglot.optimizer.simplify import simplify
from sqlglot.optimizer.unnest_subqueries import unnest_subqueries
from sqlglot.schema import ensure_schema
# Default sequence of optimizer rules applied by `optimize`, in order.
# Per `optimize`'s docstring, many of the later rules require tables and
# columns to be qualified, so `qualify` must remain in (and lead) the sequence.
RULES = (
    qualify,
    pushdown_projections,
    normalize,
    unnest_subqueries,
    pushdown_predicates,
    optimize_joins,
    eliminate_subqueries,
    merge_subqueries,
    eliminate_joins,
    eliminate_ctes,
    quote_identifiers,
    annotate_types,
    canonicalize,
    simplify,
)
# Accepted ways of specifying a dialect: its name, an instance, a class, or None.
DialectType = t.Union[str, Dialect, t.Type[Dialect], None]
def ensure_schema(schema: Schema | t.Optional[t.Dict], **kwargs: t.Any) -> Schema:
    """Coerce *schema* into a `Schema`, wrapping plain mappings in a `MappingSchema`."""
    return schema if isinstance(schema, Schema) else MappingSchema(schema, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `optimize` function. Write a Python function `def optimize( expression: str | exp.Expression, schema: t.Optional[dict | Schema] = None, db: t.Optional[str | exp.Identifier] = None, catalog: t.Optional[str | exp.Identifier] = None, dialect: DialectType = None, rules: t.Sequence[t.Callable] = RULES, **kwargs, ) -> exp.Expression` to solve the following problem:
Rewrite a sqlglot AST into an optimized form. Args: expression: expression to optimize schema: database schema. This can either be an instance of `sqlglot.optimizer.Schema` or a mapping in one of the following forms: 1. {table: {col: type}} 2. {db: {table: {col: type}}} 3. {catalog: {db: {table: {col: type}}}} If no schema is provided then the default schema defined at `sqlgot.schema` will be used db: specify the default database, as might be set by a `USE DATABASE db` statement catalog: specify the default catalog, as might be set by a `USE CATALOG c` statement dialect: The dialect to parse the sql string. rules: sequence of optimizer rules to use. Many of the rules require tables and columns to be qualified. Do not remove `qualify` from the sequence of rules unless you know what you're doing! **kwargs: If a rule has a keyword argument with a same name in **kwargs, it will be passed in. Returns: The optimized expression.
Here is the function:
def optimize(
    expression: str | exp.Expression,
    schema: t.Optional[dict | Schema] = None,
    db: t.Optional[str | exp.Identifier] = None,
    catalog: t.Optional[str | exp.Identifier] = None,
    dialect: DialectType = None,
    rules: t.Sequence[t.Callable] = RULES,
    **kwargs,
) -> exp.Expression:
    """
    Rewrite a sqlglot AST into an optimized form.

    Args:
        expression: expression to optimize
        schema: database schema.
            This can either be an instance of `sqlglot.optimizer.Schema` or a mapping in one of
            the following forms:
                1. {table: {col: type}}
                2. {db: {table: {col: type}}}
                3. {catalog: {db: {table: {col: type}}}}
            If no schema is provided then the default schema defined at `sqlglot.schema` will be used
        db: specify the default database, as might be set by a `USE DATABASE db` statement
        catalog: specify the default catalog, as might be set by a `USE CATALOG c` statement
        dialect: The dialect to parse the sql string.
        rules: sequence of optimizer rules to use.
            Many of the rules require tables and columns to be qualified.
            Do not remove `qualify` from the sequence of rules unless you know what you're doing!
        **kwargs: If a rule has a keyword argument with a same name in **kwargs, it will be passed in.

    Returns:
        The optimized expression.
    """
    schema = ensure_schema(schema or sqlglot.schema, dialect=dialect)
    candidate_kwargs = {
        "db": db,
        "catalog": catalog,
        "schema": schema,
        "dialect": dialect,
        "isolate_tables": True,  # needed for other optimizations to perform well
        "quote_identifiers": False,
        **kwargs,
    }

    optimized = exp.maybe_parse(expression, dialect=dialect, copy=True)

    for rule in rules:
        # Forward only the keyword arguments that this rule actually declares.
        accepted = set(rule.__code__.co_varnames)
        optimized = rule(
            optimized,
            **{name: value for name, value in candidate_kwargs.items() if name in accepted},
        )

    return optimized
from __future__ import annotations
import typing as t
from sqlglot import exp
from sqlglot.helper import tsort
def reorder_joins(expression):
    """Topologically reorder each SELECT's joins according to their predicate references."""
    for from_ in expression.find_all(exp.From):
        select = from_.parent
        joins_by_name = {join.alias_or_name: join for join in select.args.get("joins", [])}
        dependencies = {name: other_table_names(join) for name, join in joins_by_name.items()}

        ordered = [
            joins_by_name[name]
            for name in tsort(dependencies)
            if name != from_.alias_or_name and name in joins_by_name
        ]
        select.set("joins", ordered)

    return expression
def normalize(expression):
    """
    Remove INNER and OUTER from joins as they are optional, and mark
    condition-less joins as CROSS joins.
    """
    for join_expr in expression.find_all(exp.Join):
        if not any(join_expr.args.get(attr) for attr in JOIN_ATTRS):
            join_expr.set("kind", "CROSS")

        if join_expr.kind == "CROSS":
            # CROSS joins carry no condition.
            join_expr.set("on", None)
        else:
            join_expr.set("kind", None)
            # Every non-CROSS join needs a condition; default to ON TRUE.
            if not (join_expr.args.get("on") or join_expr.args.get("using")):
                join_expr.set("on", exp.true())

    return expression
def other_table_names(join: exp.Join) -> t.Set[str]:
    """Return names of tables (other than the join's own) referenced by its ON clause."""
    condition = join.args.get("on")
    if not condition:
        return set()
    return exp.column_table_names(condition, join.alias_or_name)
The provided code snippet includes necessary dependencies for implementing the `optimize_joins` function. Write a Python function `def optimize_joins(expression)` to solve the following problem:
Removes cross joins if possible and reorder joins based on predicate dependencies. Example: >>> from sqlglot import parse_one >>> optimize_joins(parse_one("SELECT * FROM x CROSS JOIN y JOIN z ON x.a = z.a AND y.a = z.a")).sql() 'SELECT * FROM x JOIN z ON x.a = z.a AND TRUE JOIN y ON y.a = z.a'
Here is the function:
def optimize_joins(expression):
    """
    Removes cross joins if possible and reorder joins based on predicate dependencies.

    Example:
        >>> from sqlglot import parse_one
        >>> optimize_joins(parse_one("SELECT * FROM x CROSS JOIN y JOIN z ON x.a = z.a AND y.a = z.a")).sql()
        'SELECT * FROM x JOIN z ON x.a = z.a AND TRUE JOIN y ON y.a = z.a'
    """
    for select in expression.find_all(exp.Select):
        references = {}
        cross_joins = []

        # Index each join under the names of the *other* tables its ON clause
        # references; joins referencing no other table are cross joins.
        for join in select.args.get("joins", []):
            tables = other_table_names(join)
            if tables:
                for table in tables:
                    references[table] = references.get(table, []) + [join]
            else:
                cross_joins.append((join.alias_or_name, join))

        for name, join in cross_joins:
            # Look for joins whose ON clause references this cross-joined table.
            for dep in references.get(name, []):
                on = dep.args["on"]
                if isinstance(on, exp.Connector):
                    # Only move predicates when the dependent join still references
                    # at least two other tables; otherwise stripping one would
                    # leave it without a usable condition.
                    if len(other_table_names(dep)) < 2:
                        continue
                    operator = type(on)
                    for predicate in on.flatten():
                        if name in exp.column_table_names(predicate):
                            # Move the predicate onto the cross join, leaving TRUE
                            # behind in its original position.
                            predicate.replace(exp.true())
                            predicate = exp._combine(
                                [join.args.get("on"), predicate], operator, copy=False
                            )
                            join.on(predicate, append=False, copy=False)

    expression = reorder_joins(expression)
    expression = normalize(expression)
    return expression
from __future__ import annotations
import datetime
import functools
import itertools
import typing as t
from collections import deque
from decimal import Decimal
import sqlglot
from sqlglot import Dialect, exp
from sqlglot.helper import first, merge_ranges, while_changing
from sqlglot.optimizer.scope import find_all_in_scope, walk_in_scope
The provided code snippet includes necessary dependencies for implementing the `catch` function. Write a Python function `def catch(*exceptions)` to solve the following problem:
Decorator that ignores a simplification function if any of `exceptions` are raised
Here is the function:
def catch(*exceptions):
    """Decorator that ignores a simplification function if any of `exceptions` are raised.

    The wrapped function is called normally; if it raises one of `exceptions`,
    the original (unsimplified) expression is returned instead.

    Args:
        *exceptions: exception classes to swallow.

    Returns:
        A decorator producing a wrapper that preserves the wrapped function's metadata.
    """

    def decorator(func):
        # functools.wraps preserves __name__/__doc__ so decorated simplifiers
        # remain identifiable in tracebacks, logs and introspection.
        @functools.wraps(func)
        def wrapped(expression, *args, **kwargs):
            try:
                return func(expression, *args, **kwargs)
            except exceptions:
                return expression

        return wrapped

    return decorator
from __future__ import annotations
import datetime
import functools
import itertools
import typing as t
from collections import deque
from decimal import Decimal
import sqlglot
from sqlglot import Dialect, exp
from sqlglot.helper import first, merge_ranges, while_changing
from sqlglot.optimizer.scope import find_all_in_scope, walk_in_scope
if t.TYPE_CHECKING:
    from sqlglot.dialects.dialect import DialectType

# Signature shared by the DATE_TRUNC binary-comparison simplifiers: they take
# (left operand, literal date, unit, dialect) and may return None when no
# simplification applies.
DateTruncBinaryTransform = t.Callable[
    [exp.Expression, datetime.date, str, Dialect], t.Optional[exp.Expression]
]
def _datetrunc_range(date: datetime.date, unit: str, dialect: Dialect) -> t.Optional[DateRange]:
    """
    Compute the half-open [min, max) range matched by a DATE_TRUNC equality.

    Example:
        _datetrunc_range(date(2021-01-01), 'year') == (date(2021-01-01), date(2022-01-01))

    Returns:
        tuple of [min, max) or None if a value can never be equal to `date` for `unit`
    """
    lower = date_floor(date, unit, dialect)

    # If `date` is not aligned to `unit`, DATE_TRUNC(x, unit) = date is
    # unsatisfiable (except for NULL values), so there is no range to report.
    if lower != date:
        return None

    return lower, lower + interval(unit)
def _datetrunc_eq_expression(left: exp.Expression, drange: DateRange) -> exp.Expression:
    """Build `left >= lo AND left < hi` for the half-open date range `drange`."""
    lower, upper = drange
    return exp.and_(
        left >= date_literal(lower),
        left < date_literal(upper),
        copy=False,
    )
def _datetrunc_eq(
    left: exp.Expression, date: datetime.date, unit: str, dialect: Dialect
) -> t.Optional[exp.Expression]:
    """Simplify `DATE_TRUNC(left, unit) = date` into a range check, if possible."""
    drange = _datetrunc_range(date, unit, dialect)
    return _datetrunc_eq_expression(left, drange) if drange else None
from __future__ import annotations
import datetime
import functools
import itertools
import typing as t
from collections import deque
from decimal import Decimal
import sqlglot
from sqlglot import Dialect, exp
from sqlglot.helper import first, merge_ranges, while_changing
from sqlglot.optimizer.scope import find_all_in_scope, walk_in_scope
if t.TYPE_CHECKING:
    from sqlglot.dialects.dialect import DialectType

# Signature shared by the DATE_TRUNC binary-comparison simplifiers: they take
# (left operand, literal date, unit, dialect) and may return None when no
# simplification applies.
DateTruncBinaryTransform = t.Callable[
    [exp.Expression, datetime.date, str, Dialect], t.Optional[exp.Expression]
]
def _datetrunc_range(date: datetime.date, unit: str, dialect: Dialect) -> t.Optional[DateRange]:
    """
    Get the date range for a DATE_TRUNC equality comparison:

    Example:
        _datetrunc_range(date(2021-01-01), 'year') == (date(2021-01-01), date(2022-01-01))

    Returns:
        tuple of [min, max) or None if a value can never be equal to `date` for `unit`
    """
    floor = date_floor(date, unit, dialect)

    if date != floor:
        # `date` is not aligned to `unit`, so the equality can never hold.
        # This will always be False, except for NULL values.
        return None

    # Half-open range: [floor, floor + one `unit`).
    return floor, floor + interval(unit)
def date_literal(date):
    """Wrap a Python date/datetime in a CAST to the matching SQL temporal type."""
    if isinstance(date, datetime.datetime):
        target_type = exp.DataType.Type.DATETIME
    else:
        target_type = exp.DataType.Type.DATE
    return exp.cast(exp.Literal.string(date), target_type)
def _datetrunc_neq(
    left: exp.Expression, date: datetime.date, unit: str, dialect: Dialect
) -> t.Optional[exp.Expression]:
    """Simplify `DATE_TRUNC(left, unit) <> date` into a range check, if possible.

    The inequality holds exactly when `left` falls outside the half-open range
    [lo, hi) computed by `_datetrunc_range`, i.e. `left < lo OR left >= hi`.

    Returns:
        The rewritten predicate, or None when the range does not exist.
    """
    drange = _datetrunc_range(date, unit, dialect)
    if not drange:
        return None

    # BUG FIX: this must be OR, not AND — `left < lo AND left >= hi` is
    # unsatisfiable for lo < hi, which would incorrectly simplify the
    # predicate to an always-false condition.
    return exp.or_(
        left < date_literal(drange[0]),
        left >= date_literal(drange[1]),
        copy=False,
    )
from __future__ import annotations
import datetime
import functools
import itertools
import typing as t
from collections import deque
from decimal import Decimal
import sqlglot
from sqlglot import Dialect, exp
from sqlglot.helper import first, merge_ranges, while_changing
from sqlglot.optimizer.scope import find_all_in_scope, walk_in_scope
def interval(unit: str, n: int = 1):
    """Return a `relativedelta` spanning `n` units of `unit` ('year' ... 'second')."""
    from dateutil.relativedelta import relativedelta

    # Keyword arguments for relativedelta, per supported unit.
    arguments = {
        "year": {"years": 1 * n},
        "quarter": {"months": 3 * n},
        "month": {"months": 1 * n},
        "week": {"weeks": 1 * n},
        "day": {"days": 1 * n},
        "hour": {"hours": 1 * n},
        "minute": {"minutes": 1 * n},
        "second": {"seconds": 1 * n},
    }

    if unit not in arguments:
        raise UnsupportedUnit(f"Unsupported unit: {unit}")
    return relativedelta(**arguments[unit])
def date_floor(d: datetime.date, unit: str, dialect: Dialect) -> datetime.date:
    """Truncate `d` down to the start of `unit` ('year', 'quarter', 'month', 'week', 'day')."""
    if unit == "year":
        return d.replace(month=1, day=1)
    if unit == "quarter":
        # Map the month onto the first month of its quarter: 1, 4, 7 or 10.
        quarter_start = 3 * ((d.month - 1) // 3) + 1
        return d.replace(month=quarter_start, day=1)
    if unit == "month":
        return d.replace(day=1)
    if unit == "week":
        # Assuming week starts on Monday (0) and ends on Sunday (6)
        return d - datetime.timedelta(days=d.weekday() - dialect.WEEK_OFFSET)
    if unit == "day":
        return d

    raise UnsupportedUnit(f"Unsupported unit: {unit}")
def date_ceil(d: datetime.date, unit: str, dialect: Dialect) -> datetime.date:
    """Round `d` up to the next `unit` boundary (identity when already aligned)."""
    lower = date_floor(d, unit, dialect)
    return d if lower == d else lower + interval(unit)
from __future__ import annotations
import itertools
import typing as t
from sqlglot import alias, exp
from sqlglot.dialects.dialect import Dialect, DialectType
from sqlglot.errors import OptimizeError
from sqlglot.helper import seq_get, SingleValuedMapping
from sqlglot.optimizer.scope import Scope, build_scope, traverse_scope, walk_in_scope
from sqlglot.optimizer.simplify import simplify_parens
from sqlglot.schema import Schema, ensure_schema
def _unpivot_columns(unpivot: exp.Pivot) -> t.Iterator[exp.Column]:
    """Yield the UNPIVOT's name column (if any) followed by all of its value columns."""
    field = unpivot.args.get("field")

    name_columns: t.List[exp.Column] = []
    if isinstance(field, exp.In) and isinstance(field.this, exp.Column):
        name_columns.append(field.this)

    value_columns = (
        column for expr in unpivot.expressions for column in expr.find_all(exp.Column)
    )
    return itertools.chain(name_columns, value_columns)
class OptimizeError(SqlglotError):
    """Raised when an optimizer rule cannot proceed (e.g. unresolved or ambiguous columns)."""

    pass
def traverse_scope(expression: exp.Expression) -> t.List[Scope]:
    """
    Traverse an expression by its "scopes".

    "Scope" represents the current context of a Select statement.

    This is helpful for optimizing queries, where we need more information than
    the expression tree itself. For example, we might care about the source
    names within a subquery. Returns a list because a generator could result in
    incomplete properties which is confusing.

    Examples:
        >>> import sqlglot
        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT a FROM x) AS y")
        >>> scopes = traverse_scope(expression)
        >>> scopes[0].expression.sql(), list(scopes[0].sources)
        ('SELECT a FROM x', ['x'])
        >>> scopes[1].expression.sql(), list(scopes[1].sources)
        ('SELECT a FROM (SELECT a FROM x) AS y', ['y'])

    Args:
        expression: Expression to traverse

    Returns:
        A list of the created scope instances
    """
    if isinstance(expression, exp.DDL) and isinstance(expression.expression, exp.Query):
        # We ignore the DDL expression and build a scope for its query instead
        ddl_with = expression.args.get("with")
        expression = expression.expression

        # If the DDL has CTEs attached, we need to add them to the query, or
        # prepend them if the query itself already has CTEs attached to it
        if ddl_with:
            ddl_with.pop()  # detach the WITH clause from its (discarded) DDL parent
            query_ctes = expression.ctes
            if not query_ctes:
                expression.set("with", ddl_with)
            else:
                # Prepend the DDL's CTEs before the query's own CTEs
                expression.args["with"].set("recursive", ddl_with.recursive)
                expression.args["with"].set("expressions", [*ddl_with.expressions, *query_ctes])

    if isinstance(expression, exp.Query):
        return list(_traverse_scope(Scope(expression)))

    # Non-query expressions produce no scopes
    return []
# Generic expression type variable, so validators can return their input's exact type.
E = t.TypeVar("E", bound="sqlglot.exp.Expression")
The provided code snippet includes necessary dependencies for implementing the `validate_qualify_columns` function. Write a Python function `def validate_qualify_columns(expression: E) -> E` to solve the following problem:
Raise an `OptimizeError` if any columns aren't qualified
Here is the function:
def validate_qualify_columns(expression: E) -> E:
    """Raise an `OptimizeError` if any columns aren't qualified."""
    unresolved = []

    for scope in traverse_scope(expression):
        if not isinstance(scope.expression, exp.Select):
            continue

        candidates = scope.unqualified_columns

        if scope.external_columns and not scope.is_correlated_subquery and not scope.pivots:
            column = scope.external_columns[0]
            table_hint = f" for table: '{column.table}'" if column.table else ""
            raise OptimizeError(f"Column '{column}' could not be resolved{table_hint}")

        if candidates and scope.pivots and scope.pivots[0].unpivot:
            # New columns produced by the UNPIVOT can't be qualified, but there may be
            # columns under the UNPIVOT's IN clause that can and should be qualified; keep
            # only candidates that are not produced by the UNPIVOT itself.
            produced = set(_unpivot_columns(scope.pivots[0]))
            candidates = [column for column in candidates if column not in produced]

        unresolved.extend(candidates)

    if unresolved:
        raise OptimizeError(f"Ambiguous columns: {unresolved}")

    return expression
from __future__ import annotations
import itertools
import typing as t
from sqlglot import alias, exp
from sqlglot.dialects.dialect import Dialect, DialectType
from sqlglot.errors import OptimizeError
from sqlglot.helper import seq_get, SingleValuedMapping
from sqlglot.optimizer.scope import Scope, build_scope, traverse_scope, walk_in_scope
from sqlglot.optimizer.simplify import simplify_parens
from sqlglot.schema import Schema, ensure_schema
class Dialect(metaclass=_Dialect):
INDEX_OFFSET = 0
"""The base index offset for arrays."""
WEEK_OFFSET = 0
"""First day of the week in DATE_TRUNC(week). Defaults to 0 (Monday). -1 would be Sunday."""
UNNEST_COLUMN_ONLY = False
"""Whether `UNNEST` table aliases are treated as column aliases."""
ALIAS_POST_TABLESAMPLE = False
"""Whether the table alias comes after tablesample."""
TABLESAMPLE_SIZE_IS_PERCENT = False
"""Whether a size in the table sample clause represents percentage."""
NORMALIZATION_STRATEGY = NormalizationStrategy.LOWERCASE
"""Specifies the strategy according to which identifiers should be normalized."""
IDENTIFIERS_CAN_START_WITH_DIGIT = False
"""Whether an unquoted identifier can start with a digit."""
DPIPE_IS_STRING_CONCAT = True
"""Whether the DPIPE token (`||`) is a string concatenation operator."""
STRICT_STRING_CONCAT = False
"""Whether `CONCAT`'s arguments must be strings."""
SUPPORTS_USER_DEFINED_TYPES = True
"""Whether user-defined data types are supported."""
SUPPORTS_SEMI_ANTI_JOIN = True
"""Whether `SEMI` or `ANTI` joins are supported."""
NORMALIZE_FUNCTIONS: bool | str = "upper"
"""
Determines how function names are going to be normalized.
Possible values:
"upper" or True: Convert names to uppercase.
"lower": Convert names to lowercase.
False: Disables function name normalization.
"""
LOG_BASE_FIRST: t.Optional[bool] = True
"""
Whether the base comes first in the `LOG` function.
Possible values: `True`, `False`, `None` (two arguments are not supported by `LOG`)
"""
NULL_ORDERING = "nulls_are_small"
"""
Default `NULL` ordering method to use if not explicitly set.
Possible values: `"nulls_are_small"`, `"nulls_are_large"`, `"nulls_are_last"`
"""
TYPED_DIVISION = False
"""
Whether the behavior of `a / b` depends on the types of `a` and `b`.
False means `a / b` is always float division.
True means `a / b` is integer division if both `a` and `b` are integers.
"""
SAFE_DIVISION = False
"""Whether division by zero throws an error (`False`) or returns NULL (`True`)."""
CONCAT_COALESCE = False
"""A `NULL` arg in `CONCAT` yields `NULL` by default, but in some dialects it yields an empty string."""
DATE_FORMAT = "'%Y-%m-%d'"
DATEINT_FORMAT = "'%Y%m%d'"
TIME_FORMAT = "'%Y-%m-%d %H:%M:%S'"
TIME_MAPPING: t.Dict[str, str] = {}
"""Associates this dialect's time formats with their equivalent Python `strftime` formats."""
# https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_model_rules_date_time
# https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Data-Type-Conversions/Character-to-DATE-Conversion/Forcing-a-FORMAT-on-CAST-for-Converting-Character-to-DATE
FORMAT_MAPPING: t.Dict[str, str] = {}
"""
Helper which is used for parsing the special syntax `CAST(x AS DATE FORMAT 'yyyy')`.
If empty, the corresponding trie will be constructed off of `TIME_MAPPING`.
"""
ESCAPE_SEQUENCES: t.Dict[str, str] = {}
"""Mapping of an unescaped escape sequence to the corresponding character."""
PSEUDOCOLUMNS: t.Set[str] = set()
"""
Columns that are auto-generated by the engine corresponding to this dialect.
For example, such columns may be excluded from `SELECT *` queries.
"""
PREFER_CTE_ALIAS_COLUMN = False
"""
Some dialects, such as Snowflake, allow you to reference a CTE column alias in the
HAVING clause of the CTE. This flag will cause the CTE alias columns to override
any projection aliases in the subquery.
For example,
WITH y(c) AS (
SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0
) SELECT c FROM y;
will be rewritten as
WITH y(c) AS (
SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
"""
# --- Autofilled ---
tokenizer_class = Tokenizer
parser_class = Parser
generator_class = Generator
# A trie of the time_mapping keys
TIME_TRIE: t.Dict = {}
FORMAT_TRIE: t.Dict = {}
INVERSE_TIME_MAPPING: t.Dict[str, str] = {}
INVERSE_TIME_TRIE: t.Dict = {}
INVERSE_ESCAPE_SEQUENCES: t.Dict[str, str] = {}
# Delimiters for string literals and identifiers
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
# Delimiters for bit, hex, byte and unicode literals
BIT_START: t.Optional[str] = None
BIT_END: t.Optional[str] = None
HEX_START: t.Optional[str] = None
HEX_END: t.Optional[str] = None
BYTE_START: t.Optional[str] = None
BYTE_END: t.Optional[str] = None
UNICODE_START: t.Optional[str] = None
UNICODE_END: t.Optional[str] = None
def get_or_raise(cls, dialect: DialectType) -> Dialect:
"""
Look up a dialect in the global dialect registry and return it if it exists.
Args:
dialect: The target dialect. If this is a string, it can be optionally followed by
additional key-value pairs that are separated by commas and are used to specify
dialect settings, such as whether the dialect's identifiers are case-sensitive.
Example:
>>> dialect = dialect_class = get_or_raise("duckdb")
>>> dialect = get_or_raise("mysql, normalization_strategy = case_sensitive")
Returns:
The corresponding Dialect instance.
"""
if not dialect:
return cls()
if isinstance(dialect, _Dialect):
return dialect()
if isinstance(dialect, Dialect):
return dialect
if isinstance(dialect, str):
try:
dialect_name, *kv_pairs = dialect.split(",")
kwargs = {k.strip(): v.strip() for k, v in (kv.split("=") for kv in kv_pairs)}
except ValueError:
raise ValueError(
f"Invalid dialect format: '{dialect}'. "
"Please use the correct format: 'dialect [, k1 = v2 [, ...]]'."
)
result = cls.get(dialect_name.strip())
if not result:
from difflib import get_close_matches
similar = seq_get(get_close_matches(dialect_name, cls.classes, n=1), 0) or ""
if similar:
similar = f" Did you mean {similar}?"
raise ValueError(f"Unknown dialect '{dialect_name}'.{similar}")
return result(**kwargs)
raise ValueError(f"Invalid dialect type for '{dialect}': '{type(dialect)}'.")
def format_time(
cls, expression: t.Optional[str | exp.Expression]
) -> t.Optional[exp.Expression]:
"""Converts a time format in this dialect to its equivalent Python `strftime` format."""
if isinstance(expression, str):
return exp.Literal.string(
# the time formats are quoted
format_time(expression[1:-1], cls.TIME_MAPPING, cls.TIME_TRIE)
)
if expression and expression.is_string:
return exp.Literal.string(format_time(expression.this, cls.TIME_MAPPING, cls.TIME_TRIE))
return expression
def __init__(self, **kwargs) -> None:
normalization_strategy = kwargs.get("normalization_strategy")
if normalization_strategy is None:
self.normalization_strategy = self.NORMALIZATION_STRATEGY
else:
self.normalization_strategy = NormalizationStrategy(normalization_strategy.upper())
def __eq__(self, other: t.Any) -> bool:
# Does not currently take dialect state into account
return type(self) == other
def __hash__(self) -> int:
# Does not currently take dialect state into account
return hash(type(self))
def normalize_identifier(self, expression: E) -> E:
    """
    Transforms an identifier in a way that resembles how it'd be resolved by this dialect.

    For example, an identifier like `FoO` would be resolved as `foo` in Postgres, because it
    lowercases all unquoted identifiers. On the other hand, Snowflake uppercases them, so
    it would resolve it as `FOO`. If it was quoted, it'd need to be treated as case-sensitive,
    and so any normalization would be prohibited in order to avoid "breaking" the identifier.

    There are also dialects like Spark, which are case-insensitive even when quotes are
    present, and dialects like MySQL, whose resolution rules match those employed by the
    underlying operating system, for example they may always be case-sensitive in Linux.

    Finally, the normalization behavior of some engines can even be controlled through flags,
    like in Redshift's case, where users can explicitly set enable_case_sensitive_identifier.

    SQLGlot aims to understand and handle all of these different behaviors gracefully, so
    that it can analyze queries in the optimizer and successfully capture their semantics.
    """
    strategy = self.normalization_strategy

    should_normalize = (
        isinstance(expression, exp.Identifier)
        and strategy is not NormalizationStrategy.CASE_SENSITIVE
        and (
            # Quoted identifiers are only normalized by fully case-insensitive dialects.
            not expression.quoted
            or strategy is NormalizationStrategy.CASE_INSENSITIVE
        )
    )

    if should_normalize:
        name = expression.this
        if strategy is NormalizationStrategy.UPPERCASE:
            expression.set("this", name.upper())
        else:
            expression.set("this", name.lower())

    return expression
def case_sensitive(self, text: str) -> bool:
    """Checks if text contains any case sensitive characters, based on the dialect's rules."""
    if self.normalization_strategy is NormalizationStrategy.CASE_INSENSITIVE:
        return False

    # A character is "unsafe" when it would be changed by normalization:
    # lowercase chars under an UPPERCASE strategy, uppercase chars otherwise.
    if self.normalization_strategy is NormalizationStrategy.UPPERCASE:
        unsafe = str.islower
    else:
        unsafe = str.isupper

    return any(map(unsafe, text))
def can_identify(self, text: str, identify: str | bool = "safe") -> bool:
    """Checks if text can be identified given an identify option.

    Args:
        text: The text to check.
        identify:
            `"always"` or `True`: Always returns `True`.
            `"safe"`: Only returns `True` if the identifier is case-insensitive.

    Returns:
        Whether the given text can be identified.
    """
    if identify is True or identify == "always":
        return True

    # Any other value than "safe" (e.g. False) means "never identify".
    return identify == "safe" and not self.case_sensitive(text)
def quote_identifier(self, expression: E, identify: bool = True) -> E:
    """
    Adds quotes to a given identifier.

    Args:
        expression: The expression of interest. If it's not an `Identifier`, this method is a no-op.
        identify: If set to `False`, the quotes will only be added if the identifier is deemed
            "unsafe", with respect to its characters and this dialect's normalization strategy.
    """
    # Function names are never quoted, even if they parse as identifiers.
    if not isinstance(expression, exp.Identifier) or isinstance(expression.parent, exp.Func):
        return expression

    name = expression.this
    needs_quotes = (
        identify
        or self.case_sensitive(name)
        or not exp.SAFE_IDENTIFIER_RE.match(name)
    )
    expression.set("quoted", needs_quotes)

    return expression
def to_json_path(self, path: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
    """Parse a literal JSON path into a structured expression; non-literals pass through.

    On a path syntax error, a warning is logged and the original literal is returned.
    """
    if not isinstance(path, exp.Literal):
        return path

    path_text = path.name
    if path.is_number:
        # Bare numeric paths are treated as array subscripts.
        path_text = f"[{path_text}]"

    try:
        return parse_json_path(path_text)
    except ParseError as e:
        logger.warning(f"Invalid JSON path syntax. {str(e)}")

    return path
def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]:
    """Tokenize and parse `sql`, returning one syntax tree per statement."""
    tokens = self.tokenize(sql)
    return self.parser(**opts).parse(tokens, sql)
def parse_into(
    self, expression_type: exp.IntoType, sql: str, **opts
) -> t.List[t.Optional[exp.Expression]]:
    """Tokenize and parse `sql` specifically into `expression_type` trees."""
    tokens = self.tokenize(sql)
    return self.parser(**opts).parse_into(expression_type, tokens, sql)
def generate(self, expression: exp.Expression, copy: bool = True, **opts) -> str:
    """Render `expression` back into SQL text for this dialect."""
    gen = self.generator(**opts)
    return gen.generate(expression, copy=copy)
def transpile(self, sql: str, **opts) -> t.List[str]:
    """Parse and re-generate `sql`, returning one SQL string per statement.

    Statements that fail to parse (None entries) become empty strings.
    """
    results = []
    for expression in self.parse(sql):
        results.append(self.generate(expression, copy=False, **opts) if expression else "")
    return results
def tokenize(self, sql: str) -> t.List[Token]:
    # Delegates to the dialect's cached tokenizer. Note: `self.tokenizer` is
    # accessed as an attribute, i.e. it is expected to be a property.
    return self.tokenizer.tokenize(sql)
@property
def tokenizer(self) -> Tokenizer:
    """Lazily constructed, cached Tokenizer instance for this dialect.

    NOTE(review): restored the `@property` decorator — `tokenize` accesses this
    as an attribute (`self.tokenizer.tokenize(sql)`), which only works if this
    is a property; the decorator appears to have been lost.
    """
    if not hasattr(self, "_tokenizer"):
        # Build once on first access and memoize on the instance.
        self._tokenizer = self.tokenizer_class(dialect=self)
    return self._tokenizer
def parser(self, **opts) -> Parser:
    # Instantiate this dialect's parser class, forwarding any parser options.
    return self.parser_class(dialect=self, **opts)
def generator(self, **opts) -> Generator:
    # Instantiate this dialect's generator class, forwarding any generator options.
    return self.generator_class(dialect=self, **opts)
# Anything coercible into a Dialect: a name string (optionally with settings),
# an instance, a Dialect subclass, or None (the default dialect).
DialectType = t.Union[str, Dialect, t.Type[Dialect], None]

# Generic type variable used to preserve the concrete expression type in helpers.
E = t.TypeVar("E", bound="sqlglot.exp.Expression")
The provided code snippet includes necessary dependencies for implementing the `quote_identifiers` function. Write a Python function `def quote_identifiers(expression: E, dialect: DialectType = None, identify: bool = True) -> E` to solve the following problem:
Makes sure all identifiers that need to be quoted are quoted.
Here is the function:
def quote_identifiers(expression: E, dialect: DialectType = None, identify: bool = True) -> E:
    """Makes sure all identifiers that need to be quoted are quoted."""
    quote = Dialect.get_or_raise(dialect).quote_identifier
    # Transform in place (copy=False), applying the dialect's quoting rules per node.
    return expression.transform(quote, identify=identify, copy=False)  # type: ignore
152,927 | from __future__ import annotations
import itertools
import typing as t
from sqlglot import alias, exp
from sqlglot.dialects.dialect import Dialect, DialectType
from sqlglot.errors import OptimizeError
from sqlglot.helper import seq_get, SingleValuedMapping
from sqlglot.optimizer.scope import Scope, build_scope, traverse_scope, walk_in_scope
from sqlglot.optimizer.simplify import simplify_parens
from sqlglot.schema import Schema, ensure_schema
The provided code snippet includes necessary dependencies for implementing the `pushdown_cte_alias_columns` function. Write a Python function `def pushdown_cte_alias_columns(expression: exp.Expression) -> exp.Expression` to solve the following problem:
Pushes down the CTE alias columns into the projection. This step is useful in Snowflake where the CTE alias columns can be referenced in the HAVING. Example: >>> import sqlglot >>> expression = sqlglot.parse_one("WITH y (c) AS (SELECT SUM(a) FROM ( SELECT 1 a ) AS x HAVING c > 0) SELECT c FROM y") >>> pushdown_cte_alias_columns(expression).sql() 'WITH y(c) AS (SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0) SELECT c FROM y' Args: expression: Expression to pushdown. Returns: The expression with the CTE aliases pushed down into the projection.
Here is the function:
def pushdown_cte_alias_columns(expression: exp.Expression) -> exp.Expression:
    """
    Pushes down the CTE alias columns into the projection. This step is useful in
    Snowflake where the CTE alias columns can be referenced in the HAVING.

    Example:
        >>> import sqlglot
        >>> expression = sqlglot.parse_one("WITH y (c) AS (SELECT SUM(a) FROM ( SELECT 1 a ) AS x HAVING c > 0) SELECT c FROM y")
        >>> pushdown_cte_alias_columns(expression).sql()
        'WITH y(c) AS (SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0) SELECT c FROM y'

    Args:
        expression: Expression to pushdown.

    Returns:
        The expression with the CTE aliases pushed down into the projection.
    """
    for cte in expression.find_all(exp.CTE):
        if not cte.alias_column_names:
            continue

        projections = []
        for column_name, projection in zip(cte.alias_column_names, cte.this.expressions):
            # Re-alias existing aliases; wrap bare projections in a new alias.
            if isinstance(projection, exp.Alias):
                projection.set("alias", column_name)
            else:
                projection = alias(projection, alias=column_name)
            projections.append(projection)

        cte.this.set("expressions", projections)

    return expression
152,928 | from collections import defaultdict
from sqlglot import alias, exp
from sqlglot.optimizer.qualify_columns import Resolver
from sqlglot.optimizer.scope import Scope, traverse_scope
from sqlglot.schema import ensure_schema
# Sentinel meaning "all output columns are referenced" (e.g. via SELECT *).
SELECT_ALL = object()
def _remove_unused_selections(scope, parent_selections, schema, alias_count):
    """
    Trim `scope`'s SELECT list down to the projections its parents actually reference.

    Args:
        scope: Scope whose projections are being pruned.
        parent_selections: Column names referenced by outer queries, or {SELECT_ALL}.
        schema: Schema used to resolve source tables when expanding a dropped `*`.
        alias_count: Number of positional column aliases declared by the outer query;
            that many leading projections must be kept regardless of references.
    """
    order = scope.expression.args.get("order")

    if order:
        # Assume columns without a qualified table are references to output columns
        order_refs = {c.name for c in order.find_all(exp.Column) if not c.table}
    else:
        order_refs = set()

    new_selections = []
    removed = False
    star = False
    is_agg = False

    select_all = SELECT_ALL in parent_selections

    for selection in scope.expression.selects:
        name = selection.alias_or_name

        # Keep the projection if anything references it, or while positional
        # aliases from the outer query still need to be satisfied.
        if select_all or name in parent_selections or name in order_refs or alias_count > 0:
            new_selections.append(selection)
            alias_count -= 1
        else:
            if selection.is_star:
                star = True
            removed = True

        # Track whether any projection aggregates, so an empty result can fall
        # back to an aggregate-compatible default selection.
        if not is_agg and selection.find(exp.AggFunc):
            is_agg = True

    if star:
        # A `*` was dropped: re-add the referenced columns explicitly, resolving
        # their source tables through the schema.
        resolver = Resolver(scope, schema)
        names = {s.alias_or_name for s in new_selections}

        for name in sorted(parent_selections):
            if name not in names:
                new_selections.append(
                    alias(exp.column(name, table=resolver.get_table(name)), name, copy=False)
                )

    # If there are no remaining selections, just select a single constant
    if not new_selections:
        new_selections.append(default_selection(is_agg))

    scope.expression.select(*new_selections, append=False, copy=False)

    if removed:
        scope.clear_cache()
class Scope:
    """
    Selection scope.

    NOTE(review): restored the `@property` decorators on the getter methods below —
    this module consistently accesses them as attributes (e.g. `self.tables`,
    `self.references`, `scope.selected_sources.items()`), which only works if they
    are properties. Also fixed `walk` to forward its `prune` argument.

    Attributes:
        expression (exp.Select|exp.Union): Root expression of this scope
        sources (dict[str, exp.Table|Scope]): Mapping of source name to either
            a Table expression or another Scope instance. For example:
                SELECT * FROM x                     {"x": Table(this="x")}
                SELECT * FROM x AS y                {"y": Table(this="x")}
                SELECT * FROM (SELECT ...) AS y     {"y": Scope(...)}
        lateral_sources (dict[str, exp.Table|Scope]): Sources from laterals
            For example:
                SELECT c FROM x LATERAL VIEW EXPLODE (a) AS c;
            The LATERAL VIEW EXPLODE gets x as a source.
        cte_sources (dict[str, Scope]): Sources from CTES
        outer_columns (list[str]): If this is a derived table or CTE, and the outer query
            defines a column list for the alias of this scope, this is that list of columns.
            For example:
                SELECT * FROM (SELECT ...) AS y(col1, col2)
            The inner query would have `["col1", "col2"]` for its `outer_columns`
        parent (Scope): Parent scope
        scope_type (ScopeType): Type of this scope, relative to it's parent
        subquery_scopes (list[Scope]): List of all child scopes for subqueries
        cte_scopes (list[Scope]): List of all child scopes for CTEs
        derived_table_scopes (list[Scope]): List of all child scopes for derived_tables
        udtf_scopes (list[Scope]): List of all child scopes for user defined tabular functions
        table_scopes (list[Scope]): derived_table_scopes + udtf_scopes, in the order that they're defined
        union_scopes (list[Scope, Scope]): If this Scope is for a Union expression, this will be
            a list of the left and right child scopes.
    """

    def __init__(
        self,
        expression,
        sources=None,
        outer_columns=None,
        parent=None,
        scope_type=ScopeType.ROOT,
        lateral_sources=None,
        cte_sources=None,
    ):
        self.expression = expression
        self.sources = sources or {}
        self.lateral_sources = lateral_sources or {}
        self.cte_sources = cte_sources or {}
        # Laterals and CTEs are also reachable through the combined sources map.
        self.sources.update(self.lateral_sources)
        self.sources.update(self.cte_sources)
        self.outer_columns = outer_columns or []
        self.parent = parent
        self.scope_type = scope_type
        self.subquery_scopes = []
        self.derived_table_scopes = []
        self.table_scopes = []
        self.cte_scopes = []
        self.union_scopes = []
        self.udtf_scopes = []
        self.clear_cache()

    def clear_cache(self):
        """Reset all lazily-computed caches; call after mutating the expression."""
        self._collected = False
        self._raw_columns = None
        self._derived_tables = None
        self._udtfs = None
        self._tables = None
        self._ctes = None
        self._subqueries = None
        self._selected_sources = None
        self._columns = None
        self._external_columns = None
        self._join_hints = None
        self._pivots = None
        self._references = None

    def branch(
        self, expression, scope_type, sources=None, cte_sources=None, lateral_sources=None, **kwargs
    ):
        """Branch from the current scope to a new, inner scope"""
        return Scope(
            expression=expression.unnest(),
            sources=sources.copy() if sources else None,
            parent=self,
            scope_type=scope_type,
            # Child scopes inherit the parent's CTEs plus any new ones.
            cte_sources={**self.cte_sources, **(cte_sources or {})},
            lateral_sources=lateral_sources.copy() if lateral_sources else None,
            **kwargs,
        )

    def _collect(self):
        # Single pass over the expression tree, bucketing nodes of interest.
        self._tables = []
        self._ctes = []
        self._subqueries = []
        self._derived_tables = []
        self._udtfs = []
        self._raw_columns = []
        self._join_hints = []

        for node in self.walk(bfs=False):
            if node is self.expression:
                continue

            if isinstance(node, exp.Column) and not isinstance(node.this, exp.Star):
                self._raw_columns.append(node)
            elif isinstance(node, exp.Table) and not isinstance(node.parent, exp.JoinHint):
                self._tables.append(node)
            elif isinstance(node, exp.JoinHint):
                self._join_hints.append(node)
            elif isinstance(node, exp.UDTF):
                self._udtfs.append(node)
            elif isinstance(node, exp.CTE):
                self._ctes.append(node)
            elif _is_derived_table(node) and isinstance(
                node.parent, (exp.From, exp.Join, exp.Subquery)
            ):
                self._derived_tables.append(node)
            elif isinstance(node, exp.UNWRAPPED_QUERIES):
                self._subqueries.append(node)

        self._collected = True

    def _ensure_collected(self):
        if not self._collected:
            self._collect()

    def walk(self, bfs=True, prune=None):
        # BUGFIX: previously passed `prune=None`, silently discarding the
        # caller-supplied prune callback.
        return walk_in_scope(self.expression, bfs=bfs, prune=prune)

    def find(self, *expression_types, bfs=True):
        """Find the first node of any of `expression_types` within this scope."""
        return find_in_scope(self.expression, expression_types, bfs=bfs)

    def find_all(self, *expression_types, bfs=True):
        """Yield all nodes of any of `expression_types` within this scope."""
        return find_all_in_scope(self.expression, expression_types, bfs=bfs)

    def replace(self, old, new):
        """
        Replace `old` with `new`.

        This can be used instead of `exp.Expression.replace` to ensure the `Scope` is kept up-to-date.

        Args:
            old (exp.Expression): old node
            new (exp.Expression): new node
        """
        old.replace(new)
        self.clear_cache()

    @property
    def tables(self):
        """
        List of tables in this scope.

        Returns:
            list[exp.Table]: tables
        """
        self._ensure_collected()
        return self._tables

    @property
    def ctes(self):
        """
        List of CTEs in this scope.

        Returns:
            list[exp.CTE]: ctes
        """
        self._ensure_collected()
        return self._ctes

    @property
    def derived_tables(self):
        """
        List of derived tables in this scope.

        For example:
            SELECT * FROM (SELECT ...) <- that's a derived table

        Returns:
            list[exp.Subquery]: derived tables
        """
        self._ensure_collected()
        return self._derived_tables

    @property
    def udtfs(self):
        """
        List of "User Defined Tabular Functions" in this scope.

        Returns:
            list[exp.UDTF]: UDTFs
        """
        self._ensure_collected()
        return self._udtfs

    @property
    def subqueries(self):
        """
        List of subqueries in this scope.

        For example:
            SELECT * FROM x WHERE a IN (SELECT ...) <- that's a subquery

        Returns:
            list[exp.Select | exp.Union]: subqueries
        """
        self._ensure_collected()
        return self._subqueries

    @property
    def columns(self):
        """
        List of columns in this scope.

        Returns:
            list[exp.Column]: Column instances in this scope, plus any
                Columns that reference this scope from correlated subqueries.
        """
        if self._columns is None:
            self._ensure_collected()
            columns = self._raw_columns

            external_columns = [
                column
                for scope in itertools.chain(self.subquery_scopes, self.udtf_scopes)
                for column in scope.external_columns
            ]

            named_selects = set(self.expression.named_selects)

            self._columns = []
            for column in columns + external_columns:
                ancestor = column.find_ancestor(
                    exp.Select, exp.Qualify, exp.Order, exp.Having, exp.Hint, exp.Table, exp.Star
                )
                if (
                    not ancestor
                    or column.table
                    or isinstance(ancestor, exp.Select)
                    or (isinstance(ancestor, exp.Table) and not isinstance(ancestor.this, exp.Func))
                    or (
                        isinstance(ancestor, exp.Order)
                        and (
                            isinstance(ancestor.parent, exp.Window)
                            or column.name not in named_selects
                        )
                    )
                ):
                    self._columns.append(column)

        return self._columns

    @property
    def selected_sources(self):
        """
        Mapping of nodes and sources that are actually selected from in this scope.

        That is, all tables in a schema are selectable at any point. But a
        table only becomes a selected source if it's included in a FROM or JOIN clause.

        Returns:
            dict[str, (exp.Table|exp.Select, exp.Table|Scope)]: selected sources and nodes
        """
        if self._selected_sources is None:
            result = {}

            for name, node in self.references:
                if name in result:
                    raise OptimizeError(f"Alias already used: {name}")
                if name in self.sources:
                    result[name] = (node, self.sources[name])

            self._selected_sources = result
        return self._selected_sources

    @property
    def references(self) -> t.List[t.Tuple[str, exp.Expression]]:
        """All (alias, node) pairs this scope selects from: tables, derived tables, UDTFs."""
        if self._references is None:
            self._references = []

            for table in self.tables:
                self._references.append((table.alias_or_name, table))
            for expression in itertools.chain(self.derived_tables, self.udtfs):
                self._references.append(
                    (
                        expression.alias,
                        expression if expression.args.get("pivots") else expression.unnest(),
                    )
                )

        return self._references

    @property
    def external_columns(self):
        """
        Columns that appear to reference sources in outer scopes.

        Returns:
            list[exp.Column]: Column instances that don't reference
                sources in the current scope.
        """
        if self._external_columns is None:
            if isinstance(self.expression, exp.Union):
                left, right = self.union_scopes
                self._external_columns = left.external_columns + right.external_columns
            else:
                self._external_columns = [
                    c for c in self.columns if c.table not in self.selected_sources
                ]

        return self._external_columns

    @property
    def unqualified_columns(self):
        """
        Unqualified columns in the current scope.

        Returns:
             list[exp.Column]: Unqualified columns
        """
        return [c for c in self.columns if not c.table]

    @property
    def join_hints(self):
        """
        Hints that exist in the scope that reference tables

        Returns:
            list[exp.JoinHint]: Join hints that are referenced within the scope
        """
        if self._join_hints is None:
            return []
        return self._join_hints

    @property
    def pivots(self):
        if not self._pivots:
            self._pivots = [
                pivot for _, node in self.references for pivot in node.args.get("pivots") or []
            ]

        return self._pivots

    def source_columns(self, source_name):
        """
        Get all columns in the current scope for a particular source.

        Args:
            source_name (str): Name of the source
        Returns:
            list[exp.Column]: Column instances that reference `source_name`
        """
        return [column for column in self.columns if column.table == source_name]

    @property
    def is_subquery(self):
        """Determine if this scope is a subquery"""
        return self.scope_type == ScopeType.SUBQUERY

    @property
    def is_derived_table(self):
        """Determine if this scope is a derived table"""
        return self.scope_type == ScopeType.DERIVED_TABLE

    @property
    def is_union(self):
        """Determine if this scope is a union"""
        return self.scope_type == ScopeType.UNION

    @property
    def is_cte(self):
        """Determine if this scope is a common table expression"""
        return self.scope_type == ScopeType.CTE

    @property
    def is_root(self):
        """Determine if this is the root scope"""
        return self.scope_type == ScopeType.ROOT

    @property
    def is_udtf(self):
        """Determine if this scope is a UDTF (User Defined Table Function)"""
        return self.scope_type == ScopeType.UDTF

    @property
    def is_correlated_subquery(self):
        """Determine if this scope is a correlated subquery"""
        return bool(
            (self.is_subquery or (self.parent and isinstance(self.parent.expression, exp.Lateral)))
            and self.external_columns
        )

    def rename_source(self, old_name, new_name):
        """Rename a source in this scope"""
        columns = self.sources.pop(old_name or "", [])
        self.sources[new_name] = columns

    def add_source(self, name, source):
        """Add a source to this scope"""
        self.sources[name] = source
        self.clear_cache()

    def remove_source(self, name):
        """Remove a source from this scope"""
        self.sources.pop(name, None)
        self.clear_cache()

    def __repr__(self):
        return f"Scope<{self.expression.sql()}>"

    def traverse(self):
        """
        Traverse the scope tree from this node.

        Yields:
            Scope: scope instances in depth-first-search post-order
        """
        stack = [self]
        result = []
        while stack:
            scope = stack.pop()
            result.append(scope)
            stack.extend(
                itertools.chain(
                    scope.cte_scopes,
                    scope.union_scopes,
                    scope.table_scopes,
                    scope.subquery_scopes,
                )
            )
        yield from reversed(result)

    def ref_count(self):
        """
        Count the number of times each scope in this tree is referenced.

        Returns:
            dict[int, int]: Mapping of Scope instance ID to reference count
        """
        scope_ref_count = defaultdict(int)

        for scope in self.traverse():
            for _, source in scope.selected_sources.values():
                scope_ref_count[id(source)] += 1

        return scope_ref_count
def traverse_scope(expression: exp.Expression) -> t.List[Scope]:
    """
    Traverse an expression by its "scopes".

    "Scope" represents the current context of a Select statement.

    This is helpful for optimizing queries, where we need more information than
    the expression tree itself. For example, we might care about the source
    names within a subquery. Returns a list because a generator could result in
    incomplete properties which is confusing.

    Examples:
        >>> import sqlglot
        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT a FROM x) AS y")
        >>> scopes = traverse_scope(expression)
        >>> scopes[0].expression.sql(), list(scopes[0].sources)
        ('SELECT a FROM x', ['x'])
        >>> scopes[1].expression.sql(), list(scopes[1].sources)
        ('SELECT a FROM (SELECT a FROM x) AS y', ['y'])

    Args:
        expression: Expression to traverse

    Returns:
        A list of the created scope instances
    """
    if isinstance(expression, exp.DDL) and isinstance(expression.expression, exp.Query):
        # Scope the DDL's inner query rather than the DDL node itself.
        attached_ctes = expression.args.get("with")
        expression = expression.expression

        # If the DDL carried a WITH clause, move those CTEs onto the query,
        # prepending them when the query already has CTEs of its own.
        if attached_ctes:
            attached_ctes.pop()
            existing_ctes = expression.ctes

            if existing_ctes:
                query_with = expression.args["with"]
                query_with.set("recursive", attached_ctes.recursive)
                query_with.set("expressions", [*attached_ctes.expressions, *existing_ctes])
            else:
                expression.set("with", attached_ctes)

    if isinstance(expression, exp.Query):
        return list(_traverse_scope(Scope(expression)))

    return []
def ensure_schema(schema: Schema | t.Optional[t.Dict], **kwargs: t.Any) -> Schema:
    """Return `schema` unchanged if it is already a Schema, else wrap it in a MappingSchema."""
    return schema if isinstance(schema, Schema) else MappingSchema(schema, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `pushdown_projections` function. Write a Python function `def pushdown_projections(expression, schema=None, remove_unused_selections=True)` to solve the following problem:
Rewrite sqlglot AST to remove unused column projections. Example: >>> import sqlglot >>> sql = "SELECT y.a AS a FROM (SELECT x.a AS a, x.b AS b FROM x) AS y" >>> expression = sqlglot.parse_one(sql) >>> pushdown_projections(expression).sql() 'SELECT y.a AS a FROM (SELECT x.a AS a FROM x) AS y' Args: expression (sqlglot.Expression): expression to optimize remove_unused_selections (bool): remove selects that are unused Returns: sqlglot.Expression: optimized expression
Here is the function:
def pushdown_projections(expression, schema=None, remove_unused_selections=True):
    """
    Rewrite sqlglot AST to remove unused column projections.

    Example:
        >>> import sqlglot
        >>> sql = "SELECT y.a AS a FROM (SELECT x.a AS a, x.b AS b FROM x) AS y"
        >>> expression = sqlglot.parse_one(sql)
        >>> pushdown_projections(expression).sql()
        'SELECT y.a AS a FROM (SELECT x.a AS a FROM x) AS y'

    Args:
        expression (sqlglot.Expression): expression to optimize
        remove_unused_selections (bool): remove selects that are unused
    Returns:
        sqlglot.Expression: optimized expression
    """
    schema = ensure_schema(schema)
    source_column_alias_count = {}
    # Map of Scope to all columns being selected by outer queries.
    referenced_columns = defaultdict(set)

    # We build the scope tree (which is traversed in DFS postorder), then iterate
    # over the result in reverse order. This should ensure that the set of selected
    # columns for a particular scope are completely built by the time we get to it.
    for scope in reversed(traverse_scope(expression)):
        parent_selections = referenced_columns.get(scope, {SELECT_ALL})
        alias_count = source_column_alias_count.get(scope, 0)

        # We can't remove columns SELECT DISTINCT nor UNION DISTINCT.
        if scope.expression.args.get("distinct"):
            parent_selections = {SELECT_ALL}

        if isinstance(scope.expression, exp.Union):
            left, right = scope.union_scopes
            referenced_columns[left] = parent_selections

            if any(select.is_star for select in right.expression.selects):
                referenced_columns[right] = parent_selections
            elif not any(select.is_star for select in left.expression.selects):
                # Union sides correspond positionally, so map the left side's
                # referenced names to the right side's names at the same index.
                # (A list is stored here; it is only used for membership tests.)
                referenced_columns[right] = [
                    right.expression.selects[i].alias_or_name
                    for i, select in enumerate(left.expression.selects)
                    if SELECT_ALL in parent_selections or select.alias_or_name in parent_selections
                ]

        if isinstance(scope.expression, exp.Select):
            if remove_unused_selections:
                _remove_unused_selections(scope, parent_selections, schema, alias_count)

            if scope.expression.is_star:
                continue

            # Group columns by source name
            selects = defaultdict(set)
            for col in scope.columns:
                table_name = col.table
                col_name = col.name
                selects[table_name].add(col_name)

            # Push the selected columns down to the next scope
            for name, (node, source) in scope.selected_sources.items():
                if isinstance(source, Scope):
                    # Pivots consume all columns, so keep everything in that case.
                    columns = {SELECT_ALL} if scope.pivots else selects.get(name) or set()
                    referenced_columns[source].update(columns)

                    column_aliases = node.alias_column_names
                    if column_aliases:
                        source_column_alias_count[source] = len(column_aliases)

    return expression
152,929 | from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
if t.TYPE_CHECKING:
JSON = t.Union[dict, list, str, float, int, bool, None]
Node = t.Union[t.List["Node"], exp.DataType.Type, exp.Expression, JSON]
The provided code snippet includes necessary dependencies for implementing the `dump` function. Write a Python function `def dump(node: Node) -> JSON` to solve the following problem:
Recursively dump an AST into a JSON-serializable dict.
Here is the function:
def dump(node: Node) -> JSON:
    """
    Recursively dump an AST into a JSON-serializable dict.
    """
    if isinstance(node, list):
        return [dump(item) for item in node]

    if isinstance(node, exp.DataType.Type):
        return {"class": "DataType.Type", "value": node.value}

    if isinstance(node, exp.Expression):
        name = node.__class__.__qualname__
        # Qualify classes defined outside the core expressions module.
        if node.__class__.__module__ != exp.__name__:
            name = f"{node.__module__}.{name}"

        serialized: t.Dict = {
            "class": name,
            # Empty / None args are omitted to keep the payload compact.
            "args": {k: dump(v) for k, v in node.args.items() if v is not None and v != []},
        }
        if node.type:
            serialized["type"] = dump(node.type)
        if node.comments:
            serialized["comments"] = node.comments
        if node._meta is not None:
            serialized["meta"] = node._meta

        return serialized

    # Plain JSON scalars pass through unchanged.
    return node
152,930 | from __future__ import annotations
import typing as t
from sqlglot import expressions as exp
if t.TYPE_CHECKING:
JSON = t.Union[dict, list, str, float, int, bool, None]
Node = t.Union[t.List["Node"], exp.DataType.Type, exp.Expression, JSON]
The provided code snippet includes necessary dependencies for implementing the `load` function. Write a Python function `def load(obj: JSON) -> Node` to solve the following problem:
Recursively load a dict (as returned by `dump`) into an AST.
Here is the function:
def load(obj: JSON) -> Node:
    """
    Recursively load a dict (as returned by `dump`) into an AST.
    """
    if isinstance(obj, list):
        return [load(item) for item in obj]

    # Scalars (and None) pass through unchanged.
    if not isinstance(obj, dict):
        return obj

    class_name = obj["class"]

    if class_name == "DataType.Type":
        return exp.DataType.Type(obj["value"])

    # Qualified names refer to classes outside the core expressions module.
    if "." in class_name:
        module_path, class_name = class_name.rsplit(".", maxsplit=1)
        module = __import__(module_path, fromlist=[class_name])
    else:
        module = exp

    klass = getattr(module, class_name)

    expression = klass(**{k: load(v) for k, v in obj["args"].items()})
    expression.type = t.cast(exp.DataType, load(obj.get("type")))
    expression.comments = obj.get("comments")
    expression._meta = obj.get("meta")

    return expression
152,931 | from __future__ import annotations
import json
import logging
import typing as t
from dataclasses import dataclass, field
from sqlglot import Schema, exp, maybe_parse
from sqlglot.errors import SqlglotError
from sqlglot.optimizer import Scope, build_scope, find_all_in_scope, normalize_identifiers, qualify
if t.TYPE_CHECKING:
from sqlglot.dialects.dialect import DialectType
@dataclass
class Node:
    """A node in a column-lineage graph.

    NOTE(review): restored the `@dataclass` decorator — the bare annotated fields
    with `field(default_factory=list)` defaults (and the visible
    `from dataclasses import dataclass, field` import) only make sense on a
    dataclass; the decorator appears to have been lost.
    """

    # Display name of the lineage node (e.g. "alias.column").
    name: str
    # The select/column expression this node represents.
    expression: exp.Expression
    # The enclosing query (or table) the expression was found in.
    source: exp.Expression
    # Child lineage nodes this node's value is derived from.
    downstream: t.List[Node] = field(default_factory=list)
    source_name: str = ""
    reference_node_name: str = ""

    def walk(self) -> t.Iterator[Node]:
        """Yield this node and all transitive downstream nodes (pre-order)."""
        yield self

        for d in self.downstream:
            yield from d.walk()

    def to_html(self, dialect: DialectType = None, **opts) -> GraphHTML:
        """Render the lineage graph rooted at this node as interactive HTML."""
        nodes = {}
        edges = []

        for node in self.walk():
            if isinstance(node.expression, exp.Table):
                label = f"FROM {node.expression.this}"
                title = f"<pre>SELECT {node.name} FROM {node.expression.this}</pre>"
                group = 1
            else:
                label = node.expression.sql(pretty=True, dialect=dialect)
                # Highlight this node's expression within its source query.
                source = node.source.transform(
                    lambda n: (
                        exp.Tag(this=n, prefix="<b>", postfix="</b>") if n is node.expression else n
                    ),
                    copy=False,
                ).sql(pretty=True, dialect=dialect)
                title = f"<pre>{source}</pre>"
                group = 0

            node_id = id(node)

            nodes[node_id] = {
                "id": node_id,
                "label": label,
                "title": title,
                "group": group,
            }

            for d in node.downstream:
                edges.append({"from": node_id, "to": id(d)})

        return GraphHTML(nodes, edges, **opts)
def to_node(
    column: str | int,
    scope: Scope,
    dialect: DialectType,
    scope_name: t.Optional[str] = None,
    upstream: t.Optional[Node] = None,
    source_name: t.Optional[str] = None,
    reference_node_name: t.Optional[str] = None,
) -> Node:
    """
    Build the lineage Node for `column` within `scope`, recursing into the
    scopes it draws from and attaching the result under `upstream` (if given).

    Args:
        column: Output column name, or its positional index in the SELECT list.
        scope: The scope in which to resolve the column.
        dialect: Dialect used when rendering SQL for labels/log messages.
        scope_name: Qualifier used to build the node name (e.g. a table alias).
        upstream: Parent lineage node; the new node is appended to its downstream.
        source_name: Logical source name propagated from "source: ..." comments.
        reference_node_name: Name of the node that referenced this scope.

    Returns:
        The lineage node created for `column` (or `upstream` for unions).
    """
    # Derived tables annotated with a leading "source: <name>" comment map
    # their alias to that logical source name.
    source_names = {
        dt.alias: dt.comments[0].split()[1]
        for dt in scope.derived_tables
        if dt.comments and dt.comments[0].startswith("source: ")
    }

    # Find the specific select clause that is the source of the column we want.
    # This can either be a specific, named select or a generic `*` clause.
    select = (
        scope.expression.selects[column]
        if isinstance(column, int)
        else next(
            (select for select in scope.expression.selects if select.alias_or_name == column),
            exp.Star() if scope.expression.is_star else scope.expression,
        )
    )

    if isinstance(scope.expression, exp.Subquery):
        # NOTE(review): only the first subquery scope is followed (the loop returns
        # immediately) — presumably a Subquery wraps a single query scope; confirm.
        for source in scope.subquery_scopes:
            return to_node(
                column,
                scope=source,
                dialect=dialect,
                upstream=upstream,
                source_name=source_name,
                reference_node_name=reference_node_name,
            )
    if isinstance(scope.expression, exp.Union):
        upstream = upstream or Node(name="UNION", source=scope.expression, expression=select)

        # Resolve the column to a positional index so it can be looked up on
        # both sides of the union, whose columns correspond by position.
        index = (
            column
            if isinstance(column, int)
            else next(
                (
                    i
                    for i, select in enumerate(scope.expression.selects)
                    if select.alias_or_name == column or select.is_star
                ),
                -1,  # mypy will not allow a None here, but a negative index should never be returned
            )
        )

        if index == -1:
            raise ValueError(f"Could not find {column} in {scope.expression}")

        for s in scope.union_scopes:
            to_node(
                index,
                scope=s,
                dialect=dialect,
                upstream=upstream,
                source_name=source_name,
                reference_node_name=reference_node_name,
            )

        return upstream

    if isinstance(scope.expression, exp.Select):
        # For better ergonomics in our node labels, replace the full select with
        # a version that has only the column we care about.
        #   "x", SELECT x, y FROM foo
        #     => "x", SELECT x FROM foo
        source = t.cast(exp.Expression, scope.expression.select(select, append=False))
    else:
        source = scope.expression

    # Create the node for this step in the lineage chain, and attach it to the previous one.
    node = Node(
        name=f"{scope_name}.{column}" if scope_name else str(column),
        source=source,
        expression=select,
        source_name=source_name or "",
        reference_node_name=reference_node_name or "",
    )

    if upstream:
        upstream.downstream.append(node)

    # Index this scope's subquery scopes by expression identity so subqueries
    # found inside `select` can be matched to their scopes.
    subquery_scopes = {
        id(subquery_scope.expression): subquery_scope for subquery_scope in scope.subquery_scopes
    }

    for subquery in find_all_in_scope(select, exp.UNWRAPPED_QUERIES):
        subquery_scope = subquery_scopes.get(id(subquery))
        if not subquery_scope:
            logger.warning(f"Unknown subquery scope: {subquery.sql(dialect=dialect)}")
            continue

        for name in subquery.named_selects:
            to_node(name, scope=subquery_scope, dialect=dialect, upstream=node)

    # if the select is a star add all scope sources as downstreams
    if select.is_star:
        for source in scope.sources.values():
            if isinstance(source, Scope):
                source = source.expression
            node.downstream.append(Node(name=select.sql(), source=source, expression=source))

    # Find all columns that went into creating this one to list their lineage nodes.
    source_columns = set(find_all_in_scope(select, exp.Column))

    # If the source is a UDTF find columns used in the UTDF to generate the table
    if isinstance(source, exp.UDTF):
        source_columns |= set(source.find_all(exp.Column))

    for c in source_columns:
        table = c.table
        source = scope.sources.get(table)

        if isinstance(source, Scope):
            selected_node, _ = scope.selected_sources.get(table, (None, None))
            # The table itself came from a more specific scope. Recurse into that one using the unaliased column name.
            to_node(
                c.name,
                scope=source,
                dialect=dialect,
                scope_name=table,
                upstream=node,
                source_name=source_names.get(table) or source_name,
                reference_node_name=selected_node.name if selected_node else None,
            )
        else:
            # The source is not a scope - we've reached the end of the line. At this point, if a source is not found
            # it means this column's lineage is unknown. This can happen if the definition of a source used in a query
            # is not passed into the `sources` map.
            source = source or exp.Placeholder()
            node.downstream.append(Node(name=c.sql(), source=source, expression=source))

    return node
class SqlglotError(Exception):
    """Error raised by sqlglot operations (e.g. when a lineage graph cannot be built)."""
    pass
@t.overload
def normalize_identifiers(expression: E, dialect: DialectType = None) -> E: ...


@t.overload
def normalize_identifiers(expression: str, dialect: DialectType = None) -> exp.Identifier: ...


def normalize_identifiers(expression, dialect=None):
    """
    Normalize all unquoted identifiers to either lower or upper case, depending
    on the dialect. This essentially makes those identifiers case-insensitive.

    It's possible to make this a no-op by adding a special comment next to the
    identifier of interest:

        SELECT a /* sqlglot.meta case_sensitive */ FROM table

    In this example, the identifier `a` will not be normalized.

    Note:
        Some dialects (e.g. BigQuery) treat identifiers as case-insensitive even
        when they're quoted, so in these cases all identifiers are normalized.

    Example:
        >>> import sqlglot
        >>> expression = sqlglot.parse_one('SELECT Bar.A AS A FROM "Foo".Bar')
        >>> normalize_identifiers(expression).sql()
        'SELECT bar.a AS a FROM "Foo".bar'
        >>> normalize_identifiers("foo", dialect="snowflake").sql(dialect="snowflake")
        'FOO'

    Args:
        expression: The expression to transform.
        dialect: The dialect to use in order to decide how to normalize identifiers.

    Returns:
        The transformed expression.
    """
    dialect = Dialect.get_or_raise(dialect)

    # A bare string is treated as a single identifier to normalize.
    if isinstance(expression, str):
        expression = exp.parse_identifier(expression, dialect=dialect)

    # Prune subtrees marked case-sensitive so their identifiers are left untouched.
    for node in expression.walk(prune=lambda n: n.meta.get("case_sensitive")):
        if not node.meta.get("case_sensitive"):
            dialect.normalize_identifier(node)

    return expression
def qualify(
    expression: exp.Expression,
    dialect: DialectType = None,
    db: t.Optional[str] = None,
    catalog: t.Optional[str] = None,
    schema: t.Optional[dict | Schema] = None,
    expand_alias_refs: bool = True,
    expand_stars: bool = True,
    infer_schema: t.Optional[bool] = None,
    isolate_tables: bool = False,
    qualify_columns: bool = True,
    validate_qualify_columns: bool = True,
    quote_identifiers: bool = True,
    identify: bool = True,
) -> exp.Expression:
    """
    Rewrite sqlglot AST to have normalized and qualified tables and columns.

    This step is necessary for all further SQLGlot optimizations.

    Example:
        >>> import sqlglot
        >>> schema = {"tbl": {"col": "INT"}}
        >>> expression = sqlglot.parse_one("SELECT col FROM tbl")
        >>> qualify(expression, schema=schema).sql()
        'SELECT "tbl"."col" AS "col" FROM "tbl" AS "tbl"'

    Args:
        expression: Expression to qualify.
        dialect: The dialect used to parse/normalize identifiers and generate SQL.
        db: Default database name for tables.
        catalog: Default catalog name for tables.
        schema: Schema to infer column names and types.
        expand_alias_refs: Whether to expand references to aliases.
        expand_stars: Whether to expand star queries. This is a necessary step
            for most of the optimizer's rules to work; do not set to False unless you
            know what you're doing!
        infer_schema: Whether to infer the schema if missing.
        isolate_tables: Whether to isolate table selects.
        qualify_columns: Whether to qualify columns.
        validate_qualify_columns: Whether to validate columns.
        quote_identifiers: Whether to run the quote_identifiers step.
            This step is necessary to ensure correctness for case sensitive queries.
            But this flag is provided in case this step is performed at a later time.
        identify: If True, quote all identifiers, else only necessary ones.

    Returns:
        The qualified expression.
    """
    # Normalize identifiers and qualify table references before touching columns.
    schema = ensure_schema(schema, dialect=dialect)
    expression = normalize_identifiers(expression, dialect=dialect)
    expression = qualify_tables(expression, db=db, catalog=catalog, schema=schema)
    if isolate_tables:
        expression = isolate_table_selects(expression, schema=schema)
    # Some dialects resolve CTE alias columns before the CTE body's own aliases.
    if Dialect.get_or_raise(dialect).PREFER_CTE_ALIAS_COLUMN:
        expression = pushdown_cte_alias_columns_func(expression)
    if qualify_columns:
        expression = qualify_columns_func(
            expression,
            schema,
            expand_alias_refs=expand_alias_refs,
            expand_stars=expand_stars,
            infer_schema=infer_schema,
        )
    if quote_identifiers:
        expression = quote_identifiers_func(expression, dialect=dialect, identify=identify)
    if validate_qualify_columns:
        validate_qualify_columns_func(expression)
    return expression
# A dialect may be given as its name, an instance, a Dialect subclass, or None (the default dialect).
DialectType = t.Union[str, Dialect, t.Type[Dialect], None]
The provided code snippet includes the necessary dependencies for implementing the `lineage` function. Write a Python function `def lineage(column: str | exp.Column, sql: str | exp.Expression, schema: t.Optional[t.Dict | Schema] = None, sources: t.Optional[t.Mapping[str, str | exp.Query]] = None, dialect: DialectType = None, **kwargs) -> Node` that solves the following problem:
Build the lineage graph for a column of a SQL query. Args: column: The column to build the lineage for. sql: The SQL string or expression. schema: The schema of tables. sources: A mapping of queries which will be used to continue building lineage. dialect: The dialect of the input SQL. **kwargs: Qualification optimizer kwargs. Returns: A lineage node.
Here is the function:
def lineage(
    column: str | exp.Column,
    sql: str | exp.Expression,
    schema: t.Optional[t.Dict | Schema] = None,
    sources: t.Optional[t.Mapping[str, str | exp.Query]] = None,
    dialect: DialectType = None,
    **kwargs,
) -> Node:
    """Build the lineage graph for a column of a SQL query.

    Args:
        column: The column to build the lineage for.
        sql: The SQL string or expression.
        schema: The schema of tables.
        sources: A mapping of queries which will be used to continue building lineage.
        dialect: The dialect of input SQL.
        **kwargs: Qualification optimizer kwargs.

    Returns:
        A lineage node.
    """
    expression = maybe_parse(sql, dialect=dialect)
    # `normalize_identifiers` and `qualify` are the functions defined above in this
    # file, not modules, so they must be called directly (attribute access such as
    # `normalize_identifiers.normalize_identifiers` would raise AttributeError).
    column = normalize_identifiers(column, dialect=dialect).name

    if sources:
        # Inline the provided source queries so lineage can be traced through them.
        expression = exp.expand(
            expression,
            {k: t.cast(exp.Query, maybe_parse(v, dialect=dialect)) for k, v in sources.items()},
            dialect=dialect,
        )

    # Lineage requires fully qualified tables/columns; skip validation and quoting.
    qualified = qualify(
        expression,
        dialect=dialect,
        schema=schema,
        **{"validate_qualify_columns": False, "identify": False, **kwargs},  # type: ignore
    )

    scope = build_scope(qualified)
    if not scope:
        raise SqlglotError("Cannot build lineage, sql must be SELECT")

    if not any(select.alias_or_name == column for select in scope.expression.selects):
        raise SqlglotError(f"Cannot find column '{column}' in query.")

    return to_node(column, scope, dialect)
152,932 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
def _norm_arg(arg):
return arg.lower() if type(arg) is str else arg | null |
152,933 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
if t.TYPE_CHECKING:
    # Imported only for static type checking, to avoid runtime import cycles.
    from sqlglot._typing import E, Lit
    from sqlglot.dialects.dialect import DialectType
# Type variable bound to Query, so query-builder methods can return their own subtype.
Q = t.TypeVar("Q", bound="Query")
class Expression(metaclass=_Expression):
"""
The base class for all expressions in a syntax tree. Each Expression encapsulates any necessary
context, such as its child expressions, their names (arg keys), and whether a given child expression
is optional or not.
Attributes:
key: a unique key for each class in the Expression hierarchy. This is useful for hashing
and representing expressions as strings.
arg_types: determines the arguments (child nodes) supported by an expression. It maps
arg keys to booleans that indicate whether the corresponding args are optional.
parent: a reference to the parent expression (or None, in case of root expressions).
arg_key: the arg key an expression is associated with, i.e. the name its parent expression
uses to refer to it.
index: the index of an expression if it is inside of a list argument in its parent
comments: a list of comments that are associated with a given expression. This is used in
order to preserve comments when transpiling SQL code.
type: the `sqlglot.expressions.DataType` type of an expression. This is inferred by the
optimizer, in order to enable some transformations that require type information.
meta: a dictionary that can be used to store useful metadata for a given expression.
Example:
>>> class Foo(Expression):
... arg_types = {"this": True, "expression": False}
The above definition informs us that Foo is an Expression that requires an argument called
"this" and may also optionally receive an argument called "expression".
Args:
args: a mapping used for retrieving the arguments of an expression, given their arg keys.
"""
key = "expression"
arg_types = {"this": True}
__slots__ = ("args", "parent", "arg_key", "index", "comments", "_type", "_meta", "_hash")
    def __init__(self, **args: t.Any):
        """Initialize the expression from its arg mapping and parent each child node."""
        self.args: t.Dict[str, t.Any] = args
        self.parent: t.Optional[Expression] = None
        self.arg_key: t.Optional[str] = None
        self.index: t.Optional[int] = None
        self.comments: t.Optional[t.List[str]] = None
        self._type: t.Optional[DataType] = None
        self._meta: t.Optional[t.Dict[str, t.Any]] = None
        self._hash: t.Optional[int] = None
        # Wire up parent / arg_key / index pointers on every child expression.
        for arg_key, value in self.args.items():
            self._set_parent(arg_key, value)
def __eq__(self, other) -> bool:
return type(self) is type(other) and hash(self) == hash(other)
def hashable_args(self) -> t.Any:
return frozenset(
(k, tuple(_norm_arg(a) for a in v) if type(v) is list else _norm_arg(v))
for k, v in self.args.items()
if not (v is None or v is False or (type(v) is list and not v))
)
    def __hash__(self) -> int:
        # Use the precomputed hash when one has been cached on the node.
        if self._hash is not None:
            return self._hash

        # Otherwise hash by class plus the normalized, hashable view of the args.
        return hash((self.__class__, self.hashable_args))
def this(self) -> t.Any:
"""
Retrieves the argument with key "this".
"""
return self.args.get("this")
def expression(self) -> t.Any:
"""
Retrieves the argument with key "expression".
"""
return self.args.get("expression")
def expressions(self) -> t.List[t.Any]:
"""
Retrieves the argument with key "expressions".
"""
return self.args.get("expressions") or []
    def text(self, key) -> str:
        """
        Returns a textual representation of the argument corresponding to "key". This can only be used
        for args that are strings or leaf Expression instances, such as identifiers and literals.
        """
        field = self.args.get(key)
        if isinstance(field, str):
            return field
        if isinstance(field, (Identifier, Literal, Var)):
            # These leaf nodes carry their text directly in "this".
            return field.this
        if isinstance(field, (Star, Null)):
            # Fall back to the node's name for Star/Null.
            return field.name
        # Missing args and complex expressions have no direct text form.
        return ""
def is_string(self) -> bool:
"""
Checks whether a Literal expression is a string.
"""
return isinstance(self, Literal) and self.args["is_string"]
def is_number(self) -> bool:
"""
Checks whether a Literal expression is a number.
"""
return isinstance(self, Literal) and not self.args["is_string"]
def is_int(self) -> bool:
"""
Checks whether a Literal expression is an integer.
"""
return self.is_number and is_int(self.name)
def is_star(self) -> bool:
"""Checks whether an expression is a star."""
return isinstance(self, Star) or (isinstance(self, Column) and isinstance(self.this, Star))
def alias(self) -> str:
"""
Returns the alias of the expression, or an empty string if it's not aliased.
"""
if isinstance(self.args.get("alias"), TableAlias):
return self.args["alias"].name
return self.text("alias")
def alias_column_names(self) -> t.List[str]:
table_alias = self.args.get("alias")
if not table_alias:
return []
return [c.name for c in table_alias.args.get("columns") or []]
def name(self) -> str:
return self.text("this")
def alias_or_name(self) -> str:
return self.alias or self.name
def output_name(self) -> str:
"""
Name of the output column if this expression is a selection.
If the Expression has no output name, an empty string is returned.
Example:
>>> from sqlglot import parse_one
>>> parse_one("SELECT a").expressions[0].output_name
'a'
>>> parse_one("SELECT b AS c").expressions[0].output_name
'c'
>>> parse_one("SELECT 1 + 2").expressions[0].output_name
''
"""
return ""
def type(self) -> t.Optional[DataType]:
return self._type
def type(self, dtype: t.Optional[DataType | DataType.Type | str]) -> None:
if dtype and not isinstance(dtype, DataType):
dtype = DataType.build(dtype)
self._type = dtype # type: ignore
    def is_type(self, *dtypes) -> bool:
        # True if this expression has been annotated with a type matching any of `dtypes`.
        return self.type is not None and self.type.is_type(*dtypes)
def is_leaf(self) -> bool:
return not any(isinstance(v, (Expression, list)) for v in self.args.values())
def meta(self) -> t.Dict[str, t.Any]:
if self._meta is None:
self._meta = {}
return self._meta
def __deepcopy__(self, memo):
root = self.__class__()
stack = [(self, root)]
while stack:
node, copy = stack.pop()
if node.comments is not None:
copy.comments = deepcopy(node.comments)
if node._type is not None:
copy._type = deepcopy(node._type)
if node._meta is not None:
copy._meta = deepcopy(node._meta)
if node._hash is not None:
copy._hash = node._hash
for k, vs in node.args.items():
if hasattr(vs, "parent"):
stack.append((vs, vs.__class__()))
copy.set(k, stack[-1][-1])
elif type(vs) is list:
copy.args[k] = []
for v in vs:
if hasattr(v, "parent"):
stack.append((v, v.__class__()))
copy.append(k, stack[-1][-1])
else:
copy.append(k, v)
else:
copy.args[k] = vs
return root
def copy(self):
"""
Returns a deep copy of the expression.
"""
return deepcopy(self)
def add_comments(self, comments: t.Optional[t.List[str]]) -> None:
if self.comments is None:
self.comments = []
if comments:
for comment in comments:
_, *meta = comment.split(SQLGLOT_META)
if meta:
for kv in "".join(meta).split(","):
k, *v = kv.split("=")
value = v[0].strip() if v else True
self.meta[k.strip()] = value
self.comments.append(comment)
def append(self, arg_key: str, value: t.Any) -> None:
"""
Appends value to arg_key if it's a list or sets it as a new list.
Args:
arg_key (str): name of the list expression arg
value (Any): value to append to the list
"""
if type(self.args.get(arg_key)) is not list:
self.args[arg_key] = []
self._set_parent(arg_key, value)
values = self.args[arg_key]
if hasattr(value, "parent"):
value.index = len(values)
values.append(value)
def set(self, arg_key: str, value: t.Any) -> None:
"""
Sets arg_key to value.
Args:
arg_key: name of the expression arg.
value: value to set the arg to.
"""
if value is None:
self.args.pop(arg_key, None)
else:
self.args[arg_key] = value
self._set_parent(arg_key, value)
def _set_parent(self, arg_key: str, value: t.Any, index: t.Optional[int] = None) -> None:
if hasattr(value, "parent"):
value.parent = self
value.arg_key = arg_key
value.index = index
elif type(value) is list:
for index, v in enumerate(value):
if hasattr(v, "parent"):
v.parent = self
v.arg_key = arg_key
v.index = index
def depth(self) -> int:
"""
Returns the depth of this tree.
"""
if self.parent:
return self.parent.depth + 1
return 0
def iter_expressions(self, reverse: bool = False) -> t.Iterator[Expression]:
"""Yields the key and expression for all arguments, exploding list args."""
# remove tuple when python 3.7 is deprecated
for vs in reversed(tuple(self.args.values())) if reverse else self.args.values():
if type(vs) is list:
for v in reversed(vs) if reverse else vs:
if hasattr(v, "parent"):
yield v
else:
if hasattr(vs, "parent"):
yield vs
def find(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Optional[E]:
"""
Returns the first node in this tree which matches at least one of
the specified types.
Args:
expression_types: the expression type(s) to match.
bfs: whether to search the AST using the BFS algorithm (DFS is used if false).
Returns:
The node which matches the criteria or None if no such node was found.
"""
return next(self.find_all(*expression_types, bfs=bfs), None)
def find_all(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Iterator[E]:
"""
Returns a generator object which visits all nodes in this tree and only
yields those that match at least one of the specified expression types.
Args:
expression_types: the expression type(s) to match.
bfs: whether to search the AST using the BFS algorithm (DFS is used if false).
Returns:
The generator object.
"""
for expression in self.walk(bfs=bfs):
if isinstance(expression, expression_types):
yield expression
def find_ancestor(self, *expression_types: t.Type[E]) -> t.Optional[E]:
"""
Returns a nearest parent matching expression_types.
Args:
expression_types: the expression type(s) to match.
Returns:
The parent node.
"""
ancestor = self.parent
while ancestor and not isinstance(ancestor, expression_types):
ancestor = ancestor.parent
return ancestor # type: ignore
def parent_select(self) -> t.Optional[Select]:
"""
Returns the parent select statement.
"""
return self.find_ancestor(Select)
def same_parent(self) -> bool:
"""Returns if the parent is the same class as itself."""
return type(self.parent) is self.__class__
def root(self) -> Expression:
"""
Returns the root expression of this tree.
"""
expression = self
while expression.parent:
expression = expression.parent
return expression
def walk(
self, bfs: bool = True, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
"""
Returns a generator object which visits all nodes in this tree.
Args:
bfs (bool): if set to True the BFS traversal order will be applied,
otherwise the DFS traversal will be used instead.
prune ((node, parent, arg_key) -> bool): callable that returns True if
the generator should stop traversing this branch of the tree.
Returns:
the generator object.
"""
if bfs:
yield from self.bfs(prune=prune)
else:
yield from self.dfs(prune=prune)
def dfs(
self, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
"""
Returns a generator object which visits all nodes in this tree in
the DFS (Depth-first) order.
Returns:
The generator object.
"""
stack = [self]
while stack:
node = stack.pop()
yield node
if prune and prune(node):
continue
for v in node.iter_expressions(reverse=True):
stack.append(v)
def bfs(
self, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
"""
Returns a generator object which visits all nodes in this tree in
the BFS (Breadth-first) order.
Returns:
The generator object.
"""
queue = deque([self])
while queue:
node = queue.popleft()
yield node
if prune and prune(node):
continue
for v in node.iter_expressions():
queue.append(v)
def unnest(self):
"""
Returns the first non parenthesis child or self.
"""
expression = self
while type(expression) is Paren:
expression = expression.this
return expression
def unalias(self):
"""
Returns the inner expression if this is an Alias.
"""
if isinstance(self, Alias):
return self.this
return self
def unnest_operands(self):
"""
Returns unnested operands as a tuple.
"""
return tuple(arg.unnest() for arg in self.iter_expressions())
def flatten(self, unnest=True):
"""
Returns a generator which yields child nodes whose parents are the same class.
A AND B AND C -> [A, B, C]
"""
for node in self.dfs(prune=lambda n: n.parent and type(n) is not self.__class__):
if type(node) is not self.__class__:
yield node.unnest() if unnest and not isinstance(node, Subquery) else node
def __str__(self) -> str:
return self.sql()
def __repr__(self) -> str:
return _to_s(self)
def to_s(self) -> str:
"""
Same as __repr__, but includes additional information which can be useful
for debugging, like empty or missing args and the AST nodes' object IDs.
"""
return _to_s(self, verbose=True)
def sql(self, dialect: DialectType = None, **opts) -> str:
"""
Returns SQL string representation of this tree.
Args:
dialect: the dialect of the output SQL string (eg. "spark", "hive", "presto", "mysql").
opts: other `sqlglot.generator.Generator` options.
Returns:
The SQL string.
"""
from sqlglot.dialects import Dialect
return Dialect.get_or_raise(dialect).generate(self, **opts)
def transform(self, fun: t.Callable, *args: t.Any, copy: bool = True, **kwargs) -> Expression:
"""
Visits all tree nodes (excluding already transformed ones)
and applies the given transformation function to each node.
Args:
fun (function): a function which takes a node as an argument and returns a
new transformed node or the same node without modifications. If the function
returns None, then the corresponding node will be removed from the syntax tree.
copy (bool): if set to True a new tree instance is constructed, otherwise the tree is
modified in place.
Returns:
The transformed tree.
"""
root = None
new_node = None
for node in (self.copy() if copy else self).dfs(prune=lambda n: n is not new_node):
new_node = fun(node, *args, **kwargs)
if root:
if new_node is not node:
node.replace(new_node)
else:
root = new_node
assert root
return root.assert_is(Expression)
def replace(self, expression: E) -> E: ...
def replace(self, expression: None) -> None: ...
def replace(self, expression):
"""
Swap out this expression with a new expression.
For example::
>>> tree = Select().select("x").from_("tbl")
>>> tree.find(Column).replace(column("y"))
Column(
this=Identifier(this=y, quoted=False))
>>> tree.sql()
'SELECT y FROM tbl'
Args:
expression: new node
Returns:
The new expression or expressions.
"""
parent = self.parent
if not parent:
return expression
key = self.arg_key
value = parent.args.get(key)
if isinstance(value, list):
index = self.index
if isinstance(expression, list):
value.pop(index)
value[index:index] = expression
parent._set_parent(key, value)
else:
if expression is None:
value.pop(index)
for v in value[index:]:
v.index = v.index - 1
else:
value[index] = expression
parent._set_parent(key, expression, index=index)
elif value is not None:
if expression is None:
parent.args.pop(key)
else:
parent.set(key, expression)
if expression is not self:
self.parent = None
self.arg_key = None
self.index = None
return expression
def pop(self: E) -> E:
"""
Remove this expression from its AST.
Returns:
The popped expression.
"""
self.replace(None)
return self
def assert_is(self, type_: t.Type[E]) -> E:
"""
Assert that this `Expression` is an instance of `type_`.
If it is NOT an instance of `type_`, this raises an assertion error.
Otherwise, this returns this expression.
Examples:
This is useful for type security in chained expressions:
>>> import sqlglot
>>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()
'SELECT x, z FROM y'
"""
if not isinstance(self, type_):
raise AssertionError(f"{self} is not {type_}.")
return self
def error_messages(self, args: t.Optional[t.Sequence] = None) -> t.List[str]:
"""
Checks if this expression is valid (e.g. all mandatory args are set).
Args:
args: a sequence of values that were used to instantiate a Func expression. This is used
to check that the provided arguments don't exceed the function argument limit.
Returns:
A list of error messages for all possible errors that were found.
"""
errors: t.List[str] = []
for k in self.args:
if k not in self.arg_types:
errors.append(f"Unexpected keyword: '{k}' for {self.__class__}")
for k, mandatory in self.arg_types.items():
v = self.args.get(k)
if mandatory and (v is None or (isinstance(v, list) and not v)):
errors.append(f"Required keyword: '{k}' missing for {self.__class__}")
if (
args
and isinstance(self, Func)
and len(args) > len(self.arg_types)
and not self.is_var_len_args
):
errors.append(
f"The number of provided arguments ({len(args)}) is greater than "
f"the maximum number of supported arguments ({len(self.arg_types)})"
)
return errors
def dump(self):
"""
Dump this Expression to a JSON-serializable dict.
"""
from sqlglot.serde import dump
return dump(self)
def load(cls, obj):
"""
Load a dict (as returned by `Expression.dump`) into an Expression instance.
"""
from sqlglot.serde import load
return load(obj)
def and_(
self,
*expressions: t.Optional[ExpOrStr],
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Condition:
"""
AND this condition with one or multiple expressions.
Example:
>>> condition("x=1").and_("y=1").sql()
'x = 1 AND y = 1'
Args:
*expressions: the SQL code strings to parse.
If an `Expression` instance is passed, it will be used as-is.
dialect: the dialect used to parse the input expression.
copy: whether to copy the involved expressions (only applies to Expressions).
opts: other options to use to parse the input expressions.
Returns:
The new And condition.
"""
return and_(self, *expressions, dialect=dialect, copy=copy, **opts)
def or_(
self,
*expressions: t.Optional[ExpOrStr],
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Condition:
"""
OR this condition with one or multiple expressions.
Example:
>>> condition("x=1").or_("y=1").sql()
'x = 1 OR y = 1'
Args:
*expressions: the SQL code strings to parse.
If an `Expression` instance is passed, it will be used as-is.
dialect: the dialect used to parse the input expression.
copy: whether to copy the involved expressions (only applies to Expressions).
opts: other options to use to parse the input expressions.
Returns:
The new Or condition.
"""
return or_(self, *expressions, dialect=dialect, copy=copy, **opts)
def not_(self, copy: bool = True):
"""
Wrap this condition with NOT.
Example:
>>> condition("x=1").not_().sql()
'NOT x = 1'
Args:
copy: whether to copy this object.
Returns:
The new Not instance.
"""
return not_(self, copy=copy)
def as_(
self,
alias: str | Identifier,
quoted: t.Optional[bool] = None,
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Alias:
return alias_(self, alias, quoted=quoted, dialect=dialect, copy=copy, **opts)
def _binop(self, klass: t.Type[E], other: t.Any, reverse: bool = False) -> E:
this = self.copy()
other = convert(other, copy=True)
if not isinstance(this, klass) and not isinstance(other, klass):
this = _wrap(this, Binary)
other = _wrap(other, Binary)
if reverse:
return klass(this=other, expression=this)
return klass(this=this, expression=other)
def __getitem__(self, other: ExpOrStr | t.Tuple[ExpOrStr]) -> Bracket:
return Bracket(
this=self.copy(), expressions=[convert(e, copy=True) for e in ensure_list(other)]
)
def __iter__(self) -> t.Iterator:
if "expressions" in self.arg_types:
return iter(self.args.get("expressions") or [])
# We define this because __getitem__ converts Expression into an iterable, which is
# problematic because one can hit infinite loops if they do "for x in some_expr: ..."
# See: https://peps.python.org/pep-0234/
raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
def isin(
self,
*expressions: t.Any,
query: t.Optional[ExpOrStr] = None,
unnest: t.Optional[ExpOrStr] | t.Collection[ExpOrStr] = None,
copy: bool = True,
**opts,
) -> In:
return In(
this=maybe_copy(self, copy),
expressions=[convert(e, copy=copy) for e in expressions],
query=maybe_parse(query, copy=copy, **opts) if query else None,
unnest=(
Unnest(
expressions=[
maybe_parse(t.cast(ExpOrStr, e), copy=copy, **opts)
for e in ensure_list(unnest)
]
)
if unnest
else None
),
)
def between(self, low: t.Any, high: t.Any, copy: bool = True, **opts) -> Between:
return Between(
this=maybe_copy(self, copy),
low=convert(low, copy=copy, **opts),
high=convert(high, copy=copy, **opts),
)
def is_(self, other: ExpOrStr) -> Is:
return self._binop(Is, other)
def like(self, other: ExpOrStr) -> Like:
return self._binop(Like, other)
def ilike(self, other: ExpOrStr) -> ILike:
return self._binop(ILike, other)
def eq(self, other: t.Any) -> EQ:
return self._binop(EQ, other)
def neq(self, other: t.Any) -> NEQ:
return self._binop(NEQ, other)
def rlike(self, other: ExpOrStr) -> RegexpLike:
return self._binop(RegexpLike, other)
def div(self, other: ExpOrStr, typed: bool = False, safe: bool = False) -> Div:
div = self._binop(Div, other)
div.args["typed"] = typed
div.args["safe"] = safe
return div
def desc(self, nulls_first: bool = False) -> Ordered:
return Ordered(this=self.copy(), desc=True, nulls_first=nulls_first)
def __lt__(self, other: t.Any) -> LT:
return self._binop(LT, other)
def __le__(self, other: t.Any) -> LTE:
return self._binop(LTE, other)
def __gt__(self, other: t.Any) -> GT:
return self._binop(GT, other)
def __ge__(self, other: t.Any) -> GTE:
return self._binop(GTE, other)
def __add__(self, other: t.Any) -> Add:
return self._binop(Add, other)
def __radd__(self, other: t.Any) -> Add:
return self._binop(Add, other, reverse=True)
def __sub__(self, other: t.Any) -> Sub:
return self._binop(Sub, other)
def __rsub__(self, other: t.Any) -> Sub:
return self._binop(Sub, other, reverse=True)
def __mul__(self, other: t.Any) -> Mul:
return self._binop(Mul, other)
def __rmul__(self, other: t.Any) -> Mul:
return self._binop(Mul, other, reverse=True)
def __truediv__(self, other: t.Any) -> Div:
return self._binop(Div, other)
def __rtruediv__(self, other: t.Any) -> Div:
return self._binop(Div, other, reverse=True)
def __floordiv__(self, other: t.Any) -> IntDiv:
return self._binop(IntDiv, other)
def __rfloordiv__(self, other: t.Any) -> IntDiv:
return self._binop(IntDiv, other, reverse=True)
def __mod__(self, other: t.Any) -> Mod:
return self._binop(Mod, other)
def __rmod__(self, other: t.Any) -> Mod:
return self._binop(Mod, other, reverse=True)
def __pow__(self, other: t.Any) -> Pow:
return self._binop(Pow, other)
def __rpow__(self, other: t.Any) -> Pow:
return self._binop(Pow, other, reverse=True)
def __and__(self, other: t.Any) -> And:
return self._binop(And, other)
def __rand__(self, other: t.Any) -> And:
return self._binop(And, other, reverse=True)
def __or__(self, other: t.Any) -> Or:
return self._binop(Or, other)
def __ror__(self, other: t.Any) -> Or:
return self._binop(Or, other, reverse=True)
def __neg__(self) -> Neg:
return Neg(this=_wrap(self.copy(), Binary))
def __invert__(self) -> Not:
return not_(self.copy())
class DataType(Expression):
arg_types = {
"this": True,
"expressions": False,
"nested": False,
"values": False,
"prefix": False,
"kind": False,
}
class Type(AutoName):
ARRAY = auto()
AGGREGATEFUNCTION = auto()
SIMPLEAGGREGATEFUNCTION = auto()
BIGDECIMAL = auto()
BIGINT = auto()
BIGSERIAL = auto()
BINARY = auto()
BIT = auto()
BOOLEAN = auto()
BPCHAR = auto()
CHAR = auto()
DATE = auto()
DATE32 = auto()
DATEMULTIRANGE = auto()
DATERANGE = auto()
DATETIME = auto()
DATETIME64 = auto()
DECIMAL = auto()
DOUBLE = auto()
ENUM = auto()
ENUM8 = auto()
ENUM16 = auto()
FIXEDSTRING = auto()
FLOAT = auto()
GEOGRAPHY = auto()
GEOMETRY = auto()
HLLSKETCH = auto()
HSTORE = auto()
IMAGE = auto()
INET = auto()
INT = auto()
INT128 = auto()
INT256 = auto()
INT4MULTIRANGE = auto()
INT4RANGE = auto()
INT8MULTIRANGE = auto()
INT8RANGE = auto()
INTERVAL = auto()
IPADDRESS = auto()
IPPREFIX = auto()
IPV4 = auto()
IPV6 = auto()
JSON = auto()
JSONB = auto()
LONGBLOB = auto()
LONGTEXT = auto()
LOWCARDINALITY = auto()
MAP = auto()
MEDIUMBLOB = auto()
MEDIUMINT = auto()
MEDIUMTEXT = auto()
MONEY = auto()
NAME = auto()
NCHAR = auto()
NESTED = auto()
NULL = auto()
NULLABLE = auto()
NUMMULTIRANGE = auto()
NUMRANGE = auto()
NVARCHAR = auto()
OBJECT = auto()
ROWVERSION = auto()
SERIAL = auto()
SET = auto()
SMALLINT = auto()
SMALLMONEY = auto()
SMALLSERIAL = auto()
STRUCT = auto()
SUPER = auto()
TEXT = auto()
TINYBLOB = auto()
TINYTEXT = auto()
TIME = auto()
TIMETZ = auto()
TIMESTAMP = auto()
TIMESTAMPLTZ = auto()
TIMESTAMPTZ = auto()
TIMESTAMP_S = auto()
TIMESTAMP_MS = auto()
TIMESTAMP_NS = auto()
TINYINT = auto()
TSMULTIRANGE = auto()
TSRANGE = auto()
TSTZMULTIRANGE = auto()
TSTZRANGE = auto()
UBIGINT = auto()
UINT = auto()
UINT128 = auto()
UINT256 = auto()
UMEDIUMINT = auto()
UDECIMAL = auto()
UNIQUEIDENTIFIER = auto()
UNKNOWN = auto() # Sentinel value, useful for type annotation
USERDEFINED = "USER-DEFINED"
USMALLINT = auto()
UTINYINT = auto()
UUID = auto()
VARBINARY = auto()
VARCHAR = auto()
VARIANT = auto()
XML = auto()
YEAR = auto()
STRUCT_TYPES = {
Type.NESTED,
Type.OBJECT,
Type.STRUCT,
}
NESTED_TYPES = {
*STRUCT_TYPES,
Type.ARRAY,
Type.MAP,
}
TEXT_TYPES = {
Type.CHAR,
Type.NCHAR,
Type.NVARCHAR,
Type.TEXT,
Type.VARCHAR,
Type.NAME,
}
INTEGER_TYPES = {
Type.BIGINT,
Type.BIT,
Type.INT,
Type.INT128,
Type.INT256,
Type.MEDIUMINT,
Type.SMALLINT,
Type.TINYINT,
Type.UBIGINT,
Type.UINT,
Type.UINT128,
Type.UINT256,
Type.UMEDIUMINT,
Type.USMALLINT,
Type.UTINYINT,
}
FLOAT_TYPES = {
Type.DOUBLE,
Type.FLOAT,
}
REAL_TYPES = {
*FLOAT_TYPES,
Type.BIGDECIMAL,
Type.DECIMAL,
Type.MONEY,
Type.SMALLMONEY,
Type.UDECIMAL,
}
NUMERIC_TYPES = {
*INTEGER_TYPES,
*REAL_TYPES,
}
TEMPORAL_TYPES = {
Type.DATE,
Type.DATE32,
Type.DATETIME,
Type.DATETIME64,
Type.TIME,
Type.TIMESTAMP,
Type.TIMESTAMPLTZ,
Type.TIMESTAMPTZ,
Type.TIMESTAMP_MS,
Type.TIMESTAMP_NS,
Type.TIMESTAMP_S,
Type.TIMETZ,
}
def build(
cls,
dtype: DATA_TYPE,
dialect: DialectType = None,
udt: bool = False,
copy: bool = True,
**kwargs,
) -> DataType:
"""
Constructs a DataType object.
Args:
dtype: the data type of interest.
dialect: the dialect to use for parsing `dtype`, in case it's a string.
udt: when set to True, `dtype` will be used as-is if it can't be parsed into a
DataType, thus creating a user-defined type.
copy: whether to copy the data type.
kwargs: additional arguments to pass in the constructor of DataType.
Returns:
The constructed DataType object.
"""
from sqlglot import parse_one
if isinstance(dtype, str):
if dtype.upper() == "UNKNOWN":
return DataType(this=DataType.Type.UNKNOWN, **kwargs)
try:
data_type_exp = parse_one(
dtype, read=dialect, into=DataType, error_level=ErrorLevel.IGNORE
)
except ParseError:
if udt:
return DataType(this=DataType.Type.USERDEFINED, kind=dtype, **kwargs)
raise
elif isinstance(dtype, DataType.Type):
data_type_exp = DataType(this=dtype)
elif isinstance(dtype, DataType):
return maybe_copy(dtype, copy)
else:
raise ValueError(f"Invalid data type: {type(dtype)}. Expected str or DataType.Type")
return DataType(**{**data_type_exp.args, **kwargs})
def is_type(self, *dtypes: DATA_TYPE) -> bool:
"""
Checks whether this DataType matches one of the provided data types. Nested types or precision
will be compared using "structural equivalence" semantics, so e.g. array<int> != array<float>.
Args:
dtypes: the data types to compare this DataType to.
Returns:
True, if and only if there is a type in `dtypes` which is equal to this DataType.
"""
for dtype in dtypes:
other = DataType.build(dtype, copy=False, udt=True)
if (
other.expressions
or self.this == DataType.Type.USERDEFINED
or other.this == DataType.Type.USERDEFINED
):
matches = self == other
else:
matches = self.this == other.this
if matches:
return True
return False
class Any(SubqueryPredicate):
pass
The provided code snippet includes necessary dependencies for implementing the `_to_s` function. Write a Python function `def _to_s(node: t.Any, verbose: bool = False, level: int = 0) -> str` to solve the following problem:
Generate a textual representation of an Expression tree
Here is the function:
def _to_s(node: t.Any, verbose: bool = False, level: int = 0) -> str:
"""Generate a textual representation of an Expression tree"""
indent = "\n" + (" " * (level + 1))
delim = f",{indent}"
if isinstance(node, Expression):
args = {k: v for k, v in node.args.items() if (v is not None and v != []) or verbose}
if (node.type or verbose) and not isinstance(node, DataType):
args["_type"] = node.type
if node.comments or verbose:
args["_comments"] = node.comments
if verbose:
args["_id"] = id(node)
# Inline leaves for a more compact representation
if node.is_leaf():
indent = ""
delim = ", "
items = delim.join([f"{k}={_to_s(v, verbose, level + 1)}" for k, v in args.items()])
return f"{node.__class__.__name__}({indent}{items})"
if isinstance(node, list):
items = delim.join(_to_s(i, verbose, level + 1) for i in node)
items = f"{indent}{items}" if items else ""
return f"[{items}]"
# Indent multiline strings to match the current level
return indent.join(textwrap.dedent(str(node).strip("\n")).splitlines()) | Generate a textual representation of an Expression tree |
152,934 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Type[E],
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E: ...
def maybe_parse(
sql_or_expression: str | E,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E: ...
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> Expression:
"""Gracefully handle a possible string or expression.
Example:
>>> maybe_parse("1")
Literal(this=1, is_string=False)
>>> maybe_parse(to_identifier("x"))
Identifier(this=x, quoted=False)
Args:
sql_or_expression: the SQL code string or an expression
into: the SQLGlot Expression to parse into
dialect: the dialect used to parse the input expressions (in the case that an
input expression is a SQL string).
prefix: a string to prefix the sql with before it gets parsed
(automatically includes a space)
copy: whether to copy the expression.
**opts: other options to use to parse the input expressions (again, in the case
that an input expression is a SQL string).
Returns:
Expression: the parsed or given expression.
"""
if isinstance(sql_or_expression, Expression):
if copy:
return sql_or_expression.copy()
return sql_or_expression
if sql_or_expression is None:
raise ParseError("SQL cannot be None")
import sqlglot
sql = str(sql_or_expression)
if prefix:
sql = f"{prefix} {sql}"
return sqlglot.parse_one(sql, read=dialect, into=into, **opts)
def maybe_copy(instance: None, copy: bool = True) -> None: ...
def maybe_copy(instance: E, copy: bool = True) -> E: ...
def maybe_copy(instance, copy=True):
return instance.copy() if copy and instance else instance
def _is_wrong_expression(expression, into):
return isinstance(expression, Expression) and not isinstance(expression, into)
def _apply_builder(
expression,
instance,
arg,
copy=True,
prefix=None,
into=None,
dialect=None,
into_arg="this",
**opts,
):
if _is_wrong_expression(expression, into):
expression = into(**{into_arg: expression})
instance = maybe_copy(instance, copy)
expression = maybe_parse(
sql_or_expression=expression,
prefix=prefix,
into=into,
dialect=dialect,
**opts,
)
instance.set(arg, expression)
return instance | null |
152,935 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Type[E],
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E: ...
def maybe_parse(
sql_or_expression: str | E,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E: ...
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> Expression:
"""Gracefully handle a possible string or expression.
Example:
>>> maybe_parse("1")
Literal(this=1, is_string=False)
>>> maybe_parse(to_identifier("x"))
Identifier(this=x, quoted=False)
Args:
sql_or_expression: the SQL code string or an expression
into: the SQLGlot Expression to parse into
dialect: the dialect used to parse the input expressions (in the case that an
input expression is a SQL string).
prefix: a string to prefix the sql with before it gets parsed
(automatically includes a space)
copy: whether to copy the expression.
**opts: other options to use to parse the input expressions (again, in the case
that an input expression is a SQL string).
Returns:
Expression: the parsed or given expression.
"""
if isinstance(sql_or_expression, Expression):
if copy:
return sql_or_expression.copy()
return sql_or_expression
if sql_or_expression is None:
raise ParseError("SQL cannot be None")
import sqlglot
sql = str(sql_or_expression)
if prefix:
sql = f"{prefix} {sql}"
return sqlglot.parse_one(sql, read=dialect, into=into, **opts)
def maybe_copy(instance: None, copy: bool = True) -> None: ...
def maybe_copy(instance: E, copy: bool = True) -> E: ...
def maybe_copy(instance, copy=True):
return instance.copy() if copy and instance else instance
def _apply_list_builder(
*expressions,
instance,
arg,
append=True,
copy=True,
prefix=None,
into=None,
dialect=None,
**opts,
):
inst = maybe_copy(instance, copy)
expressions = [
maybe_parse(
sql_or_expression=expression,
into=into,
prefix=prefix,
dialect=dialect,
**opts,
)
for expression in expressions
if expression is not None
]
existing_expressions = inst.args.get(arg)
if append and existing_expressions:
expressions = existing_expressions + expressions
inst.set(arg, expressions)
return inst | null |
152,936 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
def maybe_copy(instance: None, copy: bool = True) -> None: ...
def maybe_copy(instance: E, copy: bool = True) -> E: ...
def maybe_copy(instance, copy=True):
return instance.copy() if copy and instance else instance
def and_(
*expressions: t.Optional[ExpOrStr], dialect: DialectType = None, copy: bool = True, **opts
) -> Condition:
"""
Combine multiple conditions with an AND logical operator.
Example:
>>> and_("x=1", and_("y=1", "z=1")).sql()
'x = 1 AND (y = 1 AND z = 1)'
Args:
*expressions: the SQL code strings to parse.
If an Expression instance is passed, this is used as-is.
dialect: the dialect used to parse the input expression.
copy: whether to copy `expressions` (only applies to Expressions).
**opts: other options to use to parse the input expressions.
Returns:
And: the new condition
"""
return t.cast(Condition, _combine(expressions, And, dialect, copy=copy, **opts))
def _apply_conjunction_builder(
*expressions,
instance,
arg,
into=None,
append=True,
copy=True,
dialect=None,
**opts,
):
expressions = [exp for exp in expressions if exp is not None and exp != ""]
if not expressions:
return instance
inst = maybe_copy(instance, copy)
existing = inst.args.get(arg)
if append and existing is not None:
expressions = [existing.this if into else existing] + list(expressions)
node = and_(*expressions, dialect=dialect, copy=copy, **opts)
inst.set(arg, into(this=node) if into else node)
return inst | null |
152,937 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
if t.TYPE_CHECKING:
from sqlglot._typing import E, Lit
from sqlglot.dialects.dialect import DialectType
Q = t.TypeVar("Q", bound="Query")
ExpOrStr = t.Union[str, Expression]
class With(Expression):
def recursive(self) -> bool:
class CTE(DerivedTable):
class TableAlias(Expression):
def columns(self):
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Type[E],
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E:
def maybe_parse(
sql_or_expression: str | E,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E:
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> Expression:
def _apply_child_list_builder(
*expressions,
instance,
arg,
append=True,
copy=True,
prefix=None,
into=None,
dialect=None,
properties=None,
**opts,
):
E = t.TypeVar("E", bound="sqlglot.exp.Expression")
DialectType = t.Union[str, Dialect, t.Type[Dialect], None]
def _apply_cte_builder(
instance: E,
alias: ExpOrStr,
as_: ExpOrStr,
recursive: t.Optional[bool] = None,
append: bool = True,
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> E:
alias_expression = maybe_parse(alias, dialect=dialect, into=TableAlias, **opts)
as_expression = maybe_parse(as_, dialect=dialect, **opts)
cte = CTE(this=as_expression, alias=alias_expression)
return _apply_child_list_builder(
cte,
instance=instance,
arg="with",
append=append,
copy=copy,
into=With,
properties={"recursive": recursive or False},
) | null |
152,938 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
ExpOrStr = t.Union[str, Expression]
class Intersect(Union):
pass
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Type[E],
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E: ...
def maybe_parse(
sql_or_expression: str | E,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E: ...
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> Expression:
"""Gracefully handle a possible string or expression.
Example:
>>> maybe_parse("1")
Literal(this=1, is_string=False)
>>> maybe_parse(to_identifier("x"))
Identifier(this=x, quoted=False)
Args:
sql_or_expression: the SQL code string or an expression
into: the SQLGlot Expression to parse into
dialect: the dialect used to parse the input expressions (in the case that an
input expression is a SQL string).
prefix: a string to prefix the sql with before it gets parsed
(automatically includes a space)
copy: whether to copy the expression.
**opts: other options to use to parse the input expressions (again, in the case
that an input expression is a SQL string).
Returns:
Expression: the parsed or given expression.
"""
if isinstance(sql_or_expression, Expression):
if copy:
return sql_or_expression.copy()
return sql_or_expression
if sql_or_expression is None:
raise ParseError("SQL cannot be None")
import sqlglot
sql = str(sql_or_expression)
if prefix:
sql = f"{prefix} {sql}"
return sqlglot.parse_one(sql, read=dialect, into=into, **opts)
DialectType = t.Union[str, Dialect, t.Type[Dialect], None]
The provided code snippet includes necessary dependencies for implementing the `intersect` function. Write a Python function `def intersect( left: ExpOrStr, right: ExpOrStr, distinct: bool = True, dialect: DialectType = None, copy: bool = True, **opts, ) -> Intersect` to solve the following problem:
Initializes a syntax tree from one INTERSECT expression. Example: >>> intersect("SELECT * FROM foo", "SELECT * FROM bla").sql() 'SELECT * FROM foo INTERSECT SELECT * FROM bla' Args: left: the SQL code string corresponding to the left-hand side. If an `Expression` instance is passed, it will be used as-is. right: the SQL code string corresponding to the right-hand side. If an `Expression` instance is passed, it will be used as-is. distinct: set the DISTINCT flag if and only if this is true. dialect: the dialect used to parse the input expression. copy: whether to copy the expression. opts: other options to use to parse the input expressions. Returns: The new Intersect instance.
Here is the function:
def intersect(
left: ExpOrStr,
right: ExpOrStr,
distinct: bool = True,
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Intersect:
"""
Initializes a syntax tree from one INTERSECT expression.
Example:
>>> intersect("SELECT * FROM foo", "SELECT * FROM bla").sql()
'SELECT * FROM foo INTERSECT SELECT * FROM bla'
Args:
left: the SQL code string corresponding to the left-hand side.
If an `Expression` instance is passed, it will be used as-is.
right: the SQL code string corresponding to the right-hand side.
If an `Expression` instance is passed, it will be used as-is.
distinct: set the DISTINCT flag if and only if this is true.
dialect: the dialect used to parse the input expression.
copy: whether to copy the expression.
opts: other options to use to parse the input expressions.
Returns:
The new Intersect instance.
"""
left = maybe_parse(sql_or_expression=left, dialect=dialect, copy=copy, **opts)
right = maybe_parse(sql_or_expression=right, dialect=dialect, copy=copy, **opts)
return Intersect(this=left, expression=right, distinct=distinct) | Initializes a syntax tree from one INTERSECT expression. Example: >>> intersect("SELECT * FROM foo", "SELECT * FROM bla").sql() 'SELECT * FROM foo INTERSECT SELECT * FROM bla' Args: left: the SQL code string corresponding to the left-hand side. If an `Expression` instance is passed, it will be used as-is. right: the SQL code string corresponding to the right-hand side. If an `Expression` instance is passed, it will be used as-is. distinct: set the DISTINCT flag if and only if this is true. dialect: the dialect used to parse the input expression. copy: whether to copy the expression. opts: other options to use to parse the input expressions. Returns: The new Intersect instance. |
152,939 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
ExpOrStr = t.Union[str, Expression]
class Except(Union):
pass
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Type[E],
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E: ...
def maybe_parse(
sql_or_expression: str | E,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E: ...
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> Expression:
"""Gracefully handle a possible string or expression.
Example:
>>> maybe_parse("1")
Literal(this=1, is_string=False)
>>> maybe_parse(to_identifier("x"))
Identifier(this=x, quoted=False)
Args:
sql_or_expression: the SQL code string or an expression
into: the SQLGlot Expression to parse into
dialect: the dialect used to parse the input expressions (in the case that an
input expression is a SQL string).
prefix: a string to prefix the sql with before it gets parsed
(automatically includes a space)
copy: whether to copy the expression.
**opts: other options to use to parse the input expressions (again, in the case
that an input expression is a SQL string).
Returns:
Expression: the parsed or given expression.
"""
if isinstance(sql_or_expression, Expression):
if copy:
return sql_or_expression.copy()
return sql_or_expression
if sql_or_expression is None:
raise ParseError("SQL cannot be None")
import sqlglot
sql = str(sql_or_expression)
if prefix:
sql = f"{prefix} {sql}"
return sqlglot.parse_one(sql, read=dialect, into=into, **opts)
DialectType = t.Union[str, Dialect, t.Type[Dialect], None]
The provided code snippet includes necessary dependencies for implementing the `except_` function. Write a Python function `def except_( left: ExpOrStr, right: ExpOrStr, distinct: bool = True, dialect: DialectType = None, copy: bool = True, **opts, ) -> Except` to solve the following problem:
Initializes a syntax tree from one EXCEPT expression. Example: >>> except_("SELECT * FROM foo", "SELECT * FROM bla").sql() 'SELECT * FROM foo EXCEPT SELECT * FROM bla' Args: left: the SQL code string corresponding to the left-hand side. If an `Expression` instance is passed, it will be used as-is. right: the SQL code string corresponding to the right-hand side. If an `Expression` instance is passed, it will be used as-is. distinct: set the DISTINCT flag if and only if this is true. dialect: the dialect used to parse the input expression. copy: whether to copy the expression. opts: other options to use to parse the input expressions. Returns: The new Except instance.
Here is the function:
def except_(
left: ExpOrStr,
right: ExpOrStr,
distinct: bool = True,
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Except:
"""
Initializes a syntax tree from one EXCEPT expression.
Example:
>>> except_("SELECT * FROM foo", "SELECT * FROM bla").sql()
'SELECT * FROM foo EXCEPT SELECT * FROM bla'
Args:
left: the SQL code string corresponding to the left-hand side.
If an `Expression` instance is passed, it will be used as-is.
right: the SQL code string corresponding to the right-hand side.
If an `Expression` instance is passed, it will be used as-is.
distinct: set the DISTINCT flag if and only if this is true.
dialect: the dialect used to parse the input expression.
copy: whether to copy the expression.
opts: other options to use to parse the input expressions.
Returns:
The new Except instance.
"""
left = maybe_parse(sql_or_expression=left, dialect=dialect, copy=copy, **opts)
right = maybe_parse(sql_or_expression=right, dialect=dialect, copy=copy, **opts)
return Except(this=left, expression=right, distinct=distinct) | Initializes a syntax tree from one EXCEPT expression. Example: >>> except_("SELECT * FROM foo", "SELECT * FROM bla").sql() 'SELECT * FROM foo EXCEPT SELECT * FROM bla' Args: left: the SQL code string corresponding to the left-hand side. If an `Expression` instance is passed, it will be used as-is. right: the SQL code string corresponding to the right-hand side. If an `Expression` instance is passed, it will be used as-is. distinct: set the DISTINCT flag if and only if this is true. dialect: the dialect used to parse the input expression. copy: whether to copy the expression. opts: other options to use to parse the input expressions. Returns: The new Except instance. |
152,940 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
if t.TYPE_CHECKING:
from sqlglot._typing import E, Lit
from sqlglot.dialects.dialect import DialectType
Q = t.TypeVar("Q", bound="Query")
ExpOrStr = t.Union[str, Expression]
class Delete(DML):
arg_types = {
"with": False,
"this": False,
"using": False,
"where": False,
"returning": False,
"limit": False,
"tables": False, # Multiple-Table Syntax (MySQL)
}
def delete(
self,
table: ExpOrStr,
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Delete:
"""
Create a DELETE expression or replace the table on an existing DELETE expression.
Example:
>>> delete("tbl").sql()
'DELETE FROM tbl'
Args:
table: the table from which to delete.
dialect: the dialect used to parse the input expression.
copy: if `False`, modify this expression instance in-place.
opts: other options to use to parse the input expressions.
Returns:
Delete: the modified expression.
"""
return _apply_builder(
expression=table,
instance=self,
arg="this",
dialect=dialect,
into=Table,
copy=copy,
**opts,
)
def where(
self,
*expressions: t.Optional[ExpOrStr],
append: bool = True,
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Delete:
"""
Append to or set the WHERE expressions.
Example:
>>> delete("tbl").where("x = 'a' OR x < 'b'").sql()
"DELETE FROM tbl WHERE x = 'a' OR x < 'b'"
Args:
*expressions: the SQL code strings to parse.
If an `Expression` instance is passed, it will be used as-is.
Multiple expressions are combined with an AND operator.
append: if `True`, AND the new expressions to any existing expression.
Otherwise, this resets the expression.
dialect: the dialect used to parse the input expressions.
copy: if `False`, modify this expression instance in-place.
opts: other options to use to parse the input expressions.
Returns:
Delete: the modified expression.
"""
return _apply_conjunction_builder(
*expressions,
instance=self,
arg="where",
append=append,
into=Where,
dialect=dialect,
copy=copy,
**opts,
)
def cast(expression: ExpOrStr, to: DATA_TYPE, copy: bool = True, **opts) -> Cast:
"""Cast an expression to a data type.
Example:
>>> cast('x + 1', 'int').sql()
'CAST(x + 1 AS INT)'
Args:
expression: The expression to cast.
to: The datatype to cast to.
copy: Whether to copy the supplied expressions.
Returns:
The new Cast instance.
"""
expression = maybe_parse(expression, copy=copy, **opts)
data_type = DataType.build(to, copy=copy, **opts)
expression = Cast(this=expression, to=data_type)
expression.type = data_type
return expression
DialectType = t.Union[str, Dialect, t.Type[Dialect], None]
The provided code snippet includes necessary dependencies for implementing the `delete` function. Write a Python function `def delete( table: ExpOrStr, where: t.Optional[ExpOrStr] = None, returning: t.Optional[ExpOrStr] = None, dialect: DialectType = None, **opts, ) -> Delete` to solve the following problem:
Builds a delete statement. Example: >>> delete("my_table", where="id > 1").sql() 'DELETE FROM my_table WHERE id > 1' Args: where: sql conditional parsed into a WHERE statement returning: sql conditional parsed into a RETURNING statement dialect: the dialect used to parse the input expressions. **opts: other options to use to parse the input expressions. Returns: Delete: the syntax tree for the DELETE statement.
Here is the function:
def delete(
table: ExpOrStr,
where: t.Optional[ExpOrStr] = None,
returning: t.Optional[ExpOrStr] = None,
dialect: DialectType = None,
**opts,
) -> Delete:
"""
Builds a delete statement.
Example:
>>> delete("my_table", where="id > 1").sql()
'DELETE FROM my_table WHERE id > 1'
Args:
where: sql conditional parsed into a WHERE statement
returning: sql conditional parsed into a RETURNING statement
dialect: the dialect used to parse the input expressions.
**opts: other options to use to parse the input expressions.
Returns:
Delete: the syntax tree for the DELETE statement.
"""
delete_expr = Delete().delete(table, dialect=dialect, copy=False, **opts)
if where:
delete_expr = delete_expr.where(where, dialect=dialect, copy=False, **opts)
if returning:
delete_expr = t.cast(
Delete, delete_expr.returning(returning, dialect=dialect, copy=False, **opts)
)
return delete_expr | Builds a delete statement. Example: >>> delete("my_table", where="id > 1").sql() 'DELETE FROM my_table WHERE id > 1' Args: where: sql conditional parsed into a WHERE statement returning: sql conditional parsed into a RETURNING statement dialect: the dialect used to parse the input expressions. **opts: other options to use to parse the input expressions. Returns: Delete: the syntax tree for the DELETE statement. |
152,941 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
class Literal(Condition):
arg_types = {"this": True, "is_string": True}
def hashable_args(self) -> t.Any:
return (self.this, self.args.get("is_string"))
def number(cls, number) -> Literal:
return cls(this=str(number), is_string=False)
def string(cls, string) -> Literal:
return cls(this=str(string), is_string=True)
def output_name(self) -> str:
return self.name
class Var(Expression):
pass
class Interval(TimeUnit):
arg_types = {"this": False, "unit": False}
INTERVAL_STRING_RE = re.compile(r"\s*([0-9]+)\s*([a-zA-Z]+)\s*")
The provided code snippet includes necessary dependencies for implementing the `to_interval` function. Write a Python function `def to_interval(interval: str | Literal) -> Interval` to solve the following problem:
Builds an interval expression from a string like '1 day' or '5 months'.
Here is the function:
def to_interval(interval: str | Literal) -> Interval:
"""Builds an interval expression from a string like '1 day' or '5 months'."""
if isinstance(interval, Literal):
if not interval.is_string:
raise ValueError("Invalid interval string.")
interval = interval.this
interval_parts = INTERVAL_STRING_RE.match(interval) # type: ignore
if not interval_parts:
raise ValueError("Invalid interval string.")
return Interval(
this=Literal.string(interval_parts.group(1)),
unit=Var(this=interval_parts.group(2).upper()),
) | Builds an interval expression from a string like '1 day' or '5 months'. |
152,942 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
if t.TYPE_CHECKING:
from sqlglot._typing import E, Lit
from sqlglot.dialects.dialect import DialectType
Q = t.TypeVar("Q", bound="Query")
class Expression(metaclass=_Expression):
"""
The base class for all expressions in a syntax tree. Each Expression encapsulates any necessary
context, such as its child expressions, their names (arg keys), and whether a given child expression
is optional or not.
Attributes:
key: a unique key for each class in the Expression hierarchy. This is useful for hashing
and representing expressions as strings.
arg_types: determines the arguments (child nodes) supported by an expression. It maps
arg keys to booleans that indicate whether the corresponding args are optional.
parent: a reference to the parent expression (or None, in case of root expressions).
arg_key: the arg key an expression is associated with, i.e. the name its parent expression
uses to refer to it.
index: the index of an expression if it is inside of a list argument in its parent
comments: a list of comments that are associated with a given expression. This is used in
order to preserve comments when transpiling SQL code.
type: the `sqlglot.expressions.DataType` type of an expression. This is inferred by the
optimizer, in order to enable some transformations that require type information.
meta: a dictionary that can be used to store useful metadata for a given expression.
Example:
>>> class Foo(Expression):
... arg_types = {"this": True, "expression": False}
The above definition informs us that Foo is an Expression that requires an argument called
"this" and may also optionally receive an argument called "expression".
Args:
args: a mapping used for retrieving the arguments of an expression, given their arg keys.
"""
key = "expression"
arg_types = {"this": True}
__slots__ = ("args", "parent", "arg_key", "index", "comments", "_type", "_meta", "_hash")
def __init__(self, **args: t.Any):
self.args: t.Dict[str, t.Any] = args
self.parent: t.Optional[Expression] = None
self.arg_key: t.Optional[str] = None
self.index: t.Optional[int] = None
self.comments: t.Optional[t.List[str]] = None
self._type: t.Optional[DataType] = None
self._meta: t.Optional[t.Dict[str, t.Any]] = None
self._hash: t.Optional[int] = None
for arg_key, value in self.args.items():
self._set_parent(arg_key, value)
def __eq__(self, other) -> bool:
return type(self) is type(other) and hash(self) == hash(other)
def hashable_args(self) -> t.Any:
return frozenset(
(k, tuple(_norm_arg(a) for a in v) if type(v) is list else _norm_arg(v))
for k, v in self.args.items()
if not (v is None or v is False or (type(v) is list and not v))
)
def __hash__(self) -> int:
if self._hash is not None:
return self._hash
return hash((self.__class__, self.hashable_args))
def this(self) -> t.Any:
"""
Retrieves the argument with key "this".
"""
return self.args.get("this")
def expression(self) -> t.Any:
"""
Retrieves the argument with key "expression".
"""
return self.args.get("expression")
def expressions(self) -> t.List[t.Any]:
"""
Retrieves the argument with key "expressions".
"""
return self.args.get("expressions") or []
def text(self, key) -> str:
"""
Returns a textual representation of the argument corresponding to "key". This can only be used
for args that are strings or leaf Expression instances, such as identifiers and literals.
"""
field = self.args.get(key)
if isinstance(field, str):
return field
if isinstance(field, (Identifier, Literal, Var)):
return field.this
if isinstance(field, (Star, Null)):
return field.name
return ""
def is_string(self) -> bool:
"""
Checks whether a Literal expression is a string.
"""
return isinstance(self, Literal) and self.args["is_string"]
def is_number(self) -> bool:
"""
Checks whether a Literal expression is a number.
"""
return isinstance(self, Literal) and not self.args["is_string"]
def is_int(self) -> bool:
"""
Checks whether a Literal expression is an integer.
"""
return self.is_number and is_int(self.name)
def is_star(self) -> bool:
"""Checks whether an expression is a star."""
return isinstance(self, Star) or (isinstance(self, Column) and isinstance(self.this, Star))
def alias(self) -> str:
"""
Returns the alias of the expression, or an empty string if it's not aliased.
"""
if isinstance(self.args.get("alias"), TableAlias):
return self.args["alias"].name
return self.text("alias")
def alias_column_names(self) -> t.List[str]:
table_alias = self.args.get("alias")
if not table_alias:
return []
return [c.name for c in table_alias.args.get("columns") or []]
def name(self) -> str:
return self.text("this")
def alias_or_name(self) -> str:
return self.alias or self.name
def output_name(self) -> str:
"""
Name of the output column if this expression is a selection.
If the Expression has no output name, an empty string is returned.
Example:
>>> from sqlglot import parse_one
>>> parse_one("SELECT a").expressions[0].output_name
'a'
>>> parse_one("SELECT b AS c").expressions[0].output_name
'c'
>>> parse_one("SELECT 1 + 2").expressions[0].output_name
''
"""
return ""
def type(self) -> t.Optional[DataType]:
return self._type
def type(self, dtype: t.Optional[DataType | DataType.Type | str]) -> None:
if dtype and not isinstance(dtype, DataType):
dtype = DataType.build(dtype)
self._type = dtype # type: ignore
def is_type(self, *dtypes) -> bool:
return self.type is not None and self.type.is_type(*dtypes)
def is_leaf(self) -> bool:
return not any(isinstance(v, (Expression, list)) for v in self.args.values())
def meta(self) -> t.Dict[str, t.Any]:
if self._meta is None:
self._meta = {}
return self._meta
def __deepcopy__(self, memo):
root = self.__class__()
stack = [(self, root)]
while stack:
node, copy = stack.pop()
if node.comments is not None:
copy.comments = deepcopy(node.comments)
if node._type is not None:
copy._type = deepcopy(node._type)
if node._meta is not None:
copy._meta = deepcopy(node._meta)
if node._hash is not None:
copy._hash = node._hash
for k, vs in node.args.items():
if hasattr(vs, "parent"):
stack.append((vs, vs.__class__()))
copy.set(k, stack[-1][-1])
elif type(vs) is list:
copy.args[k] = []
for v in vs:
if hasattr(v, "parent"):
stack.append((v, v.__class__()))
copy.append(k, stack[-1][-1])
else:
copy.append(k, v)
else:
copy.args[k] = vs
return root
def copy(self):
"""
Returns a deep copy of the expression.
"""
return deepcopy(self)
def add_comments(self, comments: t.Optional[t.List[str]]) -> None:
if self.comments is None:
self.comments = []
if comments:
for comment in comments:
_, *meta = comment.split(SQLGLOT_META)
if meta:
for kv in "".join(meta).split(","):
k, *v = kv.split("=")
value = v[0].strip() if v else True
self.meta[k.strip()] = value
self.comments.append(comment)
def append(self, arg_key: str, value: t.Any) -> None:
"""
Appends value to arg_key if it's a list or sets it as a new list.
Args:
arg_key (str): name of the list expression arg
value (Any): value to append to the list
"""
if type(self.args.get(arg_key)) is not list:
self.args[arg_key] = []
self._set_parent(arg_key, value)
values = self.args[arg_key]
if hasattr(value, "parent"):
value.index = len(values)
values.append(value)
def set(self, arg_key: str, value: t.Any) -> None:
"""
Sets arg_key to value.
Args:
arg_key: name of the expression arg.
value: value to set the arg to.
"""
if value is None:
self.args.pop(arg_key, None)
else:
self.args[arg_key] = value
self._set_parent(arg_key, value)
def _set_parent(self, arg_key: str, value: t.Any, index: t.Optional[int] = None) -> None:
if hasattr(value, "parent"):
value.parent = self
value.arg_key = arg_key
value.index = index
elif type(value) is list:
for index, v in enumerate(value):
if hasattr(v, "parent"):
v.parent = self
v.arg_key = arg_key
v.index = index
def depth(self) -> int:
"""
Returns the depth of this tree.
"""
if self.parent:
return self.parent.depth + 1
return 0
def iter_expressions(self, reverse: bool = False) -> t.Iterator[Expression]:
"""Yields the key and expression for all arguments, exploding list args."""
# remove tuple when python 3.7 is deprecated
for vs in reversed(tuple(self.args.values())) if reverse else self.args.values():
if type(vs) is list:
for v in reversed(vs) if reverse else vs:
if hasattr(v, "parent"):
yield v
else:
if hasattr(vs, "parent"):
yield vs
def find(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Optional[E]:
"""
Returns the first node in this tree which matches at least one of
the specified types.
Args:
expression_types: the expression type(s) to match.
bfs: whether to search the AST using the BFS algorithm (DFS is used if false).
Returns:
The node which matches the criteria or None if no such node was found.
"""
return next(self.find_all(*expression_types, bfs=bfs), None)
def find_all(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Iterator[E]:
"""
Returns a generator object which visits all nodes in this tree and only
yields those that match at least one of the specified expression types.
Args:
expression_types: the expression type(s) to match.
bfs: whether to search the AST using the BFS algorithm (DFS is used if false).
Returns:
The generator object.
"""
for expression in self.walk(bfs=bfs):
if isinstance(expression, expression_types):
yield expression
def find_ancestor(self, *expression_types: t.Type[E]) -> t.Optional[E]:
"""
Returns a nearest parent matching expression_types.
Args:
expression_types: the expression type(s) to match.
Returns:
The parent node.
"""
ancestor = self.parent
while ancestor and not isinstance(ancestor, expression_types):
ancestor = ancestor.parent
return ancestor # type: ignore
def parent_select(self) -> t.Optional[Select]:
"""
Returns the parent select statement.
"""
return self.find_ancestor(Select)
def same_parent(self) -> bool:
"""Returns if the parent is the same class as itself."""
return type(self.parent) is self.__class__
def root(self) -> Expression:
"""
Returns the root expression of this tree.
"""
expression = self
while expression.parent:
expression = expression.parent
return expression
def walk(
self, bfs: bool = True, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
"""
Returns a generator object which visits all nodes in this tree.
Args:
bfs (bool): if set to True the BFS traversal order will be applied,
otherwise the DFS traversal will be used instead.
prune ((node, parent, arg_key) -> bool): callable that returns True if
the generator should stop traversing this branch of the tree.
Returns:
the generator object.
"""
if bfs:
yield from self.bfs(prune=prune)
else:
yield from self.dfs(prune=prune)
def dfs(
self, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
"""
Returns a generator object which visits all nodes in this tree in
the DFS (Depth-first) order.
Returns:
The generator object.
"""
stack = [self]
while stack:
node = stack.pop()
yield node
if prune and prune(node):
continue
for v in node.iter_expressions(reverse=True):
stack.append(v)
def bfs(
self, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
"""
Returns a generator object which visits all nodes in this tree in
the BFS (Breadth-first) order.
Returns:
The generator object.
"""
queue = deque([self])
while queue:
node = queue.popleft()
yield node
if prune and prune(node):
continue
for v in node.iter_expressions():
queue.append(v)
def unnest(self):
"""
Returns the first non parenthesis child or self.
"""
expression = self
while type(expression) is Paren:
expression = expression.this
return expression
def unalias(self):
"""
Returns the inner expression if this is an Alias.
"""
if isinstance(self, Alias):
return self.this
return self
def unnest_operands(self):
"""
Returns unnested operands as a tuple.
"""
return tuple(arg.unnest() for arg in self.iter_expressions())
def flatten(self, unnest=True):
"""
Returns a generator which yields child nodes whose parents are the same class.
A AND B AND C -> [A, B, C]
"""
for node in self.dfs(prune=lambda n: n.parent and type(n) is not self.__class__):
if type(node) is not self.__class__:
yield node.unnest() if unnest and not isinstance(node, Subquery) else node
def __str__(self) -> str:
return self.sql()
def __repr__(self) -> str:
return _to_s(self)
def to_s(self) -> str:
"""
Same as __repr__, but includes additional information which can be useful
for debugging, like empty or missing args and the AST nodes' object IDs.
"""
return _to_s(self, verbose=True)
def sql(self, dialect: DialectType = None, **opts) -> str:
"""
Returns SQL string representation of this tree.
Args:
dialect: the dialect of the output SQL string (eg. "spark", "hive", "presto", "mysql").
opts: other `sqlglot.generator.Generator` options.
Returns:
The SQL string.
"""
from sqlglot.dialects import Dialect
return Dialect.get_or_raise(dialect).generate(self, **opts)
def transform(self, fun: t.Callable, *args: t.Any, copy: bool = True, **kwargs) -> Expression:
"""
Visits all tree nodes (excluding already transformed ones)
and applies the given transformation function to each node.
Args:
fun (function): a function which takes a node as an argument and returns a
new transformed node or the same node without modifications. If the function
returns None, then the corresponding node will be removed from the syntax tree.
copy (bool): if set to True a new tree instance is constructed, otherwise the tree is
modified in place.
Returns:
The transformed tree.
"""
root = None
new_node = None
for node in (self.copy() if copy else self).dfs(prune=lambda n: n is not new_node):
new_node = fun(node, *args, **kwargs)
if root:
if new_node is not node:
node.replace(new_node)
else:
root = new_node
assert root
return root.assert_is(Expression)
def replace(self, expression: E) -> E: ...
def replace(self, expression: None) -> None: ...
def replace(self, expression):
"""
Swap out this expression with a new expression.
For example::
>>> tree = Select().select("x").from_("tbl")
>>> tree.find(Column).replace(column("y"))
Column(
this=Identifier(this=y, quoted=False))
>>> tree.sql()
'SELECT y FROM tbl'
Args:
expression: new node
Returns:
The new expression or expressions.
"""
parent = self.parent
if not parent:
return expression
key = self.arg_key
value = parent.args.get(key)
if isinstance(value, list):
index = self.index
if isinstance(expression, list):
value.pop(index)
value[index:index] = expression
parent._set_parent(key, value)
else:
if expression is None:
value.pop(index)
for v in value[index:]:
v.index = v.index - 1
else:
value[index] = expression
parent._set_parent(key, expression, index=index)
elif value is not None:
if expression is None:
parent.args.pop(key)
else:
parent.set(key, expression)
if expression is not self:
self.parent = None
self.arg_key = None
self.index = None
return expression
def pop(self: E) -> E:
"""
Remove this expression from its AST.
Returns:
The popped expression.
"""
self.replace(None)
return self
def assert_is(self, type_: t.Type[E]) -> E:
"""
Assert that this `Expression` is an instance of `type_`.
If it is NOT an instance of `type_`, this raises an assertion error.
Otherwise, this returns this expression.
Examples:
This is useful for type security in chained expressions:
>>> import sqlglot
>>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()
'SELECT x, z FROM y'
"""
if not isinstance(self, type_):
raise AssertionError(f"{self} is not {type_}.")
return self
def error_messages(self, args: t.Optional[t.Sequence] = None) -> t.List[str]:
"""
Checks if this expression is valid (e.g. all mandatory args are set).
Args:
args: a sequence of values that were used to instantiate a Func expression. This is used
to check that the provided arguments don't exceed the function argument limit.
Returns:
A list of error messages for all possible errors that were found.
"""
errors: t.List[str] = []
for k in self.args:
if k not in self.arg_types:
errors.append(f"Unexpected keyword: '{k}' for {self.__class__}")
for k, mandatory in self.arg_types.items():
v = self.args.get(k)
if mandatory and (v is None or (isinstance(v, list) and not v)):
errors.append(f"Required keyword: '{k}' missing for {self.__class__}")
if (
args
and isinstance(self, Func)
and len(args) > len(self.arg_types)
and not self.is_var_len_args
):
errors.append(
f"The number of provided arguments ({len(args)}) is greater than "
f"the maximum number of supported arguments ({len(self.arg_types)})"
)
return errors
def dump(self):
"""
Dump this Expression to a JSON-serializable dict.
"""
from sqlglot.serde import dump
return dump(self)
def load(cls, obj):
"""
Load a dict (as returned by `Expression.dump`) into an Expression instance.
"""
from sqlglot.serde import load
return load(obj)
def and_(
self,
*expressions: t.Optional[ExpOrStr],
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Condition:
"""
AND this condition with one or multiple expressions.
Example:
>>> condition("x=1").and_("y=1").sql()
'x = 1 AND y = 1'
Args:
*expressions: the SQL code strings to parse.
If an `Expression` instance is passed, it will be used as-is.
dialect: the dialect used to parse the input expression.
copy: whether to copy the involved expressions (only applies to Expressions).
opts: other options to use to parse the input expressions.
Returns:
The new And condition.
"""
return and_(self, *expressions, dialect=dialect, copy=copy, **opts)
def or_(
self,
*expressions: t.Optional[ExpOrStr],
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Condition:
"""
OR this condition with one or multiple expressions.
Example:
>>> condition("x=1").or_("y=1").sql()
'x = 1 OR y = 1'
Args:
*expressions: the SQL code strings to parse.
If an `Expression` instance is passed, it will be used as-is.
dialect: the dialect used to parse the input expression.
copy: whether to copy the involved expressions (only applies to Expressions).
opts: other options to use to parse the input expressions.
Returns:
The new Or condition.
"""
return or_(self, *expressions, dialect=dialect, copy=copy, **opts)
def not_(self, copy: bool = True):
"""
Wrap this condition with NOT.
Example:
>>> condition("x=1").not_().sql()
'NOT x = 1'
Args:
copy: whether to copy this object.
Returns:
The new Not instance.
"""
return not_(self, copy=copy)
def as_(
self,
alias: str | Identifier,
quoted: t.Optional[bool] = None,
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Alias:
return alias_(self, alias, quoted=quoted, dialect=dialect, copy=copy, **opts)
def _binop(self, klass: t.Type[E], other: t.Any, reverse: bool = False) -> E:
this = self.copy()
other = convert(other, copy=True)
if not isinstance(this, klass) and not isinstance(other, klass):
this = _wrap(this, Binary)
other = _wrap(other, Binary)
if reverse:
return klass(this=other, expression=this)
return klass(this=this, expression=other)
def __getitem__(self, other: ExpOrStr | t.Tuple[ExpOrStr]) -> Bracket:
return Bracket(
this=self.copy(), expressions=[convert(e, copy=True) for e in ensure_list(other)]
)
def __iter__(self) -> t.Iterator:
if "expressions" in self.arg_types:
return iter(self.args.get("expressions") or [])
# We define this because __getitem__ converts Expression into an iterable, which is
# problematic because one can hit infinite loops if they do "for x in some_expr: ..."
# See: https://peps.python.org/pep-0234/
raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
def isin(
self,
*expressions: t.Any,
query: t.Optional[ExpOrStr] = None,
unnest: t.Optional[ExpOrStr] | t.Collection[ExpOrStr] = None,
copy: bool = True,
**opts,
) -> In:
return In(
this=maybe_copy(self, copy),
expressions=[convert(e, copy=copy) for e in expressions],
query=maybe_parse(query, copy=copy, **opts) if query else None,
unnest=(
Unnest(
expressions=[
maybe_parse(t.cast(ExpOrStr, e), copy=copy, **opts)
for e in ensure_list(unnest)
]
)
if unnest
else None
),
)
def between(self, low: t.Any, high: t.Any, copy: bool = True, **opts) -> Between:
return Between(
this=maybe_copy(self, copy),
low=convert(low, copy=copy, **opts),
high=convert(high, copy=copy, **opts),
)
def is_(self, other: ExpOrStr) -> Is:
return self._binop(Is, other)
def like(self, other: ExpOrStr) -> Like:
return self._binop(Like, other)
def ilike(self, other: ExpOrStr) -> ILike:
return self._binop(ILike, other)
def eq(self, other: t.Any) -> EQ:
return self._binop(EQ, other)
def neq(self, other: t.Any) -> NEQ:
return self._binop(NEQ, other)
def rlike(self, other: ExpOrStr) -> RegexpLike:
return self._binop(RegexpLike, other)
def div(self, other: ExpOrStr, typed: bool = False, safe: bool = False) -> Div:
div = self._binop(Div, other)
div.args["typed"] = typed
div.args["safe"] = safe
return div
def desc(self, nulls_first: bool = False) -> Ordered:
return Ordered(this=self.copy(), desc=True, nulls_first=nulls_first)
def __lt__(self, other: t.Any) -> LT:
return self._binop(LT, other)
def __le__(self, other: t.Any) -> LTE:
return self._binop(LTE, other)
def __gt__(self, other: t.Any) -> GT:
return self._binop(GT, other)
def __ge__(self, other: t.Any) -> GTE:
return self._binop(GTE, other)
def __add__(self, other: t.Any) -> Add:
return self._binop(Add, other)
def __radd__(self, other: t.Any) -> Add:
return self._binop(Add, other, reverse=True)
def __sub__(self, other: t.Any) -> Sub:
return self._binop(Sub, other)
def __rsub__(self, other: t.Any) -> Sub:
return self._binop(Sub, other, reverse=True)
def __mul__(self, other: t.Any) -> Mul:
return self._binop(Mul, other)
def __rmul__(self, other: t.Any) -> Mul:
return self._binop(Mul, other, reverse=True)
def __truediv__(self, other: t.Any) -> Div:
return self._binop(Div, other)
def __rtruediv__(self, other: t.Any) -> Div:
return self._binop(Div, other, reverse=True)
def __floordiv__(self, other: t.Any) -> IntDiv:
return self._binop(IntDiv, other)
def __rfloordiv__(self, other: t.Any) -> IntDiv:
return self._binop(IntDiv, other, reverse=True)
def __mod__(self, other: t.Any) -> Mod:
return self._binop(Mod, other)
def __rmod__(self, other: t.Any) -> Mod:
return self._binop(Mod, other, reverse=True)
def __pow__(self, other: t.Any) -> Pow:
return self._binop(Pow, other)
def __rpow__(self, other: t.Any) -> Pow:
return self._binop(Pow, other, reverse=True)
def __and__(self, other: t.Any) -> And:
return self._binop(And, other)
def __rand__(self, other: t.Any) -> And:
return self._binop(And, other, reverse=True)
def __or__(self, other: t.Any) -> Or:
return self._binop(Or, other)
def __ror__(self, other: t.Any) -> Or:
return self._binop(Or, other, reverse=True)
def __neg__(self) -> Neg:
return Neg(this=_wrap(self.copy(), Binary))
def __invert__(self) -> Not:
return not_(self.copy())
ExpOrStr = t.Union[str, Expression]
class Var(Expression):
pass
The provided code snippet includes necessary dependencies for implementing the `var` function. Write a Python function `def var(name: t.Optional[ExpOrStr]) -> Var` to solve the following problem:
Build a SQL variable. Example: >>> repr(var('x')) 'Var(this=x)' >>> repr(var(column('x', table='y'))) 'Var(this=x)' Args: name: The name of the var or an expression who's name will become the var. Returns: The new variable node.
Here is the function:
def var(name: t.Optional[ExpOrStr]) -> Var:
"""Build a SQL variable.
Example:
>>> repr(var('x'))
'Var(this=x)'
>>> repr(var(column('x', table='y')))
'Var(this=x)'
Args:
name: The name of the var or an expression who's name will become the var.
Returns:
The new variable node.
"""
if not name:
raise ValueError("Cannot convert empty name into var.")
if isinstance(name, Expression):
name = name.name
return Var(this=name) | Build a SQL variable. Example: >>> repr(var('x')) 'Var(this=x)' >>> repr(var(column('x', table='y'))) 'Var(this=x)' Args: name: The name of the var or an expression who's name will become the var. Returns: The new variable node. |
152,943 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
class RenameTable(Expression):
pass
class Table(Expression):
arg_types = {
"this": False,
"alias": False,
"db": False,
"catalog": False,
"laterals": False,
"joins": False,
"pivots": False,
"hints": False,
"system_time": False,
"version": False,
"format": False,
"pattern": False,
"ordinality": False,
"when": False,
"only": False,
}
def name(self) -> str:
if isinstance(self.this, Func):
return ""
return self.this.name
def db(self) -> str:
return self.text("db")
def catalog(self) -> str:
return self.text("catalog")
def selects(self) -> t.List[Expression]:
return []
def named_selects(self) -> t.List[str]:
return []
def parts(self) -> t.List[Expression]:
"""Return the parts of a table in order catalog, db, table."""
parts: t.List[Expression] = []
for arg in ("catalog", "db", "this"):
part = self.args.get(arg)
if isinstance(part, Dot):
parts.extend(part.flatten())
elif isinstance(part, Expression):
parts.append(part)
return parts
def to_column(self, copy: bool = True) -> Alias | Column | Dot:
parts = self.parts
col = column(*reversed(parts[0:4]), fields=parts[4:], copy=copy) # type: ignore
alias = self.args.get("alias")
if alias:
col = alias_(col, alias.this, copy=copy)
return col
class AlterTable(Expression):
arg_types = {
"this": True,
"actions": True,
"exists": False,
"only": False,
"options": False,
}
def to_table(sql_path: str | Table, **kwargs) -> Table: ...
def to_table(sql_path: None, **kwargs) -> None: ...
def to_table(
sql_path: t.Optional[str | Table], dialect: DialectType = None, copy: bool = True, **kwargs
) -> t.Optional[Table]:
"""
Create a table expression from a `[catalog].[schema].[table]` sql path. Catalog and schema are optional.
If a table is passed in then that table is returned.
Args:
sql_path: a `[catalog].[schema].[table]` string.
dialect: the source dialect according to which the table name will be parsed.
copy: Whether to copy a table if it is passed in.
kwargs: the kwargs to instantiate the resulting `Table` expression with.
Returns:
A table expression.
"""
if sql_path is None or isinstance(sql_path, Table):
return maybe_copy(sql_path, copy=copy)
if not isinstance(sql_path, str):
raise ValueError(f"Invalid type provided for a table: {type(sql_path)}")
table = maybe_parse(sql_path, into=Table, dialect=dialect)
if table:
for k, v in kwargs.items():
table.set(k, v)
return table
The provided code snippet includes necessary dependencies for implementing the `rename_table` function. Write a Python function `def rename_table(old_name: str | Table, new_name: str | Table) -> AlterTable` to solve the following problem:
Build ALTER TABLE... RENAME... expression Args: old_name: The old name of the table new_name: The new name of the table Returns: Alter table expression
Here is the function:
def rename_table(old_name: str | Table, new_name: str | Table) -> AlterTable:
"""Build ALTER TABLE... RENAME... expression
Args:
old_name: The old name of the table
new_name: The new name of the table
Returns:
Alter table expression
"""
old_table = to_table(old_name)
new_table = to_table(new_name)
return AlterTable(
this=old_table,
actions=[
RenameTable(this=new_table),
],
) | Build ALTER TABLE... RENAME... expression Args: old_name: The old name of the table new_name: The new name of the table Returns: Alter table expression |
152,944 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
if t.TYPE_CHECKING:
from sqlglot._typing import E, Lit
from sqlglot.dialects.dialect import DialectType
Q = t.TypeVar("Q", bound="Query")
class Column(Condition):
arg_types = {"this": True, "table": False, "db": False, "catalog": False, "join_mark": False}
def table(self) -> str:
return self.text("table")
def db(self) -> str:
return self.text("db")
def catalog(self) -> str:
return self.text("catalog")
def output_name(self) -> str:
return self.name
def parts(self) -> t.List[Identifier]:
"""Return the parts of a column in order catalog, db, table, name."""
return [
t.cast(Identifier, self.args[part])
for part in ("catalog", "db", "table", "this")
if self.args.get(part)
]
def to_dot(self) -> Dot | Identifier:
"""Converts the column into a dot expression."""
parts = self.parts
parent = self.parent
while parent:
if isinstance(parent, Dot):
parts.append(parent.expression)
parent = parent.parent
return Dot.build(deepcopy(parts)) if len(parts) > 1 else parts[0]
class RenameColumn(Expression):
arg_types = {"this": True, "to": True, "exists": False}
class Table(Expression):
arg_types = {
"this": False,
"alias": False,
"db": False,
"catalog": False,
"laterals": False,
"joins": False,
"pivots": False,
"hints": False,
"system_time": False,
"version": False,
"format": False,
"pattern": False,
"ordinality": False,
"when": False,
"only": False,
}
def name(self) -> str:
if isinstance(self.this, Func):
return ""
return self.this.name
def db(self) -> str:
return self.text("db")
def catalog(self) -> str:
return self.text("catalog")
def selects(self) -> t.List[Expression]:
return []
def named_selects(self) -> t.List[str]:
return []
def parts(self) -> t.List[Expression]:
"""Return the parts of a table in order catalog, db, table."""
parts: t.List[Expression] = []
for arg in ("catalog", "db", "this"):
part = self.args.get(arg)
if isinstance(part, Dot):
parts.extend(part.flatten())
elif isinstance(part, Expression):
parts.append(part)
return parts
def to_column(self, copy: bool = True) -> Alias | Column | Dot:
parts = self.parts
col = column(*reversed(parts[0:4]), fields=parts[4:], copy=copy) # type: ignore
alias = self.args.get("alias")
if alias:
col = alias_(col, alias.this, copy=copy)
return col
class AlterTable(Expression):
arg_types = {
"this": True,
"actions": True,
"exists": False,
"only": False,
"options": False,
}
def to_table(sql_path: str | Table, **kwargs) -> Table: ...
def to_table(sql_path: None, **kwargs) -> None: ...
def to_table(
sql_path: t.Optional[str | Table], dialect: DialectType = None, copy: bool = True, **kwargs
) -> t.Optional[Table]:
"""
Create a table expression from a `[catalog].[schema].[table]` sql path. Catalog and schema are optional.
If a table is passed in then that table is returned.
Args:
sql_path: a `[catalog].[schema].[table]` string.
dialect: the source dialect according to which the table name will be parsed.
copy: Whether to copy a table if it is passed in.
kwargs: the kwargs to instantiate the resulting `Table` expression with.
Returns:
A table expression.
"""
if sql_path is None or isinstance(sql_path, Table):
return maybe_copy(sql_path, copy=copy)
if not isinstance(sql_path, str):
raise ValueError(f"Invalid type provided for a table: {type(sql_path)}")
table = maybe_parse(sql_path, into=Table, dialect=dialect)
if table:
for k, v in kwargs.items():
table.set(k, v)
return table
def to_column(sql_path: str | Column, **kwargs) -> Column:
"""
Create a column from a `[table].[column]` sql path. Schema is optional.
If a column is passed in then that column is returned.
Args:
sql_path: `[table].[column]` string
Returns:
Table: A column expression
"""
if sql_path is None or isinstance(sql_path, Column):
return sql_path
if not isinstance(sql_path, str):
raise ValueError(f"Invalid type provided for column: {type(sql_path)}")
return column(*reversed(sql_path.split(".")), **kwargs) # type: ignore
The provided code snippet includes necessary dependencies for implementing the `rename_column` function. Write a Python function `def rename_column( table_name: str | Table, old_column_name: str | Column, new_column_name: str | Column, exists: t.Optional[bool] = None, ) -> AlterTable` to solve the following problem:
Build ALTER TABLE... RENAME COLUMN... expression Args: table_name: Name of the table old_column: The old name of the column new_column: The new name of the column exists: Whether to add the `IF EXISTS` clause Returns: Alter table expression
Here is the function:
def rename_column(
table_name: str | Table,
old_column_name: str | Column,
new_column_name: str | Column,
exists: t.Optional[bool] = None,
) -> AlterTable:
"""Build ALTER TABLE... RENAME COLUMN... expression
Args:
table_name: Name of the table
old_column: The old name of the column
new_column: The new name of the column
exists: Whether to add the `IF EXISTS` clause
Returns:
Alter table expression
"""
table = to_table(table_name)
old_column = to_column(old_column_name)
new_column = to_column(new_column_name)
return AlterTable(
this=table,
actions=[
RenameColumn(this=old_column, to=new_column, exists=exists),
],
) | Build ALTER TABLE... RENAME COLUMN... expression Args: table_name: Name of the table old_column: The old name of the column new_column: The new name of the column exists: Whether to add the `IF EXISTS` clause Returns: Alter table expression |
152,945 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
if t.TYPE_CHECKING:
from sqlglot._typing import E, Lit
from sqlglot.dialects.dialect import DialectType
Q = t.TypeVar("Q", bound="Query")
class Expression(metaclass=_Expression):
    """
    The base class for all expressions in a syntax tree. Each Expression encapsulates any necessary
    context, such as its child expressions, their names (arg keys), and whether a given child expression
    is optional or not.

    Attributes:
        key: a unique key for each class in the Expression hierarchy. This is useful for hashing
            and representing expressions as strings.
        arg_types: determines the arguments (child nodes) supported by an expression. It maps
            arg keys to booleans that indicate whether the corresponding args are optional.
        parent: a reference to the parent expression (or None, in case of root expressions).
        arg_key: the arg key an expression is associated with, i.e. the name its parent expression
            uses to refer to it.
        index: the index of an expression if it is inside of a list argument in its parent
        comments: a list of comments that are associated with a given expression. This is used in
            order to preserve comments when transpiling SQL code.
        type: the `sqlglot.expressions.DataType` type of an expression. This is inferred by the
            optimizer, in order to enable some transformations that require type information.
        meta: a dictionary that can be used to store useful metadata for a given expression.

    Example:
        >>> class Foo(Expression):
        ...     arg_types = {"this": True, "expression": False}

        The above definition informs us that Foo is an Expression that requires an argument called
        "this" and may also optionally receive an argument called "expression".

    Args:
        args: a mapping used for retrieving the arguments of an expression, given their arg keys.
    """

    # NOTE(review): several members below (`this`, `expressions`, `name`, `alias`, `type`,
    # `meta`, `hashable_args`, `is_number`, `parent_select`, ...) are read elsewhere in this
    # class WITHOUT call parentheses (e.g. `self.is_number` in `is_int`, `self.meta[...]` in
    # `add_comments`, `self.hashable_args` in `__hash__`). This strongly suggests they are
    # `@property` methods whose decorators were lost when this snippet was extracted —
    # confirm against the upstream source before relying on this text.

    key = "expression"  # unique per-class identifier used for hashing/repr
    arg_types = {"this": True}  # the base class requires a single mandatory "this" arg
    __slots__ = ("args", "parent", "arg_key", "index", "comments", "_type", "_meta", "_hash")

    def __init__(self, **args: t.Any):
        self.args: t.Dict[str, t.Any] = args
        self.parent: t.Optional[Expression] = None
        self.arg_key: t.Optional[str] = None
        self.index: t.Optional[int] = None
        self.comments: t.Optional[t.List[str]] = None
        self._type: t.Optional[DataType] = None
        self._meta: t.Optional[t.Dict[str, t.Any]] = None
        self._hash: t.Optional[int] = None

        # Wire parent/arg_key/index back-pointers for every child expression.
        for arg_key, value in self.args.items():
            self._set_parent(arg_key, value)

    def __eq__(self, other) -> bool:
        # Equality requires the exact same class (no subclass equivalence) and
        # delegates to the hash, which folds in the normalized args.
        return type(self) is type(other) and hash(self) == hash(other)

    def hashable_args(self) -> t.Any:
        # Normalized, order-insensitive view of the args. None/False/empty-list
        # args are dropped so that semantically-equal expressions hash the same.
        return frozenset(
            (k, tuple(_norm_arg(a) for a in v) if type(v) is list else _norm_arg(v))
            for k, v in self.args.items()
            if not (v is None or v is False or (type(v) is list and not v))
        )

    def __hash__(self) -> int:
        # `_hash` may be precomputed/frozen (e.g. by the optimizer) to avoid
        # repeatedly normalizing the args.
        if self._hash is not None:
            return self._hash
        return hash((self.__class__, self.hashable_args))

    def this(self) -> t.Any:
        """
        Retrieves the argument with key "this".
        """
        return self.args.get("this")

    def expression(self) -> t.Any:
        """
        Retrieves the argument with key "expression".
        """
        return self.args.get("expression")

    def expressions(self) -> t.List[t.Any]:
        """
        Retrieves the argument with key "expressions".
        """
        return self.args.get("expressions") or []

    def text(self, key) -> str:
        """
        Returns a textual representation of the argument corresponding to "key". This can only be used
        for args that are strings or leaf Expression instances, such as identifiers and literals.
        """
        field = self.args.get(key)
        if isinstance(field, str):
            return field
        if isinstance(field, (Identifier, Literal, Var)):
            return field.this
        if isinstance(field, (Star, Null)):
            return field.name
        return ""

    def is_string(self) -> bool:
        """
        Checks whether a Literal expression is a string.
        """
        return isinstance(self, Literal) and self.args["is_string"]

    def is_number(self) -> bool:
        """
        Checks whether a Literal expression is a number.
        """
        return isinstance(self, Literal) and not self.args["is_string"]

    def is_int(self) -> bool:
        """
        Checks whether a Literal expression is an integer.
        """
        # NOTE(review): `self.is_number` is read without parentheses — evidence
        # that `is_number` (and this method) are `@property` accessors upstream.
        return self.is_number and is_int(self.name)

    def is_star(self) -> bool:
        """Checks whether an expression is a star."""
        return isinstance(self, Star) or (isinstance(self, Column) and isinstance(self.this, Star))

    def alias(self) -> str:
        """
        Returns the alias of the expression, or an empty string if it's not aliased.
        """
        if isinstance(self.args.get("alias"), TableAlias):
            return self.args["alias"].name
        return self.text("alias")

    def alias_column_names(self) -> t.List[str]:
        # Column aliases attached to a table alias, e.g. the (a, b) in `t AS x(a, b)`.
        table_alias = self.args.get("alias")
        if not table_alias:
            return []
        return [c.name for c in table_alias.args.get("columns") or []]

    def name(self) -> str:
        # Textual value of the "this" arg (identifier/literal text).
        return self.text("this")

    def alias_or_name(self) -> str:
        return self.alias or self.name

    def output_name(self) -> str:
        """
        Name of the output column if this expression is a selection.

        If the Expression has no output name, an empty string is returned.

        Example:
            >>> from sqlglot import parse_one
            >>> parse_one("SELECT a").expressions[0].output_name
            'a'
            >>> parse_one("SELECT b AS c").expressions[0].output_name
            'c'
            >>> parse_one("SELECT 1 + 2").expressions[0].output_name
            ''
        """
        return ""

    # NOTE(review): the two `type` definitions below are a getter/setter pair;
    # without the (apparently stripped) `@property` / `@type.setter` decorators
    # the second definition would silently shadow the first — confirm upstream.
    def type(self) -> t.Optional[DataType]:
        return self._type

    def type(self, dtype: t.Optional[DataType | DataType.Type | str]) -> None:
        # Accepts a DataType, a DataType.Type enum member, or a string and
        # normalizes it into a DataType instance.
        if dtype and not isinstance(dtype, DataType):
            dtype = DataType.build(dtype)
        self._type = dtype  # type: ignore

    def is_type(self, *dtypes) -> bool:
        return self.type is not None and self.type.is_type(*dtypes)

    def is_leaf(self) -> bool:
        # A leaf has no child expressions and no list args at all.
        return not any(isinstance(v, (Expression, list)) for v in self.args.values())

    def meta(self) -> t.Dict[str, t.Any]:
        # Lazily-created metadata dict (kept None until first use to save memory).
        if self._meta is None:
            self._meta = {}
        return self._meta

    def __deepcopy__(self, memo):
        # Iterative deep copy (explicit stack) to avoid recursion limits on
        # very deep ASTs. Each stack entry pairs an original node with its
        # freshly-created copy; children are linked via set()/append() so the
        # parent/arg_key/index back-pointers are rebuilt correctly.
        root = self.__class__()
        stack = [(self, root)]

        while stack:
            node, copy = stack.pop()

            if node.comments is not None:
                copy.comments = deepcopy(node.comments)
            if node._type is not None:
                copy._type = deepcopy(node._type)
            if node._meta is not None:
                copy._meta = deepcopy(node._meta)
            if node._hash is not None:
                copy._hash = node._hash

            for k, vs in node.args.items():
                # `hasattr(vs, "parent")` is used as a cheap "is an Expression" test.
                if hasattr(vs, "parent"):
                    stack.append((vs, vs.__class__()))
                    copy.set(k, stack[-1][-1])
                elif type(vs) is list:
                    copy.args[k] = []

                    for v in vs:
                        if hasattr(v, "parent"):
                            stack.append((v, v.__class__()))
                            copy.append(k, stack[-1][-1])
                        else:
                            copy.append(k, v)
                else:
                    copy.args[k] = vs

        return root

    def copy(self):
        """
        Returns a deep copy of the expression.
        """
        return deepcopy(self)

    def add_comments(self, comments: t.Optional[t.List[str]]) -> None:
        # Attach comments to this node. Comments containing the SQLGLOT_META
        # marker additionally carry key[=value] metadata pairs, which are parsed
        # into `self.meta` (bare keys map to True).
        if self.comments is None:
            self.comments = []
        if comments:
            for comment in comments:
                _, *meta = comment.split(SQLGLOT_META)
                if meta:
                    for kv in "".join(meta).split(","):
                        k, *v = kv.split("=")
                        value = v[0].strip() if v else True
                        self.meta[k.strip()] = value
                self.comments.append(comment)

    def append(self, arg_key: str, value: t.Any) -> None:
        """
        Appends value to arg_key if it's a list or sets it as a new list.

        Args:
            arg_key (str): name of the list expression arg
            value (Any): value to append to the list
        """
        if type(self.args.get(arg_key)) is not list:
            self.args[arg_key] = []
        self._set_parent(arg_key, value)
        values = self.args[arg_key]
        if hasattr(value, "parent"):
            value.index = len(values)
        values.append(value)

    def set(self, arg_key: str, value: t.Any) -> None:
        """
        Sets arg_key to value.

        Args:
            arg_key: name of the expression arg.
            value: value to set the arg to.
        """
        # Setting None removes the arg entirely rather than storing a null.
        if value is None:
            self.args.pop(arg_key, None)
        else:
            self.args[arg_key] = value
            self._set_parent(arg_key, value)

    def _set_parent(self, arg_key: str, value: t.Any, index: t.Optional[int] = None) -> None:
        # Rebuild back-pointers for a single child or each element of a list arg.
        if hasattr(value, "parent"):
            value.parent = self
            value.arg_key = arg_key
            value.index = index
        elif type(value) is list:
            for index, v in enumerate(value):
                if hasattr(v, "parent"):
                    v.parent = self
                    v.arg_key = arg_key
                    v.index = index

    def depth(self) -> int:
        """
        Returns the depth of this tree.
        """
        if self.parent:
            return self.parent.depth + 1
        return 0

    def iter_expressions(self, reverse: bool = False) -> t.Iterator[Expression]:
        """Yields all child expressions of this node, exploding list args."""
        # remove tuple when python 3.7 is deprecated
        for vs in reversed(tuple(self.args.values())) if reverse else self.args.values():
            if type(vs) is list:
                for v in reversed(vs) if reverse else vs:
                    if hasattr(v, "parent"):
                        yield v
            else:
                if hasattr(vs, "parent"):
                    yield vs

    def find(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Optional[E]:
        """
        Returns the first node in this tree which matches at least one of
        the specified types.

        Args:
            expression_types: the expression type(s) to match.
            bfs: whether to search the AST using the BFS algorithm (DFS is used if false).

        Returns:
            The node which matches the criteria or None if no such node was found.
        """
        return next(self.find_all(*expression_types, bfs=bfs), None)

    def find_all(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Iterator[E]:
        """
        Returns a generator object which visits all nodes in this tree and only
        yields those that match at least one of the specified expression types.

        Args:
            expression_types: the expression type(s) to match.
            bfs: whether to search the AST using the BFS algorithm (DFS is used if false).

        Returns:
            The generator object.
        """
        for expression in self.walk(bfs=bfs):
            if isinstance(expression, expression_types):
                yield expression

    def find_ancestor(self, *expression_types: t.Type[E]) -> t.Optional[E]:
        """
        Returns a nearest parent matching expression_types.

        Args:
            expression_types: the expression type(s) to match.

        Returns:
            The parent node.
        """
        ancestor = self.parent
        while ancestor and not isinstance(ancestor, expression_types):
            ancestor = ancestor.parent
        return ancestor  # type: ignore

    def parent_select(self) -> t.Optional[Select]:
        """
        Returns the parent select statement.
        """
        return self.find_ancestor(Select)

    def same_parent(self) -> bool:
        """Returns if the parent is the same class as itself."""
        return type(self.parent) is self.__class__

    def root(self) -> Expression:
        """
        Returns the root expression of this tree.
        """
        expression = self
        while expression.parent:
            expression = expression.parent
        return expression

    def walk(
        self, bfs: bool = True, prune: t.Optional[t.Callable[[Expression], bool]] = None
    ) -> t.Iterator[Expression]:
        """
        Returns a generator object which visits all nodes in this tree.

        Args:
            bfs: if set to True the BFS traversal order will be applied,
                otherwise the DFS traversal will be used instead.
            prune: callable taking a node; returns True if the generator should
                stop traversing this branch of the tree.

        Returns:
            the generator object.
        """
        if bfs:
            yield from self.bfs(prune=prune)
        else:
            yield from self.dfs(prune=prune)

    def dfs(
        self, prune: t.Optional[t.Callable[[Expression], bool]] = None
    ) -> t.Iterator[Expression]:
        """
        Returns a generator object which visits all nodes in this tree in
        the DFS (Depth-first) order.

        Returns:
            The generator object.
        """
        # Pruned nodes are still yielded; only their subtrees are skipped.
        stack = [self]

        while stack:
            node = stack.pop()

            yield node

            if prune and prune(node):
                continue

            for v in node.iter_expressions(reverse=True):
                stack.append(v)

    def bfs(
        self, prune: t.Optional[t.Callable[[Expression], bool]] = None
    ) -> t.Iterator[Expression]:
        """
        Returns a generator object which visits all nodes in this tree in
        the BFS (Breadth-first) order.

        Returns:
            The generator object.
        """
        queue = deque([self])

        while queue:
            node = queue.popleft()

            yield node

            if prune and prune(node):
                continue

            for v in node.iter_expressions():
                queue.append(v)

    def unnest(self):
        """
        Returns the first non parenthesis child or self.
        """
        expression = self
        while type(expression) is Paren:
            expression = expression.this
        return expression

    def unalias(self):
        """
        Returns the inner expression if this is an Alias.
        """
        if isinstance(self, Alias):
            return self.this
        return self

    def unnest_operands(self):
        """
        Returns unnested operands as a tuple.
        """
        return tuple(arg.unnest() for arg in self.iter_expressions())

    def flatten(self, unnest=True):
        """
        Returns a generator which yields child nodes whose parents are the same class.

        A AND B AND C -> [A, B, C]
        """
        for node in self.dfs(prune=lambda n: n.parent and type(n) is not self.__class__):
            if type(node) is not self.__class__:
                yield node.unnest() if unnest and not isinstance(node, Subquery) else node

    def __str__(self) -> str:
        return self.sql()

    def __repr__(self) -> str:
        return _to_s(self)

    def to_s(self) -> str:
        """
        Same as __repr__, but includes additional information which can be useful
        for debugging, like empty or missing args and the AST nodes' object IDs.
        """
        return _to_s(self, verbose=True)

    def sql(self, dialect: DialectType = None, **opts) -> str:
        """
        Returns SQL string representation of this tree.

        Args:
            dialect: the dialect of the output SQL string (eg. "spark", "hive", "presto", "mysql").
            opts: other `sqlglot.generator.Generator` options.

        Returns:
            The SQL string.
        """
        # Imported locally to avoid a circular import with sqlglot.dialects.
        from sqlglot.dialects import Dialect

        return Dialect.get_or_raise(dialect).generate(self, **opts)

    def transform(self, fun: t.Callable, *args: t.Any, copy: bool = True, **kwargs) -> Expression:
        """
        Visits all tree nodes (excluding already transformed ones)
        and applies the given transformation function to each node.

        Args:
            fun (function): a function which takes a node as an argument and returns a
                new transformed node or the same node without modifications. If the function
                returns None, then the corresponding node will be removed from the syntax tree.
            copy (bool): if set to True a new tree instance is constructed, otherwise the tree is
                modified in place.

        Returns:
            The transformed tree.
        """
        root = None
        new_node = None

        # The prune predicate skips subtrees of freshly-transformed nodes, so a
        # node returned by `fun` is not immediately re-visited.
        for node in (self.copy() if copy else self).dfs(prune=lambda n: n is not new_node):
            new_node = fun(node, *args, **kwargs)

            if root:
                if new_node is not node:
                    node.replace(new_node)
            else:
                root = new_node

        assert root
        return root.assert_is(Expression)

    # NOTE(review): the two stub definitions below are type-checking overloads;
    # they appear to have lost their `@t.overload` decorators in extraction.
    def replace(self, expression: E) -> E: ...

    def replace(self, expression: None) -> None: ...

    def replace(self, expression):
        """
        Swap out this expression with a new expression.

        For example::

            >>> tree = Select().select("x").from_("tbl")
            >>> tree.find(Column).replace(column("y"))
            Column(
              this=Identifier(this=y, quoted=False))
            >>> tree.sql()
            'SELECT y FROM tbl'

        Args:
            expression: new node

        Returns:
            The new expression or expressions.
        """
        parent = self.parent

        if not parent:
            return expression

        key = self.arg_key
        value = parent.args.get(key)

        if isinstance(value, list):
            index = self.index

            if isinstance(expression, list):
                # Splice a list of replacements in place of this node and
                # renumber every element's back-pointers.
                value.pop(index)
                value[index:index] = expression
                parent._set_parent(key, value)
            else:
                if expression is None:
                    # Removal: shift the indices of the trailing siblings down.
                    value.pop(index)

                    for v in value[index:]:
                        v.index = v.index - 1
                else:
                    value[index] = expression
                    parent._set_parent(key, expression, index=index)
        elif value is not None:
            if expression is None:
                parent.args.pop(key)
            else:
                parent.set(key, expression)

        # Detach this node unless it replaced itself.
        if expression is not self:
            self.parent = None
            self.arg_key = None
            self.index = None

        return expression

    def pop(self: E) -> E:
        """
        Remove this expression from its AST.

        Returns:
            The popped expression.
        """
        self.replace(None)
        return self

    def assert_is(self, type_: t.Type[E]) -> E:
        """
        Assert that this `Expression` is an instance of `type_`.

        If it is NOT an instance of `type_`, this raises an assertion error.
        Otherwise, this returns this expression.

        Examples:
            This is useful for type security in chained expressions:

            >>> import sqlglot
            >>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()
            'SELECT x, z FROM y'
        """
        if not isinstance(self, type_):
            raise AssertionError(f"{self} is not {type_}.")
        return self

    def error_messages(self, args: t.Optional[t.Sequence] = None) -> t.List[str]:
        """
        Checks if this expression is valid (e.g. all mandatory args are set).

        Args:
            args: a sequence of values that were used to instantiate a Func expression. This is used
                to check that the provided arguments don't exceed the function argument limit.

        Returns:
            A list of error messages for all possible errors that were found.
        """
        errors: t.List[str] = []

        for k in self.args:
            if k not in self.arg_types:
                errors.append(f"Unexpected keyword: '{k}' for {self.__class__}")
        for k, mandatory in self.arg_types.items():
            v = self.args.get(k)
            if mandatory and (v is None or (isinstance(v, list) and not v)):
                errors.append(f"Required keyword: '{k}' missing for {self.__class__}")

        if (
            args
            and isinstance(self, Func)
            and len(args) > len(self.arg_types)
            and not self.is_var_len_args
        ):
            errors.append(
                f"The number of provided arguments ({len(args)}) is greater than "
                f"the maximum number of supported arguments ({len(self.arg_types)})"
            )

        return errors

    def dump(self):
        """
        Dump this Expression to a JSON-serializable dict.
        """
        from sqlglot.serde import dump

        return dump(self)

    # NOTE(review): takes `cls` — presumably a `@classmethod` whose decorator
    # was stripped in extraction; confirm upstream.
    def load(cls, obj):
        """
        Load a dict (as returned by `Expression.dump`) into an Expression instance.
        """
        from sqlglot.serde import load

        return load(obj)

    def and_(
        self,
        *expressions: t.Optional[ExpOrStr],
        dialect: DialectType = None,
        copy: bool = True,
        **opts,
    ) -> Condition:
        """
        AND this condition with one or multiple expressions.

        Example:
            >>> condition("x=1").and_("y=1").sql()
            'x = 1 AND y = 1'

        Args:
            *expressions: the SQL code strings to parse.
                If an `Expression` instance is passed, it will be used as-is.
            dialect: the dialect used to parse the input expression.
            copy: whether to copy the involved expressions (only applies to Expressions).
            opts: other options to use to parse the input expressions.

        Returns:
            The new And condition.
        """
        return and_(self, *expressions, dialect=dialect, copy=copy, **opts)

    def or_(
        self,
        *expressions: t.Optional[ExpOrStr],
        dialect: DialectType = None,
        copy: bool = True,
        **opts,
    ) -> Condition:
        """
        OR this condition with one or multiple expressions.

        Example:
            >>> condition("x=1").or_("y=1").sql()
            'x = 1 OR y = 1'

        Args:
            *expressions: the SQL code strings to parse.
                If an `Expression` instance is passed, it will be used as-is.
            dialect: the dialect used to parse the input expression.
            copy: whether to copy the involved expressions (only applies to Expressions).
            opts: other options to use to parse the input expressions.

        Returns:
            The new Or condition.
        """
        return or_(self, *expressions, dialect=dialect, copy=copy, **opts)

    def not_(self, copy: bool = True):
        """
        Wrap this condition with NOT.

        Example:
            >>> condition("x=1").not_().sql()
            'NOT x = 1'

        Args:
            copy: whether to copy this object.

        Returns:
            The new Not instance.
        """
        return not_(self, copy=copy)

    def as_(
        self,
        alias: str | Identifier,
        quoted: t.Optional[bool] = None,
        dialect: DialectType = None,
        copy: bool = True,
        **opts,
    ) -> Alias:
        """Alias this expression (delegates to the module-level `alias_` builder)."""
        return alias_(self, alias, quoted=quoted, dialect=dialect, copy=copy, **opts)

    def _binop(self, klass: t.Type[E], other: t.Any, reverse: bool = False) -> E:
        # Shared builder behind the operator dunders: copies/converts both sides,
        # parenthesizing as needed, and returns `klass(this=..., expression=...)`
        # (operands swapped when `reverse` is set, for the __r*__ variants).
        this = self.copy()
        other = convert(other, copy=True)
        if not isinstance(this, klass) and not isinstance(other, klass):
            this = _wrap(this, Binary)
            other = _wrap(other, Binary)
        if reverse:
            return klass(this=other, expression=this)
        return klass(this=this, expression=other)

    def __getitem__(self, other: ExpOrStr | t.Tuple[ExpOrStr]) -> Bracket:
        return Bracket(
            this=self.copy(), expressions=[convert(e, copy=True) for e in ensure_list(other)]
        )

    def __iter__(self) -> t.Iterator:
        if "expressions" in self.arg_types:
            return iter(self.args.get("expressions") or [])
        # We define this because __getitem__ converts Expression into an iterable, which is
        # problematic because one can hit infinite loops if they do "for x in some_expr: ..."
        # See: https://peps.python.org/pep-0234/
        raise TypeError(f"'{self.__class__.__name__}' object is not iterable")

    def isin(
        self,
        *expressions: t.Any,
        query: t.Optional[ExpOrStr] = None,
        unnest: t.Optional[ExpOrStr] | t.Collection[ExpOrStr] = None,
        copy: bool = True,
        **opts,
    ) -> In:
        """Build an IN condition with literal values, a subquery, or an UNNEST."""
        return In(
            this=maybe_copy(self, copy),
            expressions=[convert(e, copy=copy) for e in expressions],
            query=maybe_parse(query, copy=copy, **opts) if query else None,
            unnest=(
                Unnest(
                    expressions=[
                        maybe_parse(t.cast(ExpOrStr, e), copy=copy, **opts)
                        for e in ensure_list(unnest)
                    ]
                )
                if unnest
                else None
            ),
        )

    def between(self, low: t.Any, high: t.Any, copy: bool = True, **opts) -> Between:
        """Build a BETWEEN condition with this expression as the operand."""
        return Between(
            this=maybe_copy(self, copy),
            low=convert(low, copy=copy, **opts),
            high=convert(high, copy=copy, **opts),
        )

    def is_(self, other: ExpOrStr) -> Is:
        return self._binop(Is, other)

    def like(self, other: ExpOrStr) -> Like:
        return self._binop(Like, other)

    def ilike(self, other: ExpOrStr) -> ILike:
        return self._binop(ILike, other)

    def eq(self, other: t.Any) -> EQ:
        return self._binop(EQ, other)

    def neq(self, other: t.Any) -> NEQ:
        return self._binop(NEQ, other)

    def rlike(self, other: ExpOrStr) -> RegexpLike:
        return self._binop(RegexpLike, other)

    def div(self, other: ExpOrStr, typed: bool = False, safe: bool = False) -> Div:
        # `typed`/`safe` are plain flags, so they are written straight into
        # `args` without going through set() (no parent wiring needed).
        div = self._binop(Div, other)
        div.args["typed"] = typed
        div.args["safe"] = safe
        return div

    def desc(self, nulls_first: bool = False) -> Ordered:
        return Ordered(this=self.copy(), desc=True, nulls_first=nulls_first)

    # -- Python operator overloads: each builds the corresponding SQL AST node --

    def __lt__(self, other: t.Any) -> LT:
        return self._binop(LT, other)

    def __le__(self, other: t.Any) -> LTE:
        return self._binop(LTE, other)

    def __gt__(self, other: t.Any) -> GT:
        return self._binop(GT, other)

    def __ge__(self, other: t.Any) -> GTE:
        return self._binop(GTE, other)

    def __add__(self, other: t.Any) -> Add:
        return self._binop(Add, other)

    def __radd__(self, other: t.Any) -> Add:
        return self._binop(Add, other, reverse=True)

    def __sub__(self, other: t.Any) -> Sub:
        return self._binop(Sub, other)

    def __rsub__(self, other: t.Any) -> Sub:
        return self._binop(Sub, other, reverse=True)

    def __mul__(self, other: t.Any) -> Mul:
        return self._binop(Mul, other)

    def __rmul__(self, other: t.Any) -> Mul:
        return self._binop(Mul, other, reverse=True)

    def __truediv__(self, other: t.Any) -> Div:
        return self._binop(Div, other)

    def __rtruediv__(self, other: t.Any) -> Div:
        return self._binop(Div, other, reverse=True)

    def __floordiv__(self, other: t.Any) -> IntDiv:
        return self._binop(IntDiv, other)

    def __rfloordiv__(self, other: t.Any) -> IntDiv:
        return self._binop(IntDiv, other, reverse=True)

    def __mod__(self, other: t.Any) -> Mod:
        return self._binop(Mod, other)

    def __rmod__(self, other: t.Any) -> Mod:
        return self._binop(Mod, other, reverse=True)

    def __pow__(self, other: t.Any) -> Pow:
        return self._binop(Pow, other)

    def __rpow__(self, other: t.Any) -> Pow:
        return self._binop(Pow, other, reverse=True)

    def __and__(self, other: t.Any) -> And:
        return self._binop(And, other)

    def __rand__(self, other: t.Any) -> And:
        return self._binop(And, other, reverse=True)

    def __or__(self, other: t.Any) -> Or:
        return self._binop(Or, other)

    def __ror__(self, other: t.Any) -> Or:
        return self._binop(Or, other, reverse=True)

    def __neg__(self) -> Neg:
        return Neg(this=_wrap(self.copy(), Binary))

    def __invert__(self) -> Not:
        return not_(self.copy())
def seq_get(seq: t.Sequence[T], index: int) -> t.Optional[T]:
    """Returns the value in `seq` at position `index`, or `None` if `index` is out of bounds."""
    result: t.Optional[T]
    try:
        result = seq[index]
    except IndexError:
        result = None
    return result
@t.overload
def ensure_collection(value: t.Collection[T]) -> t.Collection[T]: ...


@t.overload
def ensure_collection(value: T) -> t.Collection[T]: ...


def ensure_collection(value):
    """
    Ensures that a value is a collection (excluding `str` and `bytes`), otherwise wraps it into a list.

    Args:
        value: The value of interest.

    Returns:
        The value if it's a collection, or else the value wrapped in a list.
    """
    # None maps to an empty list rather than [None].
    if value is None:
        return []
    # str/bytes are technically Collections but are treated as scalars here.
    return (
        value if isinstance(value, Collection) and not isinstance(value, (str, bytes)) else [value]
    )
The provided code snippet includes necessary dependencies for implementing the `replace_children` function. Write a Python function `def replace_children(expression: Expression, fun: t.Callable, *args, **kwargs) -> None` to solve the following problem:
Replace children of an expression with the result of a lambda fun(child) -> exp.
Here is the function:
def replace_children(expression: Expression, fun: t.Callable, *args, **kwargs) -> None:
    """
    Replace children of an expression with the result of a lambda fun(child) -> exp.

    `fun` may return a single node, multiple nodes, or nothing; non-Expression
    children are kept untouched. For scalar args only the first replacement is kept.
    """
    # Snapshot the items since `expression.set` mutates `args` during iteration.
    for arg_key, arg_value in tuple(expression.args.items()):
        is_list_arg = type(arg_value) is list
        candidates = arg_value if is_list_arg else [arg_value]

        replacements: t.List[t.Any] = []
        for candidate in candidates:
            if not isinstance(candidate, Expression):
                replacements.append(candidate)
                continue
            replacements.extend(ensure_collection(fun(candidate, *args, **kwargs)))

        expression.set(arg_key, replacements if is_list_arg else seq_get(replacements, 0))
from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
if t.TYPE_CHECKING:
from sqlglot._typing import E, Lit
from sqlglot.dialects.dialect import DialectType
Q = t.TypeVar("Q", bound="Query")
class Expression(metaclass=_Expression):
"""
The base class for all expressions in a syntax tree. Each Expression encapsulates any necessary
context, such as its child expressions, their names (arg keys), and whether a given child expression
is optional or not.
Attributes:
key: a unique key for each class in the Expression hierarchy. This is useful for hashing
and representing expressions as strings.
arg_types: determines the arguments (child nodes) supported by an expression. It maps
arg keys to booleans that indicate whether the corresponding args are optional.
parent: a reference to the parent expression (or None, in case of root expressions).
arg_key: the arg key an expression is associated with, i.e. the name its parent expression
uses to refer to it.
index: the index of an expression if it is inside of a list argument in its parent
comments: a list of comments that are associated with a given expression. This is used in
order to preserve comments when transpiling SQL code.
type: the `sqlglot.expressions.DataType` type of an expression. This is inferred by the
optimizer, in order to enable some transformations that require type information.
meta: a dictionary that can be used to store useful metadata for a given expression.
Example:
>>> class Foo(Expression):
... arg_types = {"this": True, "expression": False}
The above definition informs us that Foo is an Expression that requires an argument called
"this" and may also optionally receive an argument called "expression".
Args:
args: a mapping used for retrieving the arguments of an expression, given their arg keys.
"""
key = "expression"
arg_types = {"this": True}
__slots__ = ("args", "parent", "arg_key", "index", "comments", "_type", "_meta", "_hash")
def __init__(self, **args: t.Any):
self.args: t.Dict[str, t.Any] = args
self.parent: t.Optional[Expression] = None
self.arg_key: t.Optional[str] = None
self.index: t.Optional[int] = None
self.comments: t.Optional[t.List[str]] = None
self._type: t.Optional[DataType] = None
self._meta: t.Optional[t.Dict[str, t.Any]] = None
self._hash: t.Optional[int] = None
for arg_key, value in self.args.items():
self._set_parent(arg_key, value)
def __eq__(self, other) -> bool:
return type(self) is type(other) and hash(self) == hash(other)
def hashable_args(self) -> t.Any:
return frozenset(
(k, tuple(_norm_arg(a) for a in v) if type(v) is list else _norm_arg(v))
for k, v in self.args.items()
if not (v is None or v is False or (type(v) is list and not v))
)
def __hash__(self) -> int:
if self._hash is not None:
return self._hash
return hash((self.__class__, self.hashable_args))
def this(self) -> t.Any:
"""
Retrieves the argument with key "this".
"""
return self.args.get("this")
def expression(self) -> t.Any:
"""
Retrieves the argument with key "expression".
"""
return self.args.get("expression")
def expressions(self) -> t.List[t.Any]:
"""
Retrieves the argument with key "expressions".
"""
return self.args.get("expressions") or []
def text(self, key) -> str:
"""
Returns a textual representation of the argument corresponding to "key". This can only be used
for args that are strings or leaf Expression instances, such as identifiers and literals.
"""
field = self.args.get(key)
if isinstance(field, str):
return field
if isinstance(field, (Identifier, Literal, Var)):
return field.this
if isinstance(field, (Star, Null)):
return field.name
return ""
def is_string(self) -> bool:
"""
Checks whether a Literal expression is a string.
"""
return isinstance(self, Literal) and self.args["is_string"]
def is_number(self) -> bool:
"""
Checks whether a Literal expression is a number.
"""
return isinstance(self, Literal) and not self.args["is_string"]
def is_int(self) -> bool:
"""
Checks whether a Literal expression is an integer.
"""
return self.is_number and is_int(self.name)
def is_star(self) -> bool:
"""Checks whether an expression is a star."""
return isinstance(self, Star) or (isinstance(self, Column) and isinstance(self.this, Star))
def alias(self) -> str:
"""
Returns the alias of the expression, or an empty string if it's not aliased.
"""
if isinstance(self.args.get("alias"), TableAlias):
return self.args["alias"].name
return self.text("alias")
def alias_column_names(self) -> t.List[str]:
table_alias = self.args.get("alias")
if not table_alias:
return []
return [c.name for c in table_alias.args.get("columns") or []]
def name(self) -> str:
return self.text("this")
def alias_or_name(self) -> str:
return self.alias or self.name
def output_name(self) -> str:
"""
Name of the output column if this expression is a selection.
If the Expression has no output name, an empty string is returned.
Example:
>>> from sqlglot import parse_one
>>> parse_one("SELECT a").expressions[0].output_name
'a'
>>> parse_one("SELECT b AS c").expressions[0].output_name
'c'
>>> parse_one("SELECT 1 + 2").expressions[0].output_name
''
"""
return ""
def type(self) -> t.Optional[DataType]:
return self._type
def type(self, dtype: t.Optional[DataType | DataType.Type | str]) -> None:
if dtype and not isinstance(dtype, DataType):
dtype = DataType.build(dtype)
self._type = dtype # type: ignore
def is_type(self, *dtypes) -> bool:
return self.type is not None and self.type.is_type(*dtypes)
def is_leaf(self) -> bool:
return not any(isinstance(v, (Expression, list)) for v in self.args.values())
def meta(self) -> t.Dict[str, t.Any]:
if self._meta is None:
self._meta = {}
return self._meta
def __deepcopy__(self, memo):
root = self.__class__()
stack = [(self, root)]
while stack:
node, copy = stack.pop()
if node.comments is not None:
copy.comments = deepcopy(node.comments)
if node._type is not None:
copy._type = deepcopy(node._type)
if node._meta is not None:
copy._meta = deepcopy(node._meta)
if node._hash is not None:
copy._hash = node._hash
for k, vs in node.args.items():
if hasattr(vs, "parent"):
stack.append((vs, vs.__class__()))
copy.set(k, stack[-1][-1])
elif type(vs) is list:
copy.args[k] = []
for v in vs:
if hasattr(v, "parent"):
stack.append((v, v.__class__()))
copy.append(k, stack[-1][-1])
else:
copy.append(k, v)
else:
copy.args[k] = vs
return root
def copy(self):
"""
Returns a deep copy of the expression.
"""
return deepcopy(self)
def add_comments(self, comments: t.Optional[t.List[str]]) -> None:
if self.comments is None:
self.comments = []
if comments:
for comment in comments:
_, *meta = comment.split(SQLGLOT_META)
if meta:
for kv in "".join(meta).split(","):
k, *v = kv.split("=")
value = v[0].strip() if v else True
self.meta[k.strip()] = value
self.comments.append(comment)
def append(self, arg_key: str, value: t.Any) -> None:
    """
    Appends value to arg_key if it's a list or sets it as a new list.

    Args:
        arg_key (str): name of the list expression arg
        value (Any): value to append to the list
    """
    # Coerce the slot to a list if it isn't one already.
    if type(self.args.get(arg_key)) is not list:
        self.args[arg_key] = []
    self._set_parent(arg_key, value)
    values = self.args[arg_key]
    # Expression values record their position within the list.
    if hasattr(value, "parent"):
        value.index = len(values)
    values.append(value)
def set(self, arg_key: str, value: t.Any) -> None:
    """
    Sets arg_key to value.

    Args:
        arg_key: name of the expression arg.
        value: value to set the arg to.
    """
    # Setting None removes the arg entirely rather than storing a null slot.
    if value is None:
        self.args.pop(arg_key, None)
    else:
        self.args[arg_key] = value
        self._set_parent(arg_key, value)
def _set_parent(self, arg_key: str, value: t.Any, index: t.Optional[int] = None) -> None:
    # Wire parent/arg_key/index back-references on the child, or on each
    # Expression child when the value is a list.
    if hasattr(value, "parent"):
        value.parent = self
        value.arg_key = arg_key
        value.index = index
    elif type(value) is list:
        for index, v in enumerate(value):
            if hasattr(v, "parent"):
                v.parent = self
                v.arg_key = arg_key
                v.index = index
def depth(self) -> int:
    """Distance from the root of the tree: 0 for a root node, otherwise the
    parent's depth plus one."""
    parent = self.parent
    return parent.depth + 1 if parent else 0
def iter_expressions(self, reverse: bool = False) -> t.Iterator[Expression]:
    """Yields all child Expressions across all arguments, exploding list args."""
    # remove tuple when python 3.7 is deprecated
    for vs in reversed(tuple(self.args.values())) if reverse else self.args.values():
        if type(vs) is list:
            for v in reversed(vs) if reverse else vs:
                # Only Expression-like items (duck-typed by "parent") are yielded.
                if hasattr(v, "parent"):
                    yield v
        else:
            if hasattr(vs, "parent"):
                yield vs
def find(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Optional[E]:
    """
    Returns the first node in this tree which matches at least one of
    the specified types.

    Args:
        expression_types: the expression type(s) to match.
        bfs: whether to search the AST using the BFS algorithm (DFS is used if false).

    Returns:
        The node which matches the criteria or None if no such node was found.
    """
    return next(self.find_all(*expression_types, bfs=bfs), None)
def find_all(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Iterator[E]:
    """
    Returns a generator object which visits all nodes in this tree and only
    yields those that match at least one of the specified expression types.

    Args:
        expression_types: the expression type(s) to match.
        bfs: whether to search the AST using the BFS algorithm (DFS is used if false).

    Returns:
        The generator object.
    """
    for expression in self.walk(bfs=bfs):
        if isinstance(expression, expression_types):
            yield expression
def find_ancestor(self, *expression_types: t.Type[E]) -> t.Optional[E]:
    """
    Returns a nearest parent matching expression_types.

    Args:
        expression_types: the expression type(s) to match.

    Returns:
        The parent node.
    """
    node = self.parent
    while node and not isinstance(node, expression_types):
        node = node.parent
    return node  # type: ignore
def parent_select(self) -> t.Optional[Select]:
    """
    Returns the parent select statement.
    """
    # Nearest enclosing Select node, or None when not inside a SELECT.
    return self.find_ancestor(Select)
def same_parent(self) -> bool:
    """Whether this node's parent is an instance of exactly the same class."""
    parent = self.parent
    return self.__class__ is type(parent)
def root(self) -> Expression:
    """Follow parent links to the topmost node of this tree and return it."""
    node = self
    while node.parent:
        node = node.parent
    return node
def walk(
    self, bfs: bool = True, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
    """
    Returns a generator object which visits all nodes in this tree.

    Args:
        bfs (bool): if set to True the BFS traversal order will be applied,
            otherwise the DFS traversal will be used instead.
        prune ((node) -> bool): callable that returns True if
            the generator should stop traversing this branch of the tree.

    Returns:
        the generator object.
    """
    if bfs:
        yield from self.bfs(prune=prune)
    else:
        yield from self.dfs(prune=prune)
def dfs(
    self, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
    """
    Returns a generator object which visits all nodes in this tree in
    the DFS (Depth-first) order.

    Returns:
        The generator object.
    """
    # Iterative pre-order traversal; pruned nodes are yielded but not descended into.
    stack = [self]
    while stack:
        node = stack.pop()
        yield node
        if prune and prune(node):
            continue
        # Push children in reverse so they pop off the stack in source order.
        for v in node.iter_expressions(reverse=True):
            stack.append(v)
def bfs(
    self, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
    """
    Returns a generator object which visits all nodes in this tree in
    the BFS (Breadth-first) order.

    Returns:
        The generator object.
    """
    # Level-order traversal using a deque; pruned nodes are yielded but not expanded.
    queue = deque([self])
    while queue:
        node = queue.popleft()
        yield node
        if prune and prune(node):
            continue
        for v in node.iter_expressions():
            queue.append(v)
def unnest(self):
    """
    Returns the first non parenthesis child or self.
    """
    # Strip nested Paren wrappers: ((x)) -> x.
    expression = self
    while type(expression) is Paren:
        expression = expression.this
    return expression
def unalias(self):
    """
    Returns the inner expression if this is an Alias.
    """
    if isinstance(self, Alias):
        return self.this
    return self
def unnest_operands(self):
    """
    Returns unnested operands as a tuple.
    """
    return tuple(arg.unnest() for arg in self.iter_expressions())
def flatten(self, unnest=True):
    """
    Returns a generator which yields child nodes whose parents are the same class.

    A AND B AND C -> [A, B, C]
    """
    # The prune stops descending as soon as a node of a different class is hit,
    # so only the contiguous run of same-class operators is flattened.
    for node in self.dfs(prune=lambda n: n.parent and type(n) is not self.__class__):
        if type(node) is not self.__class__:
            yield node.unnest() if unnest and not isinstance(node, Subquery) else node
def __str__(self) -> str:
    # str(expr) renders SQL using the default dialect.
    return self.sql()
def __repr__(self) -> str:
    return _to_s(self)
def to_s(self) -> str:
    """
    Same as __repr__, but includes additional information which can be useful
    for debugging, like empty or missing args and the AST nodes' object IDs.
    """
    return _to_s(self, verbose=True)
def sql(self, dialect: DialectType = None, **opts) -> str:
    """
    Returns SQL string representation of this tree.

    Args:
        dialect: the dialect of the output SQL string (eg. "spark", "hive", "presto", "mysql").
        opts: other `sqlglot.generator.Generator` options.

    Returns:
        The SQL string.
    """
    # Imported locally to avoid a circular import at module load time.
    from sqlglot.dialects import Dialect

    return Dialect.get_or_raise(dialect).generate(self, **opts)
def transform(self, fun: t.Callable, *args: t.Any, copy: bool = True, **kwargs) -> Expression:
    """
    Visits all tree nodes (excluding already transformed ones)
    and applies the given transformation function to each node.

    Args:
        fun (function): a function which takes a node as an argument and returns a
            new transformed node or the same node without modifications. If the function
            returns None, then the corresponding node will be removed from the syntax tree.
        copy (bool): if set to True a new tree instance is constructed, otherwise the tree is
            modified in place.

    Returns:
        The transformed tree.
    """
    root = None
    new_node = None

    # Prune any subtree fun replaced (n is not new_node), so freshly created
    # nodes are not visited again -- "excluding already transformed ones".
    for node in (self.copy() if copy else self).dfs(prune=lambda n: n is not new_node):
        new_node = fun(node, *args, **kwargs)

        if root:
            if new_node is not node:
                node.replace(new_node)
        else:
            # The first visited node becomes the (possibly transformed) root.
            root = new_node

    assert root
    return root.assert_is(Expression)
# NOTE(review): the two stub signatures below look like @t.overload declarations;
# the decorators are not visible in this dump -- confirm against the original file.
def replace(self, expression: E) -> E: ...
def replace(self, expression: None) -> None: ...
def replace(self, expression):
    """
    Swap out this expression with a new expression.

    For example::

        >>> tree = Select().select("x").from_("tbl")
        >>> tree.find(Column).replace(column("y"))
        Column(
          this=Identifier(this=y, quoted=False))
        >>> tree.sql()
        'SELECT y FROM tbl'

    Args:
        expression: new node

    Returns:
        The new expression or expressions.
    """
    parent = self.parent

    # A root node has nothing to detach from.
    if not parent:
        return expression

    key = self.arg_key
    value = parent.args.get(key)

    if isinstance(value, list):
        index = self.index

        if isinstance(expression, list):
            # Splice a list of replacements in place of this single node.
            value.pop(index)
            value[index:index] = expression
            parent._set_parent(key, value)
        else:
            if expression is None:
                # Remove this node and shift the indexes of its right siblings.
                value.pop(index)

                for v in value[index:]:
                    v.index = v.index - 1
            else:
                value[index] = expression
                parent._set_parent(key, expression, index=index)
    elif value is not None:
        if expression is None:
            parent.args.pop(key)
        else:
            parent.set(key, expression)

    if expression is not self:
        # Fully detach this node from the tree.
        self.parent = None
        self.arg_key = None
        self.index = None

    return expression
def pop(self: E) -> E:
    """
    Remove this expression from its AST.

    Returns:
        The popped expression.
    """
    self.replace(None)
    return self
def assert_is(self, type_: t.Type[E]) -> E:
    """
    Assert that this `Expression` is an instance of `type_`.

    If it is NOT an instance of `type_`, this raises an assertion error.
    Otherwise, this returns this expression.

    Examples:
        This is useful for type security in chained expressions:

        >>> import sqlglot
        >>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()
        'SELECT x, z FROM y'
    """
    if isinstance(self, type_):
        return self
    raise AssertionError(f"{self} is not {type_}.")
def error_messages(self, args: t.Optional[t.Sequence] = None) -> t.List[str]:
    """
    Checks if this expression is valid (e.g. all mandatory args are set).

    Args:
        args: a sequence of values that were used to instantiate a Func expression. This is used
            to check that the provided arguments don't exceed the function argument limit.

    Returns:
        A list of error messages for all possible errors that were found.
    """
    errors: t.List[str] = []

    # Unknown args: anything present in self.args but not declared in arg_types.
    for k in self.args:
        if k not in self.arg_types:
            errors.append(f"Unexpected keyword: '{k}' for {self.__class__}")
    # Missing mandatory args: declared True in arg_types but absent or an empty list.
    for k, mandatory in self.arg_types.items():
        v = self.args.get(k)
        if mandatory and (v is None or (isinstance(v, list) and not v)):
            errors.append(f"Required keyword: '{k}' missing for {self.__class__}")

    # Too many positional args for a fixed-arity function.
    if (
        args
        and isinstance(self, Func)
        and len(args) > len(self.arg_types)
        and not self.is_var_len_args
    ):
        errors.append(
            f"The number of provided arguments ({len(args)}) is greater than "
            f"the maximum number of supported arguments ({len(self.arg_types)})"
        )

    return errors
def dump(self):
    """
    Dump this Expression to a JSON-serializable dict.
    """
    # Imported locally to avoid a circular import at module load time.
    from sqlglot.serde import dump

    return dump(self)
# NOTE(review): `load` takes `cls` -- it looks like a @classmethod whose decorator
# is not visible in this dump; confirm against the original file.
def load(cls, obj):
    """
    Load a dict (as returned by `Expression.dump`) into an Expression instance.
    """
    from sqlglot.serde import load

    return load(obj)
def and_(
    self,
    *expressions: t.Optional[ExpOrStr],
    dialect: DialectType = None,
    copy: bool = True,
    **opts,
) -> Condition:
    """
    AND this condition with one or multiple expressions.

    Example:
        >>> condition("x=1").and_("y=1").sql()
        'x = 1 AND y = 1'

    Args:
        *expressions: the SQL code strings to parse.
            If an `Expression` instance is passed, it will be used as-is.
        dialect: the dialect used to parse the input expression.
        copy: whether to copy the involved expressions (only applies to Expressions).
        opts: other options to use to parse the input expressions.

    Returns:
        The new And condition.
    """
    # Delegates to the module-level and_() builder.
    return and_(self, *expressions, dialect=dialect, copy=copy, **opts)
def or_(
    self,
    *expressions: t.Optional[ExpOrStr],
    dialect: DialectType = None,
    copy: bool = True,
    **opts,
) -> Condition:
    """
    OR this condition with one or multiple expressions.

    Example:
        >>> condition("x=1").or_("y=1").sql()
        'x = 1 OR y = 1'

    Args:
        *expressions: the SQL code strings to parse.
            If an `Expression` instance is passed, it will be used as-is.
        dialect: the dialect used to parse the input expression.
        copy: whether to copy the involved expressions (only applies to Expressions).
        opts: other options to use to parse the input expressions.

    Returns:
        The new Or condition.
    """
    # Delegates to the module-level or_() builder.
    return or_(self, *expressions, dialect=dialect, copy=copy, **opts)
def not_(self, copy: bool = True):
    """
    Wrap this condition with NOT.

    Example:
        >>> condition("x=1").not_().sql()
        'NOT x = 1'

    Args:
        copy: whether to copy this object.

    Returns:
        The new Not instance.
    """
    # Delegates to the module-level not_() builder.
    return not_(self, copy=copy)
def as_(
    self,
    alias: str | Identifier,
    quoted: t.Optional[bool] = None,
    dialect: DialectType = None,
    copy: bool = True,
    **opts,
) -> Alias:
    # Alias this expression via the module-level alias_() builder.
    return alias_(self, alias, quoted=quoted, dialect=dialect, copy=copy, **opts)
def _binop(self, klass: t.Type[E], other: t.Any, reverse: bool = False) -> E:
    # Build a binary node of type `klass` from self and `other` (converted to an
    # Expression). When neither operand is already a `klass`, both are wrapped in
    # parens (for Binary operands) to preserve precedence in generated SQL.
    this = self.copy()
    other = convert(other, copy=True)
    if not isinstance(this, klass) and not isinstance(other, klass):
        this = _wrap(this, Binary)
        other = _wrap(other, Binary)
    # `reverse` swaps operand order for reflected operators (e.g. __radd__).
    if reverse:
        return klass(this=other, expression=this)
    return klass(this=this, expression=other)
def __getitem__(self, other: ExpOrStr | t.Tuple[ExpOrStr]) -> Bracket:
    # expr[key] builds a Bracket (subscript) expression.
    return Bracket(
        this=self.copy(), expressions=[convert(e, copy=True) for e in ensure_list(other)]
    )
def __iter__(self) -> t.Iterator:
    if "expressions" in self.arg_types:
        return iter(self.args.get("expressions") or [])
    # We define this because __getitem__ converts Expression into an iterable, which is
    # problematic because one can hit infinite loops if they do "for x in some_expr: ..."
    # See: https://peps.python.org/pep-0234/
    raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
def isin(
    self,
    *expressions: t.Any,
    query: t.Optional[ExpOrStr] = None,
    unnest: t.Optional[ExpOrStr] | t.Collection[ExpOrStr] = None,
    copy: bool = True,
    **opts,
) -> In:
    # Build an IN predicate from literal values, a subquery, or UNNEST(...) operands.
    return In(
        this=maybe_copy(self, copy),
        expressions=[convert(e, copy=copy) for e in expressions],
        query=maybe_parse(query, copy=copy, **opts) if query else None,
        unnest=(
            Unnest(
                expressions=[
                    maybe_parse(t.cast(ExpOrStr, e), copy=copy, **opts)
                    for e in ensure_list(unnest)
                ]
            )
            if unnest
            else None
        ),
    )
def between(self, low: t.Any, high: t.Any, copy: bool = True, **opts) -> Between:
    # Build a BETWEEN predicate with both bounds converted to Expressions.
    return Between(
        this=maybe_copy(self, copy),
        low=convert(low, copy=copy, **opts),
        high=convert(high, copy=copy, **opts),
    )
# Fluent predicate builders: each combines self with `other` via _binop to
# produce the corresponding AST node.
def is_(self, other: ExpOrStr) -> Is:
    return self._binop(Is, other)
def like(self, other: ExpOrStr) -> Like:
    return self._binop(Like, other)
def ilike(self, other: ExpOrStr) -> ILike:
    return self._binop(ILike, other)
def eq(self, other: t.Any) -> EQ:
    return self._binop(EQ, other)
def neq(self, other: t.Any) -> NEQ:
    return self._binop(NEQ, other)
def rlike(self, other: ExpOrStr) -> RegexpLike:
    return self._binop(RegexpLike, other)
def div(self, other: ExpOrStr, typed: bool = False, safe: bool = False) -> Div:
    # typed/safe flags are stored on the Div node's args.
    div = self._binop(Div, other)
    div.args["typed"] = typed
    div.args["safe"] = safe
    return div
def desc(self, nulls_first: bool = False) -> Ordered:
    # Wrap self in a descending ORDER BY term.
    return Ordered(this=self.copy(), desc=True, nulls_first=nulls_first)
# Operator overloads: comparison and arithmetic operators build the
# corresponding AST nodes via _binop; the __r*__ variants handle reflected
# operands (e.g. 1 + expr).
def __lt__(self, other: t.Any) -> LT:
    return self._binop(LT, other)
def __le__(self, other: t.Any) -> LTE:
    return self._binop(LTE, other)
def __gt__(self, other: t.Any) -> GT:
    return self._binop(GT, other)
def __ge__(self, other: t.Any) -> GTE:
    return self._binop(GTE, other)
def __add__(self, other: t.Any) -> Add:
    return self._binop(Add, other)
def __radd__(self, other: t.Any) -> Add:
    return self._binop(Add, other, reverse=True)
def __sub__(self, other: t.Any) -> Sub:
    return self._binop(Sub, other)
def __rsub__(self, other: t.Any) -> Sub:
    return self._binop(Sub, other, reverse=True)
def __mul__(self, other: t.Any) -> Mul:
    return self._binop(Mul, other)
def __rmul__(self, other: t.Any) -> Mul:
    return self._binop(Mul, other, reverse=True)
def __truediv__(self, other: t.Any) -> Div:
    return self._binop(Div, other)
def __rtruediv__(self, other: t.Any) -> Div:
    return self._binop(Div, other, reverse=True)
def __floordiv__(self, other: t.Any) -> IntDiv:
    return self._binop(IntDiv, other)
def __rfloordiv__(self, other: t.Any) -> IntDiv:
    return self._binop(IntDiv, other, reverse=True)
def __mod__(self, other: t.Any) -> Mod:
    return self._binop(Mod, other)
def __rmod__(self, other: t.Any) -> Mod:
    return self._binop(Mod, other, reverse=True)
def __pow__(self, other: t.Any) -> Pow:
    return self._binop(Pow, other)
def __rpow__(self, other: t.Any) -> Pow:
    return self._binop(Pow, other, reverse=True)
def __and__(self, other: t.Any) -> And:
    return self._binop(And, other)
def __rand__(self, other: t.Any) -> And:
    return self._binop(And, other, reverse=True)
def __or__(self, other: t.Any) -> Or:
    return self._binop(Or, other)
def __ror__(self, other: t.Any) -> Or:
    return self._binop(Or, other, reverse=True)
def __neg__(self) -> Neg:
    # Unary minus; the operand is parenthesized when it is a Binary expression.
    return Neg(this=_wrap(self.copy(), Binary))
def __invert__(self) -> Not:
    # ~expr builds NOT expr.
    return not_(self.copy())
The provided code snippet includes the necessary dependencies for implementing the `replace_tree` function. Write a Python function `def replace_tree(expression: Expression, fun: t.Callable, prune: t.Optional[t.Callable[[Expression], bool]] = None) -> Expression` that solves the following problem:
Replace an entire tree with the result of calling `fun` on each node. Nodes are traversed in reverse DFS order (leaves first), and any new nodes created by `fun` are traversed as well.
Here is the function:
def replace_tree(
    expression: Expression,
    fun: t.Callable,
    prune: t.Optional[t.Callable[[Expression], bool]] = None,
) -> Expression:
    """
    Replace an entire tree with the result of function calls on each node.

    This will be traversed in reverse dfs, so leaves first.
    If new nodes are created as a result of function calls, they will also be traversed.
    """
    # Materialize the DFS order, then pop from the end so leaves are visited first.
    pending = list(expression.dfs(prune=prune))

    while pending:
        current = pending.pop()
        result = fun(current)

        if result is not current:
            current.replace(result)

            # Newly created expressions are themselves subject to replacement.
            if isinstance(result, Expression):
                pending.append(result)

    # The last processed node is the (possibly replaced) root.
    return result
from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
class Table(Expression):
    # Supported args; all Table args are optional (False = not mandatory).
    arg_types = {
        "this": False,
        "alias": False,
        "db": False,
        "catalog": False,
        "laterals": False,
        "joins": False,
        "pivots": False,
        "hints": False,
        "system_time": False,
        "version": False,
        "format": False,
        "pattern": False,
        "ordinality": False,
        "when": False,
        "only": False,
    }
    # NOTE(review): the accessors below look like @property methods -- decorators
    # are not visible in this dump (to_column reads `self.parts` as an attribute,
    # which supports that reading); confirm against the original file.
    def name(self) -> str:
        # Function tables (e.g. a table-valued function call) have no simple name.
        if isinstance(self.this, Func):
            return ""
        return self.this.name
    def db(self) -> str:
        return self.text("db")
    def catalog(self) -> str:
        return self.text("catalog")
    def selects(self) -> t.List[Expression]:
        # A raw table exposes no projections.
        return []
    def named_selects(self) -> t.List[str]:
        return []
    def parts(self) -> t.List[Expression]:
        """Return the parts of a table in order catalog, db, table."""
        parts: t.List[Expression] = []

        for arg in ("catalog", "db", "this"):
            part = self.args.get(arg)

            # Dotted parts (a.b.c) are flattened into their components.
            if isinstance(part, Dot):
                parts.extend(part.flatten())
            elif isinstance(part, Expression):
                parts.append(part)

        return parts
    def to_column(self, copy: bool = True) -> Alias | Column | Dot:
        # Reinterpret this table reference as a column, preserving any alias.
        parts = self.parts
        col = column(*reversed(parts[0:4]), fields=parts[4:], copy=copy)  # type: ignore
        alias = self.args.get("alias")
        if alias:
            col = alias_(col, alias.this, copy=copy)
        return col
# NOTE(review): the two stub signatures below look like @t.overload declarations;
# the decorators are not visible in this dump -- confirm against the original file.
def maybe_parse(
    sql_or_expression: ExpOrStr,
    *,
    into: t.Type[E],
    dialect: DialectType = None,
    prefix: t.Optional[str] = None,
    copy: bool = False,
    **opts,
) -> E: ...
def maybe_parse(
    sql_or_expression: str | E,
    *,
    into: t.Optional[IntoType] = None,
    dialect: DialectType = None,
    prefix: t.Optional[str] = None,
    copy: bool = False,
    **opts,
) -> E: ...
def maybe_parse(
    sql_or_expression: ExpOrStr,
    *,
    into: t.Optional[IntoType] = None,
    dialect: DialectType = None,
    prefix: t.Optional[str] = None,
    copy: bool = False,
    **opts,
) -> Expression:
    """Gracefully handle a possible string or expression.

    Example:
        >>> maybe_parse("1")
        Literal(this=1, is_string=False)
        >>> maybe_parse(to_identifier("x"))
        Identifier(this=x, quoted=False)

    Args:
        sql_or_expression: the SQL code string or an expression
        into: the SQLGlot Expression to parse into
        dialect: the dialect used to parse the input expressions (in the case that an
            input expression is a SQL string).
        prefix: a string to prefix the sql with before it gets parsed
            (automatically includes a space)
        copy: whether to copy the expression.
        **opts: other options to use to parse the input expressions (again, in the case
            that an input expression is a SQL string).

    Returns:
        Expression: the parsed or given expression.
    """
    # Expressions pass straight through (optionally copied); only strings are parsed.
    if isinstance(sql_or_expression, Expression):
        if copy:
            return sql_or_expression.copy()
        return sql_or_expression

    if sql_or_expression is None:
        raise ParseError("SQL cannot be None")

    # Imported locally to avoid a circular import at module load time.
    import sqlglot

    sql = str(sql_or_expression)
    if prefix:
        sql = f"{prefix} {sql}"

    return sqlglot.parse_one(sql, read=dialect, into=into, **opts)
# Identifiers matching this pattern are safe to emit unquoted: a leading
# letter/underscore followed only by word characters.
SAFE_IDENTIFIER_RE: t.Pattern[str] = re.compile(r"^[_a-zA-Z][\w]*$")
# A dialect may be specified by name, instance, class, or None (default dialect).
DialectType = t.Union[str, Dialect, t.Type[Dialect], None]
The provided code snippet includes the necessary dependencies for implementing the `table_name` function. Write a Python function `def table_name(table: Table | str, dialect: DialectType = None, identify: bool = False) -> str` that solves the following problem:
Get the full name of a table as a string. Args: table: Table expression node or string. dialect: The dialect to generate the table name for. identify: Determines when an identifier should be quoted. Possible values are: False (default): never quote, except in cases where quoting is mandatory for the dialect; True: always quote. Examples: >>> from sqlglot import exp, parse_one >>> table_name(parse_one("select * from a.b.c").find(exp.Table)) 'a.b.c' Returns: The table name.
Here is the function:
def table_name(table: Table | str, dialect: DialectType = None, identify: bool = False) -> str:
    """Get the full name of a table as a string.

    Args:
        table: Table expression node or string.
        dialect: The dialect to generate the table name for.
        identify: Determines when an identifier should be quoted. Possible values are:
            False (default): Never quote, except in cases where it's mandatory by the dialect.
            True: Always quote.

    Examples:
        >>> from sqlglot import exp, parse_one
        >>> table_name(parse_one("select * from a.b.c").find(exp.Table))
        'a.b.c'

    Returns:
        The table name.
    """
    table = maybe_parse(table, into=Table, dialect=dialect)

    if not table:
        raise ValueError(f"Cannot parse {table}")

    rendered = []
    for part in table.parts:
        # Quote when requested, or when the raw name isn't a safe bare identifier.
        if identify or not SAFE_IDENTIFIER_RE.match(part.name):
            rendered.append(part.sql(dialect=dialect, identify=True, copy=False))
        else:
            rendered.append(part.name)

    return ".".join(rendered)
from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
# Typing-only imports: these names are needed only for annotations, so they are
# guarded to avoid runtime import cycles.
if t.TYPE_CHECKING:
    from sqlglot._typing import E, Lit
    from sqlglot.dialects.dialect import DialectType
# Type variable bound to Query, for fluent builder methods.
Q = t.TypeVar("Q", bound="Query")
# Arg keys composing a table's qualified name, innermost ("this") first.
TABLE_PARTS = ("this", "db", "catalog")
class Expression(metaclass=_Expression):
"""
The base class for all expressions in a syntax tree. Each Expression encapsulates any necessary
context, such as its child expressions, their names (arg keys), and whether a given child expression
is optional or not.
Attributes:
key: a unique key for each class in the Expression hierarchy. This is useful for hashing
and representing expressions as strings.
arg_types: determines the arguments (child nodes) supported by an expression. It maps
arg keys to booleans that indicate whether the corresponding args are optional.
parent: a reference to the parent expression (or None, in case of root expressions).
arg_key: the arg key an expression is associated with, i.e. the name its parent expression
uses to refer to it.
index: the index of an expression if it is inside of a list argument in its parent
comments: a list of comments that are associated with a given expression. This is used in
order to preserve comments when transpiling SQL code.
type: the `sqlglot.expressions.DataType` type of an expression. This is inferred by the
optimizer, in order to enable some transformations that require type information.
meta: a dictionary that can be used to store useful metadata for a given expression.
Example:
>>> class Foo(Expression):
... arg_types = {"this": True, "expression": False}
The above definition informs us that Foo is an Expression that requires an argument called
"this" and may also optionally receive an argument called "expression".
Args:
args: a mapping used for retrieving the arguments of an expression, given their arg keys.
"""
key = "expression"
arg_types = {"this": True}
__slots__ = ("args", "parent", "arg_key", "index", "comments", "_type", "_meta", "_hash")
def __init__(self, **args: t.Any):
self.args: t.Dict[str, t.Any] = args
self.parent: t.Optional[Expression] = None
self.arg_key: t.Optional[str] = None
self.index: t.Optional[int] = None
self.comments: t.Optional[t.List[str]] = None
self._type: t.Optional[DataType] = None
self._meta: t.Optional[t.Dict[str, t.Any]] = None
self._hash: t.Optional[int] = None
for arg_key, value in self.args.items():
self._set_parent(arg_key, value)
def __eq__(self, other) -> bool:
return type(self) is type(other) and hash(self) == hash(other)
def hashable_args(self) -> t.Any:
return frozenset(
(k, tuple(_norm_arg(a) for a in v) if type(v) is list else _norm_arg(v))
for k, v in self.args.items()
if not (v is None or v is False or (type(v) is list and not v))
)
def __hash__(self) -> int:
if self._hash is not None:
return self._hash
return hash((self.__class__, self.hashable_args))
def this(self) -> t.Any:
"""
Retrieves the argument with key "this".
"""
return self.args.get("this")
def expression(self) -> t.Any:
"""
Retrieves the argument with key "expression".
"""
return self.args.get("expression")
def expressions(self) -> t.List[t.Any]:
"""
Retrieves the argument with key "expressions".
"""
return self.args.get("expressions") or []
def text(self, key) -> str:
"""
Returns a textual representation of the argument corresponding to "key". This can only be used
for args that are strings or leaf Expression instances, such as identifiers and literals.
"""
field = self.args.get(key)
if isinstance(field, str):
return field
if isinstance(field, (Identifier, Literal, Var)):
return field.this
if isinstance(field, (Star, Null)):
return field.name
return ""
def is_string(self) -> bool:
"""
Checks whether a Literal expression is a string.
"""
return isinstance(self, Literal) and self.args["is_string"]
def is_number(self) -> bool:
"""
Checks whether a Literal expression is a number.
"""
return isinstance(self, Literal) and not self.args["is_string"]
def is_int(self) -> bool:
"""
Checks whether a Literal expression is an integer.
"""
return self.is_number and is_int(self.name)
def is_star(self) -> bool:
"""Checks whether an expression is a star."""
return isinstance(self, Star) or (isinstance(self, Column) and isinstance(self.this, Star))
def alias(self) -> str:
"""
Returns the alias of the expression, or an empty string if it's not aliased.
"""
if isinstance(self.args.get("alias"), TableAlias):
return self.args["alias"].name
return self.text("alias")
def alias_column_names(self) -> t.List[str]:
table_alias = self.args.get("alias")
if not table_alias:
return []
return [c.name for c in table_alias.args.get("columns") or []]
def name(self) -> str:
return self.text("this")
def alias_or_name(self) -> str:
return self.alias or self.name
def output_name(self) -> str:
"""
Name of the output column if this expression is a selection.
If the Expression has no output name, an empty string is returned.
Example:
>>> from sqlglot import parse_one
>>> parse_one("SELECT a").expressions[0].output_name
'a'
>>> parse_one("SELECT b AS c").expressions[0].output_name
'c'
>>> parse_one("SELECT 1 + 2").expressions[0].output_name
''
"""
return ""
def type(self) -> t.Optional[DataType]:
return self._type
def type(self, dtype: t.Optional[DataType | DataType.Type | str]) -> None:
if dtype and not isinstance(dtype, DataType):
dtype = DataType.build(dtype)
self._type = dtype # type: ignore
def is_type(self, *dtypes) -> bool:
return self.type is not None and self.type.is_type(*dtypes)
def is_leaf(self) -> bool:
return not any(isinstance(v, (Expression, list)) for v in self.args.values())
def meta(self) -> t.Dict[str, t.Any]:
if self._meta is None:
self._meta = {}
return self._meta
def __deepcopy__(self, memo):
root = self.__class__()
stack = [(self, root)]
while stack:
node, copy = stack.pop()
if node.comments is not None:
copy.comments = deepcopy(node.comments)
if node._type is not None:
copy._type = deepcopy(node._type)
if node._meta is not None:
copy._meta = deepcopy(node._meta)
if node._hash is not None:
copy._hash = node._hash
for k, vs in node.args.items():
if hasattr(vs, "parent"):
stack.append((vs, vs.__class__()))
copy.set(k, stack[-1][-1])
elif type(vs) is list:
copy.args[k] = []
for v in vs:
if hasattr(v, "parent"):
stack.append((v, v.__class__()))
copy.append(k, stack[-1][-1])
else:
copy.append(k, v)
else:
copy.args[k] = vs
return root
def copy(self):
"""
Returns a deep copy of the expression.
"""
return deepcopy(self)
def add_comments(self, comments: t.Optional[t.List[str]]) -> None:
if self.comments is None:
self.comments = []
if comments:
for comment in comments:
_, *meta = comment.split(SQLGLOT_META)
if meta:
for kv in "".join(meta).split(","):
k, *v = kv.split("=")
value = v[0].strip() if v else True
self.meta[k.strip()] = value
self.comments.append(comment)
def append(self, arg_key: str, value: t.Any) -> None:
"""
Appends value to arg_key if it's a list or sets it as a new list.
Args:
arg_key (str): name of the list expression arg
value (Any): value to append to the list
"""
if type(self.args.get(arg_key)) is not list:
self.args[arg_key] = []
self._set_parent(arg_key, value)
values = self.args[arg_key]
if hasattr(value, "parent"):
value.index = len(values)
values.append(value)
def set(self, arg_key: str, value: t.Any) -> None:
"""
Sets arg_key to value.
Args:
arg_key: name of the expression arg.
value: value to set the arg to.
"""
if value is None:
self.args.pop(arg_key, None)
else:
self.args[arg_key] = value
self._set_parent(arg_key, value)
def _set_parent(self, arg_key: str, value: t.Any, index: t.Optional[int] = None) -> None:
if hasattr(value, "parent"):
value.parent = self
value.arg_key = arg_key
value.index = index
elif type(value) is list:
for index, v in enumerate(value):
if hasattr(v, "parent"):
v.parent = self
v.arg_key = arg_key
v.index = index
def depth(self) -> int:
"""
Returns the depth of this tree.
"""
if self.parent:
return self.parent.depth + 1
return 0
def iter_expressions(self, reverse: bool = False) -> t.Iterator[Expression]:
"""Yields the key and expression for all arguments, exploding list args."""
# remove tuple when python 3.7 is deprecated
for vs in reversed(tuple(self.args.values())) if reverse else self.args.values():
if type(vs) is list:
for v in reversed(vs) if reverse else vs:
if hasattr(v, "parent"):
yield v
else:
if hasattr(vs, "parent"):
yield vs
def find(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Optional[E]:
"""
Returns the first node in this tree which matches at least one of
the specified types.
Args:
expression_types: the expression type(s) to match.
bfs: whether to search the AST using the BFS algorithm (DFS is used if false).
Returns:
The node which matches the criteria or None if no such node was found.
"""
return next(self.find_all(*expression_types, bfs=bfs), None)
def find_all(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Iterator[E]:
"""
Returns a generator object which visits all nodes in this tree and only
yields those that match at least one of the specified expression types.
Args:
expression_types: the expression type(s) to match.
bfs: whether to search the AST using the BFS algorithm (DFS is used if false).
Returns:
The generator object.
"""
for expression in self.walk(bfs=bfs):
if isinstance(expression, expression_types):
yield expression
def find_ancestor(self, *expression_types: t.Type[E]) -> t.Optional[E]:
"""
Returns a nearest parent matching expression_types.
Args:
expression_types: the expression type(s) to match.
Returns:
The parent node.
"""
ancestor = self.parent
while ancestor and not isinstance(ancestor, expression_types):
ancestor = ancestor.parent
return ancestor # type: ignore
def parent_select(self) -> t.Optional[Select]:
"""
Returns the parent select statement.
"""
return self.find_ancestor(Select)
def same_parent(self) -> bool:
    """True when the parent node is an instance of exactly this node's class."""
    return self.__class__ is type(self.parent)
def root(self) -> Expression:
    """Follow parent links upward and return the topmost node of this tree."""
    node = self
    while node.parent:
        node = node.parent
    return node
def walk(
    self, bfs: bool = True, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
    """
    Returns a generator object which visits all nodes in this tree.

    Fix: the docstring previously documented `prune` as taking
    (node, parent, arg_key), which contradicts both the type annotation and
    the way dfs/bfs invoke it — it receives the node only.

    Args:
        bfs: if set to True the BFS traversal order will be applied,
            otherwise the DFS traversal will be used instead.
        prune: a callable that takes a node and returns True if the generator
            should stop traversing that branch of the tree.

    Returns:
        the generator object.
    """
    if bfs:
        yield from self.bfs(prune=prune)
    else:
        yield from self.dfs(prune=prune)
def dfs(
    self, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
    """
    Depth-first, pre-order traversal of this tree.

    Args:
        prune: optional predicate; when it returns True for a node, that
            node's children are not visited (the node itself is still yielded).

    Returns:
        The generator object.
    """
    pending = [self]
    while pending:
        current = pending.pop()
        yield current
        if not (prune and prune(current)):
            # Children are pushed reversed so they pop in original order.
            pending.extend(current.iter_expressions(reverse=True))
def bfs(
    self, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
    """
    Breadth-first traversal of this tree.

    Args:
        prune: optional predicate; when it returns True for a node, that
            node's children are not enqueued (the node itself is still yielded).

    Returns:
        The generator object.
    """
    pending = deque((self,))
    while pending:
        current = pending.popleft()
        yield current
        if prune and prune(current):
            continue
        pending.extend(current.iter_expressions())
def unnest(self):
    """Drill through wrapping Paren nodes and return the first non-paren
    expression (self when not parenthesized)."""
    node = self
    while type(node) is Paren:
        node = node.this
    return node
def unalias(self):
    """Return the wrapped expression when this node is an Alias, else self."""
    return self.this if isinstance(self, Alias) else self
def unnest_operands(self):
    """Return this node's child expressions, each with parens stripped, as a tuple."""
    return tuple(child.unnest() for child in self.iter_expressions())
def flatten(self, unnest=True):
    """
    Returns a generator which yields child nodes whose parents are the same class.

    For nested binary trees this linearizes the operands:
    A AND B AND C -> [A, B, C]

    Args:
        unnest: when True, parenthesized operands are unwrapped — except
            Subquery nodes, whose parentheses are meaningful.
    """
    # Prune any branch rooted at a node of a different class, so the walk only
    # descends the same-class spine (e.g. chained And nodes). The root itself
    # is never pruned because its .parent is falsy.
    for node in self.dfs(prune=lambda n: n.parent and type(n) is not self.__class__):
        if type(node) is not self.__class__:
            yield node.unnest() if unnest and not isinstance(node, Subquery) else node
def __str__(self) -> str:
    # str(expr) renders the expression as SQL in the default dialect.
    return self.sql()

def __repr__(self) -> str:
    # Compact tree rendering; see to_s for the verbose variant.
    return _to_s(self)

def to_s(self) -> str:
    """
    Same as __repr__, but includes additional information which can be useful
    for debugging, like empty or missing args and the AST nodes' object IDs.
    """
    return _to_s(self, verbose=True)
def sql(self, dialect: DialectType = None, **opts) -> str:
    """
    Returns SQL string representation of this tree.

    Args:
        dialect: the dialect of the output SQL string (eg. "spark", "hive", "presto", "mysql").
        opts: other `sqlglot.generator.Generator` options.

    Returns:
        The SQL string.
    """
    # Function-local import — presumably to avoid a circular dependency
    # between the expressions and dialects modules; confirm upstream.
    from sqlglot.dialects import Dialect

    return Dialect.get_or_raise(dialect).generate(self, **opts)
def transform(self, fun: t.Callable, *args: t.Any, copy: bool = True, **kwargs) -> Expression:
    """
    Visits all tree nodes (excluding already transformed ones)
    and applies the given transformation function to each node.

    Args:
        fun: a function which takes a node as an argument and returns a
            new transformed node or the same node without modifications. If the function
            returns None, then the corresponding node will be removed from the syntax tree.
        copy: if set to True a new tree instance is constructed, otherwise the tree is
            modified in place.

    Returns:
        The transformed tree.
    """
    root = None
    new_node = None
    # The prune callable is evaluated by dfs AFTER fun has run for the yielded
    # node (new_node is already set), so a node whose transform returned a
    # different object has its children skipped — replaced subtrees are not
    # re-visited.
    for node in (self.copy() if copy else self).dfs(prune=lambda n: n is not new_node):
        new_node = fun(node, *args, **kwargs)
        if root:
            if new_node is not node:
                node.replace(new_node)
        else:
            # First iteration: whatever fun returns for the root becomes the result.
            root = new_node
    assert root
    return root.assert_is(Expression)
# NOTE(review): the two stub defs below read like @t.overload signatures whose
# decorators were lost in formatting; confirm against the upstream source.
def replace(self, expression: E) -> E: ...
def replace(self, expression: None) -> None: ...
def replace(self, expression):
    """
    Swap out this expression with a new expression.

    For example::

        >>> tree = Select().select("x").from_("tbl")
        >>> tree.find(Column).replace(column("y"))
        Column(
          this=Identifier(this=y, quoted=False))
        >>> tree.sql()
        'SELECT y FROM tbl'

    Args:
        expression: new node

    Returns:
        The new expression or expressions.
    """
    parent = self.parent
    if not parent:
        # Detached / root node: nothing to splice, just hand back the replacement.
        return expression
    key = self.arg_key
    value = parent.args.get(key)
    if isinstance(value, list):
        # This node lives inside a list arg; splice by position.
        index = self.index
        if isinstance(expression, list):
            # Replace one element with several; reindex the whole list.
            value.pop(index)
            value[index:index] = expression
            parent._set_parent(key, value)
        else:
            if expression is None:
                # Removal: drop the element and shift following indices down.
                value.pop(index)
                for v in value[index:]:
                    v.index = v.index - 1
            else:
                value[index] = expression
                parent._set_parent(key, expression, index=index)
    elif value is not None:
        # Scalar arg: either drop it or overwrite it.
        if expression is None:
            parent.args.pop(key)
        else:
            parent.set(key, expression)
    if expression is not self:
        # Fully detach the old node so it can't be reached from the tree.
        self.parent = None
        self.arg_key = None
        self.index = None
    return expression
def pop(self: E) -> E:
    """Detach this expression from its parent AST and return it."""
    self.replace(None)
    return self
def assert_is(self, type_: t.Type[E]) -> E:
    """
    Ensure this `Expression` is an instance of `type_` and return it.

    Raises:
        AssertionError: when the instance check fails.

    Examples:
        This is useful for type security in chained expressions:

        >>> import sqlglot
        >>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()
        'SELECT x, z FROM y'
    """
    if isinstance(self, type_):
        return self
    raise AssertionError(f"{self} is not {type_}.")
def error_messages(self, args: t.Optional[t.Sequence] = None) -> t.List[str]:
    """
    Checks if this expression is valid (e.g. all mandatory args are set).

    Args:
        args: a sequence of values that were used to instantiate a Func expression. This is used
            to check that the provided arguments don't exceed the function argument limit.

    Returns:
        A list of error messages for all possible errors that were found.
    """
    errors: t.List[str] = []
    # Args present on the instance but not declared by the class.
    for k in self.args:
        if k not in self.arg_types:
            errors.append(f"Unexpected keyword: '{k}' for {self.__class__}")
    # Declared-mandatory args that are missing or empty.
    for k, mandatory in self.arg_types.items():
        v = self.args.get(k)
        if mandatory and (v is None or (isinstance(v, list) and not v)):
            errors.append(f"Required keyword: '{k}' missing for {self.__class__}")
    # Fixed-arity function called with too many positional values.
    if (
        args
        and isinstance(self, Func)
        and len(args) > len(self.arg_types)
        and not self.is_var_len_args
    ):
        errors.append(
            f"The number of provided arguments ({len(args)}) is greater than "
            f"the maximum number of supported arguments ({len(self.arg_types)})"
        )
    return errors
def dump(self):
    """
    Dump this Expression to a JSON-serializable dict.
    """
    from sqlglot.serde import dump

    return dump(self)

def load(cls, obj):
    """
    Load a dict (as returned by `Expression.dump`) into an Expression instance.
    """
    # NOTE(review): first parameter is `cls` — presumably a @classmethod whose
    # decorator was lost in formatting; confirm against the upstream source.
    from sqlglot.serde import load

    return load(obj)
def and_(
    self,
    *expressions: t.Optional[ExpOrStr],
    dialect: DialectType = None,
    copy: bool = True,
    **opts,
) -> Condition:
    """
    AND this condition with one or multiple expressions.

    Delegates to the module-level `and_` helper.

    Example:
        >>> condition("x=1").and_("y=1").sql()
        'x = 1 AND y = 1'

    Args:
        *expressions: the SQL code strings to parse.
            If an `Expression` instance is passed, it will be used as-is.
        dialect: the dialect used to parse the input expression.
        copy: whether to copy the involved expressions (only applies to Expressions).
        opts: other options to use to parse the input expressions.

    Returns:
        The new And condition.
    """
    return and_(self, *expressions, dialect=dialect, copy=copy, **opts)

def or_(
    self,
    *expressions: t.Optional[ExpOrStr],
    dialect: DialectType = None,
    copy: bool = True,
    **opts,
) -> Condition:
    """
    OR this condition with one or multiple expressions.

    Delegates to the module-level `or_` helper.

    Example:
        >>> condition("x=1").or_("y=1").sql()
        'x = 1 OR y = 1'

    Args:
        *expressions: the SQL code strings to parse.
            If an `Expression` instance is passed, it will be used as-is.
        dialect: the dialect used to parse the input expression.
        copy: whether to copy the involved expressions (only applies to Expressions).
        opts: other options to use to parse the input expressions.

    Returns:
        The new Or condition.
    """
    return or_(self, *expressions, dialect=dialect, copy=copy, **opts)

def not_(self, copy: bool = True):
    """
    Wrap this condition with NOT (delegates to the module-level `not_` helper).

    Example:
        >>> condition("x=1").not_().sql()
        'NOT x = 1'

    Args:
        copy: whether to copy this object.

    Returns:
        The new Not instance.
    """
    return not_(self, copy=copy)
def as_(
    self,
    alias: str | Identifier,
    quoted: t.Optional[bool] = None,
    dialect: DialectType = None,
    copy: bool = True,
    **opts,
) -> Alias:
    """Alias this expression as `alias` — delegates to the module-level `alias_` helper."""
    return alias_(self, alias, quoted=quoted, dialect=dialect, copy=copy, **opts)
def _binop(self, klass: t.Type[E], other: t.Any, reverse: bool = False) -> E:
    """Build a binary node of type `klass` joining a copy of self with `other`.

    Args:
        klass: the binary expression class to construct.
        other: converted to an Expression via `convert`.
        reverse: when True, self becomes the right-hand operand (used by the
            reflected dunders such as __radd__).
    """
    this = self.copy()
    other = convert(other, copy=True)
    if not isinstance(this, klass) and not isinstance(other, klass):
        # Wrap both operands — presumably parenthesizing where needed to keep
        # operator precedence intact; confirm _wrap's semantics upstream.
        this = _wrap(this, Binary)
        other = _wrap(other, Binary)
    if reverse:
        return klass(this=other, expression=this)
    return klass(this=this, expression=other)
def __getitem__(self, other: ExpOrStr | t.Tuple[ExpOrStr]) -> Bracket:
    # expr[key] builds a Bracket (subscript) node; a tuple key becomes
    # multiple bracket expressions.
    return Bracket(
        this=self.copy(), expressions=[convert(e, copy=True) for e in ensure_list(other)]
    )

def __iter__(self) -> t.Iterator:
    if "expressions" in self.arg_types:
        return iter(self.args.get("expressions") or [])

    # We define this because __getitem__ converts Expression into an iterable, which is
    # problematic because one can hit infinite loops if they do "for x in some_expr: ..."
    # See: https://peps.python.org/pep-0234/
    raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
def isin(
    self,
    *expressions: t.Any,
    query: t.Optional[ExpOrStr] = None,
    unnest: t.Optional[ExpOrStr] | t.Collection[ExpOrStr] = None,
    copy: bool = True,
    **opts,
) -> In:
    """Build `self IN (...)`.

    Args:
        expressions: literal values for the IN list (converted via `convert`).
        query: alternatively, a subquery to test membership against.
        unnest: alternatively, expression(s) wrapped in UNNEST(...).
        copy: whether to copy self and the inputs.
        opts: extra parse options forwarded to `maybe_parse`.
    """
    return In(
        this=maybe_copy(self, copy),
        expressions=[convert(e, copy=copy) for e in expressions],
        query=maybe_parse(query, copy=copy, **opts) if query else None,
        unnest=(
            Unnest(
                expressions=[
                    maybe_parse(t.cast(ExpOrStr, e), copy=copy, **opts)
                    for e in ensure_list(unnest)
                ]
            )
            if unnest
            else None
        ),
    )
def between(self, low: t.Any, high: t.Any, copy: bool = True, **opts) -> Between:
    """Build `self BETWEEN low AND high`; low/high are converted via `convert`."""
    return Between(
        this=maybe_copy(self, copy),
        low=convert(low, copy=copy, **opts),
        high=convert(high, copy=copy, **opts),
    )
# Fluent predicate builders: each wraps a copy of self in the corresponding
# SQL predicate node via _binop.
def is_(self, other: ExpOrStr) -> Is:
    return self._binop(Is, other)

def like(self, other: ExpOrStr) -> Like:
    return self._binop(Like, other)

def ilike(self, other: ExpOrStr) -> ILike:
    return self._binop(ILike, other)

def eq(self, other: t.Any) -> EQ:
    return self._binop(EQ, other)

def neq(self, other: t.Any) -> NEQ:
    return self._binop(NEQ, other)

def rlike(self, other: ExpOrStr) -> RegexpLike:
    return self._binop(RegexpLike, other)

def div(self, other: ExpOrStr, typed: bool = False, safe: bool = False) -> Div:
    # typed/safe flags are stored on the Div node for dialect-specific rendering.
    div = self._binop(Div, other)
    div.args["typed"] = typed
    div.args["safe"] = safe
    return div

def desc(self, nulls_first: bool = False) -> Ordered:
    # Wrap self in a descending ORDER BY term.
    return Ordered(this=self.copy(), desc=True, nulls_first=nulls_first)
# Python operator overloads: comparisons, arithmetic, and bitwise and/or all
# build the corresponding SQL binary node via _binop. The reflected variants
# (__radd__ etc.) pass reverse=True so self lands on the right-hand side.
def __lt__(self, other: t.Any) -> LT:
    return self._binop(LT, other)

def __le__(self, other: t.Any) -> LTE:
    return self._binop(LTE, other)

def __gt__(self, other: t.Any) -> GT:
    return self._binop(GT, other)

def __ge__(self, other: t.Any) -> GTE:
    return self._binop(GTE, other)

def __add__(self, other: t.Any) -> Add:
    return self._binop(Add, other)

def __radd__(self, other: t.Any) -> Add:
    return self._binop(Add, other, reverse=True)

def __sub__(self, other: t.Any) -> Sub:
    return self._binop(Sub, other)

def __rsub__(self, other: t.Any) -> Sub:
    return self._binop(Sub, other, reverse=True)

def __mul__(self, other: t.Any) -> Mul:
    return self._binop(Mul, other)

def __rmul__(self, other: t.Any) -> Mul:
    return self._binop(Mul, other, reverse=True)

def __truediv__(self, other: t.Any) -> Div:
    return self._binop(Div, other)

def __rtruediv__(self, other: t.Any) -> Div:
    return self._binop(Div, other, reverse=True)

def __floordiv__(self, other: t.Any) -> IntDiv:
    return self._binop(IntDiv, other)

def __rfloordiv__(self, other: t.Any) -> IntDiv:
    return self._binop(IntDiv, other, reverse=True)

def __mod__(self, other: t.Any) -> Mod:
    return self._binop(Mod, other)

def __rmod__(self, other: t.Any) -> Mod:
    return self._binop(Mod, other, reverse=True)

def __pow__(self, other: t.Any) -> Pow:
    return self._binop(Pow, other)

def __rpow__(self, other: t.Any) -> Pow:
    return self._binop(Pow, other, reverse=True)

def __and__(self, other: t.Any) -> And:
    return self._binop(And, other)

def __rand__(self, other: t.Any) -> And:
    return self._binop(And, other, reverse=True)

def __or__(self, other: t.Any) -> Or:
    return self._binop(Or, other)

def __ror__(self, other: t.Any) -> Or:
    return self._binop(Or, other, reverse=True)

def __neg__(self) -> Neg:
    # Unary minus; the operand is wrapped like a binary operand.
    return Neg(this=_wrap(self.copy(), Binary))

def __invert__(self) -> Not:
    # ~expr builds NOT expr.
    return not_(self.copy())
class Table(Expression):
    """AST node for a table reference, including alias, joins, hints, pivots,
    and other table-level modifiers.

    NOTE(review): the zero-argument accessors below (name, db, catalog,
    selects, named_selects, parts) read like @property methods whose
    decorators were lost in formatting; confirm against the upstream source.
    """

    # All args are optional; "this" holds the table identifier (or a function
    # for table-valued function calls).
    arg_types = {
        "this": False,
        "alias": False,
        "db": False,
        "catalog": False,
        "laterals": False,
        "joins": False,
        "pivots": False,
        "hints": False,
        "system_time": False,
        "version": False,
        "format": False,
        "pattern": False,
        "ordinality": False,
        "when": False,
        "only": False,
    }

    def name(self) -> str:
        # Table-valued function calls have no simple name.
        if isinstance(self.this, Func):
            return ""
        return self.this.name

    def db(self) -> str:
        return self.text("db")

    def catalog(self) -> str:
        return self.text("catalog")

    def selects(self) -> t.List[Expression]:
        # Tables have no projections of their own.
        return []

    def named_selects(self) -> t.List[str]:
        return []

    def parts(self) -> t.List[Expression]:
        """Return the parts of a table in order catalog, db, table."""
        parts: t.List[Expression] = []
        for arg in ("catalog", "db", "this"):
            part = self.args.get(arg)
            if isinstance(part, Dot):
                # Dotted parts (e.g. a.b.c stored as nested Dots) are flattened.
                parts.extend(part.flatten())
            elif isinstance(part, Expression):
                parts.append(part)
        return parts

    def to_column(self, copy: bool = True) -> Alias | Column | Dot:
        # Reinterpret this table path as a column reference; any table alias
        # becomes a column alias.
        parts = self.parts
        col = column(*reversed(parts[0:4]), fields=parts[4:], copy=copy)  # type: ignore
        alias = self.args.get("alias")
        if alias:
            col = alias_(col, alias.this, copy=copy)
        return col
# NOTE(review): the two stub defs below read like @t.overload signatures whose
# decorators were lost in formatting; confirm against the upstream source.
def to_table(sql_path: str | Table, **kwargs) -> Table: ...
def to_table(sql_path: None, **kwargs) -> None: ...
def to_table(
    sql_path: t.Optional[str | Table], dialect: DialectType = None, copy: bool = True, **kwargs
) -> t.Optional[Table]:
    """
    Create a table expression from a `[catalog].[schema].[table]` sql path. Catalog and schema are optional.
    If a table is passed in then that table is returned.

    Args:
        sql_path: a `[catalog].[schema].[table]` string.
        dialect: the source dialect according to which the table name will be parsed.
        copy: Whether to copy a table if it is passed in.
        kwargs: the kwargs to instantiate the resulting `Table` expression with.

    Returns:
        A table expression.
    """
    # Pass-through for None and existing Table nodes (optionally copied).
    if sql_path is None or isinstance(sql_path, Table):
        return maybe_copy(sql_path, copy=copy)
    if not isinstance(sql_path, str):
        raise ValueError(f"Invalid type provided for a table: {type(sql_path)}")
    table = maybe_parse(sql_path, into=Table, dialect=dialect)
    if table:
        # Extra kwargs (e.g. alias, joins) are attached to the parsed table.
        for k, v in kwargs.items():
            table.set(k, v)
    return table
def normalize_table_name(table: str | Table, dialect: DialectType = None, copy: bool = True) -> str:
    """Returns a case normalized table name without quotes.

    Args:
        table: the table to normalize
        dialect: the dialect to use for normalization rules
        copy: whether to copy the expression.

    Examples:
        >>> normalize_table_name("`A-B`.c", dialect="bigquery")
        'A-B.c'
    """
    from sqlglot.optimizer.normalize_identifiers import normalize_identifiers

    normalized = normalize_identifiers(
        to_table(table, dialect=dialect, copy=copy), dialect=dialect
    )
    return ".".join(part.name for part in normalized.parts)
# Generic type variable bound to Expression; lets tree-search helpers preserve
# the concrete node type they were asked for.
E = t.TypeVar("E", bound="sqlglot.exp.Expression")

# Accepted ways of specifying a dialect: name string, instance, class, or None.
DialectType = t.Union[str, Dialect, t.Type[Dialect], None]
The code above provides the dependencies needed to implement the `replace_tables` function. Write a Python function `def replace_tables(expression: E, mapping: t.Dict[str, str], dialect: DialectType = None, copy: bool = True) -> E` that solves the following problem:
Replace all tables in `expression` according to `mapping`. Args: expression: the expression node to be transformed. mapping: a mapping of table names. dialect: the dialect of the mapping's table names. copy: whether to copy the expression. Examples: >>> from sqlglot import exp, parse_one >>> replace_tables(parse_one("select * from a.b"), {"a.b": "c"}).sql() 'SELECT * FROM c /* a.b */' Returns: The mapped expression.
Here is the function:
def replace_tables(
    expression: E, mapping: t.Dict[str, str], dialect: DialectType = None, copy: bool = True
) -> E:
    """Replace all tables in expression according to the mapping.

    Args:
        expression: expression node to be transformed and replaced.
        mapping: mapping of table names.
        dialect: the dialect of the mapping table
        copy: whether to copy the expression.

    Examples:
        >>> from sqlglot import exp, parse_one
        >>> replace_tables(parse_one("select * from a.b"), {"a.b": "c"}).sql()
        'SELECT * FROM c /* a.b */'

    Returns:
        The mapped expression.
    """
    # Normalize mapping keys so lookups match the normalized form of each
    # Table node encountered during the walk.
    mapping = {normalize_table_name(k, dialect=dialect): v for k, v in mapping.items()}

    def _replace_tables(node: Expression) -> Expression:
        if isinstance(node, Table):
            original = normalize_table_name(node, dialect=dialect)
            new_name = mapping.get(original)
            if new_name:
                # Build the replacement table, carrying over every non-name
                # arg (alias, joins, hints, ...) from the original node.
                table = to_table(
                    new_name,
                    **{k: v for k, v in node.args.items() if k not in TABLE_PARTS},
                    dialect=dialect,
                )
                # Record the original name as a comment (renders as /* a.b */).
                table.add_comments([original])
                return table
        return node

    return expression.transform(_replace_tables, copy=copy)  # type: ignore
152,949 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
class Expression(metaclass=_Expression):
"""
The base class for all expressions in a syntax tree. Each Expression encapsulates any necessary
context, such as its child expressions, their names (arg keys), and whether a given child expression
is optional or not.
Attributes:
key: a unique key for each class in the Expression hierarchy. This is useful for hashing
and representing expressions as strings.
arg_types: determines the arguments (child nodes) supported by an expression. It maps
arg keys to booleans that indicate whether the corresponding args are optional.
parent: a reference to the parent expression (or None, in case of root expressions).
arg_key: the arg key an expression is associated with, i.e. the name its parent expression
uses to refer to it.
index: the index of an expression if it is inside of a list argument in its parent
comments: a list of comments that are associated with a given expression. This is used in
order to preserve comments when transpiling SQL code.
type: the `sqlglot.expressions.DataType` type of an expression. This is inferred by the
optimizer, in order to enable some transformations that require type information.
meta: a dictionary that can be used to store useful metadata for a given expression.
Example:
>>> class Foo(Expression):
... arg_types = {"this": True, "expression": False}
The above definition informs us that Foo is an Expression that requires an argument called
"this" and may also optionally receive an argument called "expression".
Args:
args: a mapping used for retrieving the arguments of an expression, given their arg keys.
"""
key = "expression"
arg_types = {"this": True}
__slots__ = ("args", "parent", "arg_key", "index", "comments", "_type", "_meta", "_hash")
def __init__(self, **args: t.Any):
self.args: t.Dict[str, t.Any] = args
self.parent: t.Optional[Expression] = None
self.arg_key: t.Optional[str] = None
self.index: t.Optional[int] = None
self.comments: t.Optional[t.List[str]] = None
self._type: t.Optional[DataType] = None
self._meta: t.Optional[t.Dict[str, t.Any]] = None
self._hash: t.Optional[int] = None
for arg_key, value in self.args.items():
self._set_parent(arg_key, value)
def __eq__(self, other) -> bool:
return type(self) is type(other) and hash(self) == hash(other)
def hashable_args(self) -> t.Any:
return frozenset(
(k, tuple(_norm_arg(a) for a in v) if type(v) is list else _norm_arg(v))
for k, v in self.args.items()
if not (v is None or v is False or (type(v) is list and not v))
)
def __hash__(self) -> int:
if self._hash is not None:
return self._hash
return hash((self.__class__, self.hashable_args))
def this(self) -> t.Any:
"""
Retrieves the argument with key "this".
"""
return self.args.get("this")
def expression(self) -> t.Any:
"""
Retrieves the argument with key "expression".
"""
return self.args.get("expression")
def expressions(self) -> t.List[t.Any]:
"""
Retrieves the argument with key "expressions".
"""
return self.args.get("expressions") or []
def text(self, key) -> str:
"""
Returns a textual representation of the argument corresponding to "key". This can only be used
for args that are strings or leaf Expression instances, such as identifiers and literals.
"""
field = self.args.get(key)
if isinstance(field, str):
return field
if isinstance(field, (Identifier, Literal, Var)):
return field.this
if isinstance(field, (Star, Null)):
return field.name
return ""
def is_string(self) -> bool:
"""
Checks whether a Literal expression is a string.
"""
return isinstance(self, Literal) and self.args["is_string"]
def is_number(self) -> bool:
"""
Checks whether a Literal expression is a number.
"""
return isinstance(self, Literal) and not self.args["is_string"]
def is_int(self) -> bool:
"""
Checks whether a Literal expression is an integer.
"""
return self.is_number and is_int(self.name)
def is_star(self) -> bool:
"""Checks whether an expression is a star."""
return isinstance(self, Star) or (isinstance(self, Column) and isinstance(self.this, Star))
def alias(self) -> str:
"""
Returns the alias of the expression, or an empty string if it's not aliased.
"""
if isinstance(self.args.get("alias"), TableAlias):
return self.args["alias"].name
return self.text("alias")
def alias_column_names(self) -> t.List[str]:
table_alias = self.args.get("alias")
if not table_alias:
return []
return [c.name for c in table_alias.args.get("columns") or []]
def name(self) -> str:
return self.text("this")
def alias_or_name(self) -> str:
return self.alias or self.name
def output_name(self) -> str:
"""
Name of the output column if this expression is a selection.
If the Expression has no output name, an empty string is returned.
Example:
>>> from sqlglot import parse_one
>>> parse_one("SELECT a").expressions[0].output_name
'a'
>>> parse_one("SELECT b AS c").expressions[0].output_name
'c'
>>> parse_one("SELECT 1 + 2").expressions[0].output_name
''
"""
return ""
def type(self) -> t.Optional[DataType]:
return self._type
def type(self, dtype: t.Optional[DataType | DataType.Type | str]) -> None:
if dtype and not isinstance(dtype, DataType):
dtype = DataType.build(dtype)
self._type = dtype # type: ignore
def is_type(self, *dtypes) -> bool:
return self.type is not None and self.type.is_type(*dtypes)
def is_leaf(self) -> bool:
return not any(isinstance(v, (Expression, list)) for v in self.args.values())
def meta(self) -> t.Dict[str, t.Any]:
if self._meta is None:
self._meta = {}
return self._meta
def __deepcopy__(self, memo):
root = self.__class__()
stack = [(self, root)]
while stack:
node, copy = stack.pop()
if node.comments is not None:
copy.comments = deepcopy(node.comments)
if node._type is not None:
copy._type = deepcopy(node._type)
if node._meta is not None:
copy._meta = deepcopy(node._meta)
if node._hash is not None:
copy._hash = node._hash
for k, vs in node.args.items():
if hasattr(vs, "parent"):
stack.append((vs, vs.__class__()))
copy.set(k, stack[-1][-1])
elif type(vs) is list:
copy.args[k] = []
for v in vs:
if hasattr(v, "parent"):
stack.append((v, v.__class__()))
copy.append(k, stack[-1][-1])
else:
copy.append(k, v)
else:
copy.args[k] = vs
return root
def copy(self):
"""
Returns a deep copy of the expression.
"""
return deepcopy(self)
def add_comments(self, comments: t.Optional[t.List[str]]) -> None:
if self.comments is None:
self.comments = []
if comments:
for comment in comments:
_, *meta = comment.split(SQLGLOT_META)
if meta:
for kv in "".join(meta).split(","):
k, *v = kv.split("=")
value = v[0].strip() if v else True
self.meta[k.strip()] = value
self.comments.append(comment)
def append(self, arg_key: str, value: t.Any) -> None:
"""
Appends value to arg_key if it's a list or sets it as a new list.
Args:
arg_key (str): name of the list expression arg
value (Any): value to append to the list
"""
if type(self.args.get(arg_key)) is not list:
self.args[arg_key] = []
self._set_parent(arg_key, value)
values = self.args[arg_key]
if hasattr(value, "parent"):
value.index = len(values)
values.append(value)
def set(self, arg_key: str, value: t.Any) -> None:
"""
Sets arg_key to value.
Args:
arg_key: name of the expression arg.
value: value to set the arg to.
"""
if value is None:
self.args.pop(arg_key, None)
else:
self.args[arg_key] = value
self._set_parent(arg_key, value)
def _set_parent(self, arg_key: str, value: t.Any, index: t.Optional[int] = None) -> None:
if hasattr(value, "parent"):
value.parent = self
value.arg_key = arg_key
value.index = index
elif type(value) is list:
for index, v in enumerate(value):
if hasattr(v, "parent"):
v.parent = self
v.arg_key = arg_key
v.index = index
def depth(self) -> int:
"""
Returns the depth of this tree.
"""
if self.parent:
return self.parent.depth + 1
return 0
def iter_expressions(self, reverse: bool = False) -> t.Iterator[Expression]:
"""Yields the key and expression for all arguments, exploding list args."""
# remove tuple when python 3.7 is deprecated
for vs in reversed(tuple(self.args.values())) if reverse else self.args.values():
if type(vs) is list:
for v in reversed(vs) if reverse else vs:
if hasattr(v, "parent"):
yield v
else:
if hasattr(vs, "parent"):
yield vs
def find(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Optional[E]:
"""
Returns the first node in this tree which matches at least one of
the specified types.
Args:
expression_types: the expression type(s) to match.
bfs: whether to search the AST using the BFS algorithm (DFS is used if false).
Returns:
The node which matches the criteria or None if no such node was found.
"""
return next(self.find_all(*expression_types, bfs=bfs), None)
def find_all(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Iterator[E]:
"""
Returns a generator object which visits all nodes in this tree and only
yields those that match at least one of the specified expression types.
Args:
expression_types: the expression type(s) to match.
bfs: whether to search the AST using the BFS algorithm (DFS is used if false).
Returns:
The generator object.
"""
for expression in self.walk(bfs=bfs):
if isinstance(expression, expression_types):
yield expression
def find_ancestor(self, *expression_types: t.Type[E]) -> t.Optional[E]:
"""
Returns a nearest parent matching expression_types.
Args:
expression_types: the expression type(s) to match.
Returns:
The parent node.
"""
ancestor = self.parent
while ancestor and not isinstance(ancestor, expression_types):
ancestor = ancestor.parent
return ancestor # type: ignore
def parent_select(self) -> t.Optional[Select]:
"""
Returns the parent select statement.
"""
return self.find_ancestor(Select)
def same_parent(self) -> bool:
"""Returns if the parent is the same class as itself."""
return type(self.parent) is self.__class__
def root(self) -> Expression:
"""
Returns the root expression of this tree.
"""
expression = self
while expression.parent:
expression = expression.parent
return expression
def walk(
self, bfs: bool = True, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
"""
Returns a generator object which visits all nodes in this tree.
Args:
bfs (bool): if set to True the BFS traversal order will be applied,
otherwise the DFS traversal will be used instead.
prune ((node, parent, arg_key) -> bool): callable that returns True if
the generator should stop traversing this branch of the tree.
Returns:
the generator object.
"""
if bfs:
yield from self.bfs(prune=prune)
else:
yield from self.dfs(prune=prune)
def dfs(
self, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
"""
Returns a generator object which visits all nodes in this tree in
the DFS (Depth-first) order.
Returns:
The generator object.
"""
stack = [self]
while stack:
node = stack.pop()
yield node
if prune and prune(node):
continue
for v in node.iter_expressions(reverse=True):
stack.append(v)
def bfs(
self, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
"""
Returns a generator object which visits all nodes in this tree in
the BFS (Breadth-first) order.
Returns:
The generator object.
"""
queue = deque([self])
while queue:
node = queue.popleft()
yield node
if prune and prune(node):
continue
for v in node.iter_expressions():
queue.append(v)
def unnest(self):
"""
Returns the first non parenthesis child or self.
"""
expression = self
while type(expression) is Paren:
expression = expression.this
return expression
def unalias(self):
"""
Returns the inner expression if this is an Alias.
"""
if isinstance(self, Alias):
return self.this
return self
def unnest_operands(self):
"""
Returns unnested operands as a tuple.
"""
return tuple(arg.unnest() for arg in self.iter_expressions())
def flatten(self, unnest=True):
"""
Returns a generator which yields child nodes whose parents are the same class.
A AND B AND C -> [A, B, C]
"""
for node in self.dfs(prune=lambda n: n.parent and type(n) is not self.__class__):
if type(node) is not self.__class__:
yield node.unnest() if unnest and not isinstance(node, Subquery) else node
def __str__(self) -> str:
return self.sql()
def __repr__(self) -> str:
return _to_s(self)
def to_s(self) -> str:
"""
Same as __repr__, but includes additional information which can be useful
for debugging, like empty or missing args and the AST nodes' object IDs.
"""
return _to_s(self, verbose=True)
def sql(self, dialect: DialectType = None, **opts) -> str:
"""
Returns SQL string representation of this tree.
Args:
dialect: the dialect of the output SQL string (eg. "spark", "hive", "presto", "mysql").
opts: other `sqlglot.generator.Generator` options.
Returns:
The SQL string.
"""
from sqlglot.dialects import Dialect
return Dialect.get_or_raise(dialect).generate(self, **opts)
def transform(self, fun: t.Callable, *args: t.Any, copy: bool = True, **kwargs) -> Expression:
"""
Visits all tree nodes (excluding already transformed ones)
and applies the given transformation function to each node.
Args:
fun (function): a function which takes a node as an argument and returns a
new transformed node or the same node without modifications. If the function
returns None, then the corresponding node will be removed from the syntax tree.
copy (bool): if set to True a new tree instance is constructed, otherwise the tree is
modified in place.
Returns:
The transformed tree.
"""
root = None
new_node = None
for node in (self.copy() if copy else self).dfs(prune=lambda n: n is not new_node):
new_node = fun(node, *args, **kwargs)
if root:
if new_node is not node:
node.replace(new_node)
else:
root = new_node
assert root
return root.assert_is(Expression)
@t.overload
def replace(self, expression: E) -> E: ...

@t.overload
def replace(self, expression: None) -> None: ...

def replace(self, expression):
    """
    Swap out this expression with a new expression.

    For example::

        >>> tree = Select().select("x").from_("tbl")
        >>> tree.find(Column).replace(column("y"))
        Column(
          this=Identifier(this=y, quoted=False))
        >>> tree.sql()
        'SELECT y FROM tbl'

    Args:
        expression: new node

    Returns:
        The new expression or expressions.
    """
    # NOTE: the two stubs above were missing their @t.overload decorators;
    # without them they were executed as real defs and silently discarded.
    parent = self.parent

    if not parent:
        return expression

    key = self.arg_key
    value = parent.args.get(key)

    if isinstance(value, list):
        index = self.index

        if isinstance(expression, list):
            # splice a list of replacements in place of this single node
            value.pop(index)
            value[index:index] = expression
            parent._set_parent(key, value)
        else:
            if expression is None:
                value.pop(index)

                # shift trailing siblings left to fill the removed slot
                for v in value[index:]:
                    v.index = v.index - 1
            else:
                value[index] = expression
                parent._set_parent(key, expression, index=index)
    elif value is not None:
        if expression is None:
            parent.args.pop(key)
        else:
            parent.set(key, expression)

    if expression is not self:
        # detach this node from the tree
        self.parent = None
        self.arg_key = None
        self.index = None

    return expression
def pop(self: E) -> E:
    """
    Remove this expression from its AST.

    Returns:
        The popped expression.
    """
    # replace(None) detaches self and removes its slot from the parent's args
    self.replace(None)
    return self
def assert_is(self, type_: t.Type[E]) -> E:
    """
    Assert that this `Expression` is an instance of `type_`.

    If it is NOT an instance of `type_`, this raises an assertion error.
    Otherwise, this returns this expression.

    Examples:
        This is useful for type security in chained expressions:

        >>> import sqlglot
        >>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()
        'SELECT x, z FROM y'
    """
    # happy path first: return self unchanged when the type matches
    if isinstance(self, type_):
        return self
    raise AssertionError(f"{self} is not {type_}.")
def error_messages(self, args: t.Optional[t.Sequence] = None) -> t.List[str]:
    """
    Checks if this expression is valid (e.g. all mandatory args are set).

    Args:
        args: a sequence of values that were used to instantiate a Func expression.
            This is used to check that the provided arguments don't exceed the
            function argument limit.

    Returns:
        A list of error messages for all possible errors that were found.
    """
    errors: t.List[str] = []

    # args that this expression class does not declare
    errors.extend(
        f"Unexpected keyword: '{k}' for {self.__class__}"
        for k in self.args
        if k not in self.arg_types
    )

    # declared-mandatory args that are absent or empty lists
    for key, required in self.arg_types.items():
        value = self.args.get(key)
        if required and (value is None or (isinstance(value, list) and not value)):
            errors.append(f"Required keyword: '{key}' missing for {self.__class__}")

    too_many_args = (
        args
        and isinstance(self, Func)
        and len(args) > len(self.arg_types)
        and not self.is_var_len_args
    )
    if too_many_args:
        errors.append(
            f"The number of provided arguments ({len(args)}) is greater than "
            f"the maximum number of supported arguments ({len(self.arg_types)})"
        )

    return errors
def dump(self):
    """
    Dump this Expression to a JSON-serializable dict.
    """
    from sqlglot.serde import dump

    return dump(self)

@classmethod
def load(cls, obj):
    """
    Load a dict (as returned by `Expression.dump`) into an Expression instance.
    """
    # `cls` as first parameter shows this was written as a classmethod;
    # the @classmethod decorator was missing, so an instance call would have
    # bound the instance to `cls`.
    from sqlglot.serde import load

    return load(obj)
def and_(
    self,
    *expressions: t.Optional[ExpOrStr],
    dialect: DialectType = None,
    copy: bool = True,
    **opts,
) -> Condition:
    """
    AND this condition with one or multiple expressions.

    Example:
        >>> condition("x=1").and_("y=1").sql()
        'x = 1 AND y = 1'

    Args:
        *expressions: the SQL code strings to parse.
            If an `Expression` instance is passed, it will be used as-is.
        dialect: the dialect used to parse the input expression.
        copy: whether to copy the involved expressions (only applies to Expressions).
        opts: other options to use to parse the input expressions.

    Returns:
        The new And condition.
    """
    # delegates to the module-level and_() helper
    return and_(self, *expressions, dialect=dialect, copy=copy, **opts)

def or_(
    self,
    *expressions: t.Optional[ExpOrStr],
    dialect: DialectType = None,
    copy: bool = True,
    **opts,
) -> Condition:
    """
    OR this condition with one or multiple expressions.

    Example:
        >>> condition("x=1").or_("y=1").sql()
        'x = 1 OR y = 1'

    Args:
        *expressions: the SQL code strings to parse.
            If an `Expression` instance is passed, it will be used as-is.
        dialect: the dialect used to parse the input expression.
        copy: whether to copy the involved expressions (only applies to Expressions).
        opts: other options to use to parse the input expressions.

    Returns:
        The new Or condition.
    """
    # delegates to the module-level or_() helper
    return or_(self, *expressions, dialect=dialect, copy=copy, **opts)

def not_(self, copy: bool = True):
    """
    Wrap this condition with NOT.

    Example:
        >>> condition("x=1").not_().sql()
        'NOT x = 1'

    Args:
        copy: whether to copy this object.

    Returns:
        The new Not instance.
    """
    # delegates to the module-level not_() helper
    return not_(self, copy=copy)

def as_(
    self,
    alias: str | Identifier,
    quoted: t.Optional[bool] = None,
    dialect: DialectType = None,
    copy: bool = True,
    **opts,
) -> Alias:
    # alias this expression: renders as `<self> AS <alias>`
    return alias_(self, alias, quoted=quoted, dialect=dialect, copy=copy, **opts)
def _binop(self, klass: t.Type[E], other: t.Any, reverse: bool = False) -> E:
    """
    Build a binary node of type `klass` joining a copy of `self` with `other`.

    Args:
        klass: the binary Expression subclass to construct.
        other: the other operand; converted to an Expression if needed.
        reverse: if True, `other` becomes the left-hand side.
    """
    left = self.copy()
    right = convert(other, copy=True)

    if not isinstance(left, klass) and not isinstance(right, klass):
        # wrap both operands so precedence survives SQL round-tripping
        left = _wrap(left, Binary)
        right = _wrap(right, Binary)

    if reverse:
        left, right = right, left

    return klass(this=left, expression=right)
def __getitem__(self, other: ExpOrStr | t.Tuple[ExpOrStr]) -> Bracket:
return Bracket(
this=self.copy(), expressions=[convert(e, copy=True) for e in ensure_list(other)]
)
def __iter__(self) -> t.Iterator:
if "expressions" in self.arg_types:
return iter(self.args.get("expressions") or [])
# We define this because __getitem__ converts Expression into an iterable, which is
# problematic because one can hit infinite loops if they do "for x in some_expr: ..."
# See: https://peps.python.org/pep-0234/
raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
def isin(
    self,
    *expressions: t.Any,
    query: t.Optional[ExpOrStr] = None,
    unnest: t.Optional[ExpOrStr] | t.Collection[ExpOrStr] = None,
    copy: bool = True,
    **opts,
) -> In:
    """
    Build an `IN` predicate with this expression on the left-hand side.

    Args:
        expressions: literal values for the IN list; each is converted to an Expression.
        query: a subquery to test membership against, instead of a value list.
        unnest: expression(s) wrapped in UNNEST(...) to use as the IN source.
        copy: whether to copy this expression and the converted operands.
        opts: parse options forwarded to `maybe_parse`.
    """
    return In(
        this=maybe_copy(self, copy),
        expressions=[convert(e, copy=copy) for e in expressions],
        query=maybe_parse(query, copy=copy, **opts) if query else None,
        unnest=(
            Unnest(
                expressions=[
                    maybe_parse(t.cast(ExpOrStr, e), copy=copy, **opts)
                    for e in ensure_list(unnest)
                ]
            )
            if unnest
            else None
        ),
    )

def between(self, low: t.Any, high: t.Any, copy: bool = True, **opts) -> Between:
    """Build a `<self> BETWEEN low AND high` predicate."""
    return Between(
        this=maybe_copy(self, copy),
        low=convert(low, copy=copy, **opts),
        high=convert(high, copy=copy, **opts),
    )
# Predicate builders: each joins a copy of `self` with the converted `other`
# operand via _binop, producing the corresponding SQL AST node.
def is_(self, other: ExpOrStr) -> Is:
    return self._binop(Is, other)

def like(self, other: ExpOrStr) -> Like:
    return self._binop(Like, other)

def ilike(self, other: ExpOrStr) -> ILike:
    return self._binop(ILike, other)

def eq(self, other: t.Any) -> EQ:
    return self._binop(EQ, other)

def neq(self, other: t.Any) -> NEQ:
    return self._binop(NEQ, other)

def rlike(self, other: ExpOrStr) -> RegexpLike:
    return self._binop(RegexpLike, other)

def div(self, other: ExpOrStr, typed: bool = False, safe: bool = False) -> Div:
    """Build a division node; `typed` and `safe` are stored as extra args
    toggling dialect-specific division semantics."""
    div = self._binop(Div, other)
    div.args["typed"] = typed
    div.args["safe"] = safe
    return div
def desc(self, nulls_first: bool = False) -> Ordered:
    """Wrap this expression in a descending ORDER BY term."""
    return Ordered(this=self.copy(), desc=True, nulls_first=nulls_first)

# Operator overloads: Python operators build the corresponding SQL AST nodes.
# The __r*__ variants handle `literal <op> expr` by swapping operand order.
def __lt__(self, other: t.Any) -> LT:
    return self._binop(LT, other)

def __le__(self, other: t.Any) -> LTE:
    return self._binop(LTE, other)

def __gt__(self, other: t.Any) -> GT:
    return self._binop(GT, other)

def __ge__(self, other: t.Any) -> GTE:
    return self._binop(GTE, other)

def __add__(self, other: t.Any) -> Add:
    return self._binop(Add, other)

def __radd__(self, other: t.Any) -> Add:
    return self._binop(Add, other, reverse=True)

def __sub__(self, other: t.Any) -> Sub:
    return self._binop(Sub, other)

def __rsub__(self, other: t.Any) -> Sub:
    return self._binop(Sub, other, reverse=True)

def __mul__(self, other: t.Any) -> Mul:
    return self._binop(Mul, other)

def __rmul__(self, other: t.Any) -> Mul:
    return self._binop(Mul, other, reverse=True)

def __truediv__(self, other: t.Any) -> Div:
    return self._binop(Div, other)

def __rtruediv__(self, other: t.Any) -> Div:
    return self._binop(Div, other, reverse=True)

def __floordiv__(self, other: t.Any) -> IntDiv:
    return self._binop(IntDiv, other)

def __rfloordiv__(self, other: t.Any) -> IntDiv:
    return self._binop(IntDiv, other, reverse=True)

def __mod__(self, other: t.Any) -> Mod:
    return self._binop(Mod, other)

def __rmod__(self, other: t.Any) -> Mod:
    return self._binop(Mod, other, reverse=True)

def __pow__(self, other: t.Any) -> Pow:
    return self._binop(Pow, other)

def __rpow__(self, other: t.Any) -> Pow:
    return self._binop(Pow, other, reverse=True)

def __and__(self, other: t.Any) -> And:
    return self._binop(And, other)

def __rand__(self, other: t.Any) -> And:
    return self._binop(And, other, reverse=True)

def __or__(self, other: t.Any) -> Or:
    return self._binop(Or, other)

def __ror__(self, other: t.Any) -> Or:
    return self._binop(Or, other, reverse=True)

def __neg__(self) -> Neg:
    # wrap the operand so -(a + b) round-trips with correct precedence
    return Neg(this=_wrap(self.copy(), Binary))

def __invert__(self) -> Not:
    return not_(self.copy())
class Placeholder(Condition):
    """A parameter placeholder in a statement, e.g. `?` or a named `:name`."""

    # "this": optional placeholder name; "kind": optional placeholder kind
    arg_types = {"this": False, "kind": False}
def convert(value: t.Any, copy: bool = False) -> Expression:
    """Convert a python value into an expression object.

    Raises an error if a conversion is not possible.

    Args:
        value: A python object.
        copy: Whether to copy `value` (only applies to Expressions and collections).

    Returns:
        Expression: the equivalent expression object.
    """
    if isinstance(value, Expression):
        return maybe_copy(value, copy)
    if isinstance(value, str):
        return Literal.string(value)
    # NOTE: bool must be tested before numbers.Number — bool is a Number subclass.
    if isinstance(value, bool):
        return Boolean(this=value)
    if value is None or (isinstance(value, float) and math.isnan(value)):
        return null()
    if isinstance(value, numbers.Number):
        return Literal.number(value)
    # NOTE: datetime must be tested before date — datetime is a date subclass.
    if isinstance(value, datetime.datetime):
        # naive datetimes are stamped as UTC before serializing
        datetime_literal = Literal.string(
            (value if value.tzinfo else value.replace(tzinfo=datetime.timezone.utc)).isoformat()
        )
        return TimeStrToTime(this=datetime_literal)
    if isinstance(value, datetime.date):
        date_literal = Literal.string(value.strftime("%Y-%m-%d"))
        return DateStrToDate(this=date_literal)
    if isinstance(value, tuple):
        return Tuple(expressions=[convert(v, copy=copy) for v in value])
    if isinstance(value, list):
        return Array(expressions=[convert(v, copy=copy) for v in value])
    if isinstance(value, dict):
        return Map(
            keys=Array(expressions=[convert(k, copy=copy) for k in value]),
            values=Array(expressions=[convert(v, copy=copy) for v in value.values()]),
        )
    raise ValueError(f"Cannot convert {value}")
The code snippet above provides the dependencies needed to implement the `replace_placeholders` function. Write a Python function `def replace_placeholders(expression: Expression, *args, **kwargs) -> Expression` that solves the following problem:

Replace placeholders in an expression. Positional `args` substitute unnamed (`?`) placeholders in the order given; keyword `kwargs` substitute named (`:name`) placeholders. For example, `replace_placeholders(parse_one("select * from :tbl where ? = ?"), exp.to_identifier("str_col"), "b", tbl=exp.to_identifier("foo")).sql()` returns `"SELECT * FROM foo WHERE str_col = 'b'"`. The function returns the mapped expression.

Here is the function:
def replace_placeholders(expression: Expression, *args, **kwargs) -> Expression:
    """Replace placeholders in an expression.

    Args:
        expression: expression node to be transformed and replaced.
        args: positional names that will substitute unnamed placeholders in the given order.
        kwargs: keyword arguments that will substitute named placeholders.

    Examples:
        >>> from sqlglot import exp, parse_one
        >>> replace_placeholders(
        ...     parse_one("select * from :tbl where ? = ?"),
        ...     exp.to_identifier("str_col"), "b", tbl=exp.to_identifier("foo")
        ... ).sql()
        "SELECT * FROM foo WHERE str_col = 'b'"

    Returns:
        The mapped expression.
    """

    def _substitute(node: Expression, positional, **named) -> Expression:
        if not isinstance(node, Placeholder):
            return node

        if node.name:
            # named placeholder (:name) — look it up in the keyword arguments
            replacement = named.get(node.name)
            return convert(replacement) if replacement is not None else node

        # unnamed placeholder (?) — consume the next positional value, if any remain
        try:
            return convert(next(positional))
        except StopIteration:
            return node

    # the iterator is threaded through transform so each `?` consumes one value
    return expression.transform(_substitute, iter(args), **kwargs)
from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
if t.TYPE_CHECKING:
    # imported only for type annotations, to avoid circular imports at runtime
    from sqlglot._typing import E, Lit
    from sqlglot.dialects.dialect import DialectType

# Type variable bound to Query, for fluent query-builder method signatures.
Q = t.TypeVar("Q", bound="Query")
class Expression(metaclass=_Expression):
"""
The base class for all expressions in a syntax tree. Each Expression encapsulates any necessary
context, such as its child expressions, their names (arg keys), and whether a given child expression
is optional or not.
Attributes:
key: a unique key for each class in the Expression hierarchy. This is useful for hashing
and representing expressions as strings.
arg_types: determines the arguments (child nodes) supported by an expression. It maps
arg keys to booleans that indicate whether the corresponding args are optional.
parent: a reference to the parent expression (or None, in case of root expressions).
arg_key: the arg key an expression is associated with, i.e. the name its parent expression
uses to refer to it.
index: the index of an expression if it is inside of a list argument in its parent
comments: a list of comments that are associated with a given expression. This is used in
order to preserve comments when transpiling SQL code.
type: the `sqlglot.expressions.DataType` type of an expression. This is inferred by the
optimizer, in order to enable some transformations that require type information.
meta: a dictionary that can be used to store useful metadata for a given expression.
Example:
>>> class Foo(Expression):
... arg_types = {"this": True, "expression": False}
The above definition informs us that Foo is an Expression that requires an argument called
"this" and may also optionally receive an argument called "expression".
Args:
args: a mapping used for retrieving the arguments of an expression, given their arg keys.
"""
key = "expression"
arg_types = {"this": True}
__slots__ = ("args", "parent", "arg_key", "index", "comments", "_type", "_meta", "_hash")
def __init__(self, **args: t.Any):
    # the raw arg mapping; keys are validated lazily by error_messages()
    self.args: t.Dict[str, t.Any] = args
    self.parent: t.Optional[Expression] = None
    self.arg_key: t.Optional[str] = None
    self.index: t.Optional[int] = None
    self.comments: t.Optional[t.List[str]] = None
    self._type: t.Optional[DataType] = None
    self._meta: t.Optional[t.Dict[str, t.Any]] = None
    self._hash: t.Optional[int] = None

    # wire parent/arg_key/index back-references onto all child expressions
    for arg_key, value in self.args.items():
        self._set_parent(arg_key, value)
def __eq__(self, other) -> bool:
    # equality = same concrete class + equal hash (see hashable_args below)
    return type(self) is type(other) and hash(self) == hash(other)
@property
def hashable_args(self) -> t.Any:
    """
    Frozen view of `args` used for hashing and equality; empty values
    (None, False, empty lists) are dropped so they don't affect identity.

    Restored `@property`: `__hash__` reads `self.hashable_args` without
    calling it, which only works when this is a property.
    """
    return frozenset(
        (k, tuple(_norm_arg(a) for a in v) if type(v) is list else _norm_arg(v))
        for k, v in self.args.items()
        if not (v is None or v is False or (type(v) is list and not v))
    )
def __hash__(self) -> int:
    # use the cached hash when one has been assigned (set elsewhere,
    # e.g. preserved by __deepcopy__ below)
    if self._hash is not None:
        return self._hash

    return hash((self.__class__, self.hashable_args))
@property
def this(self) -> t.Any:
    """
    Retrieves the argument with key "this".

    Restored `@property`: sibling code reads `self.this` / `field.this`
    as attributes (e.g. in `is_star` and `text`), which only works when
    these accessors are properties.
    """
    return self.args.get("this")

@property
def expression(self) -> t.Any:
    """
    Retrieves the argument with key "expression".
    """
    return self.args.get("expression")

@property
def expressions(self) -> t.List[t.Any]:
    """
    Retrieves the argument with key "expressions", defaulting to an empty list.
    """
    return self.args.get("expressions") or []
def text(self, key) -> str:
    """
    Returns a textual representation of the argument corresponding to "key". This can only be used
    for args that are strings or leaf Expression instances, such as identifiers and literals.
    """
    field = self.args.get(key)
    if isinstance(field, str):
        return field
    if isinstance(field, (Identifier, Literal, Var)):
        return field.this
    if isinstance(field, (Star, Null)):
        return field.name
    # missing or non-leaf args have no sensible text
    return ""
@property
def is_string(self) -> bool:
    """
    Checks whether a Literal expression is a string.
    """
    return isinstance(self, Literal) and self.args["is_string"]

@property
def is_number(self) -> bool:
    """
    Checks whether a Literal expression is a number.
    """
    return isinstance(self, Literal) and not self.args["is_string"]

@property
def is_int(self) -> bool:
    """
    Checks whether a Literal expression is an integer.

    Restored `@property` on this group: `is_int` reads `self.is_number`
    without calling it — as a plain method that expression would always
    be truthy (a bound method), silently breaking the check.
    """
    # `is_int(...)` here resolves to the module-level helper, not this property
    return self.is_number and is_int(self.name)

@property
def is_star(self) -> bool:
    """Checks whether an expression is a star."""
    return isinstance(self, Star) or (isinstance(self, Column) and isinstance(self.this, Star))
@property
def alias(self) -> str:
    """
    Returns the alias of the expression, or an empty string if it's not aliased.

    Restored `@property` on this group: `alias_or_name` below reads
    `self.alias or self.name` as attributes, and `is_int` reads `self.name` —
    both only work when these accessors are properties.
    """
    if isinstance(self.args.get("alias"), TableAlias):
        return self.args["alias"].name
    return self.text("alias")

@property
def alias_column_names(self) -> t.List[str]:
    # column names declared on this expression's table alias, if any
    table_alias = self.args.get("alias")
    if not table_alias:
        return []
    return [c.name for c in table_alias.args.get("columns") or []]

@property
def name(self) -> str:
    # shorthand for the text of the "this" arg
    return self.text("this")

@property
def alias_or_name(self) -> str:
    return self.alias or self.name

@property
def output_name(self) -> str:
    """
    Name of the output column if this expression is a selection.

    If the Expression has no output name, an empty string is returned.

    Example:
        >>> from sqlglot import parse_one
        >>> parse_one("SELECT a").expressions[0].output_name
        'a'
        >>> parse_one("SELECT b AS c").expressions[0].output_name
        'c'
        >>> parse_one("SELECT 1 + 2").expressions[0].output_name
        ''
    """
    return ""
@property
def type(self) -> t.Optional[DataType]:
    """
    The inferred `DataType` of this expression (populated by the optimizer).

    Restored `@property`/`@type.setter`: two same-named defs plus the
    attribute-style read `self.type` in `is_type` show the decorators
    were stripped — without them the second def simply shadowed the first.
    """
    return self._type

@type.setter
def type(self, dtype: t.Optional[DataType | DataType.Type | str]) -> None:
    # coerce raw strings / DataType.Type values into a DataType instance
    if dtype and not isinstance(dtype, DataType):
        dtype = DataType.build(dtype)
    self._type = dtype  # type: ignore
def is_type(self, *dtypes) -> bool:
    # True if this expression's inferred type matches any of `dtypes`
    return self.type is not None and self.type.is_type(*dtypes)

def is_leaf(self) -> bool:
    # a leaf node holds no child expressions, neither single nodes nor lists
    return not any(isinstance(v, (Expression, list)) for v in self.args.values())
@property
def meta(self) -> t.Dict[str, t.Any]:
    """
    Lazily-created metadata dict attached to this expression.

    Restored `@property`: `add_comments` writes `self.meta[k] = value`
    as a subscripted attribute, which only works when this is a property.
    """
    if self._meta is None:
        self._meta = {}
    return self._meta
def __deepcopy__(self, memo):
    # Iterative deep copy — avoids recursion limits on very deep ASTs.
    # Each stack entry pairs an original node with its partially-built copy.
    root = self.__class__()
    stack = [(self, root)]

    while stack:
        node, copy = stack.pop()

        if node.comments is not None:
            copy.comments = deepcopy(node.comments)
        if node._type is not None:
            copy._type = deepcopy(node._type)
        if node._meta is not None:
            copy._meta = deepcopy(node._meta)
        if node._hash is not None:
            copy._hash = node._hash

        for k, vs in node.args.items():
            if hasattr(vs, "parent"):
                # child Expression: push (original, empty shell) and link the
                # shell into the copy; it is filled when popped later
                stack.append((vs, vs.__class__()))
                copy.set(k, stack[-1][-1])
            elif type(vs) is list:
                copy.args[k] = []

                for v in vs:
                    if hasattr(v, "parent"):
                        stack.append((v, v.__class__()))
                        copy.append(k, stack[-1][-1])
                    else:
                        copy.append(k, v)
            else:
                # plain immutable value (str/bool/int/...): share it directly
                copy.args[k] = vs

    return root
def copy(self):
    """
    Returns a deep copy of the expression.
    """
    # dispatches to __deepcopy__ above
    return deepcopy(self)
def add_comments(self, comments: t.Optional[t.List[str]]) -> None:
    """
    Append `comments` to this node, extracting any SQLGLOT_META key=value
    pairs embedded in them into `self.meta`.
    """
    if self.comments is None:
        self.comments = []
    if comments:
        for comment in comments:
            # everything after the SQLGLOT_META marker is parsed as
            # comma-separated k=v pairs (a bare key maps to True)
            _, *meta = comment.split(SQLGLOT_META)
            if meta:
                for kv in "".join(meta).split(","):
                    k, *v = kv.split("=")
                    value = v[0].strip() if v else True
                    self.meta[k.strip()] = value
            self.comments.append(comment)
def append(self, arg_key: str, value: t.Any) -> None:
    """
    Appends value to arg_key if it's a list or sets it as a new list.

    Args:
        arg_key (str): name of the list expression arg
        value (Any): value to append to the list
    """
    # coerce the slot to a list if it isn't one already
    if type(self.args.get(arg_key)) is not list:
        self.args[arg_key] = []
    self._set_parent(arg_key, value)
    values = self.args[arg_key]
    if hasattr(value, "parent"):
        # record the child's position within the list
        value.index = len(values)
    values.append(value)
def set(self, arg_key: str, value: t.Any) -> None:
    """
    Sets arg_key to value; a None value unsets (removes) the arg entirely.

    Args:
        arg_key: name of the expression arg.
        value: value to set the arg to.
    """
    if value is None:
        # None means "unset": drop the key rather than storing None
        self.args.pop(arg_key, None)
        return

    self.args[arg_key] = value
    self._set_parent(arg_key, value)
def _set_parent(self, arg_key: str, value: t.Any, index: t.Optional[int] = None) -> None:
    # Wire parent/arg_key/index back-references onto `value` if it is an
    # Expression, or onto every Expression element if it is a list.
    if hasattr(value, "parent"):
        value.parent = self
        value.arg_key = arg_key
        value.index = index
    elif type(value) is list:
        # NOTE: the loop deliberately rebinds `index` to each element's
        # list position, ignoring the `index` parameter in this branch
        for index, v in enumerate(value):
            if hasattr(v, "parent"):
                v.parent = self
                v.arg_key = arg_key
                v.index = index
@property
def depth(self) -> int:
    """
    Returns the depth of this tree (root nodes have depth 0).

    Restored `@property`: the body itself computes `self.parent.depth + 1` —
    as a plain method that would add 1 to a bound method and raise.
    """
    if self.parent:
        return self.parent.depth + 1
    return 0
def iter_expressions(self, reverse: bool = False) -> t.Iterator[Expression]:
    """Yields the key and expression for all arguments, exploding list args."""
    # remove tuple when python 3.7 is deprecated
    for vs in reversed(tuple(self.args.values())) if reverse else self.args.values():
        if type(vs) is list:
            for v in reversed(vs) if reverse else vs:
                # only Expression children (those carrying `parent`) are yielded
                if hasattr(v, "parent"):
                    yield v
        else:
            if hasattr(vs, "parent"):
                yield vs
def find(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Optional[E]:
    """
    Returns the first node in this tree which matches at least one of
    the specified types.

    Args:
        expression_types: the expression type(s) to match.
        bfs: whether to search the AST using the BFS algorithm (DFS is used if false).

    Returns:
        The node which matches the criteria or None if no such node was found.
    """
    # take the first match from find_all, or None when the iterator is empty
    return next(self.find_all(*expression_types, bfs=bfs), None)
def find_all(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Iterator[E]:
    """
    Yield every node in this tree that matches at least one of the given types.

    Args:
        expression_types: the expression type(s) to match.
        bfs: whether to search the AST using the BFS algorithm (DFS is used if false).

    Returns:
        The generator object.
    """
    yield from (node for node in self.walk(bfs=bfs) if isinstance(node, expression_types))
def find_ancestor(self, *expression_types: t.Type[E]) -> t.Optional[E]:
    """
    Returns a nearest parent matching expression_types.

    Args:
        expression_types: the expression type(s) to match.

    Returns:
        The parent node, or None when no ancestor matches.
    """
    node = self.parent
    while node is not None and not isinstance(node, expression_types):
        node = node.parent
    return node  # type: ignore
def parent_select(self) -> t.Optional[Select]:
    """
    Returns the parent select statement.
    """
    return self.find_ancestor(Select)

def same_parent(self) -> bool:
    """Returns if the parent is the same class as itself."""
    return type(self.parent) is self.__class__
def root(self) -> Expression:
    """
    Returns the root expression of this tree (the node with no parent).
    """
    node = self
    while node.parent is not None:
        node = node.parent
    return node
def walk(
    self, bfs: bool = True, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
    """
    Returns a generator object which visits all nodes in this tree.

    Args:
        bfs (bool): if set to True the BFS traversal order will be applied,
            otherwise the DFS traversal will be used instead.
        prune ((node) -> bool): callable that returns True if the generator
            should stop traversing below the given node (the node itself is
            still yielded).

    Returns:
        the generator object.
    """
    if bfs:
        yield from self.bfs(prune=prune)
    else:
        yield from self.dfs(prune=prune)
def dfs(
    self, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
    """
    Yield every node of this tree in DFS (depth-first, pre-order) order.

    Args:
        prune: when provided and truthy for a node, that node's children
            are not visited (the node itself is still yielded).

    Returns:
        The generator object.
    """
    stack = [self]

    while stack:
        node = stack.pop()
        yield node

        if prune and prune(node):
            continue

        # children are pushed in reverse so they pop in source order
        stack.extend(node.iter_expressions(reverse=True))
def bfs(
    self, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
    """
    Yield every node of this tree in BFS (breadth-first) order.

    Args:
        prune: when provided and truthy for a node, that node's children
            are not visited (the node itself is still yielded).

    Returns:
        The generator object.
    """
    queue = deque([self])

    while queue:
        node = queue.popleft()
        yield node

        if prune and prune(node):
            continue

        queue.extend(node.iter_expressions())
def unnest(self):
    """
    Returns the first non parenthesis child or self.
    """
    expression = self
    # peel off nested Paren wrappers: ((x)) -> x
    while type(expression) is Paren:
        expression = expression.this
    return expression

def unalias(self):
    """
    Returns the inner expression if this is an Alias.
    """
    if isinstance(self, Alias):
        return self.this
    return self
def unnest_operands(self):
    """
    Returns unnested operands as a tuple.
    """
    return tuple(arg.unnest() for arg in self.iter_expressions())

def flatten(self, unnest=True):
    """
    Returns a generator which yields child nodes whose parents are the same class.

    A AND B AND C -> [A, B, C]
    """
    # prune subtrees rooted at a different class — their children can no
    # longer be same-class operands of self
    for node in self.dfs(prune=lambda n: n.parent and type(n) is not self.__class__):
        if type(node) is not self.__class__:
            # Subqueries keep their parens; they are semantically meaningful
            yield node.unnest() if unnest and not isinstance(node, Subquery) else node
def __str__(self) -> str:
return self.sql()
def __repr__(self) -> str:
return _to_s(self)
def to_s(self) -> str:
"""
Same as __repr__, but includes additional information which can be useful
for debugging, like empty or missing args and the AST nodes' object IDs.
"""
return _to_s(self, verbose=True)
def sql(self, dialect: DialectType = None, **opts) -> str:
"""
Returns SQL string representation of this tree.
Args:
dialect: the dialect of the output SQL string (eg. "spark", "hive", "presto", "mysql").
opts: other `sqlglot.generator.Generator` options.
Returns:
The SQL string.
"""
from sqlglot.dialects import Dialect
return Dialect.get_or_raise(dialect).generate(self, **opts)
def transform(self, fun: t.Callable, *args: t.Any, copy: bool = True, **kwargs) -> Expression:
    """
    Visits all tree nodes (excluding already transformed ones)
    and applies the given transformation function to each node.

    Args:
        fun (function): a function which takes a node as an argument and returns a
            new transformed node or the same node without modifications. If the function
            returns None, then the corresponding node will be removed from the syntax tree.
        copy (bool): if set to True a new tree instance is constructed, otherwise the tree is
            modified in place.

    Returns:
        The transformed tree.
    """
    root = None
    new_node = None

    # The prune check runs when the generator resumes (after `fun` was applied),
    # so `n is not new_node` skips the children of any node that `fun` replaced
    # with a different object — those subtrees count as already transformed.
    for node in (self.copy() if copy else self).dfs(prune=lambda n: n is not new_node):
        new_node = fun(node, *args, **kwargs)

        if root:
            if new_node is not node:
                node.replace(new_node)
        else:
            # The first visited node becomes the root; it is not replaced
            # in-place because it has no parent to splice into.
            root = new_node

    assert root
    return root.assert_is(Expression)
@t.overload
def replace(self, expression: E) -> E: ...

@t.overload
def replace(self, expression: None) -> None: ...

def replace(self, expression):
    """
    Swap out this expression with a new expression.

    For example::

        >>> tree = Select().select("x").from_("tbl")
        >>> tree.find(Column).replace(column("y"))
        Column(
          this=Identifier(this=y, quoted=False))
        >>> tree.sql()
        'SELECT y FROM tbl'

    Args:
        expression: new node

    Returns:
        The new expression or expressions.
    """
    # NOTE: the two stubs above were missing their @t.overload decorators;
    # without them they were executed as real defs and silently discarded.
    parent = self.parent

    if not parent:
        return expression

    key = self.arg_key
    value = parent.args.get(key)

    if isinstance(value, list):
        index = self.index

        if isinstance(expression, list):
            # splice a list of replacements in place of this single node
            value.pop(index)
            value[index:index] = expression
            parent._set_parent(key, value)
        else:
            if expression is None:
                value.pop(index)

                # shift trailing siblings left to fill the removed slot
                for v in value[index:]:
                    v.index = v.index - 1
            else:
                value[index] = expression
                parent._set_parent(key, expression, index=index)
    elif value is not None:
        if expression is None:
            parent.args.pop(key)
        else:
            parent.set(key, expression)

    if expression is not self:
        # detach this node from the tree
        self.parent = None
        self.arg_key = None
        self.index = None

    return expression
def pop(self: E) -> E:
"""
Remove this expression from its AST.
Returns:
The popped expression.
"""
self.replace(None)
return self
def assert_is(self, type_: t.Type[E]) -> E:
"""
Assert that this `Expression` is an instance of `type_`.
If it is NOT an instance of `type_`, this raises an assertion error.
Otherwise, this returns this expression.
Examples:
This is useful for type security in chained expressions:
>>> import sqlglot
>>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()
'SELECT x, z FROM y'
"""
if not isinstance(self, type_):
raise AssertionError(f"{self} is not {type_}.")
return self
def error_messages(self, args: t.Optional[t.Sequence] = None) -> t.List[str]:
    """
    Checks if this expression is valid (e.g. all mandatory args are set).

    Args:
        args: a sequence of values that were used to instantiate a Func expression. This is used
            to check that the provided arguments don't exceed the function argument limit.

    Returns:
        A list of error messages for all possible errors that were found.
    """
    errors: t.List[str] = []

    # args this expression class does not declare
    for k in self.args:
        if k not in self.arg_types:
            errors.append(f"Unexpected keyword: '{k}' for {self.__class__}")
    # declared-mandatory args that are absent or empty lists
    for k, mandatory in self.arg_types.items():
        v = self.args.get(k)
        if mandatory and (v is None or (isinstance(v, list) and not v)):
            errors.append(f"Required keyword: '{k}' missing for {self.__class__}")

    if (
        args
        and isinstance(self, Func)
        and len(args) > len(self.arg_types)
        and not self.is_var_len_args
    ):
        errors.append(
            f"The number of provided arguments ({len(args)}) is greater than "
            f"the maximum number of supported arguments ({len(self.arg_types)})"
        )

    return errors
def dump(self):
    """
    Dump this Expression to a JSON-serializable dict.
    """
    from sqlglot.serde import dump

    return dump(self)

@classmethod
def load(cls, obj):
    """
    Load a dict (as returned by `Expression.dump`) into an Expression instance.
    """
    # `cls` as first parameter shows this was written as a classmethod;
    # the @classmethod decorator was missing, so an instance call would have
    # bound the instance to `cls`.
    from sqlglot.serde import load

    return load(obj)
def and_(
self,
*expressions: t.Optional[ExpOrStr],
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Condition:
"""
AND this condition with one or multiple expressions.
Example:
>>> condition("x=1").and_("y=1").sql()
'x = 1 AND y = 1'
Args:
*expressions: the SQL code strings to parse.
If an `Expression` instance is passed, it will be used as-is.
dialect: the dialect used to parse the input expression.
copy: whether to copy the involved expressions (only applies to Expressions).
opts: other options to use to parse the input expressions.
Returns:
The new And condition.
"""
return and_(self, *expressions, dialect=dialect, copy=copy, **opts)
def or_(
self,
*expressions: t.Optional[ExpOrStr],
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Condition:
"""
OR this condition with one or multiple expressions.
Example:
>>> condition("x=1").or_("y=1").sql()
'x = 1 OR y = 1'
Args:
*expressions: the SQL code strings to parse.
If an `Expression` instance is passed, it will be used as-is.
dialect: the dialect used to parse the input expression.
copy: whether to copy the involved expressions (only applies to Expressions).
opts: other options to use to parse the input expressions.
Returns:
The new Or condition.
"""
return or_(self, *expressions, dialect=dialect, copy=copy, **opts)
def not_(self, copy: bool = True):
"""
Wrap this condition with NOT.
Example:
>>> condition("x=1").not_().sql()
'NOT x = 1'
Args:
copy: whether to copy this object.
Returns:
The new Not instance.
"""
return not_(self, copy=copy)
def as_(
self,
alias: str | Identifier,
quoted: t.Optional[bool] = None,
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Alias:
return alias_(self, alias, quoted=quoted, dialect=dialect, copy=copy, **opts)
def _binop(self, klass: t.Type[E], other: t.Any, reverse: bool = False) -> E:
this = self.copy()
other = convert(other, copy=True)
if not isinstance(this, klass) and not isinstance(other, klass):
this = _wrap(this, Binary)
other = _wrap(other, Binary)
if reverse:
return klass(this=other, expression=this)
return klass(this=this, expression=other)
def __getitem__(self, other: ExpOrStr | t.Tuple[ExpOrStr]) -> Bracket:
return Bracket(
this=self.copy(), expressions=[convert(e, copy=True) for e in ensure_list(other)]
)
def __iter__(self) -> t.Iterator:
if "expressions" in self.arg_types:
return iter(self.args.get("expressions") or [])
# We define this because __getitem__ converts Expression into an iterable, which is
# problematic because one can hit infinite loops if they do "for x in some_expr: ..."
# See: https://peps.python.org/pep-0234/
raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
def isin(
self,
*expressions: t.Any,
query: t.Optional[ExpOrStr] = None,
unnest: t.Optional[ExpOrStr] | t.Collection[ExpOrStr] = None,
copy: bool = True,
**opts,
) -> In:
return In(
this=maybe_copy(self, copy),
expressions=[convert(e, copy=copy) for e in expressions],
query=maybe_parse(query, copy=copy, **opts) if query else None,
unnest=(
Unnest(
expressions=[
maybe_parse(t.cast(ExpOrStr, e), copy=copy, **opts)
for e in ensure_list(unnest)
]
)
if unnest
else None
),
)
def between(self, low: t.Any, high: t.Any, copy: bool = True, **opts) -> Between:
return Between(
this=maybe_copy(self, copy),
low=convert(low, copy=copy, **opts),
high=convert(high, copy=copy, **opts),
)
def is_(self, other: ExpOrStr) -> Is:
return self._binop(Is, other)
def like(self, other: ExpOrStr) -> Like:
return self._binop(Like, other)
def ilike(self, other: ExpOrStr) -> ILike:
return self._binop(ILike, other)
def eq(self, other: t.Any) -> EQ:
return self._binop(EQ, other)
def neq(self, other: t.Any) -> NEQ:
return self._binop(NEQ, other)
def rlike(self, other: ExpOrStr) -> RegexpLike:
return self._binop(RegexpLike, other)
def div(self, other: ExpOrStr, typed: bool = False, safe: bool = False) -> Div:
div = self._binop(Div, other)
div.args["typed"] = typed
div.args["safe"] = safe
return div
def desc(self, nulls_first: bool = False) -> Ordered:
return Ordered(this=self.copy(), desc=True, nulls_first=nulls_first)
def __lt__(self, other: t.Any) -> LT:
return self._binop(LT, other)
def __le__(self, other: t.Any) -> LTE:
return self._binop(LTE, other)
def __gt__(self, other: t.Any) -> GT:
return self._binop(GT, other)
def __ge__(self, other: t.Any) -> GTE:
return self._binop(GTE, other)
def __add__(self, other: t.Any) -> Add:
return self._binop(Add, other)
def __radd__(self, other: t.Any) -> Add:
return self._binop(Add, other, reverse=True)
def __sub__(self, other: t.Any) -> Sub:
return self._binop(Sub, other)
def __rsub__(self, other: t.Any) -> Sub:
return self._binop(Sub, other, reverse=True)
def __mul__(self, other: t.Any) -> Mul:
return self._binop(Mul, other)
def __rmul__(self, other: t.Any) -> Mul:
return self._binop(Mul, other, reverse=True)
def __truediv__(self, other: t.Any) -> Div:
return self._binop(Div, other)
def __rtruediv__(self, other: t.Any) -> Div:
return self._binop(Div, other, reverse=True)
def __floordiv__(self, other: t.Any) -> IntDiv:
return self._binop(IntDiv, other)
def __rfloordiv__(self, other: t.Any) -> IntDiv:
return self._binop(IntDiv, other, reverse=True)
def __mod__(self, other: t.Any) -> Mod:
return self._binop(Mod, other)
def __rmod__(self, other: t.Any) -> Mod:
return self._binop(Mod, other, reverse=True)
def __pow__(self, other: t.Any) -> Pow:
return self._binop(Pow, other)
def __rpow__(self, other: t.Any) -> Pow:
return self._binop(Pow, other, reverse=True)
def __and__(self, other: t.Any) -> And:
return self._binop(And, other)
def __rand__(self, other: t.Any) -> And:
return self._binop(And, other, reverse=True)
def __or__(self, other: t.Any) -> Or:
return self._binop(Or, other)
def __ror__(self, other: t.Any) -> Or:
return self._binop(Or, other, reverse=True)
def __neg__(self) -> Neg:
return Neg(this=_wrap(self.copy(), Binary))
def __invert__(self) -> Not:
return not_(self.copy())
class Query(Expression):
def subquery(self, alias: t.Optional[ExpOrStr] = None, copy: bool = True) -> Subquery:
"""
Returns a `Subquery` that wraps around this query.
Example:
>>> subquery = Select().select("x").from_("tbl").subquery()
>>> Select().select("x").from_(subquery).sql()
'SELECT x FROM (SELECT x FROM tbl)'
Args:
alias: an optional alias for the subquery.
copy: if `False`, modify this expression instance in-place.
"""
instance = maybe_copy(self, copy)
if not isinstance(alias, Expression):
alias = TableAlias(this=to_identifier(alias)) if alias else None
return Subquery(this=instance, alias=alias)
def limit(
self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
) -> Select:
"""
Adds a LIMIT clause to this query.
Example:
>>> select("1").union(select("1")).limit(1).sql()
'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1'
Args:
expression: the SQL code string to parse.
This can also be an integer.
If a `Limit` instance is passed, it will be used as-is.
If another `Expression` instance is passed, it will be wrapped in a `Limit`.
dialect: the dialect used to parse the input expression.
copy: if `False`, modify this expression instance in-place.
opts: other options to use to parse the input expressions.
Returns:
A limited Select expression.
"""
return (
select("*")
.from_(self.subquery(alias="_l_0", copy=copy))
.limit(expression, dialect=dialect, copy=False, **opts)
)
def ctes(self) -> t.List[CTE]:
"""Returns a list of all the CTEs attached to this query."""
with_ = self.args.get("with")
return with_.expressions if with_ else []
def selects(self) -> t.List[Expression]:
"""Returns the query's projections."""
raise NotImplementedError("Query objects must implement `selects`")
def named_selects(self) -> t.List[str]:
"""Returns the output names of the query's projections."""
raise NotImplementedError("Query objects must implement `named_selects`")
def select(
self: Q,
*expressions: t.Optional[ExpOrStr],
append: bool = True,
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Q:
"""
Append to or set the SELECT expressions.
Example:
>>> Select().select("x", "y").sql()
'SELECT x, y'
Args:
*expressions: the SQL code strings to parse.
If an `Expression` instance is passed, it will be used as-is.
append: if `True`, add to any existing expressions.
Otherwise, this resets the expressions.
dialect: the dialect used to parse the input expressions.
copy: if `False`, modify this expression instance in-place.
opts: other options to use to parse the input expressions.
Returns:
The modified Query expression.
"""
raise NotImplementedError("Query objects must implement `select`")
def with_(
self: Q,
alias: ExpOrStr,
as_: ExpOrStr,
recursive: t.Optional[bool] = None,
append: bool = True,
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Q:
"""
Append to or set the common table expressions.
Example:
>>> Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql()
'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2'
Args:
alias: the SQL code string to parse as the table name.
If an `Expression` instance is passed, this is used as-is.
as_: the SQL code string to parse as the table expression.
If an `Expression` instance is passed, it will be used as-is.
recursive: set the RECURSIVE part of the expression. Defaults to `False`.
append: if `True`, add to any existing expressions.
Otherwise, this resets the expressions.
dialect: the dialect used to parse the input expression.
copy: if `False`, modify this expression instance in-place.
opts: other options to use to parse the input expressions.
Returns:
The modified expression.
"""
return _apply_cte_builder(
self, alias, as_, recursive=recursive, append=append, dialect=dialect, copy=copy, **opts
)
def union(
self, expression: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts
) -> Union:
"""
Builds a UNION expression.
Example:
>>> import sqlglot
>>> sqlglot.parse_one("SELECT * FROM foo").union("SELECT * FROM bla").sql()
'SELECT * FROM foo UNION SELECT * FROM bla'
Args:
expression: the SQL code string.
If an `Expression` instance is passed, it will be used as-is.
distinct: set the DISTINCT flag if and only if this is true.
dialect: the dialect used to parse the input expression.
opts: other options to use to parse the input expressions.
Returns:
The new Union expression.
"""
return union(left=self, right=expression, distinct=distinct, dialect=dialect, **opts)
def intersect(
self, expression: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts
) -> Intersect:
"""
Builds an INTERSECT expression.
Example:
>>> import sqlglot
>>> sqlglot.parse_one("SELECT * FROM foo").intersect("SELECT * FROM bla").sql()
'SELECT * FROM foo INTERSECT SELECT * FROM bla'
Args:
expression: the SQL code string.
If an `Expression` instance is passed, it will be used as-is.
distinct: set the DISTINCT flag if and only if this is true.
dialect: the dialect used to parse the input expression.
opts: other options to use to parse the input expressions.
Returns:
The new Intersect expression.
"""
return intersect(left=self, right=expression, distinct=distinct, dialect=dialect, **opts)
def except_(
self, expression: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts
) -> Except:
"""
Builds an EXCEPT expression.
Example:
>>> import sqlglot
>>> sqlglot.parse_one("SELECT * FROM foo").except_("SELECT * FROM bla").sql()
'SELECT * FROM foo EXCEPT SELECT * FROM bla'
Args:
expression: the SQL code string.
If an `Expression` instance is passed, it will be used as-is.
distinct: set the DISTINCT flag if and only if this is true.
dialect: the dialect used to parse the input expression.
opts: other options to use to parse the input expressions.
Returns:
The new Except expression.
"""
return except_(left=self, right=expression, distinct=distinct, dialect=dialect, **opts)
class Table(Expression):
arg_types = {
"this": False,
"alias": False,
"db": False,
"catalog": False,
"laterals": False,
"joins": False,
"pivots": False,
"hints": False,
"system_time": False,
"version": False,
"format": False,
"pattern": False,
"ordinality": False,
"when": False,
"only": False,
}
def name(self) -> str:
if isinstance(self.this, Func):
return ""
return self.this.name
def db(self) -> str:
return self.text("db")
def catalog(self) -> str:
return self.text("catalog")
def selects(self) -> t.List[Expression]:
return []
def named_selects(self) -> t.List[str]:
return []
def parts(self) -> t.List[Expression]:
"""Return the parts of a table in order catalog, db, table."""
parts: t.List[Expression] = []
for arg in ("catalog", "db", "this"):
part = self.args.get(arg)
if isinstance(part, Dot):
parts.extend(part.flatten())
elif isinstance(part, Expression):
parts.append(part)
return parts
def to_column(self, copy: bool = True) -> Alias | Column | Dot:
parts = self.parts
col = column(*reversed(parts[0:4]), fields=parts[4:], copy=copy) # type: ignore
alias = self.args.get("alias")
if alias:
col = alias_(col, alias.this, copy=copy)
return col
def subquery(
expression: ExpOrStr,
alias: t.Optional[Identifier | str] = None,
dialect: DialectType = None,
**opts,
) -> Select:
"""
Build a subquery expression that's selected from.
Example:
>>> subquery('select x from tbl', 'bar').select('x').sql()
'SELECT x FROM (SELECT x FROM tbl) AS bar'
Args:
expression: the SQL code strings to parse.
If an Expression instance is passed, this is used as-is.
alias: the alias name to use.
dialect: the dialect used to parse the input expression.
**opts: other options to use to parse the input expressions.
Returns:
A new Select instance with the subquery expression included.
"""
expression = maybe_parse(expression, dialect=dialect, **opts).subquery(alias)
return Select().from_(expression, dialect=dialect, **opts)
def normalize_table_name(table: str | Table, dialect: DialectType = None, copy: bool = True) -> str:
"""Returns a case normalized table name without quotes.
Args:
table: the table to normalize
dialect: the dialect to use for normalization rules
copy: whether to copy the expression.
Examples:
>>> normalize_table_name("`A-B`.c", dialect="bigquery")
'A-B.c'
"""
from sqlglot.optimizer.normalize_identifiers import normalize_identifiers
return ".".join(
p.name
for p in normalize_identifiers(
to_table(table, dialect=dialect, copy=copy), dialect=dialect
).parts
)
DialectType = t.Union[str, Dialect, t.Type[Dialect], None]
The provided code snippet includes necessary dependencies for implementing the `expand` function. Write a Python function `def expand( expression: Expression, sources: t.Dict[str, Query], dialect: DialectType = None, copy: bool = True, ) -> Expression` to solve the following problem:
Transforms an expression by expanding all referenced sources into subqueries. Examples: >>> from sqlglot import parse_one >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql() 'SELECT * FROM (SELECT * FROM y) AS z /* source: x */' >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y"), "y": parse_one("select * from z")}).sql() 'SELECT * FROM (SELECT * FROM (SELECT * FROM z) AS y /* source: y */) AS z /* source: x */' Args: expression: The expression to expand. sources: A dictionary of name to Queries. dialect: The dialect of the sources dict. copy: Whether to copy the expression during transformation. Defaults to True. Returns: The transformed expression.
Here is the function:
def expand(
expression: Expression,
sources: t.Dict[str, Query],
dialect: DialectType = None,
copy: bool = True,
) -> Expression:
"""Transforms an expression by expanding all referenced sources into subqueries.
Examples:
>>> from sqlglot import parse_one
>>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql()
'SELECT * FROM (SELECT * FROM y) AS z /* source: x */'
>>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y"), "y": parse_one("select * from z")}).sql()
'SELECT * FROM (SELECT * FROM (SELECT * FROM z) AS y /* source: y */) AS z /* source: x */'
Args:
expression: The expression to expand.
sources: A dictionary of name to Queries.
dialect: The dialect of the sources dict.
copy: Whether to copy the expression during transformation. Defaults to True.
Returns:
The transformed expression.
"""
sources = {normalize_table_name(k, dialect=dialect): v for k, v in sources.items()}
def _expand(node: Expression):
if isinstance(node, Table):
name = normalize_table_name(node, dialect=dialect)
source = sources.get(name)
if source:
subquery = source.subquery(node.alias or name)
subquery.comments = [f"source: {name}"]
return subquery.transform(_expand, copy=False)
return node
return expression.transform(_expand, copy=copy) | Transforms an expression by expanding all referenced sources into subqueries. Examples: >>> from sqlglot import parse_one >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql() 'SELECT * FROM (SELECT * FROM y) AS z /* source: x */' >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y"), "y": parse_one("select * from z")}).sql() 'SELECT * FROM (SELECT * FROM (SELECT * FROM z) AS y /* source: y */) AS z /* source: x */' Args: expression: The expression to expand. sources: A dictionary of name to Queries. dialect: The dialect of the sources dict. copy: Whether to copy the expression during transformation. Defaults to True. Returns: The transformed expression. |
152,951 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
if t.TYPE_CHECKING:
from sqlglot._typing import E, Lit
from sqlglot.dialects.dialect import DialectType
Q = t.TypeVar("Q", bound="Query")
ExpOrStr = t.Union[str, Expression]
class Case(Func):
arg_types = {"this": False, "ifs": True, "default": False}
def when(self, condition: ExpOrStr, then: ExpOrStr, copy: bool = True, **opts) -> Case:
instance = maybe_copy(self, copy)
instance.append(
"ifs",
If(
this=maybe_parse(condition, copy=copy, **opts),
true=maybe_parse(then, copy=copy, **opts),
),
)
return instance
def else_(self, condition: ExpOrStr, copy: bool = True, **opts) -> Case:
instance = maybe_copy(self, copy)
instance.set("default", maybe_parse(condition, copy=copy, **opts))
return instance
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Type[E],
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E: ...
def maybe_parse(
sql_or_expression: str | E,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E: ...
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> Expression:
"""Gracefully handle a possible string or expression.
Example:
>>> maybe_parse("1")
Literal(this=1, is_string=False)
>>> maybe_parse(to_identifier("x"))
Identifier(this=x, quoted=False)
Args:
sql_or_expression: the SQL code string or an expression
into: the SQLGlot Expression to parse into
dialect: the dialect used to parse the input expressions (in the case that an
input expression is a SQL string).
prefix: a string to prefix the sql with before it gets parsed
(automatically includes a space)
copy: whether to copy the expression.
**opts: other options to use to parse the input expressions (again, in the case
that an input expression is a SQL string).
Returns:
Expression: the parsed or given expression.
"""
if isinstance(sql_or_expression, Expression):
if copy:
return sql_or_expression.copy()
return sql_or_expression
if sql_or_expression is None:
raise ParseError("SQL cannot be None")
import sqlglot
sql = str(sql_or_expression)
if prefix:
sql = f"{prefix} {sql}"
return sqlglot.parse_one(sql, read=dialect, into=into, **opts)
The provided code snippet includes necessary dependencies for implementing the `case` function. Write a Python function `def case( expression: t.Optional[ExpOrStr] = None, **opts, ) -> Case` to solve the following problem:
Initialize a CASE statement. Example: case().when("a = 1", "foo").else_("bar") Args: expression: Optionally, the input expression (not all dialects support this) **opts: Extra keyword arguments for parsing `expression`
Here is the function:
def case(
expression: t.Optional[ExpOrStr] = None,
**opts,
) -> Case:
"""
Initialize a CASE statement.
Example:
case().when("a = 1", "foo").else_("bar")
Args:
expression: Optionally, the input expression (not all dialects support this)
**opts: Extra keyword arguments for parsing `expression`
"""
if expression is not None:
this = maybe_parse(expression, **opts)
else:
this = None
return Case(this=this, ifs=[]) | Initialize a CASE statement. Example: case().when("a = 1", "foo").else_("bar") Args: expression: Optionally, the input expression (not all dialects support this) **opts: Extra keyword arguments for parsing `expression` |
152,952 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
if t.TYPE_CHECKING:
from sqlglot._typing import E, Lit
from sqlglot.dialects.dialect import DialectType
Q = t.TypeVar("Q", bound="Query")
class Expression(metaclass=_Expression):
"""
The base class for all expressions in a syntax tree. Each Expression encapsulates any necessary
context, such as its child expressions, their names (arg keys), and whether a given child expression
is optional or not.
Attributes:
key: a unique key for each class in the Expression hierarchy. This is useful for hashing
and representing expressions as strings.
arg_types: determines the arguments (child nodes) supported by an expression. It maps
arg keys to booleans that indicate whether the corresponding args are optional.
parent: a reference to the parent expression (or None, in case of root expressions).
arg_key: the arg key an expression is associated with, i.e. the name its parent expression
uses to refer to it.
index: the index of an expression if it is inside of a list argument in its parent
comments: a list of comments that are associated with a given expression. This is used in
order to preserve comments when transpiling SQL code.
type: the `sqlglot.expressions.DataType` type of an expression. This is inferred by the
optimizer, in order to enable some transformations that require type information.
meta: a dictionary that can be used to store useful metadata for a given expression.
Example:
>>> class Foo(Expression):
... arg_types = {"this": True, "expression": False}
The above definition informs us that Foo is an Expression that requires an argument called
"this" and may also optionally receive an argument called "expression".
Args:
args: a mapping used for retrieving the arguments of an expression, given their arg keys.
"""
key = "expression"
arg_types = {"this": True}
__slots__ = ("args", "parent", "arg_key", "index", "comments", "_type", "_meta", "_hash")
def __init__(self, **args: t.Any):
self.args: t.Dict[str, t.Any] = args
self.parent: t.Optional[Expression] = None
self.arg_key: t.Optional[str] = None
self.index: t.Optional[int] = None
self.comments: t.Optional[t.List[str]] = None
self._type: t.Optional[DataType] = None
self._meta: t.Optional[t.Dict[str, t.Any]] = None
self._hash: t.Optional[int] = None
for arg_key, value in self.args.items():
self._set_parent(arg_key, value)
def __eq__(self, other) -> bool:
return type(self) is type(other) and hash(self) == hash(other)
def hashable_args(self) -> t.Any:
return frozenset(
(k, tuple(_norm_arg(a) for a in v) if type(v) is list else _norm_arg(v))
for k, v in self.args.items()
if not (v is None or v is False or (type(v) is list and not v))
)
def __hash__(self) -> int:
if self._hash is not None:
return self._hash
return hash((self.__class__, self.hashable_args))
def this(self) -> t.Any:
"""
Retrieves the argument with key "this".
"""
return self.args.get("this")
def expression(self) -> t.Any:
"""
Retrieves the argument with key "expression".
"""
return self.args.get("expression")
def expressions(self) -> t.List[t.Any]:
"""
Retrieves the argument with key "expressions".
"""
return self.args.get("expressions") or []
def text(self, key) -> str:
"""
Returns a textual representation of the argument corresponding to "key". This can only be used
for args that are strings or leaf Expression instances, such as identifiers and literals.
"""
field = self.args.get(key)
if isinstance(field, str):
return field
if isinstance(field, (Identifier, Literal, Var)):
return field.this
if isinstance(field, (Star, Null)):
return field.name
return ""
def is_string(self) -> bool:
"""
Checks whether a Literal expression is a string.
"""
return isinstance(self, Literal) and self.args["is_string"]
def is_number(self) -> bool:
"""
Checks whether a Literal expression is a number.
"""
return isinstance(self, Literal) and not self.args["is_string"]
def is_int(self) -> bool:
"""
Checks whether a Literal expression is an integer.
"""
return self.is_number and is_int(self.name)
def is_star(self) -> bool:
"""Checks whether an expression is a star."""
return isinstance(self, Star) or (isinstance(self, Column) and isinstance(self.this, Star))
def alias(self) -> str:
"""
Returns the alias of the expression, or an empty string if it's not aliased.
"""
if isinstance(self.args.get("alias"), TableAlias):
return self.args["alias"].name
return self.text("alias")
def alias_column_names(self) -> t.List[str]:
table_alias = self.args.get("alias")
if not table_alias:
return []
return [c.name for c in table_alias.args.get("columns") or []]
def name(self) -> str:
return self.text("this")
def alias_or_name(self) -> str:
return self.alias or self.name
def output_name(self) -> str:
"""
Name of the output column if this expression is a selection.
If the Expression has no output name, an empty string is returned.
Example:
>>> from sqlglot import parse_one
>>> parse_one("SELECT a").expressions[0].output_name
'a'
>>> parse_one("SELECT b AS c").expressions[0].output_name
'c'
>>> parse_one("SELECT 1 + 2").expressions[0].output_name
''
"""
return ""
def type(self) -> t.Optional[DataType]:
return self._type
def type(self, dtype: t.Optional[DataType | DataType.Type | str]) -> None:
if dtype and not isinstance(dtype, DataType):
dtype = DataType.build(dtype)
self._type = dtype # type: ignore
def is_type(self, *dtypes) -> bool:
return self.type is not None and self.type.is_type(*dtypes)
def is_leaf(self) -> bool:
return not any(isinstance(v, (Expression, list)) for v in self.args.values())
def meta(self) -> t.Dict[str, t.Any]:
if self._meta is None:
self._meta = {}
return self._meta
def __deepcopy__(self, memo):
root = self.__class__()
stack = [(self, root)]
while stack:
node, copy = stack.pop()
if node.comments is not None:
copy.comments = deepcopy(node.comments)
if node._type is not None:
copy._type = deepcopy(node._type)
if node._meta is not None:
copy._meta = deepcopy(node._meta)
if node._hash is not None:
copy._hash = node._hash
for k, vs in node.args.items():
if hasattr(vs, "parent"):
stack.append((vs, vs.__class__()))
copy.set(k, stack[-1][-1])
elif type(vs) is list:
copy.args[k] = []
for v in vs:
if hasattr(v, "parent"):
stack.append((v, v.__class__()))
copy.append(k, stack[-1][-1])
else:
copy.append(k, v)
else:
copy.args[k] = vs
return root
def copy(self):
"""
Returns a deep copy of the expression.
"""
return deepcopy(self)
def add_comments(self, comments: t.Optional[t.List[str]]) -> None:
if self.comments is None:
self.comments = []
if comments:
for comment in comments:
_, *meta = comment.split(SQLGLOT_META)
if meta:
for kv in "".join(meta).split(","):
k, *v = kv.split("=")
value = v[0].strip() if v else True
self.meta[k.strip()] = value
self.comments.append(comment)
def append(self, arg_key: str, value: t.Any) -> None:
"""
Appends value to arg_key if it's a list or sets it as a new list.
Args:
arg_key (str): name of the list expression arg
value (Any): value to append to the list
"""
if type(self.args.get(arg_key)) is not list:
self.args[arg_key] = []
self._set_parent(arg_key, value)
values = self.args[arg_key]
if hasattr(value, "parent"):
value.index = len(values)
values.append(value)
def set(self, arg_key: str, value: t.Any) -> None:
"""
Sets arg_key to value.
Args:
arg_key: name of the expression arg.
value: value to set the arg to.
"""
if value is None:
self.args.pop(arg_key, None)
else:
self.args[arg_key] = value
self._set_parent(arg_key, value)
def _set_parent(self, arg_key: str, value: t.Any, index: t.Optional[int] = None) -> None:
if hasattr(value, "parent"):
value.parent = self
value.arg_key = arg_key
value.index = index
elif type(value) is list:
for index, v in enumerate(value):
if hasattr(v, "parent"):
v.parent = self
v.arg_key = arg_key
v.index = index
def depth(self) -> int:
"""
Returns the depth of this tree.
"""
if self.parent:
return self.parent.depth + 1
return 0
def iter_expressions(self, reverse: bool = False) -> t.Iterator[Expression]:
"""Yields the key and expression for all arguments, exploding list args."""
# remove tuple when python 3.7 is deprecated
for vs in reversed(tuple(self.args.values())) if reverse else self.args.values():
if type(vs) is list:
for v in reversed(vs) if reverse else vs:
if hasattr(v, "parent"):
yield v
else:
if hasattr(vs, "parent"):
yield vs
def find(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Optional[E]:
"""
Returns the first node in this tree which matches at least one of
the specified types.
Args:
expression_types: the expression type(s) to match.
bfs: whether to search the AST using the BFS algorithm (DFS is used if false).
Returns:
The node which matches the criteria or None if no such node was found.
"""
return next(self.find_all(*expression_types, bfs=bfs), None)
def find_all(self, *expression_types: t.Type[E], bfs: bool = True) -> t.Iterator[E]:
"""
Returns a generator object which visits all nodes in this tree and only
yields those that match at least one of the specified expression types.
Args:
expression_types: the expression type(s) to match.
bfs: whether to search the AST using the BFS algorithm (DFS is used if false).
Returns:
The generator object.
"""
for expression in self.walk(bfs=bfs):
if isinstance(expression, expression_types):
yield expression
def find_ancestor(self, *expression_types: t.Type[E]) -> t.Optional[E]:
"""
Returns a nearest parent matching expression_types.
Args:
expression_types: the expression type(s) to match.
Returns:
The parent node.
"""
ancestor = self.parent
while ancestor and not isinstance(ancestor, expression_types):
ancestor = ancestor.parent
return ancestor # type: ignore
def parent_select(self) -> t.Optional[Select]:
"""
Returns the parent select statement.
"""
return self.find_ancestor(Select)
def same_parent(self) -> bool:
"""Returns if the parent is the same class as itself."""
return type(self.parent) is self.__class__
def root(self) -> Expression:
"""
Returns the root expression of this tree.
"""
expression = self
while expression.parent:
expression = expression.parent
return expression
def walk(
self, bfs: bool = True, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
"""
Returns a generator object which visits all nodes in this tree.
Args:
bfs (bool): if set to True the BFS traversal order will be applied,
otherwise the DFS traversal will be used instead.
prune ((node, parent, arg_key) -> bool): callable that returns True if
the generator should stop traversing this branch of the tree.
Returns:
the generator object.
"""
if bfs:
yield from self.bfs(prune=prune)
else:
yield from self.dfs(prune=prune)
def dfs(
self, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
"""
Returns a generator object which visits all nodes in this tree in
the DFS (Depth-first) order.
Returns:
The generator object.
"""
stack = [self]
while stack:
node = stack.pop()
yield node
if prune and prune(node):
continue
for v in node.iter_expressions(reverse=True):
stack.append(v)
def bfs(
self, prune: t.Optional[t.Callable[[Expression], bool]] = None
) -> t.Iterator[Expression]:
"""
Returns a generator object which visits all nodes in this tree in
the BFS (Breadth-first) order.
Returns:
The generator object.
"""
queue = deque([self])
while queue:
node = queue.popleft()
yield node
if prune and prune(node):
continue
for v in node.iter_expressions():
queue.append(v)
def unnest(self):
"""
Returns the first non parenthesis child or self.
"""
expression = self
while type(expression) is Paren:
expression = expression.this
return expression
def unalias(self):
"""
Returns the inner expression if this is an Alias.
"""
if isinstance(self, Alias):
return self.this
return self
def unnest_operands(self):
"""
Returns unnested operands as a tuple.
"""
return tuple(arg.unnest() for arg in self.iter_expressions())
def flatten(self, unnest=True):
"""
Returns a generator which yields child nodes whose parents are the same class.
A AND B AND C -> [A, B, C]
"""
for node in self.dfs(prune=lambda n: n.parent and type(n) is not self.__class__):
if type(node) is not self.__class__:
yield node.unnest() if unnest and not isinstance(node, Subquery) else node
def __str__(self) -> str:
return self.sql()
def __repr__(self) -> str:
return _to_s(self)
def to_s(self) -> str:
"""
Same as __repr__, but includes additional information which can be useful
for debugging, like empty or missing args and the AST nodes' object IDs.
"""
return _to_s(self, verbose=True)
def sql(self, dialect: DialectType = None, **opts) -> str:
"""
Returns SQL string representation of this tree.
Args:
dialect: the dialect of the output SQL string (eg. "spark", "hive", "presto", "mysql").
opts: other `sqlglot.generator.Generator` options.
Returns:
The SQL string.
"""
from sqlglot.dialects import Dialect
return Dialect.get_or_raise(dialect).generate(self, **opts)
def transform(self, fun: t.Callable, *args: t.Any, copy: bool = True, **kwargs) -> Expression:
"""
Visits all tree nodes (excluding already transformed ones)
and applies the given transformation function to each node.
Args:
fun (function): a function which takes a node as an argument and returns a
new transformed node or the same node without modifications. If the function
returns None, then the corresponding node will be removed from the syntax tree.
copy (bool): if set to True a new tree instance is constructed, otherwise the tree is
modified in place.
Returns:
The transformed tree.
"""
root = None
new_node = None
for node in (self.copy() if copy else self).dfs(prune=lambda n: n is not new_node):
new_node = fun(node, *args, **kwargs)
if root:
if new_node is not node:
node.replace(new_node)
else:
root = new_node
assert root
return root.assert_is(Expression)
def replace(self, expression: E) -> E: ...
def replace(self, expression: None) -> None: ...
def replace(self, expression):
"""
Swap out this expression with a new expression.
For example::
>>> tree = Select().select("x").from_("tbl")
>>> tree.find(Column).replace(column("y"))
Column(
this=Identifier(this=y, quoted=False))
>>> tree.sql()
'SELECT y FROM tbl'
Args:
expression: new node
Returns:
The new expression or expressions.
"""
parent = self.parent
if not parent:
return expression
key = self.arg_key
value = parent.args.get(key)
if isinstance(value, list):
index = self.index
if isinstance(expression, list):
value.pop(index)
value[index:index] = expression
parent._set_parent(key, value)
else:
if expression is None:
value.pop(index)
for v in value[index:]:
v.index = v.index - 1
else:
value[index] = expression
parent._set_parent(key, expression, index=index)
elif value is not None:
if expression is None:
parent.args.pop(key)
else:
parent.set(key, expression)
if expression is not self:
self.parent = None
self.arg_key = None
self.index = None
return expression
def pop(self: E) -> E:
"""
Remove this expression from its AST.
Returns:
The popped expression.
"""
self.replace(None)
return self
def assert_is(self, type_: t.Type[E]) -> E:
"""
Assert that this `Expression` is an instance of `type_`.
If it is NOT an instance of `type_`, this raises an assertion error.
Otherwise, this returns this expression.
Examples:
This is useful for type security in chained expressions:
>>> import sqlglot
>>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()
'SELECT x, z FROM y'
"""
if not isinstance(self, type_):
raise AssertionError(f"{self} is not {type_}.")
return self
def error_messages(self, args: t.Optional[t.Sequence] = None) -> t.List[str]:
"""
Checks if this expression is valid (e.g. all mandatory args are set).
Args:
args: a sequence of values that were used to instantiate a Func expression. This is used
to check that the provided arguments don't exceed the function argument limit.
Returns:
A list of error messages for all possible errors that were found.
"""
errors: t.List[str] = []
for k in self.args:
if k not in self.arg_types:
errors.append(f"Unexpected keyword: '{k}' for {self.__class__}")
for k, mandatory in self.arg_types.items():
v = self.args.get(k)
if mandatory and (v is None or (isinstance(v, list) and not v)):
errors.append(f"Required keyword: '{k}' missing for {self.__class__}")
if (
args
and isinstance(self, Func)
and len(args) > len(self.arg_types)
and not self.is_var_len_args
):
errors.append(
f"The number of provided arguments ({len(args)}) is greater than "
f"the maximum number of supported arguments ({len(self.arg_types)})"
)
return errors
def dump(self):
"""
Dump this Expression to a JSON-serializable dict.
"""
from sqlglot.serde import dump
return dump(self)
def load(cls, obj):
"""
Load a dict (as returned by `Expression.dump`) into an Expression instance.
"""
from sqlglot.serde import load
return load(obj)
def and_(
self,
*expressions: t.Optional[ExpOrStr],
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Condition:
"""
AND this condition with one or multiple expressions.
Example:
>>> condition("x=1").and_("y=1").sql()
'x = 1 AND y = 1'
Args:
*expressions: the SQL code strings to parse.
If an `Expression` instance is passed, it will be used as-is.
dialect: the dialect used to parse the input expression.
copy: whether to copy the involved expressions (only applies to Expressions).
opts: other options to use to parse the input expressions.
Returns:
The new And condition.
"""
return and_(self, *expressions, dialect=dialect, copy=copy, **opts)
def or_(
self,
*expressions: t.Optional[ExpOrStr],
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Condition:
"""
OR this condition with one or multiple expressions.
Example:
>>> condition("x=1").or_("y=1").sql()
'x = 1 OR y = 1'
Args:
*expressions: the SQL code strings to parse.
If an `Expression` instance is passed, it will be used as-is.
dialect: the dialect used to parse the input expression.
copy: whether to copy the involved expressions (only applies to Expressions).
opts: other options to use to parse the input expressions.
Returns:
The new Or condition.
"""
return or_(self, *expressions, dialect=dialect, copy=copy, **opts)
def not_(self, copy: bool = True):
"""
Wrap this condition with NOT.
Example:
>>> condition("x=1").not_().sql()
'NOT x = 1'
Args:
copy: whether to copy this object.
Returns:
The new Not instance.
"""
return not_(self, copy=copy)
def as_(
self,
alias: str | Identifier,
quoted: t.Optional[bool] = None,
dialect: DialectType = None,
copy: bool = True,
**opts,
) -> Alias:
return alias_(self, alias, quoted=quoted, dialect=dialect, copy=copy, **opts)
def _binop(self, klass: t.Type[E], other: t.Any, reverse: bool = False) -> E:
this = self.copy()
other = convert(other, copy=True)
if not isinstance(this, klass) and not isinstance(other, klass):
this = _wrap(this, Binary)
other = _wrap(other, Binary)
if reverse:
return klass(this=other, expression=this)
return klass(this=this, expression=other)
def __getitem__(self, other: ExpOrStr | t.Tuple[ExpOrStr]) -> Bracket:
return Bracket(
this=self.copy(), expressions=[convert(e, copy=True) for e in ensure_list(other)]
)
def __iter__(self) -> t.Iterator:
if "expressions" in self.arg_types:
return iter(self.args.get("expressions") or [])
# We define this because __getitem__ converts Expression into an iterable, which is
# problematic because one can hit infinite loops if they do "for x in some_expr: ..."
# See: https://peps.python.org/pep-0234/
raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
def isin(
self,
*expressions: t.Any,
query: t.Optional[ExpOrStr] = None,
unnest: t.Optional[ExpOrStr] | t.Collection[ExpOrStr] = None,
copy: bool = True,
**opts,
) -> In:
return In(
this=maybe_copy(self, copy),
expressions=[convert(e, copy=copy) for e in expressions],
query=maybe_parse(query, copy=copy, **opts) if query else None,
unnest=(
Unnest(
expressions=[
maybe_parse(t.cast(ExpOrStr, e), copy=copy, **opts)
for e in ensure_list(unnest)
]
)
if unnest
else None
),
)
def between(self, low: t.Any, high: t.Any, copy: bool = True, **opts) -> Between:
return Between(
this=maybe_copy(self, copy),
low=convert(low, copy=copy, **opts),
high=convert(high, copy=copy, **opts),
)
def is_(self, other: ExpOrStr) -> Is:
return self._binop(Is, other)
def like(self, other: ExpOrStr) -> Like:
return self._binop(Like, other)
def ilike(self, other: ExpOrStr) -> ILike:
return self._binop(ILike, other)
def eq(self, other: t.Any) -> EQ:
return self._binop(EQ, other)
def neq(self, other: t.Any) -> NEQ:
return self._binop(NEQ, other)
def rlike(self, other: ExpOrStr) -> RegexpLike:
return self._binop(RegexpLike, other)
def div(self, other: ExpOrStr, typed: bool = False, safe: bool = False) -> Div:
div = self._binop(Div, other)
div.args["typed"] = typed
div.args["safe"] = safe
return div
def desc(self, nulls_first: bool = False) -> Ordered:
return Ordered(this=self.copy(), desc=True, nulls_first=nulls_first)
def __lt__(self, other: t.Any) -> LT:
return self._binop(LT, other)
def __le__(self, other: t.Any) -> LTE:
return self._binop(LTE, other)
def __gt__(self, other: t.Any) -> GT:
return self._binop(GT, other)
def __ge__(self, other: t.Any) -> GTE:
return self._binop(GTE, other)
def __add__(self, other: t.Any) -> Add:
return self._binop(Add, other)
def __radd__(self, other: t.Any) -> Add:
return self._binop(Add, other, reverse=True)
def __sub__(self, other: t.Any) -> Sub:
return self._binop(Sub, other)
def __rsub__(self, other: t.Any) -> Sub:
return self._binop(Sub, other, reverse=True)
def __mul__(self, other: t.Any) -> Mul:
return self._binop(Mul, other)
def __rmul__(self, other: t.Any) -> Mul:
return self._binop(Mul, other, reverse=True)
def __truediv__(self, other: t.Any) -> Div:
return self._binop(Div, other)
def __rtruediv__(self, other: t.Any) -> Div:
return self._binop(Div, other, reverse=True)
def __floordiv__(self, other: t.Any) -> IntDiv:
return self._binop(IntDiv, other)
def __rfloordiv__(self, other: t.Any) -> IntDiv:
return self._binop(IntDiv, other, reverse=True)
def __mod__(self, other: t.Any) -> Mod:
return self._binop(Mod, other)
def __rmod__(self, other: t.Any) -> Mod:
return self._binop(Mod, other, reverse=True)
def __pow__(self, other: t.Any) -> Pow:
return self._binop(Pow, other)
def __rpow__(self, other: t.Any) -> Pow:
return self._binop(Pow, other, reverse=True)
def __and__(self, other: t.Any) -> And:
return self._binop(And, other)
def __rand__(self, other: t.Any) -> And:
return self._binop(And, other, reverse=True)
def __or__(self, other: t.Any) -> Or:
return self._binop(Or, other)
def __ror__(self, other: t.Any) -> Or:
return self._binop(Or, other, reverse=True)
def __neg__(self) -> Neg:
return Neg(this=_wrap(self.copy(), Binary))
def __invert__(self) -> Not:
return not_(self.copy())
ExpOrStr = t.Union[str, Expression]
DATA_TYPE = t.Union[str, DataType, DataType.Type]
class Any(SubqueryPredicate):
pass
class Cast(Func):
arg_types = {
"this": True,
"to": True,
"format": False,
"safe": False,
"action": False,
}
def name(self) -> str:
return self.this.name
def to(self) -> DataType:
return self.args["to"]
def output_name(self) -> str:
return self.name
def is_type(self, *dtypes: DATA_TYPE) -> bool:
"""
Checks whether this Cast's DataType matches one of the provided data types. Nested types
like arrays or structs will be compared using "structural equivalence" semantics, so e.g.
array<int> != array<float>.
Args:
dtypes: the data types to compare this Cast's DataType to.
Returns:
True, if and only if there is a type in `dtypes` which is equal to this Cast's DataType.
"""
return self.to.is_type(*dtypes)
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Type[E],
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E: ...
def maybe_parse(
sql_or_expression: str | E,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E: ...
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> Expression:
"""Gracefully handle a possible string or expression.
Example:
>>> maybe_parse("1")
Literal(this=1, is_string=False)
>>> maybe_parse(to_identifier("x"))
Identifier(this=x, quoted=False)
Args:
sql_or_expression: the SQL code string or an expression
into: the SQLGlot Expression to parse into
dialect: the dialect used to parse the input expressions (in the case that an
input expression is a SQL string).
prefix: a string to prefix the sql with before it gets parsed
(automatically includes a space)
copy: whether to copy the expression.
**opts: other options to use to parse the input expressions (again, in the case
that an input expression is a SQL string).
Returns:
Expression: the parsed or given expression.
"""
if isinstance(sql_or_expression, Expression):
if copy:
return sql_or_expression.copy()
return sql_or_expression
if sql_or_expression is None:
raise ParseError("SQL cannot be None")
import sqlglot
sql = str(sql_or_expression)
if prefix:
sql = f"{prefix} {sql}"
return sqlglot.parse_one(sql, read=dialect, into=into, **opts)
def cast(expression: ExpOrStr, to: DATA_TYPE, copy: bool = True, **opts) -> Cast:
"""Cast an expression to a data type.
Example:
>>> cast('x + 1', 'int').sql()
'CAST(x + 1 AS INT)'
Args:
expression: The expression to cast.
to: The datatype to cast to.
copy: Whether to copy the supplied expressions.
Returns:
The new Cast instance.
"""
expression = maybe_parse(expression, copy=copy, **opts)
data_type = DataType.build(to, copy=copy, **opts)
expression = Cast(this=expression, to=data_type)
expression.type = data_type
return expression
The provided code snippet includes necessary dependencies for implementing the `cast_unless` function. Write a Python function `def cast_unless( expression: ExpOrStr, to: DATA_TYPE, *types: DATA_TYPE, **opts: t.Any, ) -> Expression | Cast` to solve the following problem:
Cast an expression to a data type unless it is a specified type. Args: expression: The expression to cast. to: The data type to cast to. **types: The types to exclude from casting. **opts: Extra keyword arguments for parsing `expression`
Here is the function:
def cast_unless(
expression: ExpOrStr,
to: DATA_TYPE,
*types: DATA_TYPE,
**opts: t.Any,
) -> Expression | Cast:
"""
Cast an expression to a data type unless it is a specified type.
Args:
expression: The expression to cast.
to: The data type to cast to.
**types: The types to exclude from casting.
**opts: Extra keyword arguments for parsing `expression`
"""
expr = maybe_parse(expression, **opts)
if expr.is_type(*types):
return expr
return cast(expr, to, **opts) | Cast an expression to a data type unless it is a specified type. Args: expression: The expression to cast. to: The data type to cast to. **types: The types to exclude from casting. **opts: Extra keyword arguments for parsing `expression` |
152,953 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
ExpOrStr = t.Union[str, Expression]
class Tuple(Expression):
arg_types = {"expressions": False}
def isin(
self,
*expressions: t.Any,
query: t.Optional[ExpOrStr] = None,
unnest: t.Optional[ExpOrStr] | t.Collection[ExpOrStr] = None,
copy: bool = True,
**opts,
) -> In:
return In(
this=maybe_copy(self, copy),
expressions=[convert(e, copy=copy) for e in expressions],
query=maybe_parse(query, copy=copy, **opts) if query else None,
unnest=(
Unnest(
expressions=[
maybe_parse(t.cast(ExpOrStr, e), copy=copy, **opts)
for e in ensure_list(unnest)
]
)
if unnest
else None
),
)
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Type[E],
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E: ...
def maybe_parse(
sql_or_expression: str | E,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> E: ...
def maybe_parse(
sql_or_expression: ExpOrStr,
*,
into: t.Optional[IntoType] = None,
dialect: DialectType = None,
prefix: t.Optional[str] = None,
copy: bool = False,
**opts,
) -> Expression:
"""Gracefully handle a possible string or expression.
Example:
>>> maybe_parse("1")
Literal(this=1, is_string=False)
>>> maybe_parse(to_identifier("x"))
Identifier(this=x, quoted=False)
Args:
sql_or_expression: the SQL code string or an expression
into: the SQLGlot Expression to parse into
dialect: the dialect used to parse the input expressions (in the case that an
input expression is a SQL string).
prefix: a string to prefix the sql with before it gets parsed
(automatically includes a space)
copy: whether to copy the expression.
**opts: other options to use to parse the input expressions (again, in the case
that an input expression is a SQL string).
Returns:
Expression: the parsed or given expression.
"""
if isinstance(sql_or_expression, Expression):
if copy:
return sql_or_expression.copy()
return sql_or_expression
if sql_or_expression is None:
raise ParseError("SQL cannot be None")
import sqlglot
sql = str(sql_or_expression)
if prefix:
sql = f"{prefix} {sql}"
return sqlglot.parse_one(sql, read=dialect, into=into, **opts)
DialectType = t.Union[str, Dialect, t.Type[Dialect], None]
The provided code snippet includes necessary dependencies for implementing the `tuple_` function. Write a Python function `def tuple_( *expressions: ExpOrStr, copy: bool = True, dialect: DialectType = None, **kwargs ) -> Tuple` to solve the following problem:
Returns an tuple. Examples: >>> tuple_(1, 'x').sql() '(1, x)' Args: expressions: the expressions to add to the tuple. copy: whether to copy the argument expressions. dialect: the source dialect. kwargs: the kwargs used to instantiate the function of interest. Returns: A tuple expression.
Here is the function:
def tuple_(
*expressions: ExpOrStr, copy: bool = True, dialect: DialectType = None, **kwargs
) -> Tuple:
"""
Returns an tuple.
Examples:
>>> tuple_(1, 'x').sql()
'(1, x)'
Args:
expressions: the expressions to add to the tuple.
copy: whether to copy the argument expressions.
dialect: the source dialect.
kwargs: the kwargs used to instantiate the function of interest.
Returns:
A tuple expression.
"""
return Tuple(
expressions=[
maybe_parse(expression, copy=copy, dialect=dialect, **kwargs)
for expression in expressions
]
) | Returns an tuple. Examples: >>> tuple_(1, 'x').sql() '(1, x)' Args: expressions: the expressions to add to the tuple. copy: whether to copy the argument expressions. dialect: the source dialect. kwargs: the kwargs used to instantiate the function of interest. Returns: A tuple expression. |
152,954 | from __future__ import annotations
import datetime
import math
import numbers
import re
import textwrap
import typing as t
from collections import deque
from copy import deepcopy
from enum import auto
from functools import reduce
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.helper import (
AutoName,
camel_to_snake_case,
ensure_collection,
ensure_list,
is_int,
seq_get,
subclasses,
)
from sqlglot.tokens import Token
class Boolean(Condition):
pass
The provided code snippet includes necessary dependencies for implementing the `false` function. Write a Python function `def false() -> Boolean` to solve the following problem:
Returns a false Boolean expression.
Here is the function:
def false() -> Boolean:
"""
Returns a false Boolean expression.
"""
return Boolean(this=False) | Returns a false Boolean expression. |
152,955 | from __future__ import annotations
import abc
import typing as t
from sqlglot import expressions as exp
from sqlglot.dialects.dialect import Dialect
from sqlglot.errors import SchemaError
from sqlglot.helper import dict_depth
from sqlglot.trie import TrieResult, in_trie, new_trie
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql.types import StructType
from sqlglot.dialects.dialect import DialectType
ColumnMapping = t.Union[t.Dict, str, StructType, t.List]
def ensure_column_mapping(mapping: t.Optional[ColumnMapping]) -> t.Dict:
if mapping is None:
return {}
elif isinstance(mapping, dict):
return mapping
elif isinstance(mapping, str):
col_name_type_strs = [x.strip() for x in mapping.split(",")]
return {
name_type_str.split(":")[0].strip(): name_type_str.split(":")[1].strip()
for name_type_str in col_name_type_strs
}
# Check if mapping looks like a DataFrame StructType
elif hasattr(mapping, "simpleString"):
return {struct_field.name: struct_field.dataType.simpleString() for struct_field in mapping}
elif isinstance(mapping, list):
return {x.strip(): None for x in mapping}
raise ValueError(f"Invalid mapping provided: {type(mapping)}") | null |
152,956 | from __future__ import annotations
import abc
import typing as t
from sqlglot import expressions as exp
from sqlglot.dialects.dialect import Dialect
from sqlglot.errors import SchemaError
from sqlglot.helper import dict_depth
from sqlglot.trie import TrieResult, in_trie, new_trie
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql.types import StructType
from sqlglot.dialects.dialect import DialectType
ColumnMapping = t.Union[t.Dict, str, StructType, t.List]
def flatten_schema(
schema: t.Dict, depth: int, keys: t.Optional[t.List[str]] = None
) -> t.List[t.List[str]]:
tables = []
keys = keys or []
for k, v in schema.items():
if depth >= 2:
tables.extend(flatten_schema(v, depth - 1, keys + [k]))
elif depth == 1:
tables.append(keys + [k])
return tables | null |
152,957 | from __future__ import annotations
import abc
import typing as t
from sqlglot import expressions as exp
from sqlglot.dialects.dialect import Dialect
from sqlglot.errors import SchemaError
from sqlglot.helper import dict_depth
from sqlglot.trie import TrieResult, in_trie, new_trie
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql.types import StructType
from sqlglot.dialects.dialect import DialectType
ColumnMapping = t.Union[t.Dict, str, StructType, t.List]
The provided code snippet includes necessary dependencies for implementing the `nested_get` function. Write a Python function `def nested_get( d: t.Dict, *path: t.Tuple[str, str], raise_on_missing: bool = True ) -> t.Optional[t.Any]` to solve the following problem:
Get a value for a nested dictionary. Args: d: the dictionary to search. *path: tuples of (name, key), where: `key` is the key in the dictionary to get. `name` is a string to use in the error if `key` isn't found. Returns: The value or None if it doesn't exist.
Here is the function:
def nested_get(
d: t.Dict, *path: t.Tuple[str, str], raise_on_missing: bool = True
) -> t.Optional[t.Any]:
"""
Get a value for a nested dictionary.
Args:
d: the dictionary to search.
*path: tuples of (name, key), where:
`key` is the key in the dictionary to get.
`name` is a string to use in the error if `key` isn't found.
Returns:
The value or None if it doesn't exist.
"""
for name, key in path:
d = d.get(key) # type: ignore
if d is None:
if raise_on_missing:
name = "table" if name == "this" else name
raise ValueError(f"Unknown {name}: {key}")
return None
return d | Get a value for a nested dictionary. Args: d: the dictionary to search. *path: tuples of (name, key), where: `key` is the key in the dictionary to get. `name` is a string to use in the error if `key` isn't found. Returns: The value or None if it doesn't exist. |
152,958 | from __future__ import annotations
import abc
import typing as t
from sqlglot import expressions as exp
from sqlglot.dialects.dialect import Dialect
from sqlglot.errors import SchemaError
from sqlglot.helper import dict_depth
from sqlglot.trie import TrieResult, in_trie, new_trie
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql.types import StructType
from sqlglot.dialects.dialect import DialectType
ColumnMapping = t.Union[t.Dict, str, StructType, t.List]
The provided code snippet includes necessary dependencies for implementing the `nested_set` function. Write a Python function `def nested_set(d: t.Dict, keys: t.Sequence[str], value: t.Any) -> t.Dict` to solve the following problem:
In-place set a value for a nested dictionary Example: >>> nested_set({}, ["top_key", "second_key"], "value") {'top_key': {'second_key': 'value'}} >>> nested_set({"top_key": {"third_key": "third_value"}}, ["top_key", "second_key"], "value") {'top_key': {'third_key': 'third_value', 'second_key': 'value'}} Args: d: dictionary to update. keys: the keys that makeup the path to `value`. value: the value to set in the dictionary for the given key path. Returns: The (possibly) updated dictionary.
Here is the function:
def nested_set(d: t.Dict, keys: t.Sequence[str], value: t.Any) -> t.Dict:
"""
In-place set a value for a nested dictionary
Example:
>>> nested_set({}, ["top_key", "second_key"], "value")
{'top_key': {'second_key': 'value'}}
>>> nested_set({"top_key": {"third_key": "third_value"}}, ["top_key", "second_key"], "value")
{'top_key': {'third_key': 'third_value', 'second_key': 'value'}}
Args:
d: dictionary to update.
keys: the keys that makeup the path to `value`.
value: the value to set in the dictionary for the given key path.
Returns:
The (possibly) updated dictionary.
"""
if not keys:
return d
if len(keys) == 1:
d[keys[0]] = value
return d
subd = d
for key in keys[:-1]:
if key not in subd:
subd = subd.setdefault(key, {})
else:
subd = subd[key]
subd[keys[-1]] = value
return d | In-place set a value for a nested dictionary Example: >>> nested_set({}, ["top_key", "second_key"], "value") {'top_key': {'second_key': 'value'}} >>> nested_set({"top_key": {"third_key": "third_value"}}, ["top_key", "second_key"], "value") {'top_key': {'third_key': 'third_value', 'second_key': 'value'}} Args: d: dictionary to update. keys: the keys that makeup the path to `value`. value: the value to set in the dictionary for the given key path. Returns: The (possibly) updated dictionary. |
152,959 | import ast
import collections
import itertools
import math
from sqlglot import exp, generator, planner, tokens
from sqlglot.dialects.dialect import Dialect, inline_array_sql
from sqlglot.errors import ExecuteError
from sqlglot.executor.context import Context
from sqlglot.executor.env import ENV
from sqlglot.executor.table import RowReader, Table
from sqlglot.helper import csv_reader, ensure_list, subclasses
def _ordered_py(self, expression):
this = self.sql(expression, "this")
desc = "True" if expression.args.get("desc") else "False"
nulls_first = "True" if expression.args.get("nulls_first") else "False"
return f"ORDERED({this}, {desc}, {nulls_first})" | null |
152,960 | import ast
import collections
import itertools
import math
from sqlglot import exp, generator, planner, tokens
from sqlglot.dialects.dialect import Dialect, inline_array_sql
from sqlglot.errors import ExecuteError
from sqlglot.executor.context import Context
from sqlglot.executor.env import ENV
from sqlglot.executor.table import RowReader, Table
from sqlglot.helper import csv_reader, ensure_list, subclasses
def ensure_list(value: t.Collection[T]) -> t.List[T]: ...
def ensure_list(value: T) -> t.List[T]: ...
def ensure_list(value):
"""
Ensures that a value is a list, otherwise casts or wraps it into one.
Args:
value: The value of interest.
Returns:
The value cast as a list if it's a list or a tuple, or else the value wrapped in a list.
"""
if value is None:
return []
if isinstance(value, (list, tuple)):
return list(value)
return [value]
def _rename(self, e):
try:
values = list(e.args.values())
if len(values) == 1:
values = values[0]
if not isinstance(values, list):
return self.func(e.key, values)
return self.func(e.key, *values)
if isinstance(e, exp.Func) and e.is_var_len_args:
*head, tail = values
return self.func(e.key, *head, *ensure_list(tail))
return self.func(e.key, *values)
except Exception as ex:
raise Exception(f"Could not rename {repr(e)}") from ex | null |
152,961 | import ast
import collections
import itertools
import math
from sqlglot import exp, generator, planner, tokens
from sqlglot.dialects.dialect import Dialect, inline_array_sql
from sqlglot.errors import ExecuteError
from sqlglot.executor.context import Context
from sqlglot.executor.env import ENV
from sqlglot.executor.table import RowReader, Table
from sqlglot.helper import csv_reader, ensure_list, subclasses
def _case_sql(self, expression):
this = self.sql(expression, "this")
chain = self.sql(expression, "default") or "None"
for e in reversed(expression.args["ifs"]):
true = self.sql(e, "true")
condition = self.sql(e, "this")
condition = f"{this} = ({condition})" if this else condition
chain = f"{true} if {condition} else ({chain})"
return chain | null |
152,962 | import ast
import collections
import itertools
import math
from sqlglot import exp, generator, planner, tokens
from sqlglot.dialects.dialect import Dialect, inline_array_sql
from sqlglot.errors import ExecuteError
from sqlglot.executor.context import Context
from sqlglot.executor.env import ENV
from sqlglot.executor.table import RowReader, Table
from sqlglot.helper import csv_reader, ensure_list, subclasses
def _lambda_sql(self, e: exp.Lambda) -> str:
names = {e.name.lower() for e in e.expressions}
e = e.transform(
lambda n: (
exp.var(n.name) if isinstance(n, exp.Identifier) and n.name.lower() in names else n
)
).assert_is(exp.Lambda)
return f"lambda {self.expressions(e, flat=True)}: {self.sql(e, 'this')}" | null |
152,963 | import ast
import collections
import itertools
import math
from sqlglot import exp, generator, planner, tokens
from sqlglot.dialects.dialect import Dialect, inline_array_sql
from sqlglot.errors import ExecuteError
from sqlglot.executor.context import Context
from sqlglot.executor.env import ENV
from sqlglot.executor.table import RowReader, Table
from sqlglot.helper import csv_reader, ensure_list, subclasses
def _div_sql(self: generator.Generator, e: exp.Div) -> str:
denominator = self.sql(e, "expression")
if e.args.get("safe"):
denominator += " or None"
sql = f"DIV({self.sql(e, 'this')}, {denominator})"
if e.args.get("typed"):
sql = f"int({sql})"
return sql | null |
152,964 | from __future__ import annotations
import typing as t
from sqlglot.dialects.dialect import DialectType
from sqlglot.helper import dict_depth
from sqlglot.schema import AbstractMappingSchema, normalize_name
class Tables(AbstractMappingSchema):
pass
def _ensure_tables(d: t.Optional[t.Dict], dialect: DialectType = None) -> t.Dict:
if not d:
return {}
depth = dict_depth(d)
if depth > 1:
return {
normalize_name(k, dialect=dialect, is_table=True).name: _ensure_tables(
v, dialect=dialect
)
for k, v in d.items()
}
result = {}
for table_name, table in d.items():
table_name = normalize_name(table_name, dialect=dialect).name
if isinstance(table, Table):
result[table_name] = table
else:
table = [
{
normalize_name(column_name, dialect=dialect).name: value
for column_name, value in row.items()
}
for row in table
]
column_names = tuple(column_name for column_name in table[0]) if table else ()
rows = [tuple(row[name] for name in column_names) for row in table]
result[table_name] = Table(columns=column_names, rows=rows)
return result
DialectType = t.Union[str, Dialect, t.Type[Dialect], None]
def ensure_tables(d: t.Optional[t.Dict], dialect: DialectType = None) -> Tables:
return Tables(_ensure_tables(d, dialect=dialect)) | null |
152,965 | import datetime
import inspect
import re
import statistics
from functools import wraps
from sqlglot import exp
from sqlglot.generator import Generator
from sqlglot.helper import PYTHON_VERSION, is_int, seq_get
def filter_nulls(func, empty_null=True):
@wraps(func)
def _func(values):
filtered = tuple(v for v in values if v is not None)
if not filtered and empty_null:
return None
return func(filtered)
return _func | null |
152,966 | import datetime
import inspect
import re
import statistics
from functools import wraps
from sqlglot import exp
from sqlglot.generator import Generator
from sqlglot.helper import PYTHON_VERSION, is_int, seq_get
The provided code snippet includes necessary dependencies for implementing the `null_if_any` function. Write a Python function `def null_if_any(*required)` to solve the following problem:
Decorator that makes a function return `None` if any of the `required` arguments are `None`. This also supports decoration with no arguments, e.g.: @null_if_any def foo(a, b): ... In which case all arguments are required.
Here is the function:
def null_if_any(*required):
    """
    Decorator that makes a function return `None` if any of the `required` arguments are `None`.
    This also supports decoration with no arguments, e.g.:
    @null_if_any
    def foo(a, b): ...
    In which case all arguments are required.
    """
    # Bare usage: `@null_if_any` passes the function itself as the sole argument.
    direct_target = None
    if len(required) == 1 and callable(required[0]):
        direct_target = required[0]
        required = ()

    def decorator(func):
        if required:
            # Resolve the required parameter names to positional indices once.
            indices = [
                idx
                for idx, name in enumerate(inspect.signature(func).parameters)
                if name in required
            ]

            def has_null(*args):
                return any(args[idx] is None for idx in indices)

        else:

            def has_null(*args):
                return any(arg is None for arg in args)

        @wraps(func)
        def wrapper(*args):
            return None if has_null(*args) else func(*args)

        return wrapper

    return decorator(direct_target) if direct_target else decorator
152,967 | import datetime
import inspect
import re
import statistics
from functools import wraps
from sqlglot import exp
from sqlglot.generator import Generator
from sqlglot.helper import PYTHON_VERSION, is_int, seq_get
def str_position(substr, this, position=None):
    """SQL STRPOS/POSITION: 1-based index of `substr` in `this`, or 0 if absent.

    `position`, when given, is the 1-based offset at which the search starts.
    """
    start = None if position is None else position - 1
    return this.find(substr, start) + 1
152,968 | import datetime
import inspect
import re
import statistics
from functools import wraps
from sqlglot import exp
from sqlglot.generator import Generator
from sqlglot.helper import PYTHON_VERSION, is_int, seq_get
def substring(this, start=None, length=None):
    """SQL SUBSTRING semantics: 1-based start, negative start counts from the end."""
    if start is None:
        return this
    if start == 0:
        # SQL treats position 0 as an empty result.
        return ""
    begin = len(this) + start if start < 0 else start - 1
    end = None if length is None else begin + length
    return this[begin:end]
152,969 | import datetime
import inspect
import re
import statistics
from functools import wraps
from sqlglot import exp
from sqlglot.generator import Generator
from sqlglot.helper import PYTHON_VERSION, is_int, seq_get
def cast(this, to):
    """Evaluate CAST(`this` AS `to`) in Python, mirroring SQL casting rules.

    Raises NotImplementedError for unsupported source/target combinations.
    """
    if to == exp.DataType.Type.DATE:
        # datetime is a subclass of date, so check it first.
        if isinstance(this, datetime.datetime):
            return this.date()
        if isinstance(this, datetime.date):
            return this
        if isinstance(this, str):
            return datetime.date.fromisoformat(this)
    elif to == exp.DataType.Type.TIME:
        if isinstance(this, datetime.datetime):
            return this.time()
        if isinstance(this, datetime.time):
            return this
        if isinstance(this, str):
            return datetime.time.fromisoformat(this)
    elif to in (exp.DataType.Type.DATETIME, exp.DataType.Type.TIMESTAMP):
        if isinstance(this, datetime.datetime):
            return this
        if isinstance(this, datetime.date):
            return datetime.datetime(this.year, this.month, this.day)
        if isinstance(this, str):
            return datetime.datetime.fromisoformat(this)
    elif to == exp.DataType.Type.BOOLEAN:
        return bool(this)
    elif to in exp.DataType.TEXT_TYPES:
        return str(this)
    elif to in {exp.DataType.Type.FLOAT, exp.DataType.Type.DOUBLE}:
        return float(this)
    elif to in exp.DataType.NUMERIC_TYPES:
        return int(this)

    raise NotImplementedError(f"Casting {this} to '{to}' not implemented.")
152,970 | import datetime
import inspect
import re
import statistics
from functools import wraps
from sqlglot import exp
from sqlglot.generator import Generator
from sqlglot.helper import PYTHON_VERSION, is_int, seq_get
class reverse_key:
    """Sort-key adapter that inverts the natural ordering of the wrapped value.

    Passing this as `key=` to `sorted` yields a descending sort.
    """

    def __init__(self, obj):
        self.obj = obj

    def __eq__(self, other):
        return other.obj == self.obj

    def __lt__(self, other):
        # Operands are deliberately flipped: larger wrapped values sort first.
        return other.obj < self.obj
def ordered(this, desc, nulls_first):
    """Sort-key helper: wrap `this` in `reverse_key` for DESC, pass through for ASC.

    NOTE(review): `nulls_first` is accepted but ignored here; NULL ordering is
    presumably handled before values reach this helper — confirm at call sites.
    """
    return reverse_key(this) if desc else this
152,971 | import datetime
import inspect
import re
import statistics
from functools import wraps
from sqlglot import exp
from sqlglot.generator import Generator
from sqlglot.helper import PYTHON_VERSION, is_int, seq_get
class Generator(metaclass=_Generator):
"""
Generator converts a given syntax tree to the corresponding SQL string.
Args:
pretty: Whether to format the produced SQL string.
Default: False.
identify: Determines when an identifier should be quoted. Possible values are:
False (default): Never quote, except in cases where it's mandatory by the dialect.
True or 'always': Always quote.
'safe': Only quote identifiers that are case insensitive.
normalize: Whether to normalize identifiers to lowercase.
Default: False.
pad: The pad size in a formatted string.
Default: 2.
indent: The indentation size in a formatted string.
Default: 2.
normalize_functions: How to normalize function names. Possible values are:
"upper" or True (default): Convert names to uppercase.
"lower": Convert names to lowercase.
False: Disables function name normalization.
unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.
Default ErrorLevel.WARN.
max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.
This is only relevant if unsupported_level is ErrorLevel.RAISE.
Default: 3
leading_comma: Whether the comma is leading or trailing in select expressions.
This is only relevant when generating in pretty mode.
Default: False
max_text_width: The max number of characters in a segment before creating new lines in pretty mode.
The default is on the smaller end because the length only represents a segment and not the true
line length.
Default: 80
comments: Whether to preserve comments in the output SQL code.
Default: True
"""
TRANSFORMS: t.Dict[t.Type[exp.Expression], t.Callable[..., str]] = {
**JSON_PATH_PART_TRANSFORMS,
exp.AutoRefreshProperty: lambda self, e: f"AUTO REFRESH {self.sql(e, 'this')}",
exp.BackupProperty: lambda self, e: f"BACKUP {self.sql(e, 'this')}",
exp.CaseSpecificColumnConstraint: lambda _,
e: f"{'NOT ' if e.args.get('not_') else ''}CASESPECIFIC",
exp.CharacterSetColumnConstraint: lambda self, e: f"CHARACTER SET {self.sql(e, 'this')}",
exp.CharacterSetProperty: lambda self,
e: f"{'DEFAULT ' if e.args.get('default') else ''}CHARACTER SET={self.sql(e, 'this')}",
exp.ClusteredColumnConstraint: lambda self,
e: f"CLUSTERED ({self.expressions(e, 'this', indent=False)})",
exp.CollateColumnConstraint: lambda self, e: f"COLLATE {self.sql(e, 'this')}",
exp.CommentColumnConstraint: lambda self, e: f"COMMENT {self.sql(e, 'this')}",
exp.CopyGrantsProperty: lambda *_: "COPY GRANTS",
exp.DateAdd: lambda self, e: self.func(
"DATE_ADD", e.this, e.expression, exp.Literal.string(e.text("unit"))
),
exp.DateFormatColumnConstraint: lambda self, e: f"FORMAT {self.sql(e, 'this')}",
exp.DefaultColumnConstraint: lambda self, e: f"DEFAULT {self.sql(e, 'this')}",
exp.EncodeColumnConstraint: lambda self, e: f"ENCODE {self.sql(e, 'this')}",
exp.ExcludeColumnConstraint: lambda self, e: f"EXCLUDE {self.sql(e, 'this').lstrip()}",
exp.ExecuteAsProperty: lambda self, e: self.naked_property(e),
exp.ExternalProperty: lambda *_: "EXTERNAL",
exp.GlobalProperty: lambda *_: "GLOBAL",
exp.HeapProperty: lambda *_: "HEAP",
exp.IcebergProperty: lambda *_: "ICEBERG",
exp.InheritsProperty: lambda self, e: f"INHERITS ({self.expressions(e, flat=True)})",
exp.InlineLengthColumnConstraint: lambda self, e: f"INLINE LENGTH {self.sql(e, 'this')}",
exp.InputModelProperty: lambda self, e: f"INPUT{self.sql(e, 'this')}",
exp.IntervalSpan: lambda self, e: f"{self.sql(e, 'this')} TO {self.sql(e, 'expression')}",
exp.JSONExtract: lambda self, e: self.func(
"JSON_EXTRACT", e.this, e.expression, *e.expressions
),
exp.JSONExtractScalar: lambda self, e: self.func(
"JSON_EXTRACT_SCALAR", e.this, e.expression, *e.expressions
),
exp.LanguageProperty: lambda self, e: self.naked_property(e),
exp.LocationProperty: lambda self, e: self.naked_property(e),
exp.LogProperty: lambda _, e: f"{'NO ' if e.args.get('no') else ''}LOG",
exp.MaterializedProperty: lambda *_: "MATERIALIZED",
exp.NonClusteredColumnConstraint: lambda self,
e: f"NONCLUSTERED ({self.expressions(e, 'this', indent=False)})",
exp.NoPrimaryIndexProperty: lambda *_: "NO PRIMARY INDEX",
exp.NotForReplicationColumnConstraint: lambda *_: "NOT FOR REPLICATION",
exp.OnCommitProperty: lambda _,
e: f"ON COMMIT {'DELETE' if e.args.get('delete') else 'PRESERVE'} ROWS",
exp.OnProperty: lambda self, e: f"ON {self.sql(e, 'this')}",
exp.OnUpdateColumnConstraint: lambda self, e: f"ON UPDATE {self.sql(e, 'this')}",
exp.OutputModelProperty: lambda self, e: f"OUTPUT{self.sql(e, 'this')}",
exp.PathColumnConstraint: lambda self, e: f"PATH {self.sql(e, 'this')}",
exp.RemoteWithConnectionModelProperty: lambda self,
e: f"REMOTE WITH CONNECTION {self.sql(e, 'this')}",
exp.ReturnsProperty: lambda self, e: self.naked_property(e),
exp.SampleProperty: lambda self, e: f"SAMPLE BY {self.sql(e, 'this')}",
exp.SetConfigProperty: lambda self, e: self.sql(e, "this"),
exp.SetProperty: lambda _, e: f"{'MULTI' if e.args.get('multi') else ''}SET",
exp.SettingsProperty: lambda self, e: f"SETTINGS{self.seg('')}{(self.expressions(e))}",
exp.SharingProperty: lambda self, e: f"SHARING={self.sql(e, 'this')}",
exp.SqlReadWriteProperty: lambda _, e: e.name,
exp.SqlSecurityProperty: lambda _,
e: f"SQL SECURITY {'DEFINER' if e.args.get('definer') else 'INVOKER'}",
exp.StabilityProperty: lambda _, e: e.name,
exp.TemporaryProperty: lambda *_: "TEMPORARY",
exp.TitleColumnConstraint: lambda self, e: f"TITLE {self.sql(e, 'this')}",
exp.Timestamp: lambda self, e: self.func("TIMESTAMP", e.this, e.expression),
exp.ToTableProperty: lambda self, e: f"TO {self.sql(e.this)}",
exp.TransformModelProperty: lambda self, e: self.func("TRANSFORM", *e.expressions),
exp.TransientProperty: lambda *_: "TRANSIENT",
exp.UppercaseColumnConstraint: lambda *_: "UPPERCASE",
exp.UnloggedProperty: lambda *_: "UNLOGGED",
exp.VarMap: lambda self, e: self.func("MAP", e.args["keys"], e.args["values"]),
exp.VolatileProperty: lambda *_: "VOLATILE",
exp.WithJournalTableProperty: lambda self, e: f"WITH JOURNAL TABLE={self.sql(e, 'this')}",
exp.WithOperator: lambda self, e: f"{self.sql(e, 'this')} WITH {self.sql(e, 'op')}",
}
# Whether null ordering is supported in order by
# True: Full Support, None: No support, False: No support in window specifications
NULL_ORDERING_SUPPORTED: t.Optional[bool] = True
# Whether ignore nulls is inside the agg or outside.
# FIRST(x IGNORE NULLS) OVER vs FIRST (x) IGNORE NULLS OVER
IGNORE_NULLS_IN_FUNC = False
# Whether locking reads (i.e. SELECT ... FOR UPDATE/SHARE) are supported
LOCKING_READS_SUPPORTED = False
# Always do union distinct or union all
EXPLICIT_UNION = False
# Wrap derived values in parens, usually standard but spark doesn't support it
WRAP_DERIVED_VALUES = True
# Whether create function uses an AS before the RETURN
CREATE_FUNCTION_RETURN_AS = True
# Whether MERGE ... WHEN MATCHED BY SOURCE is allowed
MATCHED_BY_SOURCE = True
# Whether the INTERVAL expression works only with values like '1 day'
SINGLE_STRING_INTERVAL = False
# Whether the plural form of date parts like day (i.e. "days") is supported in INTERVALs
INTERVAL_ALLOWS_PLURAL_FORM = True
# Whether limit and fetch are supported (possible values: "ALL", "LIMIT", "FETCH")
LIMIT_FETCH = "ALL"
# Whether limit and fetch allow expressions or just limits
LIMIT_ONLY_LITERALS = False
# Whether a table is allowed to be renamed with a db
RENAME_TABLE_WITH_DB = True
# The separator for grouping sets and rollups
GROUPINGS_SEP = ","
# The string used for creating an index on a table
INDEX_ON = "ON"
# Whether join hints should be generated
JOIN_HINTS = True
# Whether table hints should be generated
TABLE_HINTS = True
# Whether query hints should be generated
QUERY_HINTS = True
# What kind of separator to use for query hints
QUERY_HINT_SEP = ", "
# Whether comparing against booleans (e.g. x IS TRUE) is supported
IS_BOOL_ALLOWED = True
# Whether to include the "SET" keyword in the "INSERT ... ON DUPLICATE KEY UPDATE" statement
DUPLICATE_KEY_UPDATE_WITH_SET = True
# Whether to generate the limit as TOP <value> instead of LIMIT <value>
LIMIT_IS_TOP = False
# Whether to generate INSERT INTO ... RETURNING or INSERT INTO RETURNING ...
RETURNING_END = True
# Whether to generate the (+) suffix for columns used in old-style join conditions
COLUMN_JOIN_MARKS_SUPPORTED = False
# Whether to generate an unquoted value for EXTRACT's date part argument
EXTRACT_ALLOWS_QUOTES = True
# Whether TIMETZ / TIMESTAMPTZ will be generated using the "WITH TIME ZONE" syntax
TZ_TO_WITH_TIME_ZONE = False
# Whether the NVL2 function is supported
NVL2_SUPPORTED = True
# https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax
SELECT_KINDS: t.Tuple[str, ...] = ("STRUCT", "VALUE")
# Whether VALUES statements can be used as derived tables.
# MySQL 5 and Redshift do not allow this, so when False, it will convert
# SELECT * VALUES into SELECT UNION
VALUES_AS_TABLE = True
# Whether the word COLUMN is included when adding a column with ALTER TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = True
# UNNEST WITH ORDINALITY (presto) instead of UNNEST WITH OFFSET (bigquery)
UNNEST_WITH_ORDINALITY = True
# Whether FILTER (WHERE cond) can be used for conditional aggregation
AGGREGATE_FILTER_SUPPORTED = True
# Whether JOIN sides (LEFT, RIGHT) are supported in conjunction with SEMI/ANTI join kinds
SEMI_ANTI_JOIN_WITH_SIDE = True
# Whether to include the type of a computed column in the CREATE DDL
COMPUTED_COLUMN_WITH_TYPE = True
# Whether CREATE TABLE .. COPY .. is supported. False means we'll generate CLONE instead of COPY
SUPPORTS_TABLE_COPY = True
# Whether parentheses are required around the table sample's expression
TABLESAMPLE_REQUIRES_PARENS = True
# Whether a table sample clause's size needs to be followed by the ROWS keyword
TABLESAMPLE_SIZE_IS_ROWS = True
# The keyword(s) to use when generating a sample clause
TABLESAMPLE_KEYWORDS = "TABLESAMPLE"
# Whether the TABLESAMPLE clause supports a method name, like BERNOULLI
TABLESAMPLE_WITH_METHOD = True
# The keyword to use when specifying the seed of a sample clause
TABLESAMPLE_SEED_KEYWORD = "SEED"
# Whether COLLATE is a function instead of a binary operator
COLLATE_IS_FUNC = False
# Whether data types support additional specifiers like e.g. CHAR or BYTE (oracle)
DATA_TYPE_SPECIFIERS_ALLOWED = False
# Whether conditions require booleans WHERE x = 0 vs WHERE x
ENSURE_BOOLS = False
# Whether the "RECURSIVE" keyword is required when defining recursive CTEs
CTE_RECURSIVE_KEYWORD_REQUIRED = True
# Whether CONCAT requires >1 arguments
SUPPORTS_SINGLE_ARG_CONCAT = True
# Whether LAST_DAY function supports a date part argument
LAST_DAY_SUPPORTS_DATE_PART = True
# Whether named columns are allowed in table aliases
SUPPORTS_TABLE_ALIAS_COLUMNS = True
# Whether UNPIVOT aliases are Identifiers (False means they're Literals)
UNPIVOT_ALIASES_ARE_IDENTIFIERS = True
# What delimiter to use for separating JSON key/value pairs
JSON_KEY_VALUE_PAIR_SEP = ":"
# INSERT OVERWRITE TABLE x override
INSERT_OVERWRITE = " OVERWRITE TABLE"
# Whether the SELECT .. INTO syntax is used instead of CTAS
SUPPORTS_SELECT_INTO = False
# Whether UNLOGGED tables can be created
SUPPORTS_UNLOGGED_TABLES = False
# Whether the CREATE TABLE LIKE statement is supported
SUPPORTS_CREATE_TABLE_LIKE = True
# Whether the LikeProperty needs to be specified inside of the schema clause
LIKE_PROPERTY_INSIDE_SCHEMA = False
# Whether DISTINCT can be followed by multiple args in an AggFunc. If not, it will be
# transpiled into a series of CASE-WHEN-ELSE, ultimately using a tuple consisting of the args
MULTI_ARG_DISTINCT = True
# Whether the JSON extraction operators expect a value of type JSON
JSON_TYPE_REQUIRED_FOR_EXTRACTION = False
# Whether bracketed keys like ["foo"] are supported in JSON paths
JSON_PATH_BRACKETED_KEY_SUPPORTED = True
# Whether to escape keys using single quotes in JSON paths
JSON_PATH_SINGLE_QUOTE_ESCAPE = False
# The JSONPathPart expressions supported by this dialect
SUPPORTED_JSON_PATH_PARTS = ALL_JSON_PATH_PARTS.copy()
# Whether any(f(x) for x in array) can be implemented by this dialect
CAN_IMPLEMENT_ARRAY_ANY = False
# Whether the function TO_NUMBER is supported
SUPPORTS_TO_NUMBER = True
TYPE_MAPPING = {
exp.DataType.Type.NCHAR: "CHAR",
exp.DataType.Type.NVARCHAR: "VARCHAR",
exp.DataType.Type.MEDIUMTEXT: "TEXT",
exp.DataType.Type.LONGTEXT: "TEXT",
exp.DataType.Type.TINYTEXT: "TEXT",
exp.DataType.Type.MEDIUMBLOB: "BLOB",
exp.DataType.Type.LONGBLOB: "BLOB",
exp.DataType.Type.TINYBLOB: "BLOB",
exp.DataType.Type.INET: "INET",
}
STAR_MAPPING = {
"except": "EXCEPT",
"replace": "REPLACE",
}
TIME_PART_SINGULARS = {
"MICROSECONDS": "MICROSECOND",
"SECONDS": "SECOND",
"MINUTES": "MINUTE",
"HOURS": "HOUR",
"DAYS": "DAY",
"WEEKS": "WEEK",
"MONTHS": "MONTH",
"QUARTERS": "QUARTER",
"YEARS": "YEAR",
}
AFTER_HAVING_MODIFIER_TRANSFORMS = {
"cluster": lambda self, e: self.sql(e, "cluster"),
"distribute": lambda self, e: self.sql(e, "distribute"),
"qualify": lambda self, e: self.sql(e, "qualify"),
"sort": lambda self, e: self.sql(e, "sort"),
"windows": lambda self, e: (
self.seg("WINDOW ") + self.expressions(e, key="windows", flat=True)
if e.args.get("windows")
else ""
),
}
TOKEN_MAPPING: t.Dict[TokenType, str] = {}
STRUCT_DELIMITER = ("<", ">")
PARAMETER_TOKEN = "@"
NAMED_PLACEHOLDER_TOKEN = ":"
PROPERTIES_LOCATION = {
exp.AlgorithmProperty: exp.Properties.Location.POST_CREATE,
exp.AutoIncrementProperty: exp.Properties.Location.POST_SCHEMA,
exp.AutoRefreshProperty: exp.Properties.Location.POST_SCHEMA,
exp.BackupProperty: exp.Properties.Location.POST_SCHEMA,
exp.BlockCompressionProperty: exp.Properties.Location.POST_NAME,
exp.CharacterSetProperty: exp.Properties.Location.POST_SCHEMA,
exp.ChecksumProperty: exp.Properties.Location.POST_NAME,
exp.CollateProperty: exp.Properties.Location.POST_SCHEMA,
exp.CopyGrantsProperty: exp.Properties.Location.POST_SCHEMA,
exp.Cluster: exp.Properties.Location.POST_SCHEMA,
exp.ClusteredByProperty: exp.Properties.Location.POST_SCHEMA,
exp.DataBlocksizeProperty: exp.Properties.Location.POST_NAME,
exp.DefinerProperty: exp.Properties.Location.POST_CREATE,
exp.DictRange: exp.Properties.Location.POST_SCHEMA,
exp.DictProperty: exp.Properties.Location.POST_SCHEMA,
exp.DistKeyProperty: exp.Properties.Location.POST_SCHEMA,
exp.DistStyleProperty: exp.Properties.Location.POST_SCHEMA,
exp.EngineProperty: exp.Properties.Location.POST_SCHEMA,
exp.ExecuteAsProperty: exp.Properties.Location.POST_SCHEMA,
exp.ExternalProperty: exp.Properties.Location.POST_CREATE,
exp.FallbackProperty: exp.Properties.Location.POST_NAME,
exp.FileFormatProperty: exp.Properties.Location.POST_WITH,
exp.FreespaceProperty: exp.Properties.Location.POST_NAME,
exp.GlobalProperty: exp.Properties.Location.POST_CREATE,
exp.HeapProperty: exp.Properties.Location.POST_WITH,
exp.InheritsProperty: exp.Properties.Location.POST_SCHEMA,
exp.IcebergProperty: exp.Properties.Location.POST_CREATE,
exp.InputModelProperty: exp.Properties.Location.POST_SCHEMA,
exp.IsolatedLoadingProperty: exp.Properties.Location.POST_NAME,
exp.JournalProperty: exp.Properties.Location.POST_NAME,
exp.LanguageProperty: exp.Properties.Location.POST_SCHEMA,
exp.LikeProperty: exp.Properties.Location.POST_SCHEMA,
exp.LocationProperty: exp.Properties.Location.POST_SCHEMA,
exp.LockProperty: exp.Properties.Location.POST_SCHEMA,
exp.LockingProperty: exp.Properties.Location.POST_ALIAS,
exp.LogProperty: exp.Properties.Location.POST_NAME,
exp.MaterializedProperty: exp.Properties.Location.POST_CREATE,
exp.MergeBlockRatioProperty: exp.Properties.Location.POST_NAME,
exp.NoPrimaryIndexProperty: exp.Properties.Location.POST_EXPRESSION,
exp.OnProperty: exp.Properties.Location.POST_SCHEMA,
exp.OnCommitProperty: exp.Properties.Location.POST_EXPRESSION,
exp.Order: exp.Properties.Location.POST_SCHEMA,
exp.OutputModelProperty: exp.Properties.Location.POST_SCHEMA,
exp.PartitionedByProperty: exp.Properties.Location.POST_WITH,
exp.PartitionedOfProperty: exp.Properties.Location.POST_SCHEMA,
exp.PrimaryKey: exp.Properties.Location.POST_SCHEMA,
exp.Property: exp.Properties.Location.POST_WITH,
exp.RemoteWithConnectionModelProperty: exp.Properties.Location.POST_SCHEMA,
exp.ReturnsProperty: exp.Properties.Location.POST_SCHEMA,
exp.RowFormatProperty: exp.Properties.Location.POST_SCHEMA,
exp.RowFormatDelimitedProperty: exp.Properties.Location.POST_SCHEMA,
exp.RowFormatSerdeProperty: exp.Properties.Location.POST_SCHEMA,
exp.SampleProperty: exp.Properties.Location.POST_SCHEMA,
exp.SchemaCommentProperty: exp.Properties.Location.POST_SCHEMA,
exp.SerdeProperties: exp.Properties.Location.POST_SCHEMA,
exp.Set: exp.Properties.Location.POST_SCHEMA,
exp.SettingsProperty: exp.Properties.Location.POST_SCHEMA,
exp.SetProperty: exp.Properties.Location.POST_CREATE,
exp.SetConfigProperty: exp.Properties.Location.POST_SCHEMA,
exp.SharingProperty: exp.Properties.Location.POST_EXPRESSION,
exp.SequenceProperties: exp.Properties.Location.POST_EXPRESSION,
exp.SortKeyProperty: exp.Properties.Location.POST_SCHEMA,
exp.SqlReadWriteProperty: exp.Properties.Location.POST_SCHEMA,
exp.SqlSecurityProperty: exp.Properties.Location.POST_CREATE,
exp.StabilityProperty: exp.Properties.Location.POST_SCHEMA,
exp.TemporaryProperty: exp.Properties.Location.POST_CREATE,
exp.ToTableProperty: exp.Properties.Location.POST_SCHEMA,
exp.TransientProperty: exp.Properties.Location.POST_CREATE,
exp.TransformModelProperty: exp.Properties.Location.POST_SCHEMA,
exp.MergeTreeTTL: exp.Properties.Location.POST_SCHEMA,
exp.UnloggedProperty: exp.Properties.Location.POST_CREATE,
exp.VolatileProperty: exp.Properties.Location.POST_CREATE,
exp.WithDataProperty: exp.Properties.Location.POST_EXPRESSION,
exp.WithJournalTableProperty: exp.Properties.Location.POST_NAME,
exp.WithSystemVersioningProperty: exp.Properties.Location.POST_SCHEMA,
}
# Keywords that can't be used as unquoted identifier names
RESERVED_KEYWORDS: t.Set[str] = set()
# Expressions whose comments are separated from them for better formatting
WITH_SEPARATED_COMMENTS: t.Tuple[t.Type[exp.Expression], ...] = (
exp.Create,
exp.Delete,
exp.Drop,
exp.From,
exp.Insert,
exp.Join,
exp.Select,
exp.Union,
exp.Update,
exp.Where,
exp.With,
)
# Expressions that should not have their comments generated in maybe_comment
EXCLUDE_COMMENTS: t.Tuple[t.Type[exp.Expression], ...] = (
exp.Binary,
exp.Union,
)
# Expressions that can remain unwrapped when appearing in the context of an INTERVAL
UNWRAPPED_INTERVAL_VALUES: t.Tuple[t.Type[exp.Expression], ...] = (
exp.Column,
exp.Literal,
exp.Neg,
exp.Paren,
)
PARAMETERIZABLE_TEXT_TYPES = {
exp.DataType.Type.NVARCHAR,
exp.DataType.Type.VARCHAR,
exp.DataType.Type.CHAR,
exp.DataType.Type.NCHAR,
}
# Expressions that need to have all CTEs under them bubbled up to them
EXPRESSIONS_WITHOUT_NESTED_CTES: t.Set[t.Type[exp.Expression]] = set()
SENTINEL_LINE_BREAK = "__SQLGLOT__LB__"
__slots__ = (
"pretty",
"identify",
"normalize",
"pad",
"_indent",
"normalize_functions",
"unsupported_level",
"max_unsupported",
"leading_comma",
"max_text_width",
"comments",
"dialect",
"unsupported_messages",
"_escaped_quote_end",
"_escaped_identifier_end",
)
def __init__(
    self,
    pretty: t.Optional[bool] = None,
    identify: str | bool = False,
    normalize: bool = False,
    pad: int = 2,
    indent: int = 2,
    normalize_functions: t.Optional[str | bool] = None,
    unsupported_level: ErrorLevel = ErrorLevel.WARN,
    max_unsupported: int = 3,
    leading_comma: bool = False,
    max_text_width: int = 80,
    comments: bool = True,
    dialect: DialectType = None,
):
    """Initialize generator state; see the class docstring for argument semantics."""
    # Imported lazily to avoid a circular import at module load time.
    import sqlglot
    from sqlglot.dialects import Dialect

    # Fall back to the library-wide pretty-print default when unspecified.
    self.pretty = pretty if pretty is not None else sqlglot.pretty
    self.identify = identify
    self.normalize = normalize
    self.pad = pad
    self._indent = indent
    self.unsupported_level = unsupported_level
    self.max_unsupported = max_unsupported
    self.leading_comma = leading_comma
    self.max_text_width = max_text_width
    self.comments = comments
    self.dialect = Dialect.get_or_raise(dialect)

    # This is both a Dialect property and a Generator argument, so we prioritize the latter
    self.normalize_functions = (
        self.dialect.NORMALIZE_FUNCTIONS if normalize_functions is None else normalize_functions
    )

    # Accumulates messages recorded via self.unsupported() during generation.
    self.unsupported_messages: t.List[str] = []
    # Precomputed escape sequences for closing quote/identifier delimiters.
    self._escaped_quote_end: str = (
        self.dialect.tokenizer_class.STRING_ESCAPES[0] + self.dialect.QUOTE_END
    )
    self._escaped_identifier_end: str = (
        self.dialect.tokenizer_class.IDENTIFIER_ESCAPES[0] + self.dialect.IDENTIFIER_END
    )
def generate(self, expression: exp.Expression, copy: bool = True) -> str:
    """
    Generates the SQL string corresponding to the given syntax tree.

    Args:
        expression: The syntax tree.
        copy: Whether to copy the expression. The generator performs mutations so
            it is safer to copy.

    Returns:
        The SQL string corresponding to `expression`.

    Raises:
        UnsupportedError: if `unsupported_level` is RAISE and unsupported
            constructs were encountered during generation.
    """
    if copy:
        expression = expression.copy()

    expression = self.preprocess(expression)

    # Reset per-call state; messages accumulate while self.sql() recurses below.
    self.unsupported_messages = []
    sql = self.sql(expression).strip()

    if self.pretty:
        # Sentinel line breaks are only materialized in pretty mode.
        sql = sql.replace(self.SENTINEL_LINE_BREAK, "\n")

    if self.unsupported_level == ErrorLevel.IGNORE:
        return sql

    if self.unsupported_level == ErrorLevel.WARN:
        for msg in self.unsupported_messages:
            logger.warning(msg)
    elif self.unsupported_level == ErrorLevel.RAISE and self.unsupported_messages:
        raise UnsupportedError(concat_messages(self.unsupported_messages, self.max_unsupported))

    return sql
def preprocess(self, expression: exp.Expression) -> exp.Expression:
    """Apply generic preprocessing transformations to a given expression."""
    needs_cte_hoisting = (
        not expression.parent
        and type(expression) in self.EXPRESSIONS_WITHOUT_NESTED_CTES
        and any(node.parent is not expression for node in expression.find_all(exp.With))
    )
    if needs_cte_hoisting:
        from sqlglot.transforms import move_ctes_to_top_level

        expression = move_ctes_to_top_level(expression)

    if self.ENSURE_BOOLS:
        from sqlglot.transforms import ensure_bools

        expression = ensure_bools(expression)

    return expression
def unsupported(self, message: str) -> None:
    """Record an unsupported-feature message, or raise immediately per the error level."""
    if self.unsupported_level == ErrorLevel.IMMEDIATE:
        raise UnsupportedError(message)

    self.unsupported_messages.append(message)
def sep(self, sep: str = " ") -> str:
return f"{sep.strip()}\n" if self.pretty else sep
def seg(self, sql: str, sep: str = " ") -> str:
return f"{self.sep(sep)}{sql}"
def pad_comment(self, comment: str) -> str:
    """Ensure `comment` has a single leading and trailing space for /* ... */ framing.

    Padding is only added next to a non-whitespace edge. An empty comment is
    returned unchanged — the previous version indexed `comment[0]` and raised
    IndexError on "".
    """
    if not comment:
        return comment
    if comment[0].strip():
        comment = " " + comment
    if comment[-1].strip():
        comment = comment + " "
    return comment
def maybe_comment(
    self,
    sql: str,
    expression: t.Optional[exp.Expression] = None,
    comments: t.Optional[t.List[str]] = None,
) -> str:
    """Attach `/* ... */` comments to `sql`, honoring the generator's comment settings.

    Explicit `comments` take precedence over the ones attached to `expression`.
    Returns `sql` unchanged when comments are disabled, absent, or excluded for
    this expression type (EXCLUDE_COMMENTS).
    """
    comments = (
        ((expression and expression.comments) if comments is None else comments)  # type: ignore
        if self.comments
        else None
    )

    if not comments or isinstance(expression, self.EXCLUDE_COMMENTS):
        return sql

    # Empty/falsy comment entries are skipped entirely.
    comments_sql = " ".join(
        f"/*{self.pad_comment(comment)}*/" for comment in comments if comment
    )

    if not comments_sql:
        return sql

    if isinstance(expression, self.WITH_SEPARATED_COMMENTS):
        # Put the comment on its own segment before the statement; if `sql`
        # already starts with whitespace, slot the comment after the separator.
        return (
            f"{self.sep()}{comments_sql}{sql}"
            if sql[0].isspace()
            else f"{comments_sql}{self.sep()}{sql}"
        )

    return f"{sql} {comments_sql}"
def wrap(self, expression: exp.Expression | str) -> str:
    """Wrap an expression in parentheses, indenting its body in pretty mode.

    Query nodes are rendered whole; other nodes render their `this` argument.
    """
    this_sql = self.indent(
        (
            self.sql(expression)
            if isinstance(expression, exp.UNWRAPPED_QUERIES)
            else self.sql(expression, "this")
        ),
        level=1,
        pad=0,
    )
    return f"({self.sep('')}{this_sql}{self.seg(')', sep='')}"
def no_identify(self, func: t.Callable[..., str], *args, **kwargs) -> str:
original = self.identify
self.identify = False
result = func(*args, **kwargs)
self.identify = original
return result
def normalize_func(self, name: str) -> str:
if self.normalize_functions == "upper" or self.normalize_functions is True:
return name.upper()
if self.normalize_functions == "lower":
return name.lower()
return name
def indent(
self,
sql: str,
level: int = 0,
pad: t.Optional[int] = None,
skip_first: bool = False,
skip_last: bool = False,
) -> str:
if not self.pretty:
return sql
pad = self.pad if pad is None else pad
lines = sql.split("\n")
return "\n".join(
(
line
if (skip_first and i == 0) or (skip_last and i == len(lines) - 1)
else f"{' ' * (level * self._indent + pad)}{line}"
)
for i, line in enumerate(lines)
)
def sql(
    self,
    expression: t.Optional[str | exp.Expression],
    key: t.Optional[str] = None,
    comment: bool = True,
) -> str:
    """Generate SQL for `expression` — the generator's central dispatch point.

    Dispatch order: a class entry in TRANSFORMS, then a `<key>_sql` method named
    after the expression's key, then the generic Func / Property fallbacks.

    Args:
        expression: An Expression node, an already-generated SQL string
            (returned as-is), or a falsy value (returns "").
        key: If given, generate SQL for `expression.args[key]` instead.
        comment: Whether attached comments may be appended to the result.

    Raises:
        ValueError: for non-Expression inputs or unsupported expression types.
    """
    if not expression:
        return ""

    if isinstance(expression, str):
        return expression

    if key:
        value = expression.args.get(key)
        if value:
            return self.sql(value)
        return ""

    transform = self.TRANSFORMS.get(expression.__class__)
    if callable(transform):
        sql = transform(self, expression)
    elif isinstance(expression, exp.Expression):
        exp_handler_name = f"{expression.key}_sql"

        if hasattr(self, exp_handler_name):
            sql = getattr(self, exp_handler_name)(expression)
        elif isinstance(expression, exp.Func):
            sql = self.function_fallback_sql(expression)
        elif isinstance(expression, exp.Property):
            sql = self.property_sql(expression)
        else:
            raise ValueError(f"Unsupported expression type {expression.__class__.__name__}")
    else:
        raise ValueError(f"Expected an Expression. Received {type(expression)}: {expression}")

    return self.maybe_comment(sql, expression) if self.comments and comment else sql
def uncache_sql(self, expression: exp.Uncache) -> str:
table = self.sql(expression, "this")
exists_sql = " IF EXISTS" if expression.args.get("exists") else ""
return f"UNCACHE TABLE{exists_sql} {table}"
def cache_sql(self, expression: exp.Cache) -> str:
    """Render a CACHE [LAZY] TABLE statement with optional OPTIONS and AS query."""
    lazy = " LAZY" if expression.args.get("lazy") else ""
    table = self.sql(expression, "this")

    options = expression.args.get("options")
    options_sql = ""
    if options:
        options_sql = f" OPTIONS({self.sql(options[0])} = {self.sql(options[1])})"

    query = self.sql(expression, "expression")
    if query:
        query = f" AS{self.sep()}{query}"

    statement = f"CACHE{lazy} TABLE {table}{options_sql}{query}"
    return self.prepend_ctes(expression, statement)
def characterset_sql(self, expression: exp.CharacterSet) -> str:
    """Render a character-set clause; inside a CAST the CHAR CHARACTER SET form is used."""
    if isinstance(expression.parent, exp.Cast):
        return f"CHAR CHARACTER SET {self.sql(expression, 'this')}"
    default = "DEFAULT " if expression.args.get("default") else ""
    return f"{default}CHARACTER SET={self.sql(expression, 'this')}"
def column_sql(self, expression: exp.Column) -> str:
join_mark = " (+)" if expression.args.get("join_mark") else ""
if join_mark and not self.COLUMN_JOIN_MARKS_SUPPORTED:
join_mark = ""
self.unsupported("Outer join syntax using the (+) operator is not supported.")
column = ".".join(
self.sql(part)
for part in (
expression.args.get("catalog"),
expression.args.get("db"),
expression.args.get("table"),
expression.args.get("this"),
)
if part
)
return f"{column}{join_mark}"
def columnposition_sql(self, expression: exp.ColumnPosition) -> str:
this = self.sql(expression, "this")
this = f" {this}" if this else ""
position = self.sql(expression, "position")
return f"{position}{this}"
def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str:
    """Render a column definition: name, data type, constraints, and position."""
    name = self.sql(expression, "this")
    data_type = self.sql(expression, "kind")
    constraints = self.expressions(expression, key="constraints", sep=" ", flat=True)
    exists = "IF NOT EXISTS " if expression.args.get("exists") else ""

    if data_type:
        data_type = f"{sep}{data_type}"
    if constraints:
        constraints = f" {constraints}"

    position = self.sql(expression, "position")
    if position:
        position = f" {position}"

    # Some dialects omit the explicit type on computed columns.
    if expression.find(exp.ComputedColumnConstraint) and not self.COMPUTED_COLUMN_WITH_TYPE:
        data_type = ""

    return f"{exists}{name}{data_type}{constraints}{position}"
def columnconstraint_sql(self, expression: exp.ColumnConstraint) -> str:
this = self.sql(expression, "this")
kind_sql = self.sql(expression, "kind").strip()
return f"CONSTRAINT {this} {kind_sql}" if this else kind_sql
def computedcolumnconstraint_sql(self, expression: exp.ComputedColumnConstraint) -> str:
this = self.sql(expression, "this")
if expression.args.get("not_null"):
persisted = " PERSISTED NOT NULL"
elif expression.args.get("persisted"):
persisted = " PERSISTED"
else:
persisted = ""
return f"AS {this}{persisted}"
def autoincrementcolumnconstraint_sql(self, _) -> str:
    """Render the dialect-specific AUTO_INCREMENT token."""
    return self.token_sql(TokenType.AUTO_INCREMENT)
def compresscolumnconstraint_sql(self, expression: exp.CompressColumnConstraint) -> str:
if isinstance(expression.this, list):
this = self.wrap(self.expressions(expression, key="this", flat=True))
else:
this = self.sql(expression, "this")
return f"COMPRESS {this}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render a ``GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY`` constraint.

    Sequence options (START WITH / INCREMENT BY / MINVALUE / MAXVALUE /
    [NO] CYCLE) are emitted in a trailing parenthesized group.
    """
    this = ""
    if expression.this is not None:
        # ON NULL only applies to the BY DEFAULT form.
        on_null = " ON NULL" if expression.args.get("on_null") else ""
        this = " ALWAYS" if expression.this else f" BY DEFAULT{on_null}"

    start = expression.args.get("start")
    start = f"START WITH {start}" if start else ""
    increment = expression.args.get("increment")
    increment = f" INCREMENT BY {increment}" if increment else ""
    minvalue = expression.args.get("minvalue")
    minvalue = f" MINVALUE {minvalue}" if minvalue else ""
    maxvalue = expression.args.get("maxvalue")
    maxvalue = f" MAXVALUE {maxvalue}" if maxvalue else ""
    cycle = expression.args.get("cycle")
    cycle_sql = ""

    if cycle is not None:
        cycle_sql = f"{' NO' if not cycle else ''} CYCLE"
        # Strip the leading space only when CYCLE is the first option emitted;
        # previously this ignored MINVALUE/MAXVALUE, gluing "MINVALUE 5CYCLE".
        if not (start or increment or minvalue or maxvalue):
            cycle_sql = cycle_sql.strip()

    sequence_opts = ""
    # Bug fix: MINVALUE/MAXVALUE used to be silently dropped unless a start,
    # increment or cycle option was also present.
    if start or increment or minvalue or maxvalue or cycle_sql:
        sequence_opts = f"{start}{increment}{minvalue}{maxvalue}{cycle_sql}"
        sequence_opts = f" ({sequence_opts.strip()})"

    expr = self.sql(expression, "expression")
    expr = f"({expr})" if expr else "IDENTITY"

    return f"GENERATED{this} AS {expr}{sequence_opts}"
def generatedasrowcolumnconstraint_sql(
    self, expression: exp.GeneratedAsRowColumnConstraint
) -> str:
    """Render a system-versioning row START/END generated column."""
    boundary = "START" if expression.args.get("start") else "END"
    suffix = " HIDDEN" if expression.args.get("hidden") else ""
    return f"GENERATED ALWAYS AS ROW {boundary}{suffix}"
def periodforsystemtimeconstraint_sql(
    self, expression: exp.PeriodForSystemTimeConstraint
) -> str:
    """Render a T-SQL ``PERIOD FOR SYSTEM_TIME (start_col, end_col)`` clause."""
    return f"PERIOD FOR SYSTEM_TIME ({self.sql(expression, 'this')}, {self.sql(expression, 'expression')})"
def notnullcolumnconstraint_sql(self, expression: exp.NotNullColumnConstraint) -> str:
    """Render ``NULL`` or ``NOT NULL`` depending on the ``allow_null`` flag."""
    if expression.args.get("allow_null"):
        return "NULL"
    return "NOT NULL"
def transformcolumnconstraint_sql(self, expression: exp.TransformColumnConstraint) -> str:
    """Render a transform/computed-column expression as ``AS <expr>``."""
    return f"AS {self.sql(expression, 'this')}"
def primarykeycolumnconstraint_sql(self, expression: exp.PrimaryKeyColumnConstraint) -> str:
    """Render PRIMARY KEY, with ASC/DESC only when the flag is explicitly set."""
    desc = expression.args.get("desc")
    if desc is None:
        return "PRIMARY KEY"
    return "PRIMARY KEY DESC" if desc else "PRIMARY KEY ASC"
def uniquecolumnconstraint_sql(self, expression: exp.UniqueColumnConstraint) -> str:
    """Render a UNIQUE constraint with optional column list, index type
    (``USING ...``) and ON CONFLICT clause."""
    this = self.sql(expression, "this")
    this = f" {this}" if this else ""
    index_type = expression.args.get("index_type")
    index_type = f" USING {index_type}" if index_type else ""
    on_conflict = self.sql(expression, "on_conflict")
    on_conflict = f" {on_conflict}" if on_conflict else ""
    return f"UNIQUE{this}{index_type}{on_conflict}"
def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
    """Render the object being created; dialects may override to use
    ``locations`` (property buckets) when building the target."""
    return self.sql(expression, "this")
def create_sql(self, expression: exp.Create) -> str:
    """Render a CREATE statement.

    Properties are bucketed by location (POST_CREATE, POST_SCHEMA, POST_WITH,
    POST_ALIAS, POST_INDEX, POST_EXPRESSION) and spliced into the statement
    at their respective positions.
    """
    kind = self.sql(expression, "kind")
    properties = expression.args.get("properties")
    properties_locs = self.locate_properties(properties) if properties else defaultdict()

    this = self.createable_sql(expression, properties_locs)

    properties_sql = ""
    if properties_locs.get(exp.Properties.Location.POST_SCHEMA) or properties_locs.get(
        exp.Properties.Location.POST_WITH
    ):
        properties_sql = self.sql(
            exp.Properties(
                expressions=[
                    *properties_locs[exp.Properties.Location.POST_SCHEMA],
                    *properties_locs[exp.Properties.Location.POST_WITH],
                ]
            )
        )

    begin = " BEGIN" if expression.args.get("begin") else ""
    end = " END" if expression.args.get("end") else ""

    expression_sql = self.sql(expression, "expression")
    if expression_sql:
        expression_sql = f"{begin}{self.sep()}{expression_sql}{end}"

        # Functions returning via RETURN may omit the AS keyword in some dialects.
        if self.CREATE_FUNCTION_RETURN_AS or not isinstance(expression.expression, exp.Return):
            if properties_locs.get(exp.Properties.Location.POST_ALIAS):
                postalias_props_sql = self.properties(
                    exp.Properties(
                        expressions=properties_locs[exp.Properties.Location.POST_ALIAS]
                    ),
                    wrapped=False,
                )
                expression_sql = f" AS {postalias_props_sql}{expression_sql}"
            else:
                expression_sql = f" AS{expression_sql}"

    postindex_props_sql = ""
    if properties_locs.get(exp.Properties.Location.POST_INDEX):
        postindex_props_sql = self.properties(
            exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_INDEX]),
            wrapped=False,
            prefix=" ",
        )

    indexes = self.expressions(expression, key="indexes", indent=False, sep=" ")
    indexes = f" {indexes}" if indexes else ""
    index_sql = indexes + postindex_props_sql

    replace = " OR REPLACE" if expression.args.get("replace") else ""
    unique = " UNIQUE" if expression.args.get("unique") else ""

    postcreate_props_sql = ""
    if properties_locs.get(exp.Properties.Location.POST_CREATE):
        postcreate_props_sql = self.properties(
            exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_CREATE]),
            sep=" ",
            prefix=" ",
            wrapped=False,
        )

    modifiers = "".join((replace, unique, postcreate_props_sql))

    postexpression_props_sql = ""
    if properties_locs.get(exp.Properties.Location.POST_EXPRESSION):
        postexpression_props_sql = self.properties(
            exp.Properties(
                expressions=properties_locs[exp.Properties.Location.POST_EXPRESSION]
            ),
            sep=" ",
            prefix=" ",
            wrapped=False,
        )

    exists_sql = " IF NOT EXISTS" if expression.args.get("exists") else ""
    no_schema_binding = (
        " WITH NO SCHEMA BINDING" if expression.args.get("no_schema_binding") else ""
    )

    clone = self.sql(expression, "clone")
    clone = f" {clone}" if clone else ""

    expression_sql = f"CREATE{modifiers} {kind}{exists_sql} {this}{properties_sql}{expression_sql}{postexpression_props_sql}{index_sql}{no_schema_binding}{clone}"
    return self.prepend_ctes(expression, expression_sql)
def sequenceproperties_sql(self, expression: exp.SequenceProperties) -> str:
    """Render CREATE SEQUENCE options (START WITH, INCREMENT BY, MINVALUE,
    MAXVALUE, CACHE, extra options, OWNED BY)."""
    start = self.sql(expression, "start")
    start = f"START WITH {start}" if start else ""
    increment = self.sql(expression, "increment")
    increment = f" INCREMENT BY {increment}" if increment else ""
    minvalue = self.sql(expression, "minvalue")
    minvalue = f" MINVALUE {minvalue}" if minvalue else ""
    maxvalue = self.sql(expression, "maxvalue")
    maxvalue = f" MAXVALUE {maxvalue}" if maxvalue else ""
    owned = self.sql(expression, "owned")
    owned = f" OWNED BY {owned}" if owned else ""

    # cache is three-state: None -> omitted, True -> bare CACHE, value -> CACHE n.
    cache = expression.args.get("cache")
    if cache is None:
        cache_str = ""
    elif cache is True:
        cache_str = " CACHE"
    else:
        cache_str = f" CACHE {cache}"

    options = self.expressions(expression, key="options", flat=True, sep=" ")
    options = f" {options}" if options else ""

    return f"{start}{increment}{minvalue}{maxvalue}{cache_str}{options}{owned}".lstrip()
def clone_sql(self, expression: exp.Clone) -> str:
    """Render a CLONE/COPY clause of a CREATE statement."""
    use_copy = expression.args.get("copy") and self.SUPPORTS_TABLE_COPY
    keyword = "COPY" if use_copy else "CLONE"
    prefix = "SHALLOW " if expression.args.get("shallow") else ""
    return f"{prefix}{keyword} {self.sql(expression, 'this')}"
def describe_sql(self, expression: exp.Describe) -> str:
    """Render a DESCRIBE statement, honoring the EXTENDED flag."""
    keyword = "DESCRIBE EXTENDED" if expression.args.get("extended") else "DESCRIBE"
    return f"{keyword} {self.sql(expression, 'this')}"
def heredoc_sql(self, expression: exp.Heredoc) -> str:
    """Render a dollar-quoted (heredoc) string, e.g. ``$tag$body$tag$``."""
    tag = self.sql(expression, "tag")
    return f"${tag}${self.sql(expression, 'this')}${tag}$"
def prepend_ctes(self, expression: exp.Expression, sql: str) -> str:
    """Prefix ``sql`` with the expression's WITH clause, if it has one."""
    with_sql = self.sql(expression, "with")
    if not with_sql:
        return sql
    return f"{with_sql}{self.sep()}{sql}"
def with_sql(self, expression: exp.With) -> str:
    """Render a WITH clause, adding RECURSIVE when the dialect requires it."""
    ctes = self.expressions(expression, flat=True)
    recursive = ""
    if self.CTE_RECURSIVE_KEYWORD_REQUIRED and expression.args.get("recursive"):
        recursive = "RECURSIVE "
    return f"WITH {recursive}{ctes}"
def cte_sql(self, expression: exp.CTE) -> str:
    """Render a single CTE: ``alias AS [NOT MATERIALIZED|MATERIALIZED] (query)``."""
    alias = self.sql(expression, "alias")

    # Three-state flag: False -> NOT MATERIALIZED, truthy -> MATERIALIZED,
    # None -> no keyword at all.
    materialized = expression.args.get("materialized")
    if materialized is False:
        materialized = "NOT MATERIALIZED "
    elif materialized:
        materialized = "MATERIALIZED "

    return f"{alias} AS {materialized or ''}{self.wrap(expression)}"
def tablealias_sql(self, expression: exp.TableAlias) -> str:
    """Render a table alias, optionally with named alias columns when the
    dialect supports them."""
    alias = self.sql(expression, "this")
    columns = self.expressions(expression, key="columns", flat=True)
    columns = f"({columns})" if columns else ""

    if columns and not self.SUPPORTS_TABLE_ALIAS_COLUMNS:
        columns = ""
        self.unsupported("Named columns are not supported in table alias.")

    if not alias and not self.dialect.UNNEST_COLUMN_ONLY:
        # Fall back to a placeholder alias name when one is required.
        alias = "_t"

    return f"{alias}{columns}"
def bitstring_sql(self, expression: exp.BitString) -> str:
    """Render a bit-string literal; fall back to its integer value when the
    dialect has no bit-string syntax."""
    this = self.sql(expression, "this")
    start = self.dialect.BIT_START
    if not start:
        return f"{int(this, 2)}"
    return f"{start}{this}{self.dialect.BIT_END}"
def hexstring_sql(self, expression: exp.HexString) -> str:
    """Render a hex-string literal; fall back to its integer value when the
    dialect has no hex-string syntax."""
    this = self.sql(expression, "this")
    start = self.dialect.HEX_START
    if not start:
        return f"{int(this, 16)}"
    return f"{start}{this}{self.dialect.HEX_END}"
def bytestring_sql(self, expression: exp.ByteString) -> str:
    """Render a byte-string literal; emit the raw text when the dialect has
    no byte-string syntax."""
    this = self.sql(expression, "this")
    start = self.dialect.BYTE_START
    if not start:
        return this
    return f"{start}{this}{self.dialect.BYTE_END}"
def unicodestring_sql(self, expression: exp.UnicodeString) -> str:
    """Render a Unicode string literal (e.g. ``U&'...'``), or rewrite its
    escapes into ``\\uXXXX`` form for dialects without that syntax."""
    this = self.sql(expression, "this")
    escape = expression.args.get("escape")

    if self.dialect.UNICODE_START:
        escape = f" UESCAPE {self.sql(escape)}" if escape else ""
        return f"{self.dialect.UNICODE_START}{this}{self.dialect.UNICODE_END}{escape}"

    if escape:
        # A custom escape character was declared; match it instead of the default.
        pattern = re.compile(rf"{escape.name}(\d+)")
    else:
        pattern = ESCAPED_UNICODE_RE

    this = pattern.sub(r"\\u\1", this)
    return f"{self.dialect.QUOTE_START}{this}{self.dialect.QUOTE_END}"
def rawstring_sql(self, expression: exp.RawString) -> str:
    """Render a raw string as a regular quoted literal, doubling backslashes
    before applying the dialect's escaping."""
    string = self.escape_str(expression.this.replace("\\", "\\\\"))
    return f"{self.dialect.QUOTE_START}{string}{self.dialect.QUOTE_END}"
def datatypeparam_sql(self, expression: exp.DataTypeParam) -> str:
    """Render a data type parameter with an optional trailing specifier."""
    this = self.sql(expression, "this")
    specifier = self.sql(expression, "expression")
    if specifier and self.DATA_TYPE_SPECIFIERS_ALLOWED:
        return f"{this} {specifier}"
    return this
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type, handling user-defined types, nested/struct types,
    ARRAY value lists, INTERVAL qualifiers and WITH TIME ZONE rewrites."""
    type_value = expression.this

    if type_value == exp.DataType.Type.USERDEFINED and expression.args.get("kind"):
        type_sql = self.sql(expression, "kind")
    else:
        type_sql = (
            self.TYPE_MAPPING.get(type_value, type_value.value)
            if isinstance(type_value, exp.DataType.Type)
            else type_value
        )

    nested = ""
    interior = self.expressions(expression, flat=True)
    values = ""

    if interior:
        if expression.args.get("nested"):
            nested = f"{self.STRUCT_DELIMITER[0]}{interior}{self.STRUCT_DELIMITER[1]}"

            if expression.args.get("values") is not None:
                delimiters = ("[", "]") if type_value == exp.DataType.Type.ARRAY else ("(", ")")
                values = self.expressions(expression, key="values", flat=True)
                values = f"{delimiters[0]}{values}{delimiters[1]}"
        elif type_value == exp.DataType.Type.INTERVAL:
            nested = f" {interior}"
        else:
            nested = f"({interior})"

    type_sql = f"{type_sql}{nested}{values}"

    # Rewrite TIMETZ/TIMESTAMPTZ into the verbose WITH TIME ZONE form.
    if self.TZ_TO_WITH_TIME_ZONE and type_value in (
        exp.DataType.Type.TIMETZ,
        exp.DataType.Type.TIMESTAMPTZ,
    ):
        type_sql = f"{type_sql} WITH TIME ZONE"

    return type_sql
def directory_sql(self, expression: exp.Directory) -> str:
    """Render an ``INSERT ... [LOCAL] DIRECTORY`` target."""
    local = "LOCAL " if expression.args.get("local") else ""
    row_format = self.sql(expression, "row_format")
    if row_format:
        row_format = f" {row_format}"
    return f"{local}DIRECTORY {self.sql(expression, 'this')}{row_format}"
def delete_sql(self, expression: exp.Delete) -> str:
    """Render a DELETE statement; RETURNING placement depends on the dialect
    (end vs. directly after DELETE)."""
    this = self.sql(expression, "this")
    this = f" FROM {this}" if this else ""
    using = self.sql(expression, "using")
    using = f" USING {using}" if using else ""
    where = self.sql(expression, "where")
    returning = self.sql(expression, "returning")
    limit = self.sql(expression, "limit")
    tables = self.expressions(expression, key="tables")
    tables = f" {tables}" if tables else ""
    if self.RETURNING_END:
        expression_sql = f"{this}{using}{where}{returning}{limit}"
    else:
        expression_sql = f"{returning}{this}{using}{where}{limit}"
    return self.prepend_ctes(expression, f"DELETE{tables}{expression_sql}")
def drop_sql(self, expression: exp.Drop) -> str:
    """Render a DROP statement with its optional modifiers (TEMPORARY,
    MATERIALIZED, IF EXISTS, CASCADE, CONSTRAINTS, PURGE)."""
    this = self.sql(expression, "this")
    expressions = self.expressions(expression, flat=True)
    expressions = f" ({expressions})" if expressions else ""
    kind = expression.args["kind"]
    # Trailing space is intentional: it separates the kind from the target.
    exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
    temporary = " TEMPORARY" if expression.args.get("temporary") else ""
    materialized = " MATERIALIZED" if expression.args.get("materialized") else ""
    cascade = " CASCADE" if expression.args.get("cascade") else ""
    constraints = " CONSTRAINTS" if expression.args.get("constraints") else ""
    purge = " PURGE" if expression.args.get("purge") else ""
    return f"DROP{temporary}{materialized} {kind}{exists_sql}{this}{expressions}{cascade}{constraints}{purge}"
def except_sql(self, expression: exp.Except) -> str:
    """Render an EXCEPT set operation via the shared set-operation logic."""
    return self.set_operations(expression)
def except_op(self, expression: exp.Except) -> str:
    """Render the EXCEPT operator keyword (EXCEPT ALL when not distinct)."""
    if expression.args.get("distinct"):
        return "EXCEPT"
    return "EXCEPT ALL"
def fetch_sql(self, expression: exp.Fetch) -> str:
    """Render a FETCH clause: ``FETCH {FIRST|NEXT} n [PERCENT] ROWS
    {ONLY|WITH TIES}``."""
    direction = expression.args.get("direction")
    direction = f" {direction}" if direction else ""
    count = expression.args.get("count")
    count = f" {count}" if count else ""
    if expression.args.get("percent"):
        count = f"{count} PERCENT"
    with_ties_or_only = "WITH TIES" if expression.args.get("with_ties") else "ONLY"
    return f"{self.seg('FETCH')}{direction}{count} ROWS {with_ties_or_only}"
def filter_sql(self, expression: exp.Filter) -> str:
    """Render an aggregate FILTER clause, or rewrite it into a conditional
    aggregate (``agg(IF(cond, arg))``) for dialects without FILTER."""
    if self.AGGREGATE_FILTER_SUPPORTED:
        this = self.sql(expression, "this")
        where = self.sql(expression, "expression").strip()
        return f"{this} FILTER({where})"

    # Fallback: mutate a copy of the aggregate's argument into IF(cond, arg).
    agg = expression.this
    agg_arg = agg.this
    cond = expression.expression.this
    agg_arg.replace(exp.If(this=cond.copy(), true=agg_arg.copy()))
    return self.sql(agg)
def hint_sql(self, expression: exp.Hint) -> str:
    """Render an optimizer hint comment; warn and emit nothing when the
    dialect has no hint syntax."""
    if not self.QUERY_HINTS:
        self.unsupported("Hints are not supported")
        return ""
    hints = self.expressions(expression, sep=self.QUERY_HINT_SEP).strip()
    return f" /*+ {hints} */"
def indexparameters_sql(self, expression: exp.IndexParameters) -> str:
    """Render CREATE INDEX parameters: USING method, column list, INCLUDE,
    WITH storage options, tablespace, PARTITION BY and WHERE predicate."""
    using = self.sql(expression, "using")
    using = f" USING {using}" if using else ""
    columns = self.expressions(expression, key="columns", flat=True)
    columns = f"({columns})" if columns else ""
    partition_by = self.expressions(expression, key="partition_by", flat=True)
    partition_by = f" PARTITION BY {partition_by}" if partition_by else ""
    where = self.sql(expression, "where")
    include = self.expressions(expression, key="include", flat=True)
    if include:
        include = f" INCLUDE ({include})"
    with_storage = self.expressions(expression, key="with_storage", flat=True)
    with_storage = f" WITH ({with_storage})" if with_storage else ""
    tablespace = self.sql(expression, "tablespace")
    tablespace = f" USING INDEX TABLESPACE {tablespace}" if tablespace else ""

    return f"{using}{columns}{include}{with_storage}{tablespace}{partition_by}{where}"
def index_sql(self, expression: exp.Index) -> str:
    """Render an index reference/definition, e.g. ``UNIQUE INDEX name ON tbl (...)``."""
    unique = "UNIQUE " if expression.args.get("unique") else ""
    primary = "PRIMARY " if expression.args.get("primary") else ""
    amp = "AMP " if expression.args.get("amp") else ""
    name = self.sql(expression, "this")
    name = f"{name} " if name else ""
    table = self.sql(expression, "table")
    table = f"{self.INDEX_ON} {table}" if table else ""

    # The INDEX keyword is only needed when there is no ON-table clause.
    index = "INDEX " if not table else ""

    params = self.sql(expression, "params")
    return f"{unique}{primary}{amp}{index}{name}{table}{params}"
def identifier_sql(self, expression: exp.Identifier) -> str:
    """Render an identifier, normalizing case and quoting when needed
    (explicit quotes, reserved keywords, or a leading digit)."""
    text = expression.name
    lower = text.lower()
    text = lower if self.normalize and not expression.quoted else text
    text = text.replace(self.dialect.IDENTIFIER_END, self._escaped_identifier_end)
    if (
        expression.quoted
        or self.dialect.can_identify(text, self.identify)
        or lower in self.RESERVED_KEYWORDS
        or (not self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT and text[:1].isdigit())
    ):
        text = f"{self.dialect.IDENTIFIER_START}{text}{self.dialect.IDENTIFIER_END}"
    return text
def inputoutputformat_sql(self, expression: exp.InputOutputFormat) -> str:
    """Render Hive INPUTFORMAT/OUTPUTFORMAT clauses."""
    parts = []
    for key, keyword in (("input_format", "INPUTFORMAT"), ("output_format", "OUTPUTFORMAT")):
        fmt = self.sql(expression, key)
        parts.append(f"{keyword} {fmt}" if fmt else "")
    return self.sep().join(parts)
def national_sql(self, expression: exp.National, prefix: str = "N") -> str:
    """Render a national (NCHAR) string literal, e.g. ``N'...'``."""
    string = self.sql(exp.Literal.string(expression.name))
    return f"{prefix}{string}"
def partition_sql(self, expression: exp.Partition) -> str:
    """Render a PARTITION specification, e.g. ``PARTITION(a=1, b)``."""
    return f"PARTITION({self.expressions(expression, flat=True)})"
def properties_sql(self, expression: exp.Properties) -> str:
    """Render properties, splitting them into root (POST_SCHEMA) and
    WITH-clause (POST_WITH) groups."""
    root_properties = []
    with_properties = []

    for p in expression.expressions:
        p_loc = self.PROPERTIES_LOCATION[p.__class__]
        if p_loc == exp.Properties.Location.POST_WITH:
            with_properties.append(p)
        elif p_loc == exp.Properties.Location.POST_SCHEMA:
            root_properties.append(p)

    return self.root_properties(
        exp.Properties(expressions=root_properties)
    ) + self.with_properties(exp.Properties(expressions=with_properties))
def root_properties(self, properties: exp.Properties) -> str:
    """Render space-separated root properties on a new line, or nothing."""
    if not properties.expressions:
        return ""
    return self.sep() + self.expressions(properties, indent=False, sep=" ")
def properties(
    self,
    properties: exp.Properties,
    prefix: str = "",
    sep: str = ", ",
    suffix: str = "",
    wrapped: bool = True,
) -> str:
    """Render a property list with optional prefix/suffix, separator and
    parenthesization; returns "" when there are no properties."""
    if properties.expressions:
        expressions = self.expressions(properties, sep=sep, indent=False)
        if expressions:
            expressions = self.wrap(expressions) if wrapped else expressions
            return f"{prefix}{' ' if prefix.strip() else ''}{expressions}{suffix}"
    return ""
def with_properties(self, properties: exp.Properties) -> str:
    """Render properties as a ``WITH (...)`` clause."""
    return self.properties(properties, prefix=self.seg("WITH"))
def locate_properties(self, properties: exp.Properties) -> t.DefaultDict:
    """Bucket properties by generator location, warning on unsupported ones."""
    buckets: t.DefaultDict = defaultdict(list)
    for prop in properties.expressions:
        location = self.PROPERTIES_LOCATION[prop.__class__]
        if location == exp.Properties.Location.UNSUPPORTED:
            self.unsupported(f"Unsupported property {prop.key}")
        else:
            buckets[location].append(prop)
    return buckets
def property_name(self, expression: exp.Property, string_key: bool = False) -> str:
    """Render a property's name; dotted names render as-is, plain names may
    be single-quoted when ``string_key`` is set."""
    if isinstance(expression.this, exp.Dot):
        return self.sql(expression, "this")
    return f"'{expression.name}'" if string_key else expression.name
def property_sql(self, expression: exp.Property) -> str:
    """Render a ``name=value`` property; named subclasses use their canonical
    keyword from PROPERTY_TO_NAME."""
    property_cls = expression.__class__
    if property_cls == exp.Property:
        return f"{self.property_name(expression)}={self.sql(expression, 'value')}"

    property_name = exp.Properties.PROPERTY_TO_NAME.get(property_cls)
    if not property_name:
        self.unsupported(f"Unsupported property {expression.key}")

    return f"{property_name}={self.sql(expression, 'this')}"
def likeproperty_sql(self, expression: exp.LikeProperty) -> str:
    """Render ``CREATE TABLE ... LIKE``; dialects without it get an empty
    ``AS SELECT * ... LIMIT 0`` equivalent (options are dropped)."""
    if self.SUPPORTS_CREATE_TABLE_LIKE:
        options = " ".join(f"{e.name} {self.sql(e, 'value')}" for e in expression.expressions)
        options = f" {options}" if options else ""

        like = f"LIKE {self.sql(expression, 'this')}{options}"
        if self.LIKE_PROPERTY_INSIDE_SCHEMA and not isinstance(expression.parent, exp.Schema):
            like = f"({like})"

        return like

    if expression.expressions:
        self.unsupported("Transpilation of LIKE property options is unsupported")

    select = exp.select("*").from_(expression.this).limit(0)
    return f"AS {self.sql(select)}"
def fallbackproperty_sql(self, expression: exp.FallbackProperty) -> str:
    """Render a Teradata ``[NO] FALLBACK [PROTECTION]`` property."""
    prefix = "NO " if expression.args.get("no") else ""
    suffix = " PROTECTION" if expression.args.get("protection") else ""
    return f"{prefix}FALLBACK{suffix}"
def journalproperty_sql(self, expression: exp.JournalProperty) -> str:
    """Render a Teradata JOURNAL property with its modifier keywords."""
    no = "NO " if expression.args.get("no") else ""
    local = expression.args.get("local")
    local = f"{local} " if local else ""
    dual = "DUAL " if expression.args.get("dual") else ""
    before = "BEFORE " if expression.args.get("before") else ""
    after = "AFTER " if expression.args.get("after") else ""
    return f"{no}{local}{dual}{before}{after}JOURNAL"
def freespaceproperty_sql(self, expression: exp.FreespaceProperty) -> str:
    """Render a Teradata ``FREESPACE=n [PERCENT]`` property."""
    percent = " PERCENT" if expression.args.get("percent") else ""
    return f"FREESPACE={self.sql(expression, 'this')}{percent}"
def checksumproperty_sql(self, expression: exp.ChecksumProperty) -> str:
    """Render a ``CHECKSUM={DEFAULT|ON|OFF}`` property."""
    if expression.args.get("default"):
        value = "DEFAULT"
    else:
        value = "ON" if expression.args.get("on") else "OFF"
    return f"CHECKSUM={value}"
def mergeblockratioproperty_sql(self, expression: exp.MergeBlockRatioProperty) -> str:
    """Render a Teradata MERGEBLOCKRATIO property (NO/DEFAULT/value forms)."""
    if expression.args.get("no"):
        return "NO MERGEBLOCKRATIO"
    if expression.args.get("default"):
        return "DEFAULT MERGEBLOCKRATIO"

    percent = " PERCENT" if expression.args.get("percent") else ""
    return f"MERGEBLOCKRATIO={self.sql(expression, 'this')}{percent}"
def datablocksizeproperty_sql(self, expression: exp.DataBlocksizeProperty) -> str:
    """Render a Teradata DATABLOCKSIZE property: keyword form
    (DEFAULT/MINIMUM/MAXIMUM) or an explicit size with optional units."""
    default = expression.args.get("default")
    minimum = expression.args.get("minimum")
    maximum = expression.args.get("maximum")
    if default or minimum or maximum:
        if default:
            prop = "DEFAULT"
        elif minimum:
            prop = "MINIMUM"
        else:
            prop = "MAXIMUM"
        return f"{prop} DATABLOCKSIZE"
    units = expression.args.get("units")
    units = f" {units}" if units else ""
    return f"DATABLOCKSIZE={self.sql(expression, 'size')}{units}"
def blockcompressionproperty_sql(self, expression: exp.BlockCompressionProperty) -> str:
    """Render a Teradata BLOCKCOMPRESSION property.

    Exactly one of the mutually exclusive option flags is expected. If none
    is set we now fall back to an empty value instead of raising
    ``NameError`` — the previous code left ``prop`` unbound in that case.
    """
    autotemp = expression.args.get("autotemp")

    if autotemp is not None:
        prop = f"AUTOTEMP({self.expressions(autotemp)})"
    elif expression.args.get("always"):
        prop = "ALWAYS"
    elif expression.args.get("default"):
        prop = "DEFAULT"
    elif expression.args.get("manual"):
        prop = "MANUAL"
    elif expression.args.get("never"):
        prop = "NEVER"
    else:
        # Defensive default: a malformed node no longer crashes generation.
        prop = ""

    return f"BLOCKCOMPRESSION={prop}"
def isolatedloadingproperty_sql(self, expression: exp.IsolatedLoadingProperty) -> str:
    """Render a Teradata ``WITH [NO] [CONCURRENT] ISOLATED LOADING`` property."""
    no = " NO" if expression.args.get("no") else ""
    concurrent = " CONCURRENT" if expression.args.get("concurrent") else ""

    target = ""
    for arg, keyword in (
        ("for_all", " FOR ALL"),
        ("for_insert", " FOR INSERT"),
        ("for_none", " FOR NONE"),
    ):
        if expression.args.get(arg):
            target = keyword
            break

    return f"WITH{no}{concurrent} ISOLATED LOADING{target}"
def partitionboundspec_sql(self, expression: exp.PartitionBoundSpec) -> str:
    """Render a Postgres partition bound: list (IN), hash (MODULUS/REMAINDER)
    or range (FROM ... TO ...) form."""
    if isinstance(expression.this, list):
        return f"IN ({self.expressions(expression, key='this', flat=True)})"
    if expression.this:
        modulus = self.sql(expression, "this")
        remainder = self.sql(expression, "expression")
        return f"WITH (MODULUS {modulus}, REMAINDER {remainder})"

    from_expressions = self.expressions(expression, key="from_expressions", flat=True)
    to_expressions = self.expressions(expression, key="to_expressions", flat=True)
    return f"FROM ({from_expressions}) TO ({to_expressions})"
def partitionedofproperty_sql(self, expression: exp.PartitionedOfProperty) -> str:
    """Render a Postgres ``PARTITION OF parent {FOR VALUES ...|DEFAULT}`` clause."""
    parent = self.sql(expression, "this")
    bound = expression.expression
    if isinstance(bound, exp.PartitionBoundSpec):
        suffix = f" FOR VALUES {self.sql(bound)}"
    else:
        suffix = " DEFAULT"
    return f"PARTITION OF {parent}{suffix}"
def lockingproperty_sql(self, expression: exp.LockingProperty) -> str:
    """Render a Teradata LOCKING modifier, e.g. ``LOCKING TABLE t FOR READ``."""
    kind = expression.args.get("kind")
    this = f" {self.sql(expression, 'this')}" if expression.this else ""
    for_or_in = expression.args.get("for_or_in")
    for_or_in = f" {for_or_in}" if for_or_in else ""
    lock_type = expression.args.get("lock_type")
    override = " OVERRIDE" if expression.args.get("override") else ""
    return f"LOCKING {kind}{this}{for_or_in} {lock_type}{override}"
def withdataproperty_sql(self, expression: exp.WithDataProperty) -> str:
    """Render ``WITH [NO] DATA [AND [NO] STATISTICS]``."""
    no = "NO " if expression.args.get("no") else ""
    statistics = expression.args.get("statistics")
    suffix = ""
    if statistics is not None:
        suffix = f" AND {'' if statistics else 'NO '}STATISTICS"
    return f"WITH {no}DATA{suffix}"
def withsystemversioningproperty_sql(self, expression: exp.WithSystemVersioningProperty) -> str:
    """Render a T-SQL ``WITH(SYSTEM_VERSIONING=ON(...))`` property, with an
    optional history table and data consistency check."""
    sql = "WITH(SYSTEM_VERSIONING=ON"

    if expression.this:
        history_table = self.sql(expression, "this")
        sql = f"{sql}(HISTORY_TABLE={history_table}"

        if expression.expression:
            data_consistency_check = self.sql(expression, "expression")
            sql = f"{sql}, DATA_CONSISTENCY_CHECK={data_consistency_check}"

        # Close the inner HISTORY_TABLE group.
        sql = f"{sql})"

    return f"{sql})"
def insert_sql(self, expression: exp.Insert) -> str:
    """Render an INSERT statement, including OVERWRITE, partition, ON CONFLICT
    and RETURNING clauses (RETURNING placement is dialect-dependent)."""
    hint = self.sql(expression, "hint")
    overwrite = expression.args.get("overwrite")

    if isinstance(expression.this, exp.Directory):
        this = " OVERWRITE" if overwrite else " INTO"
    else:
        this = self.INSERT_OVERWRITE if overwrite else " INTO"

    alternative = expression.args.get("alternative")
    alternative = f" OR {alternative}" if alternative else ""
    ignore = " IGNORE" if expression.args.get("ignore") else ""
    is_function = expression.args.get("is_function")
    if is_function:
        this = f"{this} FUNCTION"
    this = f"{this} {self.sql(expression, 'this')}"

    exists = " IF EXISTS" if expression.args.get("exists") else ""
    partition_sql = (
        f" {self.sql(expression, 'partition')}" if expression.args.get("partition") else ""
    )
    where = self.sql(expression, "where")
    where = f"{self.sep()}REPLACE WHERE {where}" if where else ""
    expression_sql = f"{self.sep()}{self.sql(expression, 'expression')}"
    on_conflict = self.sql(expression, "conflict")
    on_conflict = f" {on_conflict}" if on_conflict else ""
    by_name = " BY NAME" if expression.args.get("by_name") else ""
    returning = self.sql(expression, "returning")

    if self.RETURNING_END:
        expression_sql = f"{expression_sql}{on_conflict}{returning}"
    else:
        expression_sql = f"{returning}{expression_sql}{on_conflict}"

    sql = f"INSERT{hint}{alternative}{ignore}{this}{by_name}{exists}{partition_sql}{where}{expression_sql}"
    return self.prepend_ctes(expression, sql)
def intersect_sql(self, expression: exp.Intersect) -> str:
    """Render an INTERSECT set operation via the shared set-operation logic."""
    return self.set_operations(expression)
def intersect_op(self, expression: exp.Intersect) -> str:
    """Render the INTERSECT operator keyword (INTERSECT ALL when not distinct)."""
    if expression.args.get("distinct"):
        return "INTERSECT"
    return "INTERSECT ALL"
def introducer_sql(self, expression: exp.Introducer) -> str:
    """Render a charset introducer followed by its literal, e.g. ``_utf8'x'``."""
    return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
def kill_sql(self, expression: exp.Kill) -> str:
    """Render a KILL statement with an optional kind and target id."""
    parts = ["KILL"]
    for key in ("kind", "this"):
        value = self.sql(expression, key)
        if value:
            parts.append(value)
    return " ".join(parts)
def pseudotype_sql(self, expression: exp.PseudoType) -> str:
    """Render a pseudo-type by its name."""
    return expression.name
def objectidentifier_sql(self, expression: exp.ObjectIdentifier) -> str:
    """Render an object identifier by its name."""
    return expression.name
def onconflict_sql(self, expression: exp.OnConflict) -> str:
    """Render an upsert clause: ``ON CONFLICT ...`` or MySQL-style
    ``ON DUPLICATE KEY ...``."""
    conflict = "ON DUPLICATE KEY" if expression.args.get("duplicate") else "ON CONFLICT"

    constraint = self.sql(expression, "constraint")
    constraint = f" ON CONSTRAINT {constraint}" if constraint else ""

    conflict_keys = self.expressions(expression, key="conflict_keys", flat=True)
    conflict_keys = f"({conflict_keys}) " if conflict_keys else " "
    action = self.sql(expression, "action")

    expressions = self.expressions(expression, flat=True)
    if expressions:
        set_keyword = "SET " if self.DUPLICATE_KEY_UPDATE_WITH_SET else ""
        expressions = f" {set_keyword}{expressions}"

    return f"{conflict}{constraint}{conflict_keys}{action}{expressions}"
def returning_sql(self, expression: exp.Returning) -> str:
    """Render a RETURNING clause."""
    return f"{self.seg('RETURNING')} {self.expressions(expression, flat=True)}"
def rowformatdelimitedproperty_sql(self, expression: exp.RowFormatDelimitedProperty) -> str:
    """Render a Hive ``ROW FORMAT DELIMITED`` property with its TERMINATED BY
    and NULL DEFINED AS options."""
    fields = expression.args.get("fields")
    fields = f" FIELDS TERMINATED BY {fields}" if fields else ""
    escaped = expression.args.get("escaped")
    escaped = f" ESCAPED BY {escaped}" if escaped else ""
    items = expression.args.get("collection_items")
    items = f" COLLECTION ITEMS TERMINATED BY {items}" if items else ""
    keys = expression.args.get("map_keys")
    keys = f" MAP KEYS TERMINATED BY {keys}" if keys else ""
    lines = expression.args.get("lines")
    lines = f" LINES TERMINATED BY {lines}" if lines else ""
    null = expression.args.get("null")
    null = f" NULL DEFINED AS {null}" if null else ""
    return f"ROW FORMAT DELIMITED{fields}{escaped}{items}{keys}{lines}{null}"
def withtablehint_sql(self, expression: exp.WithTableHint) -> str:
    """Render a T-SQL table hint, e.g. ``WITH (NOLOCK)``."""
    return f"WITH ({self.expressions(expression, flat=True)})"
def indextablehint_sql(self, expression: exp.IndexTableHint) -> str:
    """Render a MySQL index hint, e.g. ``FORCE INDEX FOR JOIN (idx)``."""
    target = self.sql(expression, "target")
    target = f" FOR {target}" if target else ""
    indexes = self.expressions(expression, flat=True)
    return f"{self.sql(expression, 'this')} INDEX{target} ({indexes})"
def historicaldata_sql(self, expression: exp.HistoricalData) -> str:
    """Render historical-data syntax, e.g. ``AT (TIMESTAMP => ...)``."""
    kind = self.sql(expression, "kind")
    expr = self.sql(expression, "expression")
    return f"{self.sql(expression, 'this')} ({kind} => {expr})"
def table_parts(self, expression: exp.Table) -> str:
    """Render the dotted table name from its catalog/db/name parts, skipping
    any that are absent."""
    return ".".join(
        self.sql(part)
        for part in (
            expression.args.get("catalog"),
            expression.args.get("db"),
            expression.args.get("this"),
        )
        if part is not None
    )
def table_sql(self, expression: exp.Table, sep: str = " AS ") -> str:
    """Render a table reference with its modifiers: ONLY, version, alias,
    hints, pivots, joins, laterals, file format and WITH ORDINALITY."""
    table = self.table_parts(expression)
    only = "ONLY " if expression.args.get("only") else ""
    version = self.sql(expression, "version")
    version = f" {version}" if version else ""
    alias = self.sql(expression, "alias")
    alias = f"{sep}{alias}" if alias else ""
    hints = self.expressions(expression, key="hints", sep=" ")
    hints = f" {hints}" if hints and self.TABLE_HINTS else ""
    pivots = self.expressions(expression, key="pivots", sep=" ", flat=True)
    pivots = f" {pivots}" if pivots else ""
    joins = self.expressions(expression, key="joins", sep="", skip_first=True)
    laterals = self.expressions(expression, key="laterals", sep="")

    file_format = self.sql(expression, "format")
    if file_format:
        pattern = self.sql(expression, "pattern")
        pattern = f", PATTERN => {pattern}" if pattern else ""
        file_format = f" (FILE_FORMAT => {file_format}{pattern})"

    ordinality = expression.args.get("ordinality") or ""
    if ordinality:
        # The alias moves after WITH ORDINALITY rather than after the table.
        ordinality = f" WITH ORDINALITY{alias}"
        alias = ""

    when = self.sql(expression, "when")
    if when:
        table = f"{table} {when}"

    return f"{only}{table}{version}{file_format}{alias}{hints}{pivots}{joins}{laterals}{ordinality}"
def tablesample_sql(
    self,
    expression: exp.TableSample,
    sep: str = " AS ",
    tablesample_keyword: t.Optional[str] = None,
) -> str:
    """Render a TABLESAMPLE clause, handling bucket sampling, percent/row
    sizes, seeds, and dialects that place the alias after the sample."""
    if self.dialect.ALIAS_POST_TABLESAMPLE and expression.this and expression.this.alias:
        # Detach the alias from the sampled table so it can trail the clause.
        table = expression.this.copy()
        table.set("alias", None)
        this = self.sql(table)
        alias = f"{sep}{self.sql(expression.this, 'alias')}"
    else:
        this = self.sql(expression, "this")
        alias = ""

    method = self.sql(expression, "method")
    method = f"{method} " if method and self.TABLESAMPLE_WITH_METHOD else ""
    numerator = self.sql(expression, "bucket_numerator")
    denominator = self.sql(expression, "bucket_denominator")
    field = self.sql(expression, "bucket_field")
    field = f" ON {field}" if field else ""
    bucket = f"BUCKET {numerator} OUT OF {denominator}{field}" if numerator else ""
    seed = self.sql(expression, "seed")
    seed = f" {self.TABLESAMPLE_SEED_KEYWORD} ({seed})" if seed else ""

    size = self.sql(expression, "size")
    if size and self.TABLESAMPLE_SIZE_IS_ROWS:
        size = f"{size} ROWS"

    percent = self.sql(expression, "percent")
    if percent and not self.dialect.TABLESAMPLE_SIZE_IS_PERCENT:
        percent = f"{percent} PERCENT"

    expr = f"{bucket}{percent}{size}"
    if self.TABLESAMPLE_REQUIRES_PARENS:
        expr = f"({expr})"

    return (
        f"{this} {tablesample_keyword or self.TABLESAMPLE_KEYWORDS} {method}{expr}{seed}{alias}"
    )
def pivot_sql(self, expression: exp.Pivot) -> str:
    """Render a PIVOT/UNPIVOT clause; the simplified DuckDB-style form
    (with ``this`` set) uses ON/USING/GROUP BY instead of FOR."""
    expressions = self.expressions(expression, flat=True)

    if expression.this:
        this = self.sql(expression, "this")
        if not expressions:
            return f"UNPIVOT {this}"

        on = f"{self.seg('ON')} {expressions}"
        using = self.expressions(expression, key="using", flat=True)
        using = f"{self.seg('USING')} {using}" if using else ""
        group = self.sql(expression, "group")
        return f"PIVOT {this}{on}{using}{group}"

    alias = self.sql(expression, "alias")
    alias = f" AS {alias}" if alias else ""
    direction = "UNPIVOT" if expression.unpivot else "PIVOT"
    field = self.sql(expression, "field")
    include_nulls = expression.args.get("include_nulls")
    if include_nulls is not None:
        nulls = " INCLUDE NULLS " if include_nulls else " EXCLUDE NULLS "
    else:
        nulls = ""
    return f"{direction}{nulls}({expressions} FOR {field}){alias}"
def version_sql(self, expression: exp.Version) -> str:
    """Render a versioned-table clause, e.g. ``FOR SYSTEM_TIME AS OF ...``."""
    kind = expression.text("kind")
    value = self.sql(expression, "expression")
    return f"FOR {expression.name} {kind} {value}"
def tuple_sql(self, expression: exp.Tuple) -> str:
    """Render a tuple as a parenthesized, comma-separated list."""
    inner = self.expressions(expression, flat=True)
    return f"({inner})"
def update_sql(self, expression: exp.Update) -> str:
    """Generate an UPDATE statement, prepending any CTEs."""
    this = self.sql(expression, "this")
    set_sql = self.expressions(expression, flat=True)
    from_sql = self.sql(expression, "from")
    where_sql = self.sql(expression, "where")
    returning = self.sql(expression, "returning")
    order = self.sql(expression, "order")
    limit = self.sql(expression, "limit")
    # RETURNING_END controls whether the RETURNING clause trails FROM/WHERE
    # or precedes them, depending on the target dialect's grammar.
    if self.RETURNING_END:
        expression_sql = f"{from_sql}{where_sql}{returning}"
    else:
        expression_sql = f"{returning}{from_sql}{where_sql}"
    sql = f"UPDATE {this} SET {set_sql}{expression_sql}{order}{limit}"
    return self.prepend_ctes(expression, sql)
def values_sql(self, expression: exp.Values) -> str:
    """Generate a VALUES clause, or rewrite it as ``SELECT ... UNION ALL``
    when the dialect can't use VALUES as a relation in FROM/JOIN."""
    # The VALUES clause is still valid in an `INSERT INTO ..` statement, for example
    if self.VALUES_AS_TABLE or not expression.find_ancestor(exp.From, exp.Join):
        args = self.expressions(expression)
        alias = self.sql(expression, "alias")
        values = f"VALUES{self.seg('')}{args}"
        values = (
            f"({values})"
            if self.WRAP_DERIVED_VALUES and (alias or isinstance(expression.parent, exp.From))
            else values
        )
        return f"{values} AS {alias}" if alias else values
    # Converts `VALUES...` expression into a series of select unions.
    alias_node = expression.args.get("alias")
    column_names = alias_node and alias_node.columns
    selects: t.List[exp.Query] = []
    for i, tup in enumerate(expression.expressions):
        row = tup.expressions
        # Only the first SELECT needs the column aliases; the rest of the
        # union inherits the names from it.
        if i == 0 and column_names:
            row = [
                exp.alias_(value, column_name) for value, column_name in zip(row, column_names)
            ]
        selects.append(exp.Select(expressions=row))
    if self.pretty:
        # This may result in poor performance for large-cardinality `VALUES` tables, due to
        # the deep nesting of the resulting exp.Unions. If this is a problem, either increase
        # `sys.setrecursionlimit` to avoid RecursionErrors, or don't set `pretty`.
        query = reduce(lambda x, y: exp.union(x, y, distinct=False, copy=False), selects)
        return self.subquery_sql(query.subquery(alias_node and alias_node.this, copy=False))
    alias = f" AS {self.sql(alias_node, 'this')}" if alias_node else ""
    unions = " UNION ALL ".join(self.sql(select) for select in selects)
    return f"({unions}){alias}"
def var_sql(self, expression: exp.Var) -> str:
    """Render a variable by emitting its underlying name."""
    name = self.sql(expression, "this")
    return name
def into_sql(self, expression: exp.Into) -> str:
    """Render ``INTO`` with an optional TEMPORARY/UNLOGGED modifier."""
    if expression.args.get("temporary"):
        modifier = " TEMPORARY"
    elif expression.args.get("unlogged"):
        modifier = " UNLOGGED"
    else:
        modifier = ""
    return f"{self.seg('INTO')}{modifier} {self.sql(expression, 'this')}"
def from_sql(self, expression: exp.From) -> str:
    """Render the FROM clause."""
    source = self.sql(expression, "this")
    return f"{self.seg('FROM')} {source}"
def group_sql(self, expression: exp.Group) -> str:
    """Generate GROUP BY, including GROUPING SETS / CUBE / ROLLUP forms."""
    group_by = self.op_expressions("GROUP BY", expression)
    if expression.args.get("all"):
        return f"{group_by} ALL"
    grouping_sets = self.expressions(expression, key="grouping_sets", indent=False)
    grouping_sets = (
        f"{self.seg('GROUPING SETS')} {self.wrap(grouping_sets)}" if grouping_sets else ""
    )
    cube = expression.args.get("cube", [])
    # A leading literal `True` marks the modifier form `WITH CUBE`, as
    # opposed to an explicit CUBE(...) expression list.
    if seq_get(cube, 0) is True:
        return f"{group_by}{self.seg('WITH CUBE')}"
    else:
        cube_sql = self.expressions(expression, key="cube", indent=False)
        cube_sql = f"{self.seg('CUBE')} {self.wrap(cube_sql)}" if cube_sql else ""
    rollup = expression.args.get("rollup", [])
    # Same `True` convention as `cube` above, for `WITH ROLLUP`.
    if seq_get(rollup, 0) is True:
        return f"{group_by}{self.seg('WITH ROLLUP')}"
    else:
        rollup_sql = self.expressions(expression, key="rollup", indent=False)
        rollup_sql = f"{self.seg('ROLLUP')} {self.wrap(rollup_sql)}" if rollup_sql else ""
    groupings = csv(
        grouping_sets,
        cube_sql,
        rollup_sql,
        self.seg("WITH TOTALS") if expression.args.get("totals") else "",
        sep=self.GROUPINGS_SEP,
    )
    if expression.args.get("expressions") and groupings:
        group_by = f"{group_by}{self.GROUPINGS_SEP}"
    return f"{group_by}{groupings}"
def having_sql(self, expression: exp.Having) -> str:
    """Render the HAVING clause with its condition indented."""
    condition = self.indent(self.sql(expression, "this"))
    return f"{self.seg('HAVING')}{self.sep()}{condition}"
def connect_sql(self, expression: exp.Connect) -> str:
    """Render hierarchical-query ``START WITH ... CONNECT BY`` clauses."""
    start = self.sql(expression, "start")
    prefix = self.seg(f"START WITH {start}") if start else ""
    condition = self.seg(f"CONNECT BY {self.sql(expression, 'connect')}")
    return prefix + condition
def prior_sql(self, expression: exp.Prior) -> str:
    """Render a PRIOR operand (used inside CONNECT BY)."""
    operand = self.sql(expression, "this")
    return f"PRIOR {operand}"
def join_sql(self, expression: exp.Join) -> str:
    """Generate a JOIN clause (or a comma join / bare APPLY)."""
    # Some dialects don't attach LEFT/RIGHT to SEMI/ANTI joins.
    if not self.SEMI_ANTI_JOIN_WITH_SIDE and expression.kind in ("SEMI", "ANTI"):
        side = None
    else:
        side = expression.side
    op_sql = " ".join(
        op
        for op in (
            expression.method,
            "GLOBAL" if expression.args.get("global") else None,
            side,
            expression.kind,
            expression.hint if self.JOIN_HINTS else None,
        )
        if op
    )
    on_sql = self.sql(expression, "on")
    using = expression.args.get("using")
    if not on_sql and using:
        on_sql = csv(*(self.sql(column) for column in using))
    this = expression.this
    this_sql = self.sql(this)
    if on_sql:
        on_sql = self.indent(on_sql, skip_first=True)
        space = self.seg(" " * self.pad) if self.pretty else " "
        if using:
            on_sql = f"{space}USING ({on_sql})"
        else:
            on_sql = f"{space}ON {on_sql}"
    elif not op_sql:
        # No join operator at all: fall back to a comma join, or to the bare
        # lateral/apply form which carries its own keyword.
        if isinstance(this, exp.Lateral) and this.args.get("cross_apply") is not None:
            return f" {this_sql}"
        return f", {this_sql}"
    op_sql = f"{op_sql} JOIN" if op_sql else "JOIN"
    return f"{self.seg(op_sql)} {this_sql}{on_sql}"
def lambda_sql(self, expression: exp.Lambda, arrow_sep: str = "->") -> str:
    """Render a lambda, parenthesizing multi-argument parameter lists."""
    params = self.expressions(expression, flat=True)
    if len(params.split(",")) > 1:
        params = f"({params})"
    body = self.sql(expression, "this")
    return f"{params} {arrow_sep} {body}"
def lateral_op(self, expression: exp.Lateral) -> str:
    """Pick the join keyword preceding LATERAL based on the apply flavor."""
    cross_apply = expression.args.get("cross_apply")
    # https://www.mssqltips.com/sqlservertip/1958/sql-server-cross-apply-and-outer-apply/
    if cross_apply is None:
        prefix = ""
    elif cross_apply:
        prefix = "INNER JOIN "
    else:
        prefix = "LEFT JOIN "
    return f"{prefix}LATERAL"
def lateral_sql(self, expression: exp.Lateral) -> str:
    """Generate LATERAL, including the LATERAL VIEW variant."""
    this = self.sql(expression, "this")
    if expression.args.get("view"):
        # LATERAL VIEW [OUTER] fn(...) table_alias AS col1, col2 ...
        alias = expression.args["alias"]
        columns = self.expressions(alias, key="columns", flat=True)
        table = f" {alias.name}" if alias.name else ""
        columns = f" AS {columns}" if columns else ""
        op_sql = self.seg(f"LATERAL VIEW{' OUTER' if expression.args.get('outer') else ''}")
        return f"{op_sql}{self.sep()}{this}{table}{columns}"
    alias = self.sql(expression, "alias")
    alias = f" AS {alias}" if alias else ""
    return f"{self.lateral_op(expression)} {this}{alias}"
def limit_sql(self, expression: exp.Limit, top: bool = False) -> str:
    """Generate LIMIT (or TOP when ``top`` is True)."""
    this = self.sql(expression, "this")
    args = [
        self._simplify_unless_literal(e) if self.LIMIT_ONLY_LITERALS else e
        for e in (expression.args.get(k) for k in ("offset", "expression"))
        if e
    ]
    args_sql = ", ".join(self.sql(e) for e in args)
    # TOP needs parentheses around non-numeric arguments.
    args_sql = f"({args_sql})" if top and any(not e.is_number for e in args) else args_sql
    expressions = self.expressions(expression, flat=True)
    expressions = f" BY {expressions}" if expressions else ""
    return f"{this}{self.seg('TOP' if top else 'LIMIT')} {args_sql}{expressions}"
def offset_sql(self, expression: exp.Offset) -> str:
    """Render the OFFSET clause, with optional trailing BY expressions."""
    this = self.sql(expression, "this")
    value = expression.expression
    if self.LIMIT_ONLY_LITERALS:
        value = self._simplify_unless_literal(value)
    by = self.expressions(expression, flat=True)
    by = f" BY {by}" if by else ""
    return f"{this}{self.seg('OFFSET')} {self.sql(value)}{by}"
def setitem_sql(self, expression: exp.SetItem) -> str:
    """Render a single item of a SET statement."""
    kind = self.sql(expression, "kind")
    if kind:
        kind += " "
    this = self.sql(expression, "this")
    exprs = self.expressions(expression)
    collate = self.sql(expression, "collate")
    if collate:
        collate = f" COLLATE {collate}"
    scope = "GLOBAL " if expression.args.get("global") else ""
    return f"{scope}{kind}{this}{exprs}{collate}"
def set_sql(self, expression: exp.Set) -> str:
    """Render a SET/UNSET statement, optionally with the TAG keyword."""
    keyword = "UNSET" if expression.args.get("unset") else "SET"
    tag = " TAG" if expression.args.get("tag") else ""
    if expression.expressions:
        items = f" {self.expressions(expression, flat=True)}"
    else:
        items = ""
    return f"{keyword}{tag}{items}"
def pragma_sql(self, expression: exp.Pragma) -> str:
    """Render a PRAGMA statement."""
    body = self.sql(expression, "this")
    return f"PRAGMA {body}"
def lock_sql(self, expression: exp.Lock) -> str:
    """Render FOR UPDATE / FOR SHARE locking-read clauses."""
    if not self.LOCKING_READS_SUPPORTED:
        self.unsupported("Locking reads using 'FOR UPDATE/SHARE' are not supported")
        return ""
    keyword = "FOR UPDATE" if expression.args["update"] else "FOR SHARE"
    of = self.expressions(expression, flat=True)
    if of:
        of = f" OF {of}"
    wait = expression.args.get("wait")
    wait_sql = ""
    if wait is not None:
        if isinstance(wait, exp.Literal):
            # An explicit timeout: WAIT <n>.
            wait_sql = f" WAIT {self.sql(wait)}"
        elif wait:
            wait_sql = " NOWAIT"
        else:
            wait_sql = " SKIP LOCKED"
    return f"{keyword}{of}{wait_sql}"
def literal_sql(self, expression: exp.Literal) -> str:
    """Render a literal, quoting and escaping string literals."""
    text = expression.this or ""
    if not expression.is_string:
        return text
    return f"{self.dialect.QUOTE_START}{self.escape_str(text)}{self.dialect.QUOTE_END}"
def escape_str(self, text: str) -> str:
    """Escape quote characters and escape sequences inside a string literal."""
    text = text.replace(self.dialect.QUOTE_END, self._escaped_quote_end)
    mapping = self.dialect.INVERSE_ESCAPE_SEQUENCES
    if mapping:
        text = "".join(mapping.get(ch, ch) for ch in text)
    elif self.pretty:
        # Protect literal newlines from the pretty-printer.
        text = text.replace("\n", self.SENTINEL_LINE_BREAK)
    return text
def loaddata_sql(self, expression: exp.LoadData) -> str:
    """Render a LOAD DATA ... INTO TABLE statement."""
    parts = ["LOAD DATA"]
    if expression.args.get("local"):
        parts.append(" LOCAL")
    parts.append(f" INPATH {self.sql(expression, 'inpath')}")
    if expression.args.get("overwrite"):
        parts.append(" OVERWRITE")
    parts.append(f" INTO TABLE {self.sql(expression, 'this')}")
    partition = self.sql(expression, "partition")
    if partition:
        parts.append(f" {partition}")
    input_format = self.sql(expression, "input_format")
    if input_format:
        parts.append(f" INPUTFORMAT {input_format}")
    serde = self.sql(expression, "serde")
    if serde:
        parts.append(f" SERDE {serde}")
    return "".join(parts)
def null_sql(self, *_) -> str:
    """Render the NULL keyword; any arguments are ignored."""
    return "NULL"
def boolean_sql(self, expression: exp.Boolean) -> str:
    """Render a boolean literal."""
    if expression.this:
        return "TRUE"
    return "FALSE"
def order_sql(self, expression: exp.Order, flat: bool = False) -> str:
    """Generate ORDER BY, including SIBLINGS and INTERPOLATE modifiers."""
    this = self.sql(expression, "this")
    this = f"{this} " if this else this
    siblings = "SIBLINGS " if expression.args.get("siblings") else ""
    order = self.op_expressions(f"{this}ORDER {siblings}BY", expression, flat=this or flat)  # type: ignore
    # ClickHouse's ORDER BY ... INTERPOLATE (alias AS expr, ...) modifier.
    interpolated_values = [
        f"{self.sql(named_expression, 'alias')} AS {self.sql(named_expression, 'this')}"
        for named_expression in expression.args.get("interpolate") or []
    ]
    interpolate = (
        f" INTERPOLATE ({', '.join(interpolated_values)})" if interpolated_values else ""
    )
    return f"{order}{interpolate}"
def withfill_sql(self, expression: exp.WithFill) -> str:
    """Render the WITH FILL modifier with optional FROM/TO/STEP parts."""
    parts = ["WITH FILL"]
    for keyword, key in (("FROM", "from"), ("TO", "to"), ("STEP", "step")):
        value = self.sql(expression, key)
        if value:
            parts.append(f" {keyword} {value}")
    return "".join(parts)
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Render a CLUSTER BY clause."""
    return self.op_expressions("CLUSTER BY", expression)
def distribute_sql(self, expression: exp.Distribute) -> str:
    """Render a DISTRIBUTE BY clause."""
    return self.op_expressions("DISTRIBUTE BY", expression)
def sort_sql(self, expression: exp.Sort) -> str:
    """Render a SORT BY clause."""
    return self.op_expressions("SORT BY", expression)
def ordered_sql(self, expression: exp.Ordered) -> str:
    """Generate one ORDER BY key, emulating NULLS FIRST/LAST when the
    dialect can't express it natively."""
    desc = expression.args.get("desc")
    asc = not desc
    nulls_first = expression.args.get("nulls_first")
    nulls_last = not nulls_first
    nulls_are_large = self.dialect.NULL_ORDERING == "nulls_are_large"
    nulls_are_small = self.dialect.NULL_ORDERING == "nulls_are_small"
    nulls_are_last = self.dialect.NULL_ORDERING == "nulls_are_last"
    this = self.sql(expression, "this")
    # desc may be None (no explicit direction), False (ASC) or True (DESC).
    sort_order = " DESC" if desc else (" ASC" if desc is False else "")
    nulls_sort_change = ""
    # Only emit NULLS FIRST/LAST when it deviates from the dialect default.
    if nulls_first and (
        (asc and nulls_are_large) or (desc and nulls_are_small) or nulls_are_last
    ):
        nulls_sort_change = " NULLS FIRST"
    elif (
        nulls_last
        and ((asc and nulls_are_small) or (desc and nulls_are_large))
        and not nulls_are_last
    ):
        nulls_sort_change = " NULLS LAST"
    # If the NULLS FIRST/LAST clause is unsupported, we add another sort key to simulate it
    if nulls_sort_change and not self.NULL_ORDERING_SUPPORTED:
        window = expression.find_ancestor(exp.Window, exp.Select)
        if isinstance(window, exp.Window) and window.args.get("spec"):
            self.unsupported(
                f"'{nulls_sort_change.strip()}' translation not supported in window functions"
            )
            nulls_sort_change = ""
        elif self.NULL_ORDERING_SUPPORTED is None:
            if expression.this.is_int:
                self.unsupported(
                    f"'{nulls_sort_change.strip()}' translation not supported with positional ordering"
                )
            else:
                # Sort an IS NULL indicator first, then the original key.
                null_sort_order = " DESC" if nulls_sort_change == " NULLS FIRST" else ""
                this = f"CASE WHEN {this} IS NULL THEN 1 ELSE 0 END{null_sort_order}, {this}"
            nulls_sort_change = ""
    with_fill = self.sql(expression, "with_fill")
    with_fill = f" {with_fill}" if with_fill else ""
    return f"{this}{sort_order}{nulls_sort_change}{with_fill}"
def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str:
    """Generate a MATCH_RECOGNIZE clause (partition, order, measures,
    rows-per-match, after-match skip, pattern and symbol definitions)."""
    partition = self.partition_by_sql(expression)
    order = self.sql(expression, "order")
    measures = self.expressions(expression, key="measures")
    measures = self.seg(f"MEASURES{self.seg(measures)}") if measures else ""
    rows = self.sql(expression, "rows")
    rows = self.seg(rows) if rows else ""
    after = self.sql(expression, "after")
    after = self.seg(after) if after else ""
    pattern = self.sql(expression, "pattern")
    pattern = self.seg(f"PATTERN ({pattern})") if pattern else ""
    # DEFINE entries are stored alias-first: `alias AS condition`.
    definition_sqls = [
        f"{self.sql(definition, 'alias')} AS {self.sql(definition, 'this')}"
        for definition in expression.args.get("define", [])
    ]
    definitions = self.expressions(sqls=definition_sqls)
    define = self.seg(f"DEFINE{self.seg(definitions)}") if definitions else ""
    body = "".join(
        (
            partition,
            order,
            measures,
            rows,
            after,
            pattern,
            define,
        )
    )
    alias = self.sql(expression, "alias")
    alias = f" {alias}" if alias else ""
    return f"{self.seg('MATCH_RECOGNIZE')} {self.wrap(body)}{alias}"
def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
    """Append all query modifiers (joins, where, group, order, limit, ...)
    to the given SQL fragments, in clause order."""
    limit = expression.args.get("limit")
    # Normalize LIMIT <-> FETCH according to the dialect's preferred form.
    if self.LIMIT_FETCH == "LIMIT" and isinstance(limit, exp.Fetch):
        limit = exp.Limit(expression=exp.maybe_copy(limit.args.get("count")))
    elif self.LIMIT_FETCH == "FETCH" and isinstance(limit, exp.Limit):
        limit = exp.Fetch(direction="FIRST", count=exp.maybe_copy(limit.expression))
    options = self.expressions(expression, key="options")
    if options:
        options = f" OPTION{self.wrap(options)}"
    return csv(
        *sqls,
        *[self.sql(join) for join in expression.args.get("joins") or []],
        self.sql(expression, "connect"),
        self.sql(expression, "match"),
        *[self.sql(lateral) for lateral in expression.args.get("laterals") or []],
        self.sql(expression, "prewhere"),
        self.sql(expression, "where"),
        self.sql(expression, "group"),
        self.sql(expression, "having"),
        *[gen(self, expression) for gen in self.AFTER_HAVING_MODIFIER_TRANSFORMS.values()],
        self.sql(expression, "order"),
        *self.offset_limit_modifiers(expression, isinstance(limit, exp.Fetch), limit),
        *self.after_limit_modifiers(expression),
        options,
        sep="",
    )
def queryoption_sql(self, expression: exp.QueryOption) -> str:
    """Query options are suppressed by default; dialects may override."""
    return ""
def offset_limit_modifiers(
    self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit]
) -> t.List[str]:
    """Order OFFSET relative to LIMIT/FETCH for the syntax in use."""
    offset_sql = self.sql(expression, "offset")
    limit_sql = self.sql(limit)
    # ANSI FETCH follows OFFSET; plain LIMIT precedes it.
    if fetch:
        return [offset_sql, limit_sql]
    return [limit_sql, offset_sql]
def after_limit_modifiers(self, expression: exp.Expression) -> t.List[str]:
    """Clauses emitted after LIMIT: locking reads, then the table sample."""
    locks = self.expressions(expression, key="locks", sep=" ")
    if locks:
        locks = f" {locks}"
    return [locks, self.sql(expression, "sample")]
def select_sql(self, expression: exp.Select) -> str:
    """Generate a SELECT statement, handling TOP, SELECT kinds and the
    SELECT INTO -> CREATE TABLE AS fallback."""
    into = expression.args.get("into")
    # Remove INTO up front so it isn't rendered inline; it's re-applied as
    # CREATE TABLE ... AS below when unsupported.
    if not self.SUPPORTS_SELECT_INTO and into:
        into.pop()
    hint = self.sql(expression, "hint")
    distinct = self.sql(expression, "distinct")
    distinct = f" {distinct}" if distinct else ""
    kind = self.sql(expression, "kind")
    limit = expression.args.get("limit")
    if isinstance(limit, exp.Limit) and self.LIMIT_IS_TOP:
        top = self.limit_sql(limit, top=True)
        limit.pop()
    else:
        top = ""
    expressions = self.expressions(expression)
    if kind:
        if kind in self.SELECT_KINDS:
            kind = f" AS {kind}"
        else:
            if kind == "STRUCT":
                # Lower SELECT AS STRUCT into an explicit STRUCT(...) call.
                expressions = self.expressions(
                    sqls=[
                        self.sql(
                            exp.Struct(
                                expressions=[
                                    exp.PropertyEQ(this=e.args.get("alias"), expression=e.this)
                                    if isinstance(e, exp.Alias)
                                    else e
                                    for e in expression.expressions
                                ]
                            )
                        )
                    ]
                )
            kind = ""
    # We use LIMIT_IS_TOP as a proxy for whether DISTINCT should go first because tsql and Teradata
    # are the only dialects that use LIMIT_IS_TOP and both place DISTINCT first.
    top_distinct = f"{distinct}{hint}{top}" if self.LIMIT_IS_TOP else f"{top}{hint}{distinct}"
    expressions = f"{self.sep()}{expressions}" if expressions else expressions
    sql = self.query_modifiers(
        expression,
        f"SELECT{top_distinct}{kind}{expressions}",
        self.sql(expression, "into", comment=False),
        self.sql(expression, "from", comment=False),
    )
    sql = self.prepend_ctes(expression, sql)
    if not self.SUPPORTS_SELECT_INTO and into:
        if into.args.get("temporary"):
            table_kind = " TEMPORARY"
        elif self.SUPPORTS_UNLOGGED_TABLES and into.args.get("unlogged"):
            table_kind = " UNLOGGED"
        else:
            table_kind = ""
        sql = f"CREATE{table_kind} TABLE {self.sql(into.this)} AS {sql}"
    return sql
def schema_sql(self, expression: exp.Schema) -> str:
    """Render a schema: optional name followed by its column list."""
    name = self.sql(expression, "this")
    columns = self.schema_columns_sql(expression)
    if name and columns:
        return f"{name} {columns}"
    return name or columns
def schema_columns_sql(self, expression: exp.Schema) -> str:
    """Render the parenthesized column definitions, or "" if none exist."""
    if not expression.expressions:
        return ""
    return f"({self.sep('')}{self.expressions(expression)}{self.seg(')', sep='')}"
def star_sql(self, expression: exp.Star) -> str:
    """Render ``*`` with optional EXCEPT/REPLACE column modifiers."""
    result = "*"
    for key in ("except", "replace"):
        modifier = self.expressions(expression, key=key, flat=True)
        if modifier:
            result += f"{self.seg(self.STAR_MAPPING[key])} ({modifier})"
    return result
def parameter_sql(self, expression: exp.Parameter) -> str:
    """Render a parameter with the dialect's parameter token prefix."""
    name = self.sql(expression, "this")
    return f"{self.PARAMETER_TOKEN}{name}"
def sessionparameter_sql(self, expression: exp.SessionParameter) -> str:
    """Render a session parameter, e.g. ``@@SESSION.x`` or ``@@x``."""
    name = self.sql(expression, "this")
    kind = expression.text("kind")
    prefix = f"@@{kind}." if kind else "@@"
    return f"{prefix}{name}"
def placeholder_sql(self, expression: exp.Placeholder) -> str:
    """Render a placeholder: named when it has a name, otherwise ``?``."""
    if expression.name:
        return f"{self.NAMED_PLACEHOLDER_TOKEN}{expression.name}"
    return "?"
def subquery_sql(self, expression: exp.Subquery, sep: str = " AS ") -> str:
    """Render a wrapped subquery with optional alias and pivots."""
    alias = self.sql(expression, "alias")
    if alias:
        alias = f"{sep}{alias}"
    pivots = self.expressions(expression, key="pivots", sep=" ", flat=True)
    if pivots:
        pivots = f" {pivots}"
    sql = self.query_modifiers(expression, self.wrap(expression), alias, pivots)
    return self.prepend_ctes(expression, sql)
def qualify_sql(self, expression: exp.Qualify) -> str:
    """Render the QUALIFY clause with its condition indented."""
    condition = self.indent(self.sql(expression, "this"))
    return f"{self.seg('QUALIFY')}{self.sep()}{condition}"
def set_operations(self, expression: exp.Union) -> str:
    """Flatten a set-operation tree iteratively to avoid deep recursion."""
    sqls: t.List[str] = []
    # Interleave operands and pre-rendered operator strings on a stack so
    # the final SQL comes out in source order.
    stack: t.List[t.Union[str, exp.Expression]] = [expression]
    while stack:
        node = stack.pop()
        if isinstance(node, exp.Union):
            stack.append(node.expression)
            stack.append(
                self.maybe_comment(
                    getattr(self, f"{node.key}_op")(node),
                    expression=node.this,
                    comments=node.comments,
                )
            )
            stack.append(node.this)
        else:
            sqls.append(self.sql(node))
    this = self.sep().join(sqls)
    this = self.query_modifiers(expression, this)
    return self.prepend_ctes(expression, this)
def union_sql(self, expression: exp.Union) -> str:
    """Render a UNION by flattening the whole set-operation tree."""
    return self.set_operations(expression)
def union_op(self, expression: exp.Union) -> str:
    """Render the UNION keyword with its ALL/DISTINCT/BY NAME modifiers."""
    if expression.args.get("distinct"):
        kind = " DISTINCT" if self.EXPLICIT_UNION else ""
    else:
        kind = " ALL"
    by_name = " BY NAME" if expression.args.get("by_name") else ""
    return f"UNION{kind}{by_name}"
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Generate UNNEST, mapping the offset column to WITH ORDINALITY or
    WITH OFFSET depending on the dialect."""
    args = self.expressions(expression, flat=True)
    alias = expression.args.get("alias")
    offset = expression.args.get("offset")
    if self.UNNEST_WITH_ORDINALITY:
        # Fold the offset into the alias's column list; ORDINALITY names it.
        if alias and isinstance(offset, exp.Expression):
            alias.append("columns", offset)
    if alias and self.dialect.UNNEST_COLUMN_ONLY:
        columns = alias.columns
        alias = self.sql(columns[0]) if columns else ""
    else:
        alias = self.sql(alias)
    alias = f" AS {alias}" if alias else alias
    if self.UNNEST_WITH_ORDINALITY:
        suffix = f" WITH ORDINALITY{alias}" if offset else alias
    else:
        if isinstance(offset, exp.Expression):
            suffix = f"{alias} WITH OFFSET AS {self.sql(offset)}"
        elif offset:
            suffix = f"{alias} WITH OFFSET"
        else:
            suffix = alias
    return f"UNNEST({args}){suffix}"
def prewhere_sql(self, expression: exp.PreWhere) -> str:
    """PREWHERE is suppressed by default; dialects may override."""
    return ""
def where_sql(self, expression: exp.Where) -> str:
    """Render the WHERE clause with its condition indented."""
    condition = self.indent(self.sql(expression, "this"))
    return f"{self.seg('WHERE')}{self.sep()}{condition}"
def window_sql(self, expression: exp.Window) -> str:
    """Generate an OVER window (or a named WINDOW definition)."""
    this = self.sql(expression, "this")
    partition = self.partition_by_sql(expression)
    order = expression.args.get("order")
    order = self.order_sql(order, flat=True) if order else ""
    spec = self.sql(expression, "spec")
    alias = self.sql(expression, "alias")
    over = self.sql(expression, "over") or "OVER"
    # Inside a WINDOW clause the keyword is AS; elsewhere it's OVER (or the
    # dialect-specific override stored in `over`).
    this = f"{this} {'AS' if expression.arg_key == 'windows' else over}"
    first = expression.args.get("first")
    if first is None:
        first = ""
    else:
        first = "FIRST" if first else "LAST"
    # A bare named window reference needs no parentheses.
    if not partition and not order and not spec and alias:
        return f"{this} {alias}"
    args = " ".join(arg for arg in (alias, first, partition, order, spec) if arg)
    return f"{this} ({args})"
def partition_by_sql(self, expression: exp.Window | exp.MatchRecognize) -> str:
    """Render PARTITION BY, or "" when there is no partitioning."""
    columns = self.expressions(expression, key="partition_by", flat=True)
    if not columns:
        return ""
    return f"PARTITION BY {columns}"
def windowspec_sql(self, expression: exp.WindowSpec) -> str:
    """Render a window frame, e.g. ``ROWS BETWEEN ... AND CURRENT ROW``."""
    kind = self.sql(expression, "kind")
    start = csv(self.sql(expression, "start"), self.sql(expression, "start_side"), sep=" ")
    end = csv(self.sql(expression, "end"), self.sql(expression, "end_side"), sep=" ")
    if not end:
        end = "CURRENT ROW"
    return f"{kind} BETWEEN {start} AND {end}"
def withingroup_sql(self, expression: exp.WithinGroup) -> str:
    """Render ``<expr> WITHIN GROUP (ORDER BY ...)``."""
    this = self.sql(expression, "this")
    # The rendered order clause carries a leading space; strip it here.
    order = self.sql(expression, "expression")[1:]
    return f"{this} WITHIN GROUP ({order})"
def between_sql(self, expression: exp.Between) -> str:
    """Render ``x BETWEEN low AND high``."""
    operand = self.sql(expression, "this")
    low = self.sql(expression, "low")
    high = self.sql(expression, "high")
    return f"{operand} BETWEEN {low} AND {high}"
def bracket_sql(self, expression: exp.Bracket) -> str:
    """Render subscript access, shifting indexes by the dialect's offset."""
    shift = self.dialect.INDEX_OFFSET - expression.args.get("offset", 0)
    indexes = apply_index_offset(expression.this, expression.expressions, shift)
    inner = ", ".join(self.sql(index) for index in indexes)
    return f"{self.sql(expression, 'this')}[{inner}]"
def all_sql(self, expression: exp.All) -> str:
    """Render an ALL quantifier over a wrapped subquery."""
    return f"ALL {self.wrap(expression)}"
def any_sql(self, expression: exp.Any) -> str:
    """Render an ANY quantifier; query operands are wrapped without a space."""
    this = self.sql(expression, "this")
    if not isinstance(expression.this, (*exp.UNWRAPPED_QUERIES, exp.Paren)):
        return f"ANY {this}"
    if isinstance(expression.this, exp.UNWRAPPED_QUERIES):
        this = self.wrap(this)
    return f"ANY{this}"
def exists_sql(self, expression: exp.Exists) -> str:
    """Render an EXISTS predicate over a wrapped subquery."""
    return f"EXISTS{self.wrap(expression)}"
def case_sql(self, expression: exp.Case) -> str:
    """Render a CASE expression, wrapping long ones when pretty-printing."""
    operand = self.sql(expression, "this")
    parts = [f"CASE {operand}" if operand else "CASE"]
    for branch in expression.args["ifs"]:
        parts.append(f"WHEN {self.sql(branch, 'this')}")
        parts.append(f"THEN {self.sql(branch, 'true')}")
    default = self.sql(expression, "default")
    if default:
        parts.append(f"ELSE {default}")
    parts.append("END")
    if self.pretty and self.text_width(parts) > self.max_text_width:
        return self.indent("\n".join(parts), skip_first=True, skip_last=True)
    return " ".join(parts)
def constraint_sql(self, expression: exp.Constraint) -> str:
    """Render a named constraint."""
    name = self.sql(expression, "this")
    body = self.expressions(expression, flat=True)
    return f"CONSTRAINT {name} {body}"
def nextvaluefor_sql(self, expression: exp.NextValueFor) -> str:
    """Render ``NEXT VALUE FOR seq``, optionally with an OVER ordering."""
    order = expression.args.get("order")
    over = f" OVER ({self.order_sql(order, flat=True)})" if order else ""
    return f"NEXT VALUE FOR {self.sql(expression, 'this')}{over}"
def extract_sql(self, expression: exp.Extract) -> str:
    """Render ``EXTRACT(part FROM expr)``."""
    if self.EXTRACT_ALLOWS_QUOTES:
        part = self.sql(expression, "this")
    else:
        part = expression.this.name
    source = self.sql(expression, "expression")
    return f"EXTRACT({part} FROM {source})"
def trim_sql(self, expression: exp.Trim) -> str:
    """Map LEADING/TRAILING trims to LTRIM/RTRIM, otherwise use TRIM."""
    position = self.sql(expression, "position")
    if position == "LEADING":
        return self.func("LTRIM", expression.this)
    if position == "TRAILING":
        return self.func("RTRIM", expression.this)
    return self.func("TRIM", expression.this, expression.expression)
def convert_concat_args(self, expression: exp.Concat | exp.ConcatWs) -> t.List[exp.Expression]:
    """Normalize CONCAT arguments for strict/coalescing dialect semantics."""
    args = expression.expressions
    if isinstance(expression, exp.ConcatWs):
        args = args[1:]  # Skip the delimiter
    if self.dialect.STRICT_STRING_CONCAT and expression.args.get("safe"):
        args = [exp.cast(arg, "text") for arg in args]
    if not self.dialect.CONCAT_COALESCE and expression.args.get("coalesce"):
        args = [exp.func("coalesce", arg, exp.Literal.string("")) for arg in args]
    return args
def concat_sql(self, expression: exp.Concat) -> str:
    """Render CONCAT, unwrapping single arguments where required."""
    args = self.convert_concat_args(expression)
    # Some dialects don't allow a single-argument CONCAT call
    if len(args) == 1 and not self.SUPPORTS_SINGLE_ARG_CONCAT:
        return self.sql(args[0])
    return self.func("CONCAT", *args)
def concatws_sql(self, expression: exp.ConcatWs) -> str:
    """Render CONCAT_WS: the delimiter first, then the normalized args."""
    delimiter = seq_get(expression.expressions, 0)
    return self.func("CONCAT_WS", delimiter, *self.convert_concat_args(expression))
def check_sql(self, expression: exp.Check) -> str:
    """Render a CHECK constraint."""
    condition = self.sql(expression, key="this")
    return f"CHECK ({condition})"
def foreignkey_sql(self, expression: exp.ForeignKey) -> str:
    """Render FOREIGN KEY with REFERENCES / ON DELETE / ON UPDATE parts."""
    columns = self.expressions(expression, flat=True)
    parts = [f"FOREIGN KEY ({columns})"]
    reference = self.sql(expression, "reference")
    if reference:
        parts.append(f" {reference}")
    delete = self.sql(expression, "delete")
    if delete:
        parts.append(f" ON DELETE {delete}")
    update = self.sql(expression, "update")
    if update:
        parts.append(f" ON UPDATE {update}")
    return "".join(parts)
def primarykey_sql(self, expression: exp.PrimaryKey) -> str:
    """Render a PRIMARY KEY constraint with optional trailing options.

    NOTE(review): the parameter was annotated ``exp.ForeignKey``, which looks
    like a copy-paste slip from ``foreignkey_sql``; corrected to
    ``exp.PrimaryKey`` (annotation only, no runtime change).
    """
    expressions = self.expressions(expression, flat=True)
    options = self.expressions(expression, key="options", flat=True, sep=" ")
    options = f" {options}" if options else ""
    return f"PRIMARY KEY ({expressions}){options}"
def if_sql(self, expression: exp.If) -> str:
    """Render IF by lowering it to an equivalent CASE expression."""
    case = exp.Case(ifs=[expression], default=expression.args.get("false"))
    return self.case_sql(case)
def matchagainst_sql(self, expression: exp.MatchAgainst) -> str:
    """Render full-text ``MATCH(...) AGAINST(... [modifier])``."""
    modifier = expression.args.get("modifier")
    modifier = f" {modifier}" if modifier else ""
    match = self.func("MATCH", *expression.expressions)
    return f"{match} AGAINST({self.sql(expression, 'this')}{modifier})"
def jsonkeyvalue_sql(self, expression: exp.JSONKeyValue) -> str:
    """Render a JSON key/value pair using the dialect's pair separator."""
    key = self.sql(expression, "this")
    value = self.sql(expression, "expression")
    return f"{key}{self.JSON_KEY_VALUE_PAIR_SEP} {value}"
def jsonpath_sql(self, expression: exp.JSONPath) -> str:
    """Render a JSON path as a quoted string, dropping any leading dot."""
    path = self.expressions(expression, sep="", flat=True).lstrip(".")
    return f"{self.dialect.QUOTE_START}{path}{self.dialect.QUOTE_END}"
def json_path_part(self, expression: int | str | exp.JSONPathPart) -> str:
    """Render one JSON path segment.

    ``exp.JSONPathPart`` nodes are dispatched through ``self.TRANSFORMS``;
    integers are array indexes; strings are keys, quoted and escaped per the
    dialect's quoting rules.
    """
    if isinstance(expression, exp.JSONPathPart):
        transform = self.TRANSFORMS.get(expression.__class__)
        if not callable(transform):
            self.unsupported(f"Unsupported JSONPathPart type {expression.__class__.__name__}")
            return ""
        return transform(self, expression)
    if isinstance(expression, int):
        return str(expression)
    if self.JSON_PATH_SINGLE_QUOTE_ESCAPE:
        escaped = expression.replace("'", "\\'")
        # Bug fix: wrap the *escaped* key, not the raw input — the original
        # interpolated `expression` here, silently discarding the escaping
        # of embedded single quotes performed on the previous line.
        escaped = f"\\'{escaped}\\'"
    else:
        escaped = expression.replace('"', '\\"')
        escaped = f'"{escaped}"'
    return escaped
def formatjson_sql(self, expression: exp.FormatJson) -> str:
    """Render ``<expr> FORMAT JSON``."""
    inner = self.sql(expression, "this")
    return f"{inner} FORMAT JSON"
def jsonobject_sql(self, expression: exp.JSONObject | exp.JSONObjectAgg) -> str:
    """Generate JSON_OBJECT / JSON_OBJECTAGG with its optional modifiers
    (NULL handling, UNIQUE KEYS, RETURNING, ENCODING)."""
    null_handling = expression.args.get("null_handling")
    null_handling = f" {null_handling}" if null_handling else ""
    unique_keys = expression.args.get("unique_keys")
    # Tri-state: None means the clause is omitted entirely.
    if unique_keys is not None:
        unique_keys = f" {'WITH' if unique_keys else 'WITHOUT'} UNIQUE KEYS"
    else:
        unique_keys = ""
    return_type = self.sql(expression, "return_type")
    return_type = f" RETURNING {return_type}" if return_type else ""
    encoding = self.sql(expression, "encoding")
    encoding = f" ENCODING {encoding}" if encoding else ""
    return self.func(
        "JSON_OBJECT" if isinstance(expression, exp.JSONObject) else "JSON_OBJECTAGG",
        *expression.expressions,
        suffix=f"{null_handling}{unique_keys}{return_type}{encoding})",
    )
def jsonobjectagg_sql(self, expression: exp.JSONObjectAgg) -> str:
    """JSON_OBJECTAGG shares its rendering with JSON_OBJECT."""
    return self.jsonobject_sql(expression)
def jsonarray_sql(self, expression: exp.JSONArray) -> str:
    """Render JSON_ARRAY with optional NULL handling, RETURNING and STRICT."""
    null_handling = expression.args.get("null_handling")
    null_handling = f" {null_handling}" if null_handling else ""
    return_type = self.sql(expression, "return_type")
    return_type = f" RETURNING {return_type}" if return_type else ""
    strict = " STRICT" if expression.args.get("strict") else ""
    suffix = f"{null_handling}{return_type}{strict})"
    return self.func("JSON_ARRAY", *expression.expressions, suffix=suffix)
def jsonarrayagg_sql(self, expression: exp.JSONArrayAgg) -> str:
    """Render JSON_ARRAYAGG with its optional ordering and modifiers."""
    value = self.sql(expression, "this")
    order = self.sql(expression, "order")
    null_handling = expression.args.get("null_handling")
    null_handling = f" {null_handling}" if null_handling else ""
    return_type = self.sql(expression, "return_type")
    return_type = f" RETURNING {return_type}" if return_type else ""
    strict = " STRICT" if expression.args.get("strict") else ""
    suffix = f"{order}{null_handling}{return_type}{strict})"
    return self.func("JSON_ARRAYAGG", value, suffix=suffix)
def jsoncolumndef_sql(self, expression: exp.JSONColumnDef) -> str:
    """Render a JSON_TABLE column definition (possibly NESTED)."""
    path = self.sql(expression, "path")
    if path:
        path = f" PATH {path}"
    nested_schema = self.sql(expression, "nested_schema")
    if nested_schema:
        return f"NESTED{path} {nested_schema}"
    name = self.sql(expression, "this")
    kind = self.sql(expression, "kind")
    if kind:
        kind = f" {kind}"
    return f"{name}{kind}{path}"
def jsonschema_sql(self, expression: exp.JSONSchema) -> str:
    """Render a JSON schema as a COLUMNS(...) call."""
    return self.func("COLUMNS", *expression.expressions)
def jsontable_sql(self, expression: exp.JSONTable) -> str:
    """Render JSON_TABLE with path, error/empty handling and a schema."""
    source = self.sql(expression, "this")
    path = self.sql(expression, "path")
    path = f", {path}" if path else ""
    error_handling = expression.args.get("error_handling")
    error_handling = f" {error_handling}" if error_handling else ""
    empty_handling = expression.args.get("empty_handling")
    empty_handling = f" {empty_handling}" if empty_handling else ""
    schema = self.sql(expression, "schema")
    suffix = f"{path}{error_handling}{empty_handling} {schema})"
    return self.func("JSON_TABLE", source, suffix=suffix)
def openjsoncolumndef_sql(self, expression: exp.OpenJSONColumnDef) -> str:
    """Render one column spec of an OPENJSON WITH clause."""
    name = self.sql(expression, "this")
    kind = self.sql(expression, "kind")
    path = self.sql(expression, "path")
    if path:
        path = f" {path}"
    as_json = " AS JSON" if expression.args.get("as_json") else ""
    return f"{name} {kind}{path}{as_json}"
def openjson_sql(self, expression: exp.OpenJSON) -> str:
    """Render OPENJSON with an optional WITH column schema."""
    source = self.sql(expression, "this")
    path = self.sql(expression, "path")
    path = f", {path}" if path else ""
    columns = self.expressions(expression)
    if columns:
        with_ = f" WITH ({self.seg(self.indent(columns), sep='')}{self.seg(')', sep='')}"
    else:
        with_ = ""
    return f"OPENJSON({source}{path}){with_}"
def in_sql(self, expression: exp.In) -> str:
    """Render an IN predicate against a subquery, UNNEST, field or value list."""
    is_global = " GLOBAL" if expression.args.get("is_global") else ""
    query = expression.args.get("query")
    unnest = expression.args.get("unnest")
    field = expression.args.get("field")
    # Exactly one of the right-hand-side forms applies, checked in priority order.
    if query:
        rhs = self.wrap(self.sql(query))
    elif unnest:
        rhs = self.in_unnest_op(unnest)
    elif field:
        rhs = self.sql(field)
    else:
        rhs = f"({self.expressions(expression, flat=True)})"
    lhs = self.sql(expression, "this")
    return f"{lhs}{is_global} IN {rhs}"
def in_unnest_op(self, unnest: exp.Unnest) -> str:
    """Render the right-hand side of ``x IN UNNEST(...)`` as a scalar subquery."""
    return f"(SELECT {self.sql(unnest)})"
def interval_sql(self, expression: exp.Interval) -> str:
    """Render an INTERVAL literal, honoring dialect quoting/pluralization rules."""
    unit = self.sql(expression, "unit")
    if not self.INTERVAL_ALLOWS_PLURAL_FORM:
        # Map e.g. "DAYS" -> "DAY" for dialects that require singular units.
        unit = self.TIME_PART_SINGULARS.get(unit, unit)
    if unit:
        unit = f" {unit}"
    if self.SINGLE_STRING_INTERVAL:
        # Dialects like this one put value and unit in a single quoted string.
        value = expression.this.name if expression.this else ""
        return f"INTERVAL '{value}{unit}'" if value else f"INTERVAL{unit}"
    value = self.sql(expression, "this")
    if value:
        unwrapped = isinstance(expression.this, self.UNWRAPPED_INTERVAL_VALUES)
        value = f" {value}" if unwrapped else f" ({value})"
    return f"INTERVAL{value}{unit}"
def return_sql(self, expression: exp.Return) -> str:
    """Render a RETURN statement (used inside routine bodies)."""
    return f"RETURN {self.sql(expression, 'this')}"
def reference_sql(self, expression: exp.Reference) -> str:
    """Render a REFERENCES clause of a foreign-key constraint."""
    table = self.sql(expression, "this")
    columns = self.expressions(expression, flat=True)
    if columns:
        columns = f"({columns})"
    options = self.expressions(expression, key="options", flat=True, sep=" ")
    if options:
        options = f" {options}"
    return f"REFERENCES {table}{columns}{options}"
def anonymous_sql(self, expression: exp.Anonymous) -> str:
    """Render an unrecognized function call verbatim by its stored name."""
    return self.func(self.sql(expression, "this"), *expression.expressions)
def paren_sql(self, expression: exp.Paren) -> str:
    """Render a parenthesized expression; seg/indent handle pretty layout."""
    sql = self.seg(self.indent(self.sql(expression, "this")), sep="")
    return f"({sql}{self.seg(')', sep='')}"
def neg_sql(self, expression: exp.Neg) -> str:
    """Render unary negation, e.g. ``-x``.

    A space is inserted before an operand that itself starts with "-" so
    that "- -5" does not collapse into "--5", which SQL reads as a comment.
    """
    this_sql = self.sql(expression, "this")
    # startswith() also tolerates an empty rendered operand, unlike
    # this_sql[0], which would raise IndexError.
    sep = " " if this_sql.startswith("-") else ""
    return f"-{sep}{this_sql}"
def not_sql(self, expression: exp.Not) -> str:
    """Render logical negation as ``NOT <expr>``."""
    return f"NOT {self.sql(expression, 'this')}"
def alias_sql(self, expression: exp.Alias) -> str:
    """Render ``<expr> AS <alias>``; omits AS when there is no alias."""
    alias = self.sql(expression, "alias")
    if alias:
        alias = f" AS {alias}"
    return f"{self.sql(expression, 'this')}{alias}"
def pivotalias_sql(self, expression: exp.PivotAlias) -> str:
    """Render a PIVOT alias, coercing identifier/string form per dialect.

    Note: mutates the tree in place (alias.replace) before delegating to
    alias_sql, so the identifier/literal choice sticks.
    """
    alias = expression.args["alias"]
    identifier_alias = isinstance(alias, exp.Identifier)
    if identifier_alias and not self.UNPIVOT_ALIASES_ARE_IDENTIFIERS:
        alias.replace(exp.Literal.string(alias.output_name))
    elif not identifier_alias and self.UNPIVOT_ALIASES_ARE_IDENTIFIERS:
        alias.replace(exp.to_identifier(alias.output_name))
    return self.alias_sql(expression)
def aliases_sql(self, expression: exp.Aliases) -> str:
    """Render ``<expr> AS (<alias1>, <alias2>, ...)`` for multi-aliases."""
    return f"{self.sql(expression, 'this')} AS ({self.expressions(expression, flat=True)})"
def atindex_sql(self, expression: exp.AtTimeZone) -> str:
    """Render ``<expr> AT <index>`` (AT-index operator)."""
    index = self.sql(expression, "expression")
    return f"{self.sql(expression, 'this')} AT {index}"
def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
    """Render ``<expr> AT TIME ZONE <zone>``."""
    zone = self.sql(expression, "zone")
    return f"{self.sql(expression, 'this')} AT TIME ZONE {zone}"
def fromtimezone_sql(self, expression: exp.FromTimeZone) -> str:
    """Render a timezone conversion by localizing then re-expressing in UTC."""
    this = self.sql(expression, "this")
    zone = self.sql(expression, "zone")
    return f"{this} AT TIME ZONE {zone} AT TIME ZONE 'UTC'"
def add_sql(self, expression: exp.Add) -> str:
    """Render addition with the ``+`` operator."""
    return self.binary(expression, "+")
def and_sql(
    self, expression: exp.And, stack: t.Optional[t.List[str | exp.Expression]] = None
) -> str:
    """Render AND; `stack` is the iterative work list used by connector_sql."""
    return self.connector_sql(expression, "AND", stack)
def or_sql(
    self, expression: exp.Or, stack: t.Optional[t.List[str | exp.Expression]] = None
) -> str:
    """Render OR; `stack` is the iterative work list used by connector_sql."""
    return self.connector_sql(expression, "OR", stack)
def xor_sql(
    self, expression: exp.Xor, stack: t.Optional[t.List[str | exp.Expression]] = None
) -> str:
    """Render XOR; `stack` is the iterative work list used by connector_sql."""
    return self.connector_sql(expression, "XOR", stack)
def connector_sql(
    self,
    expression: exp.Connector,
    op: str,
    stack: t.Optional[t.List[str | exp.Expression]] = None,
) -> str:
    """Render chained boolean connectors (AND/OR/XOR) iteratively.

    The first call (stack is None) seeds an explicit work stack and pops
    nodes off it, dispatching back into and_sql/or_sql/xor_sql for nested
    connectors; this avoids Python recursion limits on deeply nested
    boolean trees. Recursive calls (stack is not None) only push their
    operands/operator onto the shared stack and return the operator text.
    """
    if stack is not None:
        # Nested connector: contribute operands to the caller's stack.
        if expression.expressions:
            stack.append(self.expressions(expression, sep=f" {op} "))
        else:
            stack.append(expression.right)
            if expression.comments:
                # Inline comments attach to the operator token.
                for comment in expression.comments:
                    op += f" /*{self.pad_comment(comment)}*/"
            stack.extend((op, expression.left))
        return op
    stack = [expression]
    sqls: t.List[str] = []
    ops = set()
    while stack:
        node = stack.pop()
        if isinstance(node, exp.Connector):
            # Dispatch to e.g. and_sql, which pushes onto our stack.
            ops.add(getattr(self, f"{node.key}_sql")(node, stack))
        else:
            sql = self.sql(node)
            # Glue an operand onto a preceding operator token.
            if sqls and sqls[-1] in ops:
                sqls[-1] += f" {sql}"
            else:
                sqls.append(sql)
    sep = "\n" if self.pretty and self.text_width(sqls) > self.max_text_width else " "
    return sep.join(sqls)
def bitwiseand_sql(self, expression: exp.BitwiseAnd) -> str:
    """Render bitwise AND with ``&``."""
    return self.binary(expression, "&")
def bitwiseleftshift_sql(self, expression: exp.BitwiseLeftShift) -> str:
    """Render bitwise left shift with ``<<``."""
    return self.binary(expression, "<<")
def bitwisenot_sql(self, expression: exp.BitwiseNot) -> str:
    """Render bitwise NOT with the unary ``~`` operator."""
    return f"~{self.sql(expression, 'this')}"
def bitwiseor_sql(self, expression: exp.BitwiseOr) -> str:
    """Render bitwise OR with ``|``."""
    return self.binary(expression, "|")
def bitwiserightshift_sql(self, expression: exp.BitwiseRightShift) -> str:
    """Render bitwise right shift with ``>>``."""
    return self.binary(expression, ">>")
def bitwisexor_sql(self, expression: exp.BitwiseXor) -> str:
    """Render bitwise XOR with ``^``."""
    return self.binary(expression, "^")
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render CAST(<expr> AS <type> [FORMAT ...] [<action>]).

    `safe_prefix` lets trycast_sql reuse this to emit e.g. TRY_CAST.
    """
    format_sql = self.sql(expression, "format")
    if format_sql:
        format_sql = f" FORMAT {format_sql}"
    to_sql = self.sql(expression, "to")
    if to_sql:
        to_sql = f" {to_sql}"
    action = self.sql(expression, "action")
    if action:
        action = f" {action}"
    prefix = safe_prefix or ""
    value = self.sql(expression, "this")
    return f"{prefix}CAST({value} AS{to_sql}{format_sql}{action})"
def currentdate_sql(self, expression: exp.CurrentDate) -> str:
    """Render CURRENT_DATE, with parentheses only when a timezone is given."""
    zone = self.sql(expression, "this")
    if zone:
        return f"CURRENT_DATE({zone})"
    return "CURRENT_DATE"
def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str:
    """Render CURRENT_TIMESTAMP(...) with its optional precision argument."""
    return self.func("CURRENT_TIMESTAMP", expression.this)
def collate_sql(self, expression: exp.Collate) -> str:
    """Render COLLATE either as a function call or a binary operator per dialect."""
    if self.COLLATE_IS_FUNC:
        return self.function_fallback_sql(expression)
    return self.binary(expression, "COLLATE")
def command_sql(self, expression: exp.Command) -> str:
    """Render an opaque command: keyword plus its raw, stripped remainder."""
    return f"{self.sql(expression, 'this')} {expression.text('expression').strip()}"
def comment_sql(self, expression: exp.Comment) -> str:
    """Render ``COMMENT [IF EXISTS] ON <kind> <obj> IS <text>``."""
    target = self.sql(expression, "this")
    kind = expression.args["kind"]
    exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
    text = self.sql(expression, "expression")
    return f"COMMENT{exists_sql}ON {kind} {target} IS {text}"
def mergetreettlaction_sql(self, expression: exp.MergeTreeTTLAction) -> str:
    """Render one ClickHouse MergeTree TTL action (DELETE/RECOMPRESS/TO DISK/TO VOLUME)."""
    parts = [self.sql(expression, "this")]
    if expression.args.get("delete"):
        parts.append(" DELETE")
    recompress = self.sql(expression, "recompress")
    if recompress:
        parts.append(f" RECOMPRESS {recompress}")
    to_disk = self.sql(expression, "to_disk")
    if to_disk:
        parts.append(f" TO DISK {to_disk}")
    to_volume = self.sql(expression, "to_volume")
    if to_volume:
        parts.append(f" TO VOLUME {to_volume}")
    return "".join(parts)
def mergetreettl_sql(self, expression: exp.MergeTreeTTL) -> str:
    """Render a ClickHouse MergeTree TTL clause, flat when it's a single bare action."""
    where = self.sql(expression, "where")
    group = self.sql(expression, "group")
    aggregates = self.expressions(expression, key="aggregates")
    aggregates = self.seg("SET") + self.seg(aggregates) if aggregates else ""
    # Single action without modifiers stays on one line.
    if not (where or group or aggregates) and len(expression.expressions) == 1:
        return f"TTL {self.expressions(expression, flat=True)}"
    return f"TTL{self.seg(self.expressions(expression))}{where}{group}{aggregates}"
def transaction_sql(self, expression: exp.Transaction) -> str:
    """Render a transaction start; the default dialect spelling is BEGIN."""
    return "BEGIN"
def commit_sql(self, expression: exp.Commit) -> str:
    """Render COMMIT, with AND [NO] CHAIN when the chain flag is set explicitly."""
    chain = expression.args.get("chain")
    if chain is None:
        suffix = ""
    elif chain:
        suffix = " AND CHAIN"
    else:
        suffix = " AND NO CHAIN"
    return f"COMMIT{suffix}"
def rollback_sql(self, expression: exp.Rollback) -> str:
    """Render ROLLBACK, optionally to a named savepoint."""
    savepoint = expression.args.get("savepoint")
    suffix = f" TO {savepoint}" if savepoint else ""
    return f"ROLLBACK{suffix}"
def altercolumn_sql(self, expression: exp.AlterColumn) -> str:
    """Render an ALTER COLUMN action: type change, default, comment, or drop default.

    The branches are mutually exclusive and checked in priority order; an
    action with none of the recognized args falls through to DROP DEFAULT
    (warning first unless the "drop" flag was set).
    """
    this = self.sql(expression, "this")
    dtype = self.sql(expression, "dtype")
    if dtype:
        collate = self.sql(expression, "collate")
        collate = f" COLLATE {collate}" if collate else ""
        using = self.sql(expression, "using")
        using = f" USING {using}" if using else ""
        return f"ALTER COLUMN {this} SET DATA TYPE {dtype}{collate}{using}"
    default = self.sql(expression, "default")
    if default:
        return f"ALTER COLUMN {this} SET DEFAULT {default}"
    comment = self.sql(expression, "comment")
    if comment:
        return f"ALTER COLUMN {this} COMMENT {comment}"
    if not expression.args.get("drop"):
        self.unsupported("Unsupported ALTER COLUMN syntax")
    return f"ALTER COLUMN {this} DROP DEFAULT"
def renametable_sql(self, expression: exp.RenameTable) -> str:
    """Render a RENAME TO action, stripping db qualifiers when unsupported."""
    if not self.RENAME_TABLE_WITH_DB:
        # Remove db from tables
        expression = expression.transform(
            lambda n: exp.table_(n.this) if isinstance(n, exp.Table) else n
        ).assert_is(exp.RenameTable)
    this = self.sql(expression, "this")
    return f"RENAME TO {this}"
def renamecolumn_sql(self, expression: exp.RenameColumn) -> str:
    """Render ``RENAME COLUMN [IF EXISTS] <old> TO <new>``."""
    exists = " IF EXISTS" if expression.args.get("exists") else ""
    source = self.sql(expression, "this")
    target = self.sql(expression, "to")
    return f"RENAME COLUMN{exists} {source} TO {target}"
def altertable_sql(self, expression: exp.AlterTable) -> str:
    """Render an ALTER TABLE statement from its list of actions.

    The rendering of the action list depends on the type of the first
    action: column defs go through add_column_sql, a schema becomes an
    ADD COLUMNS clause, and everything else renders as a flat list.
    """
    actions = expression.args["actions"]
    if isinstance(actions[0], exp.ColumnDef):
        actions = self.add_column_sql(expression)
    elif isinstance(actions[0], exp.Schema):
        actions = self.expressions(expression, key="actions", prefix="ADD COLUMNS ")
    else:
        # All remaining action kinds (including DROP/Delete) render as a
        # flat list; the previous exp.Delete branch duplicated this exact
        # call and was folded in here.
        actions = self.expressions(expression, key="actions", flat=True)
    exists = " IF EXISTS" if expression.args.get("exists") else ""
    only = " ONLY" if expression.args.get("only") else ""
    options = self.expressions(expression, key="options")
    options = f", {options}" if options else ""
    return f"ALTER TABLE{exists}{only} {self.sql(expression, 'this')} {actions}{options}"
def add_column_sql(self, expression: exp.AlterTable) -> str:
    """Render ADD COLUMN actions, with or without the COLUMN keyword per dialect."""
    if self.ALTER_TABLE_INCLUDE_COLUMN_KEYWORD:
        return self.expressions(
            expression,
            key="actions",
            prefix="ADD COLUMN ",
        )
    return f"ADD {self.expressions(expression, key='actions', flat=True)}"
def droppartition_sql(self, expression: exp.DropPartition) -> str:
    """Render ``DROP [IF EXISTS] <partitions>``."""
    partitions = self.expressions(expression)
    exists = " IF EXISTS " if expression.args.get("exists") else " "
    return f"DROP{exists}{partitions}"
def addconstraint_sql(self, expression: exp.AddConstraint) -> str:
    """Render an ADD <constraint list> action."""
    return f"ADD {self.expressions(expression)}"
def distinct_sql(self, expression: exp.Distinct) -> str:
    """Render DISTINCT, emulating multi-argument DISTINCT with CASE when needed."""
    this = self.expressions(expression, flat=True)
    if not self.MULTI_ARG_DISTINCT and len(expression.expressions) > 1:
        # Emulate DISTINCT(a, b, ...) by collapsing to NULL whenever any
        # argument is NULL, then distinct-ing the tuple expression.
        case = exp.case()
        for arg in expression.expressions:
            case = case.when(arg.is_(exp.null()), exp.null())
        this = self.sql(case.else_(f"({this})"))
    this = f" {this}" if this else ""
    on = self.sql(expression, "on")
    on = f" ON {on}" if on else ""
    return f"DISTINCT{this}{on}"
def ignorenulls_sql(self, expression: exp.IgnoreNulls) -> str:
    """Render IGNORE NULLS, placing it per dialect via _embed_ignore_nulls."""
    return self._embed_ignore_nulls(expression, "IGNORE NULLS")
def respectnulls_sql(self, expression: exp.RespectNulls) -> str:
    """Render RESPECT NULLS, placing it per dialect via _embed_ignore_nulls."""
    return self._embed_ignore_nulls(expression, "RESPECT NULLS")
def havingmax_sql(self, expression: exp.HavingMax) -> str:
    """Render ``<expr> HAVING MAX|MIN <arg>`` (e.g. for ANY_VALUE)."""
    kind = "MAX" if expression.args.get("max") else "MIN"
    lhs = self.sql(expression, "this")
    rhs = self.sql(expression, "expression")
    return f"{lhs} HAVING {kind} {rhs}"
def _embed_ignore_nulls(self, expression: exp.IgnoreNulls | exp.RespectNulls, text: str) -> str:
    """Place IGNORE/RESPECT NULLS either inside the aggregate call or after it.

    When the dialect embeds the modifier in the function call, the node is
    re-attached (via in-place tree mutation) just above the aggregate's
    argument, below any HAVING MAX / ORDER / LIMIT modifiers, so it renders
    inside the parentheses; the meta "inline" flag prevents re-entry.
    """
    if self.IGNORE_NULLS_IN_FUNC and not expression.meta.get("inline"):
        # The first modifier here will be the one closest to the AggFunc's arg
        mods = sorted(
            expression.find_all(exp.HavingMax, exp.Order, exp.Limit),
            key=lambda x: 0
            if isinstance(x, exp.HavingMax)
            else (1 if isinstance(x, exp.Order) else 2),
        )
        if mods:
            mod = mods[0]
            this = expression.__class__(this=mod.this.copy())
            this.meta["inline"] = True
            mod.this.replace(this)
            return self.sql(expression.this)
        agg_func = expression.find(exp.AggFunc)
        if agg_func:
            # Splice the text just before the aggregate's closing paren.
            return self.sql(agg_func)[:-1] + f" {text})"
    return f"{self.sql(expression, 'this')} {text}"
def intdiv_sql(self, expression: exp.IntDiv) -> str:
    """Render integer division as CAST(<a> / <b> AS INT)."""
    quotient = exp.Div(this=expression.this, expression=expression.expression)
    cast = exp.Cast(this=quotient, to=exp.DataType(this=exp.DataType.Type.INT))
    return self.sql(cast)
def dpipe_sql(self, expression: exp.DPipe) -> str:
    """Render string concatenation; safe concat becomes CONCAT with text casts."""
    if self.dialect.STRICT_STRING_CONCAT and expression.args.get("safe"):
        return self.func("CONCAT", *(exp.cast(e, "text") for e in expression.flatten()))
    return self.binary(expression, "||")
def div_sql(self, expression: exp.Div) -> str:
    """Render division, reconciling typed vs. float semantics across dialects.

    Mutates the operand nodes in place: wraps the divisor in NULLIF for safe
    division, and casts an operand to DOUBLE (or the whole result to BIGINT)
    to preserve the source dialect's division semantics in the target.
    """
    l, r = expression.left, expression.right
    if not self.dialect.SAFE_DIVISION and expression.args.get("safe"):
        # Safe division: divide-by-zero yields NULL instead of an error.
        r.replace(exp.Nullif(this=r.copy(), expression=exp.Literal.number(0)))
    if self.dialect.TYPED_DIVISION and not expression.args.get("typed"):
        # Source wants float division but target divides integers as integers.
        if not l.is_type(*exp.DataType.REAL_TYPES) and not r.is_type(*exp.DataType.REAL_TYPES):
            l.replace(exp.cast(l.copy(), to=exp.DataType.Type.DOUBLE))
    elif not self.dialect.TYPED_DIVISION and expression.args.get("typed"):
        # Source wants integer division but target divides as floats.
        if l.is_type(*exp.DataType.INTEGER_TYPES) and r.is_type(*exp.DataType.INTEGER_TYPES):
            return self.sql(
                exp.cast(
                    l / r,
                    to=exp.DataType.Type.BIGINT,
                )
            )
    return self.binary(expression, "/")
def overlaps_sql(self, expression: exp.Overlaps) -> str:
    """Render the OVERLAPS operator."""
    return self.binary(expression, "OVERLAPS")
def distance_sql(self, expression: exp.Distance) -> str:
    """Render the distance operator ``<->``."""
    return self.binary(expression, "<->")
def dot_sql(self, expression: exp.Dot) -> str:
    """Render attribute/member access as ``a.b``."""
    return f"{self.sql(expression, 'this')}.{self.sql(expression, 'expression')}"
def eq_sql(self, expression: exp.EQ) -> str:
    """Render equality with ``=``."""
    return self.binary(expression, "=")
def propertyeq_sql(self, expression: exp.PropertyEQ) -> str:
    """Render property assignment with ``:=``."""
    return self.binary(expression, ":=")
def escape_sql(self, expression: exp.Escape) -> str:
    """Render a LIKE ... ESCAPE clause's operator."""
    return self.binary(expression, "ESCAPE")
def glob_sql(self, expression: exp.Glob) -> str:
    """Render the GLOB pattern-match operator."""
    return self.binary(expression, "GLOB")
def gt_sql(self, expression: exp.GT) -> str:
    """Render greater-than with ``>``."""
    return self.binary(expression, ">")
def gte_sql(self, expression: exp.GTE) -> str:
    """Render greater-than-or-equal with ``>=``."""
    return self.binary(expression, ">=")
def ilike_sql(self, expression: exp.ILike) -> str:
    """Render case-insensitive LIKE (ILIKE)."""
    return self.binary(expression, "ILIKE")
def ilikeany_sql(self, expression: exp.ILikeAny) -> str:
    """Render ILIKE ANY."""
    return self.binary(expression, "ILIKE ANY")
def is_sql(self, expression: exp.Is) -> str:
    """Render IS; rewrites ``x IS TRUE/FALSE`` when the dialect forbids it."""
    if not self.IS_BOOL_ALLOWED and isinstance(expression.expression, exp.Boolean):
        # IS TRUE -> bare expression, IS FALSE -> NOT expression.
        return self.sql(
            expression.this if expression.expression.this else exp.not_(expression.this)
        )
    return self.binary(expression, "IS")
def like_sql(self, expression: exp.Like) -> str:
    """Render LIKE."""
    return self.binary(expression, "LIKE")
def likeany_sql(self, expression: exp.LikeAny) -> str:
    """Render LIKE ANY."""
    return self.binary(expression, "LIKE ANY")
def similarto_sql(self, expression: exp.SimilarTo) -> str:
    """Render SIMILAR TO."""
    return self.binary(expression, "SIMILAR TO")
def lt_sql(self, expression: exp.LT) -> str:
    """Render less-than with ``<``."""
    return self.binary(expression, "<")
def lte_sql(self, expression: exp.LTE) -> str:
    """Render less-than-or-equal with ``<=``."""
    return self.binary(expression, "<=")
def mod_sql(self, expression: exp.Mod) -> str:
    """Render modulo with ``%``."""
    return self.binary(expression, "%")
def mul_sql(self, expression: exp.Mul) -> str:
    """Render multiplication with ``*``."""
    return self.binary(expression, "*")
def neq_sql(self, expression: exp.NEQ) -> str:
    """Render inequality with ``<>``."""
    return self.binary(expression, "<>")
def nullsafeeq_sql(self, expression: exp.NullSafeEQ) -> str:
    """Render null-safe equality as IS NOT DISTINCT FROM."""
    return self.binary(expression, "IS NOT DISTINCT FROM")
def nullsafeneq_sql(self, expression: exp.NullSafeNEQ) -> str:
    """Render null-safe inequality as IS DISTINCT FROM."""
    return self.binary(expression, "IS DISTINCT FROM")
def slice_sql(self, expression: exp.Slice) -> str:
    """Render a slice with ``:`` (e.g. array[1:2])."""
    return self.binary(expression, ":")
def sub_sql(self, expression: exp.Sub) -> str:
    """Render subtraction with ``-``."""
    return self.binary(expression, "-")
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST by delegating to cast_sql with the TRY_ prefix."""
    return self.cast_sql(expression, safe_prefix="TRY_")
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG, reconciling base-first vs. base-last argument order.

    If the dialect has no explicit base argument (LOG_BASE_FIRST is None),
    bases 2 and 10 map to LOG2/LOG10; other bases are unsupported.
    """
    this = expression.this
    expr = expression.expression
    if self.dialect.LOG_BASE_FIRST is False:
        this, expr = expr, this
    elif self.dialect.LOG_BASE_FIRST is None and expr:
        if this.name in ("2", "10"):
            return self.func(f"LOG{this.name}", expr)
        self.unsupported(f"Unsupported logarithm with base {self.sql(this)}")
    return self.func("LOG", this, expr)
def use_sql(self, expression: exp.Use) -> str:
    """Render ``USE [<kind>] [<target>]``."""
    kind = self.sql(expression, "kind")
    target = self.sql(expression, "this")
    parts = ["USE"]
    if kind:
        parts.append(f" {kind}")
    if target:
        parts.append(f" {target}")
    return "".join(parts)
def binary(self, expression: exp.Binary, op: str) -> str:
    """Render a generic binary operator, attaching any node comments to the op."""
    op = self.maybe_comment(op, comments=expression.comments)
    return f"{self.sql(expression, 'this')} {op} {self.sql(expression, 'expression')}"
def function_fallback_sql(self, expression: exp.Func) -> str:
    """Render any function node generically from its declared argument order."""
    # Collect args in the order declared by arg_types, flattening list args.
    args = []
    for key in expression.arg_types:
        value = expression.args.get(key)
        if isinstance(value, list):
            args.extend(value)
        elif value is not None:
            args.append(value)
    if self.normalize_functions:
        name = expression.sql_name()
    else:
        # Prefer the name as originally parsed, when it was recorded.
        name = (expression._meta and expression.meta.get("name")) or expression.sql_name()
    return self.func(name, *args)
def func(
    self,
    name: str,
    *args: t.Optional[exp.Expression | str],
    prefix: str = "(",
    suffix: str = ")",
) -> str:
    """Render a function call; prefix/suffix let callers inject extra clauses."""
    return f"{self.normalize_func(name)}{prefix}{self.format_args(*args)}{suffix}"
def format_args(self, *args: t.Optional[str | exp.Expression]) -> str:
    """Join non-None arguments with commas, wrapping onto lines in pretty mode."""
    arg_sqls = tuple(self.sql(arg) for arg in args if arg is not None)
    if self.pretty and self.text_width(arg_sqls) > self.max_text_width:
        return self.indent("\n" + ",\n".join(arg_sqls) + "\n", skip_first=True, skip_last=True)
    return ", ".join(arg_sqls)
def text_width(self, args: t.Iterable) -> int:
    """Return the total character width of the given SQL fragments."""
    return sum(map(len, args))
def format_time(self, expression: exp.Expression) -> t.Optional[str]:
    """Translate the node's "format" string back into the dialect's time tokens."""
    return format_time(
        self.sql(expression, "format"),
        self.dialect.INVERSE_TIME_MAPPING,
        self.dialect.INVERSE_TIME_TRIE,
    )
def expressions(
    self,
    expression: t.Optional[exp.Expression] = None,
    key: t.Optional[str] = None,
    sqls: t.Optional[t.Collection[str | exp.Expression]] = None,
    flat: bool = False,
    indent: bool = True,
    skip_first: bool = False,
    sep: str = ", ",
    prefix: str = "",
) -> str:
    """Render a list of child expressions.

    The list comes either from ``expression.args[key or "expressions"]`` or
    from pre-rendered ``sqls``. ``flat`` joins with ``sep`` on one line;
    otherwise each item is laid out (one per line in pretty mode), honoring
    leading_comma and preserving each item's trailing comments.
    """
    expressions = expression.args.get(key or "expressions") if expression else sqls
    if not expressions:
        return ""
    if flat:
        return sep.join(sql for sql in (self.sql(e) for e in expressions) if sql)
    num_sqls = len(expressions)
    # These are calculated once in case we have the leading_comma / pretty option set, correspondingly
    pad = " " * self.pad
    stripped_sep = sep.strip()
    result_sqls = []
    for i, e in enumerate(expressions):
        # comment=False here: comments are re-attached after the separator below.
        sql = self.sql(e, comment=False)
        if not sql:
            continue
        comments = self.maybe_comment("", e) if isinstance(e, exp.Expression) else ""
        if self.pretty:
            if self.leading_comma:
                result_sqls.append(f"{sep if i > 0 else pad}{prefix}{sql}{comments}")
            else:
                result_sqls.append(
                    f"{prefix}{sql}{stripped_sep if i + 1 < num_sqls else ''}{comments}"
                )
        else:
            result_sqls.append(f"{prefix}{sql}{comments}{sep if i + 1 < num_sqls else ''}")
    result_sql = "\n".join(result_sqls) if self.pretty else "".join(result_sqls)
    return self.indent(result_sql, skip_first=skip_first) if indent else result_sql
def op_expressions(self, op: str, expression: exp.Expression, flat: bool = False) -> str:
    """Render ``<op> <expressions>``, flat inside Properties, segmented otherwise."""
    flat = flat or isinstance(expression.parent, exp.Properties)
    expressions_sql = self.expressions(expression, flat=flat)
    if flat:
        return f"{op} {expressions_sql}"
    return f"{self.seg(op)}{self.sep() if expressions_sql else ''}{expressions_sql}"
def naked_property(self, expression: exp.Property) -> str:
    """Render a property as ``NAME value`` using the class-to-name registry."""
    property_name = exp.Properties.PROPERTY_TO_NAME.get(expression.__class__)
    if not property_name:
        self.unsupported(f"Unsupported property {expression.__class__.__name__}")
    return f"{property_name} {self.sql(expression, 'this')}"
def tag_sql(self, expression: exp.Tag) -> str:
    """Render a tagged value wrapped in its stored prefix/postfix strings."""
    return f"{expression.args.get('prefix')}{self.sql(expression.this)}{expression.args.get('postfix')}"
def token_sql(self, token_type: TokenType) -> str:
    """Map a token type to its SQL text, falling back to the enum member name."""
    return self.TOKEN_MAPPING.get(token_type, token_type.name)
def userdefinedfunction_sql(self, expression: exp.UserDefinedFunction) -> str:
    """Render a UDF signature; parameter names are emitted without identifier quoting."""
    this = self.sql(expression, "this")
    expressions = self.no_identify(self.expressions, expression)
    expressions = (
        self.wrap(expressions) if expression.args.get("wrapped") else f" {expressions}"
    )
    return f"{this}{expressions}"
def joinhint_sql(self, expression: exp.JoinHint) -> str:
    """Render a join hint as ``NAME(arg, ...)``."""
    args = self.expressions(expression, flat=True)
    return f"{self.sql(expression, 'this')}({args})"
def kwarg_sql(self, expression: exp.Kwarg) -> str:
    """Render a named argument with ``=>``."""
    return self.binary(expression, "=>")
def when_sql(self, expression: exp.When) -> str:
    """Render one WHEN [NOT] MATCHED ... THEN clause of a MERGE statement."""
    matched = "MATCHED" if expression.args["matched"] else "NOT MATCHED"
    source = " BY SOURCE" if self.MATCHED_BY_SOURCE and expression.args.get("source") else ""
    condition = self.sql(expression, "condition")
    condition = f" AND {condition}" if condition else ""
    then_expression = expression.args.get("then")
    if isinstance(then_expression, exp.Insert):
        then = f"INSERT {self.sql(then_expression, 'this')}"
        if "expression" in then_expression.args:
            then += f" VALUES {self.sql(then_expression, 'expression')}"
    elif isinstance(then_expression, exp.Update):
        # UPDATE * (star) keeps its special form; otherwise UPDATE SET a = b, ...
        if isinstance(then_expression.args.get("expressions"), exp.Star):
            then = f"UPDATE {self.sql(then_expression, 'expressions')}"
        else:
            then = f"UPDATE SET {self.expressions(then_expression, flat=True)}"
    else:
        then = self.sql(then_expression)
    return f"WHEN {matched}{source}{condition} THEN {then}"
def merge_sql(self, expression: exp.Merge) -> str:
    """Render a MERGE INTO statement, including any leading CTEs.

    For T-SQL table hints, the target's alias node is popped from the tree
    (in-place mutation) so it can be re-emitted after the hint.
    """
    table = expression.this
    table_alias = ""
    hints = table.args.get("hints")
    if hints and table.alias and isinstance(hints[0], exp.WithTableHint):
        # T-SQL syntax is MERGE ... <target_table> [WITH (<merge_hint>)] [[AS] table_alias]
        table_alias = f" AS {self.sql(table.args['alias'].pop())}"
    this = self.sql(table)
    using = f"USING {self.sql(expression, 'using')}"
    on = f"ON {self.sql(expression, 'on')}"
    expressions = self.expressions(expression, sep=" ")
    return self.prepend_ctes(
        expression, f"MERGE INTO {this}{table_alias} {using} {on} {expressions}"
    )
def tochar_sql(self, expression: exp.ToChar) -> str:
    """Render TO_CHAR as a plain text cast; format strings are unsupported here."""
    if expression.args.get("format"):
        self.unsupported("Format argument unsupported for TO_CHAR/TO_VARCHAR function")
    return self.sql(exp.cast(expression.this, "text"))
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER, degrading to CAST(... AS DOUBLE) when unsupported."""
    fmt = expression.args.get("format")
    if not self.SUPPORTS_TO_NUMBER:
        self.unsupported("Unsupported TO_NUMBER function")
        return self.sql(exp.cast(expression.this, "double"))
    if not fmt:
        # TO_NUMBER without a format can't be transpiled faithfully.
        self.unsupported("Conversion format is required for TO_NUMBER")
        return self.sql(exp.cast(expression.this, "double"))
    return self.func("TO_NUMBER", expression.this, fmt)
def dictproperty_sql(self, expression: exp.DictProperty) -> str:
    """Render a ClickHouse dictionary property: ``NAME(KIND(settings...))``."""
    this = self.sql(expression, "this")
    kind = self.sql(expression, "kind")
    settings_sql = self.expressions(expression, key="settings", sep=" ")
    args = f"({self.sep('')}{settings_sql}{self.seg(')', sep='')}" if settings_sql else "()"
    return f"{this}({kind}{args})"
def dictrange_sql(self, expression: exp.DictRange) -> str:
    """Render a dictionary range property: ``NAME(MIN <min> MAX <max>)``."""
    this = self.sql(expression, "this")
    # Renamed locals so they don't shadow the builtins min/max.
    max_sql = self.sql(expression, "max")
    min_sql = self.sql(expression, "min")
    return f"{this}(MIN {min_sql} MAX {max_sql})"
def dictsubproperty_sql(self, expression: exp.DictSubProperty) -> str:
    """Render a dictionary sub-property as ``NAME value``."""
    return f"{self.sql(expression, 'this')} {self.sql(expression, 'value')}"
def oncluster_sql(self, expression: exp.OnCluster) -> str:
    """ON CLUSTER is dropped by default; dialects that support it override this."""
    return ""
def clusteredbyproperty_sql(self, expression: exp.ClusteredByProperty) -> str:
    """Render ``CLUSTERED BY (...) [SORTED BY (...)] INTO <n> BUCKETS``."""
    cluster_cols = self.expressions(expression, key="expressions", flat=True)
    sorted_by = self.expressions(expression, key="sorted_by", flat=True)
    if sorted_by:
        sorted_by = f" SORTED BY ({sorted_by})"
    buckets = self.sql(expression, "buckets")
    return f"CLUSTERED BY ({cluster_cols}){sorted_by} INTO {buckets} BUCKETS"
def anyvalue_sql(self, expression: exp.AnyValue) -> str:
    """Render ANY_VALUE, embedding a HAVING MAX/MIN modifier when present."""
    this = self.sql(expression, "this")
    having = self.sql(expression, "having")
    if having:
        this = f"{this} HAVING {'MAX' if expression.args.get('max') else 'MIN'} {having}"
    return self.func("ANY_VALUE", this)
def querytransform_sql(self, expression: exp.QueryTransform) -> str:
    """Render a Hive TRANSFORM(...) USING <script> clause with its row formats."""
    transform = self.func("TRANSFORM", *expression.expressions)
    row_format_before = self.sql(expression, "row_format_before")
    row_format_before = f" {row_format_before}" if row_format_before else ""
    record_writer = self.sql(expression, "record_writer")
    record_writer = f" RECORDWRITER {record_writer}" if record_writer else ""
    using = f" USING {self.sql(expression, 'command_script')}"
    schema = self.sql(expression, "schema")
    schema = f" AS {schema}" if schema else ""
    row_format_after = self.sql(expression, "row_format_after")
    row_format_after = f" {row_format_after}" if row_format_after else ""
    record_reader = self.sql(expression, "record_reader")
    record_reader = f" RECORDREADER {record_reader}" if record_reader else ""
    return f"{transform}{row_format_before}{record_writer}{using}{schema}{row_format_after}{record_reader}"
def indexconstraintoption_sql(self, expression: exp.IndexConstraintOption) -> str:
    """Render exactly one MySQL index option; the args are mutually exclusive."""
    key_block_size = self.sql(expression, "key_block_size")
    if key_block_size:
        return f"KEY_BLOCK_SIZE = {key_block_size}"
    using = self.sql(expression, "using")
    if using:
        return f"USING {using}"
    parser = self.sql(expression, "parser")
    if parser:
        return f"WITH PARSER {parser}"
    comment = self.sql(expression, "comment")
    if comment:
        return f"COMMENT {comment}"
    visible = expression.args.get("visible")
    if visible is not None:
        return "VISIBLE" if visible else "INVISIBLE"
    engine_attr = self.sql(expression, "engine_attr")
    if engine_attr:
        return f"ENGINE_ATTRIBUTE = {engine_attr}"
    secondary_engine_attr = self.sql(expression, "secondary_engine_attr")
    if secondary_engine_attr:
        return f"SECONDARY_ENGINE_ATTRIBUTE = {secondary_engine_attr}"
    # No recognized option was set on the node.
    self.unsupported("Unsupported index constraint option.")
    return ""
def checkcolumnconstraint_sql(self, expression: exp.CheckColumnConstraint) -> str:
    """Render ``CHECK (<condition>)[ ENFORCED]``."""
    condition = self.sql(expression, "this")
    enforced = " ENFORCED" if expression.args.get("enforced") else ""
    return f"CHECK ({condition}){enforced}"
def indexcolumnconstraint_sql(self, expression: exp.IndexColumnConstraint) -> str:
    """Render an inline index constraint: ``[KIND ]INDEX [name] [USING t] ...``."""
    kind = self.sql(expression, "kind")
    kind = f"{kind} INDEX" if kind else "INDEX"
    name = self.sql(expression, "this")
    if name:
        name = f" {name}"
    index_type = self.sql(expression, "index_type")
    if index_type:
        index_type = f" USING {index_type}"
    schema = self.sql(expression, "schema")
    if schema:
        schema = f" {schema}"
    options = self.expressions(expression, key="options", sep=" ")
    if options:
        options = f" {options}"
    return f"{kind}{name}{index_type}{schema}{options}"
def nvl2_sql(self, expression: exp.Nvl2) -> str:
    """Render NVL2 natively or emulate it with CASE WHEN x IS NOT NULL."""
    if self.NVL2_SUPPORTED:
        return self.function_fallback_sql(expression)
    case = exp.Case().when(
        expression.this.is_(exp.null()).not_(copy=False),
        expression.args["true"],
        copy=False,
    )
    else_cond = expression.args.get("false")
    if else_cond:
        case.else_(else_cond, copy=False)
    return self.sql(case)
def comprehension_sql(self, expression: exp.Comprehension) -> str:
    """Render a list comprehension: ``<expr> FOR <var> IN <iter>[ IF <cond>]``."""
    body = self.sql(expression, "this")
    var = self.sql(expression, "expression")
    iterator = self.sql(expression, "iterator")
    condition = self.sql(expression, "condition")
    if condition:
        condition = f" IF {condition}"
    return f"{body} FOR {var} IN {iterator}{condition}"
def columnprefix_sql(self, expression: exp.ColumnPrefix) -> str:
    """Render a column prefix index spec: ``col(length)``."""
    return f"{self.sql(expression, 'this')}({self.sql(expression, 'expression')})"
def opclass_sql(self, expression: exp.Opclass) -> str:
    """Render a column followed by its operator class (Postgres index syntax)."""
    return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
def predict_sql(self, expression: exp.Predict) -> str:
    """Render ML.PREDICT-style calls: PREDICT(MODEL m, TABLE t[, params])."""
    model = self.sql(expression, "this")
    model = f"MODEL {model}"
    table = self.sql(expression, "expression")
    # A subquery input keeps its own form; anything else gets the TABLE keyword.
    table = f"TABLE {table}" if not isinstance(expression.expression, exp.Subquery) else table
    parameters = self.sql(expression, "params_struct")
    return self.func("PREDICT", model, table, parameters or None)
def forin_sql(self, expression: exp.ForIn) -> str:
    """Render a procedural loop: ``FOR <var> DO <body>``."""
    loop_var = self.sql(expression, "this")
    body = self.sql(expression, "expression")
    return f"FOR {loop_var} DO {body}"
def refresh_sql(self, expression: exp.Refresh) -> str:
    """Render REFRESH; a string literal target omits the TABLE keyword."""
    target = self.sql(expression, "this")
    keyword = "" if isinstance(expression.this, exp.Literal) else "TABLE "
    return f"REFRESH {keyword}{target}"
def operator_sql(self, expression: exp.Operator) -> str:
    """Render Postgres's OPERATOR(<op>) custom-operator invocation."""
    return self.binary(expression, f"OPERATOR({self.sql(expression, 'operator')})")
def toarray_sql(self, expression: exp.ToArray) -> str:
    """Render TO_ARRAY: pass arrays through, else wrap non-NULL values in an array."""
    arg = expression.this
    if not arg.type:
        # Type info is needed to detect arrays; annotate on demand.
        from sqlglot.optimizer.annotate_types import annotate_types
        arg = annotate_types(arg)
    if arg.is_type(exp.DataType.Type.ARRAY):
        return self.sql(arg)
    cond_for_null = arg.is_(exp.null())
    return self.sql(exp.func("IF", cond_for_null, exp.null(), exp.array(arg, copy=False)))
def tsordstotime_sql(self, expression: exp.TsOrDsToTime) -> str:
    """Render TS_OR_DS_TO_TIME as a TIME cast, skipping redundant casts."""
    this = expression.this
    if isinstance(this, exp.TsOrDsToTime) or this.is_type(exp.DataType.Type.TIME):
        return self.sql(this)
    return self.sql(exp.cast(this, "time"))
def tsordstodate_sql(self, expression: exp.TsOrDsToDate) -> str:
    """Render TS_OR_DS_TO_DATE as a DATE cast, parsing via STR_TO_TIME when a
    non-default format string is present; redundant casts are skipped."""
    this = expression.this
    time_format = self.format_time(expression)
    if time_format and time_format not in (self.dialect.TIME_FORMAT, self.dialect.DATE_FORMAT):
        return self.sql(
            exp.cast(exp.StrToTime(this=this, format=expression.args["format"]), "date")
        )
    if isinstance(this, exp.TsOrDsToDate) or this.is_type(exp.DataType.Type.DATE):
        return self.sql(this)
    return self.sql(exp.cast(this, "date"))
def unixdate_sql(self, expression: exp.UnixDate) -> str:
    """Render UNIX_DATE as days elapsed since the 1970-01-01 epoch."""
    return self.sql(
        exp.func(
            "DATEDIFF",
            expression.this,
            exp.cast(exp.Literal.string("1970-01-01"), "date"),
            "day",
        )
    )
def lastday_sql(self, expression: exp.LastDay) -> str:
    """Render LAST_DAY, dropping an unsupported date-part argument with a warning."""
    if self.LAST_DAY_SUPPORTS_DATE_PART:
        return self.function_fallback_sql(expression)
    unit = expression.text("unit")
    if unit and unit != "MONTH":
        # Only MONTH semantics can be preserved without the unit argument.
        self.unsupported("Date parts are not supported in LAST_DAY.")
    return self.func("LAST_DAY", expression.this)
def arrayany_sql(self, expression: exp.ArrayAny) -> str:
    """Render ARRAY_ANY, emulating it with ARRAY_FILTER/ARRAY_SIZE when possible.

    The emulation is: array is empty OR filtering it leaves something —
    which matches ARRAY_ANY's truth table for non-NULL inputs.
    """
    if self.CAN_IMPLEMENT_ARRAY_ANY:
        filtered = exp.ArrayFilter(this=expression.this, expression=expression.expression)
        filtered_not_empty = exp.ArraySize(this=filtered).neq(0)
        original_is_empty = exp.ArraySize(this=expression.this).eq(0)
        return self.sql(exp.paren(original_is_empty.or_(filtered_not_empty)))
    from sqlglot.dialects import Dialect
    # SQLGlot's executor supports ARRAY_ANY, so we don't wanna warn for the SQLGlot dialect
    if self.dialect.__class__ != Dialect:
        self.unsupported("ARRAY_ANY is unsupported")
    return self.function_fallback_sql(expression)
def _jsonpathkey_sql(self, expression: exp.JSONPathKey) -> str:
    """Render one JSON-path key segment: dot form when safe, brackets otherwise."""
    this = expression.this
    if isinstance(this, exp.JSONPathWildcard):
        this = self.json_path_part(this)
        return f".{this}" if this else ""
    if exp.SAFE_IDENTIFIER_RE.match(this):
        # Plain identifier keys can use the dot notation directly.
        return f".{this}"
    this = self.json_path_part(this)
    return f"[{this}]" if self.JSON_PATH_BRACKETED_KEY_SUPPORTED else f".{this}"
def _jsonpathsubscript_sql(self, expression: exp.JSONPathSubscript) -> str:
    """Render a JSON-path subscript segment as ``[<index>]``."""
    index = self.json_path_part(expression.this)
    if not index:
        return ""
    return f"[{index}]"
def _simplify_unless_literal(self, expression: E) -> E:
    """Constant-fold the expression unless it is already a literal."""
    if not isinstance(expression, exp.Literal):
        from sqlglot.optimizer.simplify import simplify
        expression = simplify(expression, dialect=self.dialect)
    return expression
def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
    """Render GENERATE_SERIES, discarding the exclusive-end flag (mutates the node)."""
    expression.set("is_end_exclusive", None)
    return self.function_fallback_sql(expression)
def struct_sql(self, expression: exp.Struct) -> str:
    """Render STRUCT(...), normalizing `key := value` entries into aliases."""
    normalized = []
    for arg in expression.expressions:
        if isinstance(arg, exp.PropertyEQ):
            # STRUCT(a := 1) and STRUCT(1 AS a) are equivalent spellings.
            normalized.append(exp.alias_(arg.expression, arg.this))
        else:
            normalized.append(arg)

    expression.set("expressions", normalized)
    return self.function_fallback_sql(expression)
def partitionrange_sql(self, expression: exp.PartitionRange) -> str:
    """Render a partition range as `<low> TO <high>`."""
    lower_bound = self.sql(expression, "this")
    upper_bound = self.sql(expression, "expression")
    return f"{lower_bound} TO {upper_bound}"
def truncatetable_sql(self, expression: exp.TruncateTable) -> str:
    """Render TRUNCATE TABLE/DATABASE with its optional trailing clauses."""
    target = "DATABASE" if expression.args.get("is_database") else "TABLE"
    exists = " IF EXISTS" if expression.args.get("exists") else ""
    # The table list always carries a leading space, even when empty.
    tables = f" {self.expressions(expression)}"

    # Optional trailing clauses, in the order the grammar emits them.
    trailing = ""
    for arg_key, suffix in (
        ("cluster", ""),
        ("identity", " IDENTITY"),
        ("option", ""),
        ("partition", ""),
    ):
        rendered = self.sql(expression, arg_key)
        if rendered:
            trailing += f" {rendered}{suffix}"

    return f"TRUNCATE {target}{exists}{tables}{trailing}"
# This transpiles T-SQL's CONVERT function
# https://learn.microsoft.com/en-us/sql/t-sql/functions/cast-and-convert-transact-sql?view=sql-server-ver16
def convert_sql(self, expression: exp.Convert) -> str:
    """Transpile T-SQL CONVERT(type, value[, style]) into CAST/TRY_CAST or a
    string/date formatting function.

    When the optional integer `style` maps to a known format string, the
    conversion is rendered as STR_TO_DATE / STR_TO_TIME / TIME_TO_STR;
    otherwise it falls back to a plain (TRY_)CAST.
    """
    to = expression.this
    value = expression.expression
    style = expression.args.get("style")
    safe = expression.args.get("safe")
    strict = expression.args.get("strict")

    # Nothing to render without both a target type and a value.
    if not to or not value:
        return ""

    # Retrieve length of datatype and override to default if not specified
    # (T-SQL defaults parameterizable text types like VARCHAR to length 30).
    if not seq_get(to.expressions, 0) and to.this in self.PARAMETERIZABLE_TEXT_TYPES:
        to = exp.DataType.build(to.this, expressions=[exp.Literal.number(30)], nested=False)

    transformed: t.Optional[exp.Expression] = None
    # `strict` selects CAST (raises on failure) vs TRY_CAST (yields NULL).
    cast = exp.Cast if strict else exp.TryCast

    # Check whether a conversion with format (T-SQL calls this 'style') is applicable
    if isinstance(style, exp.Literal) and style.is_int:
        from sqlglot.dialects.tsql import TSQL

        style_value = style.name
        converted_style = TSQL.CONVERT_FORMAT_MAPPING.get(style_value)
        if not converted_style:
            self.unsupported(f"Unsupported T-SQL 'style' value: {style_value}")

        # NOTE(review): when the style is unknown, `converted_style` is None
        # and is still wrapped in a literal below — confirm downstream
        # rendering of a None-valued format literal is intended.
        fmt = exp.Literal.string(converted_style)

        if to.this == exp.DataType.Type.DATE:
            transformed = exp.StrToDate(this=value, format=fmt)
        elif to.this == exp.DataType.Type.DATETIME:
            transformed = exp.StrToTime(this=value, format=fmt)
        elif to.this in self.PARAMETERIZABLE_TEXT_TYPES:
            # Formatting followed by a cast to the sized text type.
            transformed = cast(this=exp.TimeToStr(this=value, format=fmt), to=to, safe=safe)
        elif to.this == exp.DataType.Type.TEXT:
            transformed = exp.TimeToStr(this=value, format=fmt)

    if not transformed:
        # No applicable style: fall back to a plain (TRY_)CAST.
        transformed = cast(this=value, to=to, safe=safe)

    return self.sql(transformed)
def interval(this, unit):
    """Build a datetime.timedelta of `this` units, e.g. ("2", "DAY") -> 2 days.

    timedelta keyword arguments are plural, so the unit is pluralized whenever
    the generator recognizes that plural form as a valid time part.
    """
    plural_form = unit + "S"
    unit_name = plural_form if plural_form in Generator.TIME_PART_SINGULARS else unit
    return datetime.timedelta(**{unit_name.lower(): float(this)})
import datetime
import inspect
import re
import statistics
from functools import wraps
from sqlglot import exp
from sqlglot.generator import Generator
from sqlglot.helper import PYTHON_VERSION, is_int, seq_get
def arraytostring(this, expression, null=None):
    """Join the elements of `this` with the `expression` separator.

    None elements are replaced by `null` first; any element that is still
    None after that substitution is dropped from the output (mirrors SQL's
    ARRAY_TO_STRING semantics).
    """
    parts = []
    for item in this:
        value = null if item is None else item
        if value is not None:
            parts.append(value)
    return expression.join(parts)
import datetime
import inspect
import re
import statistics
from functools import wraps
from sqlglot import exp
from sqlglot.generator import Generator
from sqlglot.helper import PYTHON_VERSION, is_int, seq_get
def seq_get(seq: t.Sequence[T], index: int) -> t.Optional[T]:
    """Look up `seq[index]`, returning `None` instead of raising IndexError.

    Negative indices are honored, matching normal sequence indexing.
    """
    if -len(seq) <= index < len(seq):
        return seq[index]
    return None
def is_int(text: str) -> bool:
    # Thin predicate: delegates to `is_type` (defined elsewhere in this
    # module) to check whether `text` parses as an int.
    return is_type(text, int)
def jsonextract(this, expression):
    """Walk `this` along the path segments in `expression`.

    Dicts are indexed by key and lists by integer position; traversal stops
    early once a lookup yields None. Raises NotImplementedError for any
    other container/segment combination.
    """
    current = this
    for segment in expression:
        if isinstance(current, dict):
            current = current.get(segment)
        elif isinstance(current, list) and is_int(segment):
            current = seq_get(current, int(segment))
        else:
            raise NotImplementedError(f"Unable to extract value for {current} at {segment}.")
        if current is None:
            break
    return current
from __future__ import annotations
import datetime
import inspect
import logging
import re
import sys
import typing as t
from collections.abc import Collection, Set
from contextlib import contextmanager
from copy import copy
from enum import Enum
from itertools import count
if t.TYPE_CHECKING:
from sqlglot import exp
from sqlglot._typing import A, E, T
from sqlglot.expressions import Expression
The provided code snippet includes necessary dependencies for implementing the `subclasses` function. Write a Python function `def subclasses( module_name: str, classes: t.Type | t.Tuple[t.Type, ...], exclude: t.Type | t.Tuple[t.Type, ...] = (), ) -> t.List[t.Type]` to solve the following problem:
Returns all subclasses for a collection of classes, possibly excluding some of them. Args: module_name: The name of the module to search for subclasses in. classes: Class(es) we want to find the subclasses of. exclude: Class(es) we want to exclude from the returned list. Returns: The target subclasses.
Here is the function:
def subclasses(
    module_name: str,
    classes: t.Type | t.Tuple[t.Type, ...],
    exclude: t.Type | t.Tuple[t.Type, ...] = (),
) -> t.List[t.Type]:
    """
    Returns all subclasses for a collection of classes, possibly excluding some of them.

    Args:
        module_name: The name of the module to search for subclasses in.
        classes: Class(es) we want to find the subclasses of.
        exclude: Class(es) we want to exclude from the returned list.

    Returns:
        The target subclasses.
    """
    module = sys.modules[module_name]

    def _is_target(candidate: t.Any) -> bool:
        # A member qualifies if it is a class, subclasses the target(s),
        # and is not explicitly excluded.
        return (
            inspect.isclass(candidate)
            and issubclass(candidate, classes)
            and candidate not in exclude
        )

    return [member for _, member in inspect.getmembers(module, _is_target)]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.