id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
153,175 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def lit(value: t.Optional[t.Any] = None) -> Column:
if isinstance(value, str):
return Column(expression.Literal.string(str(value)))
return Column(value)
def create_map(*cols: t.Union[ColumnOrName, t.Iterable[ColumnOrName]]) -> Column:
cols = list(_flatten(cols)) if not isinstance(cols[0], (str, Column)) else cols # type: ignore
return Column.invoke_expression_over_column(
None,
expression.VarMap,
keys=array(*cols[::2]).expression,
values=array(*cols[1::2]).expression,
)
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def from_json(
col: ColumnOrName,
schema: t.Union[Column, str],
options: t.Optional[t.Dict[str, str]] = None,
) -> Column:
schema = schema if isinstance(schema, Column) else lit(schema)
if options is not None:
options_col = create_map([lit(x) for x in _flatten(options.items())])
return Column.invoke_anonymous_function(col, "FROM_JSON", schema, options_col)
return Column.invoke_anonymous_function(col, "FROM_JSON", schema) | null |
153,176 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def lit(value: t.Optional[t.Any] = None) -> Column:
if isinstance(value, str):
return Column(expression.Literal.string(str(value)))
return Column(value)
def create_map(*cols: t.Union[ColumnOrName, t.Iterable[ColumnOrName]]) -> Column:
cols = list(_flatten(cols)) if not isinstance(cols[0], (str, Column)) else cols # type: ignore
return Column.invoke_expression_over_column(
None,
expression.VarMap,
keys=array(*cols[::2]).expression,
values=array(*cols[1::2]).expression,
)
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def to_json(col: ColumnOrName, options: t.Optional[t.Dict[str, str]] = None) -> Column:
if options is not None:
options_col = create_map([lit(x) for x in _flatten(options.items())])
return Column.invoke_expression_over_column(col, expression.JSONFormat, options=options_col)
return Column.invoke_expression_over_column(col, expression.JSONFormat) | null |
153,177 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def lit(value: t.Optional[t.Any] = None) -> Column:
def create_map(*cols: t.Union[ColumnOrName, t.Iterable[ColumnOrName]]) -> Column:
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
def __repr__(self):
def __hash__(self):
def __eq__(self, other: ColumnOrLiteral) -> Column:
def __ne__(self, other: ColumnOrLiteral) -> Column:
def __gt__(self, other: ColumnOrLiteral) -> Column:
def __ge__(self, other: ColumnOrLiteral) -> Column:
def __lt__(self, other: ColumnOrLiteral) -> Column:
def __le__(self, other: ColumnOrLiteral) -> Column:
def __and__(self, other: ColumnOrLiteral) -> Column:
def __or__(self, other: ColumnOrLiteral) -> Column:
def __mod__(self, other: ColumnOrLiteral) -> Column:
def __add__(self, other: ColumnOrLiteral) -> Column:
def __sub__(self, other: ColumnOrLiteral) -> Column:
def __mul__(self, other: ColumnOrLiteral) -> Column:
def __truediv__(self, other: ColumnOrLiteral) -> Column:
def __div__(self, other: ColumnOrLiteral) -> Column:
def __neg__(self) -> Column:
def __radd__(self, other: ColumnOrLiteral) -> Column:
def __rsub__(self, other: ColumnOrLiteral) -> Column:
def __rmul__(self, other: ColumnOrLiteral) -> Column:
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
def __rmod__(self, other: ColumnOrLiteral) -> Column:
def __pow__(self, power: ColumnOrLiteral, modulo=None):
def __rpow__(self, power: ColumnOrLiteral):
def __invert__(self):
def __rand__(self, other: ColumnOrLiteral) -> Column:
def __ror__(self, other: ColumnOrLiteral) -> Column:
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
def _lit(cls, value: ColumnOrLiteral) -> Column:
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
def is_alias(self):
def is_column(self):
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
def alias_or_name(self) -> str:
def ensure_literal(cls, value) -> Column:
def copy(self) -> Column:
def set_table_name(self, table_name: str, copy=False) -> Column:
def sql(self, **kwargs) -> str:
def alias(self, name: str) -> Column:
def asc(self) -> Column:
def desc(self) -> Column:
def asc_nulls_last(self) -> Column:
def desc_nulls_first(self) -> Column:
def when(self, condition: Column, value: t.Any) -> Column:
def otherwise(self, value: t.Any) -> Column:
def isNull(self) -> Column:
def isNotNull(self) -> Column:
def cast(self, dataType: t.Union[str, DataType]) -> Column:
def startswith(self, value: t.Union[str, Column]) -> Column:
def endswith(self, value: t.Union[str, Column]) -> Column:
def rlike(self, regexp: str) -> Column:
def like(self, other: str):
def ilike(self, other: str):
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
def over(self, window: WindowSpec) -> Column:
ColumnOrName = t.Union[Column, str]
def schema_of_json(col: ColumnOrName, options: t.Optional[t.Dict[str, str]] = None) -> Column:
if options is not None:
options_col = create_map([lit(x) for x in _flatten(options.items())])
return Column.invoke_anonymous_function(col, "SCHEMA_OF_JSON", options_col)
return Column.invoke_anonymous_function(col, "SCHEMA_OF_JSON") | null |
153,178 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def lit(value: t.Optional[t.Any] = None) -> Column:
if isinstance(value, str):
return Column(expression.Literal.string(str(value)))
return Column(value)
def create_map(*cols: t.Union[ColumnOrName, t.Iterable[ColumnOrName]]) -> Column:
cols = list(_flatten(cols)) if not isinstance(cols[0], (str, Column)) else cols # type: ignore
return Column.invoke_expression_over_column(
None,
expression.VarMap,
keys=array(*cols[::2]).expression,
values=array(*cols[1::2]).expression,
)
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def schema_of_csv(col: ColumnOrName, options: t.Optional[t.Dict[str, str]] = None) -> Column:
if options is not None:
options_col = create_map([lit(x) for x in _flatten(options.items())])
return Column.invoke_anonymous_function(col, "SCHEMA_OF_CSV", options_col)
return Column.invoke_anonymous_function(col, "SCHEMA_OF_CSV") | null |
153,179 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def lit(value: t.Optional[t.Any] = None) -> Column:
if isinstance(value, str):
return Column(expression.Literal.string(str(value)))
return Column(value)
def create_map(*cols: t.Union[ColumnOrName, t.Iterable[ColumnOrName]]) -> Column:
cols = list(_flatten(cols)) if not isinstance(cols[0], (str, Column)) else cols # type: ignore
return Column.invoke_expression_over_column(
None,
expression.VarMap,
keys=array(*cols[::2]).expression,
values=array(*cols[1::2]).expression,
)
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def to_csv(col: ColumnOrName, options: t.Optional[t.Dict[str, str]] = None) -> Column:
if options is not None:
options_col = create_map([lit(x) for x in _flatten(options.items())])
return Column.invoke_anonymous_function(col, "TO_CSV", options_col)
return Column.invoke_anonymous_function(col, "TO_CSV") | null |
153,180 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
def __repr__(self):
def __hash__(self):
def __eq__(self, other: ColumnOrLiteral) -> Column:
def __ne__(self, other: ColumnOrLiteral) -> Column:
def __gt__(self, other: ColumnOrLiteral) -> Column:
def __ge__(self, other: ColumnOrLiteral) -> Column:
def __lt__(self, other: ColumnOrLiteral) -> Column:
def __le__(self, other: ColumnOrLiteral) -> Column:
def __and__(self, other: ColumnOrLiteral) -> Column:
def __or__(self, other: ColumnOrLiteral) -> Column:
def __mod__(self, other: ColumnOrLiteral) -> Column:
def __add__(self, other: ColumnOrLiteral) -> Column:
def __sub__(self, other: ColumnOrLiteral) -> Column:
def __mul__(self, other: ColumnOrLiteral) -> Column:
def __truediv__(self, other: ColumnOrLiteral) -> Column:
def __div__(self, other: ColumnOrLiteral) -> Column:
def __neg__(self) -> Column:
def __radd__(self, other: ColumnOrLiteral) -> Column:
def __rsub__(self, other: ColumnOrLiteral) -> Column:
def __rmul__(self, other: ColumnOrLiteral) -> Column:
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
def __rmod__(self, other: ColumnOrLiteral) -> Column:
def __pow__(self, power: ColumnOrLiteral, modulo=None):
def __rpow__(self, power: ColumnOrLiteral):
def __invert__(self):
def __rand__(self, other: ColumnOrLiteral) -> Column:
def __ror__(self, other: ColumnOrLiteral) -> Column:
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
def _lit(cls, value: ColumnOrLiteral) -> Column:
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
def is_alias(self):
def is_column(self):
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
def alias_or_name(self) -> str:
def ensure_literal(cls, value) -> Column:
def copy(self) -> Column:
def set_table_name(self, table_name: str, copy=False) -> Column:
def sql(self, **kwargs) -> str:
def alias(self, name: str) -> Column:
def asc(self) -> Column:
def desc(self) -> Column:
def asc_nulls_last(self) -> Column:
def desc_nulls_first(self) -> Column:
def when(self, condition: Column, value: t.Any) -> Column:
def otherwise(self, value: t.Any) -> Column:
def isNull(self) -> Column:
def isNotNull(self) -> Column:
def cast(self, dataType: t.Union[str, DataType]) -> Column:
def startswith(self, value: t.Union[str, Column]) -> Column:
def endswith(self, value: t.Union[str, Column]) -> Column:
def rlike(self, regexp: str) -> Column:
def like(self, other: str):
def ilike(self, other: str):
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
def over(self, window: WindowSpec) -> Column:
ColumnOrName = t.Union[Column, str]
def size(col: ColumnOrName) -> Column:
return Column.invoke_expression_over_column(col, expression.ArraySize) | null |
153,181 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def array_min(col: ColumnOrName) -> Column:
return Column.invoke_anonymous_function(col, "ARRAY_MIN") | null |
153,182 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
def __repr__(self):
def __hash__(self):
def __eq__(self, other: ColumnOrLiteral) -> Column:
def __ne__(self, other: ColumnOrLiteral) -> Column:
def __gt__(self, other: ColumnOrLiteral) -> Column:
def __ge__(self, other: ColumnOrLiteral) -> Column:
def __lt__(self, other: ColumnOrLiteral) -> Column:
def __le__(self, other: ColumnOrLiteral) -> Column:
def __and__(self, other: ColumnOrLiteral) -> Column:
def __or__(self, other: ColumnOrLiteral) -> Column:
def __mod__(self, other: ColumnOrLiteral) -> Column:
def __add__(self, other: ColumnOrLiteral) -> Column:
def __sub__(self, other: ColumnOrLiteral) -> Column:
def __mul__(self, other: ColumnOrLiteral) -> Column:
def __truediv__(self, other: ColumnOrLiteral) -> Column:
def __div__(self, other: ColumnOrLiteral) -> Column:
def __neg__(self) -> Column:
def __radd__(self, other: ColumnOrLiteral) -> Column:
def __rsub__(self, other: ColumnOrLiteral) -> Column:
def __rmul__(self, other: ColumnOrLiteral) -> Column:
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
def __rmod__(self, other: ColumnOrLiteral) -> Column:
def __pow__(self, power: ColumnOrLiteral, modulo=None):
def __rpow__(self, power: ColumnOrLiteral):
def __invert__(self):
def __rand__(self, other: ColumnOrLiteral) -> Column:
def __ror__(self, other: ColumnOrLiteral) -> Column:
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
def _lit(cls, value: ColumnOrLiteral) -> Column:
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
def is_alias(self):
def is_column(self):
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
def alias_or_name(self) -> str:
def ensure_literal(cls, value) -> Column:
def copy(self) -> Column:
def set_table_name(self, table_name: str, copy=False) -> Column:
def sql(self, **kwargs) -> str:
def alias(self, name: str) -> Column:
def asc(self) -> Column:
def desc(self) -> Column:
def asc_nulls_last(self) -> Column:
def desc_nulls_first(self) -> Column:
def when(self, condition: Column, value: t.Any) -> Column:
def otherwise(self, value: t.Any) -> Column:
def isNull(self) -> Column:
def isNotNull(self) -> Column:
def cast(self, dataType: t.Union[str, DataType]) -> Column:
def startswith(self, value: t.Union[str, Column]) -> Column:
def endswith(self, value: t.Union[str, Column]) -> Column:
def rlike(self, regexp: str) -> Column:
def like(self, other: str):
def ilike(self, other: str):
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
def over(self, window: WindowSpec) -> Column:
ColumnOrName = t.Union[Column, str]
def array_max(col: ColumnOrName) -> Column:
return Column.invoke_anonymous_function(col, "ARRAY_MAX") | null |
153,183 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
def __repr__(self):
def __hash__(self):
def __eq__(self, other: ColumnOrLiteral) -> Column:
def __ne__(self, other: ColumnOrLiteral) -> Column:
def __gt__(self, other: ColumnOrLiteral) -> Column:
def __ge__(self, other: ColumnOrLiteral) -> Column:
def __lt__(self, other: ColumnOrLiteral) -> Column:
def __le__(self, other: ColumnOrLiteral) -> Column:
def __and__(self, other: ColumnOrLiteral) -> Column:
def __or__(self, other: ColumnOrLiteral) -> Column:
def __mod__(self, other: ColumnOrLiteral) -> Column:
def __add__(self, other: ColumnOrLiteral) -> Column:
def __sub__(self, other: ColumnOrLiteral) -> Column:
def __mul__(self, other: ColumnOrLiteral) -> Column:
def __truediv__(self, other: ColumnOrLiteral) -> Column:
def __div__(self, other: ColumnOrLiteral) -> Column:
def __neg__(self) -> Column:
def __radd__(self, other: ColumnOrLiteral) -> Column:
def __rsub__(self, other: ColumnOrLiteral) -> Column:
def __rmul__(self, other: ColumnOrLiteral) -> Column:
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
def __rmod__(self, other: ColumnOrLiteral) -> Column:
def __pow__(self, power: ColumnOrLiteral, modulo=None):
def __rpow__(self, power: ColumnOrLiteral):
def __invert__(self):
def __rand__(self, other: ColumnOrLiteral) -> Column:
def __ror__(self, other: ColumnOrLiteral) -> Column:
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
def _lit(cls, value: ColumnOrLiteral) -> Column:
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
def is_alias(self):
def is_column(self):
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
def alias_or_name(self) -> str:
def ensure_literal(cls, value) -> Column:
def copy(self) -> Column:
def set_table_name(self, table_name: str, copy=False) -> Column:
def sql(self, **kwargs) -> str:
def alias(self, name: str) -> Column:
def asc(self) -> Column:
def desc(self) -> Column:
def asc_nulls_last(self) -> Column:
def desc_nulls_first(self) -> Column:
def when(self, condition: Column, value: t.Any) -> Column:
def otherwise(self, value: t.Any) -> Column:
def isNull(self) -> Column:
def isNotNull(self) -> Column:
def cast(self, dataType: t.Union[str, DataType]) -> Column:
def startswith(self, value: t.Union[str, Column]) -> Column:
def endswith(self, value: t.Union[str, Column]) -> Column:
def rlike(self, regexp: str) -> Column:
def like(self, other: str):
def ilike(self, other: str):
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
def over(self, window: WindowSpec) -> Column:
ColumnOrName = t.Union[Column, str]
def sort_array(col: ColumnOrName, asc: t.Optional[bool] = None) -> Column:
if asc is not None:
return Column.invoke_expression_over_column(col, expression.SortArray, asc=asc)
return Column.invoke_expression_over_column(col, expression.SortArray) | null |
153,184 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def _get_lambda_from_func(lambda_expression: t.Callable):
variables = [
expression.to_identifier(x, quoted=_lambda_quoted(x))
for x in lambda_expression.__code__.co_varnames
]
return expression.Lambda(
this=lambda_expression(*[Column(x) for x in variables]).expression,
expressions=variables,
)
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def array_sort(
col: ColumnOrName,
comparator: t.Optional[t.Union[t.Callable[[Column, Column], Column]]] = None,
) -> Column:
if comparator is not None:
f_expression = _get_lambda_from_func(comparator)
return Column.invoke_expression_over_column(
col, expression.ArraySort, expression=f_expression
)
return Column.invoke_expression_over_column(col, expression.ArraySort) | null |
153,185 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str) -> Column:
    # Case-insensitive LIKE match of this column against the literal pattern *other*.
    return self.invoke_expression_over_column(
        self, exp.ILike, expression=self._lit(other).expression
    )
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
    """Return ``SUBSTRING(this, startPos, length)``."""
    start = startPos if isinstance(startPos, Column) else self._lit(startPos)
    size = length if isinstance(length, Column) else self._lit(length)
    return Column.invoke_expression_over_column(
        self, exp.Substring, start=start.expression, length=size.expression
    )
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]) -> Column:
    # Accepts either a single iterable of values or the values directly as varargs.
    # NOTE(review): cols[0] raises IndexError when called with zero arguments —
    # presumably callers always supply at least one value; confirm against PySpark.
    columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols  # type: ignore
    expressions = [self._lit(x).expression for x in columns]
    return Column.invoke_expression_over_column(self, exp.In, expressions=expressions)  # type: ignore
def between(
    self,
    lowerBound: ColumnOrLiteral,
    upperBound: ColumnOrLiteral,
) -> Column:
    """Return a BETWEEN expression: ``lowerBound <= this <= upperBound``.

    Literal bounds are wrapped via ``_lit``; Column bounds are used as-is.
    (Fix: the annotations were ``t.Union[ColumnOrLiteral]`` — a degenerate
    one-member Union, equivalent to the bare type.)
    """
    low = lowerBound if isinstance(lowerBound, Column) else self._lit(lowerBound)
    high = upperBound if isinstance(upperBound, Column) else self._lit(upperBound)
    return Column(
        exp.Between(
            this=self.column_expression,
            low=low.expression,
            high=high.expression,
        )
    )
def over(self, window: WindowSpec) -> Column:
    """Apply this column expression over the given window specification."""
    spec = window.expression.copy()
    spec.set("this", self.column_expression)
    return Column(spec)
# Convenience alias: APIs below accept either a Column or a plain column-name string.
ColumnOrName = t.Union[Column, str]
def shuffle(col: ColumnOrName) -> Column:
    """Delegate to the engine's SHUFFLE function over *col* (random element order)."""
    return Column.invoke_anonymous_function(col, "SHUFFLE")
153,186 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
def __repr__(self):
def __hash__(self):
def __eq__(self, other: ColumnOrLiteral) -> Column:
def __ne__(self, other: ColumnOrLiteral) -> Column:
def __gt__(self, other: ColumnOrLiteral) -> Column:
def __ge__(self, other: ColumnOrLiteral) -> Column:
def __lt__(self, other: ColumnOrLiteral) -> Column:
def __le__(self, other: ColumnOrLiteral) -> Column:
def __and__(self, other: ColumnOrLiteral) -> Column:
def __or__(self, other: ColumnOrLiteral) -> Column:
def __mod__(self, other: ColumnOrLiteral) -> Column:
def __add__(self, other: ColumnOrLiteral) -> Column:
def __sub__(self, other: ColumnOrLiteral) -> Column:
def __mul__(self, other: ColumnOrLiteral) -> Column:
def __truediv__(self, other: ColumnOrLiteral) -> Column:
def __div__(self, other: ColumnOrLiteral) -> Column:
def __neg__(self) -> Column:
def __radd__(self, other: ColumnOrLiteral) -> Column:
def __rsub__(self, other: ColumnOrLiteral) -> Column:
def __rmul__(self, other: ColumnOrLiteral) -> Column:
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
def __rmod__(self, other: ColumnOrLiteral) -> Column:
def __pow__(self, power: ColumnOrLiteral, modulo=None):
def __rpow__(self, power: ColumnOrLiteral):
def __invert__(self):
def __rand__(self, other: ColumnOrLiteral) -> Column:
def __ror__(self, other: ColumnOrLiteral) -> Column:
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
def _lit(cls, value: ColumnOrLiteral) -> Column:
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
def is_alias(self):
def is_column(self):
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
def alias_or_name(self) -> str:
def ensure_literal(cls, value) -> Column:
def copy(self) -> Column:
def set_table_name(self, table_name: str, copy=False) -> Column:
def sql(self, **kwargs) -> str:
def alias(self, name: str) -> Column:
def asc(self) -> Column:
def desc(self) -> Column:
def asc_nulls_last(self) -> Column:
def desc_nulls_first(self) -> Column:
def when(self, condition: Column, value: t.Any) -> Column:
def otherwise(self, value: t.Any) -> Column:
def isNull(self) -> Column:
def isNotNull(self) -> Column:
def cast(self, dataType: t.Union[str, DataType]) -> Column:
def startswith(self, value: t.Union[str, Column]) -> Column:
def endswith(self, value: t.Union[str, Column]) -> Column:
def rlike(self, regexp: str) -> Column:
def like(self, other: str):
def ilike(self, other: str):
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
def over(self, window: WindowSpec) -> Column:
ColumnOrName = t.Union[Column, str]
def reverse(col: ColumnOrName) -> Column:
    """Delegate to the engine's REVERSE function over *col*."""
    return Column.invoke_anonymous_function(col, "REVERSE")
153,187 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def map_keys(col: ColumnOrName) -> Column:
    """Delegate to the engine's MAP_KEYS function over *col*."""
    return Column.invoke_anonymous_function(col, "MAP_KEYS")
153,188 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def map_values(col: ColumnOrName) -> Column:
    """Delegate to the engine's MAP_VALUES function over *col*."""
    return Column.invoke_anonymous_function(col, "MAP_VALUES")
153,189 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def map_entries(col: ColumnOrName) -> Column:
return Column.invoke_anonymous_function(col, "MAP_ENTRIES") | null |
153,190 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
def __repr__(self):
def __hash__(self):
def __eq__(self, other: ColumnOrLiteral) -> Column:
def __ne__(self, other: ColumnOrLiteral) -> Column:
def __gt__(self, other: ColumnOrLiteral) -> Column:
def __ge__(self, other: ColumnOrLiteral) -> Column:
def __lt__(self, other: ColumnOrLiteral) -> Column:
def __le__(self, other: ColumnOrLiteral) -> Column:
def __and__(self, other: ColumnOrLiteral) -> Column:
def __or__(self, other: ColumnOrLiteral) -> Column:
def __mod__(self, other: ColumnOrLiteral) -> Column:
def __add__(self, other: ColumnOrLiteral) -> Column:
def __sub__(self, other: ColumnOrLiteral) -> Column:
def __mul__(self, other: ColumnOrLiteral) -> Column:
def __truediv__(self, other: ColumnOrLiteral) -> Column:
def __div__(self, other: ColumnOrLiteral) -> Column:
def __neg__(self) -> Column:
def __radd__(self, other: ColumnOrLiteral) -> Column:
def __rsub__(self, other: ColumnOrLiteral) -> Column:
def __rmul__(self, other: ColumnOrLiteral) -> Column:
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
def __rmod__(self, other: ColumnOrLiteral) -> Column:
def __pow__(self, power: ColumnOrLiteral, modulo=None):
def __rpow__(self, power: ColumnOrLiteral):
def __invert__(self):
def __rand__(self, other: ColumnOrLiteral) -> Column:
def __ror__(self, other: ColumnOrLiteral) -> Column:
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
def _lit(cls, value: ColumnOrLiteral) -> Column:
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
def is_alias(self):
def is_column(self):
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
def alias_or_name(self) -> str:
def ensure_literal(cls, value) -> Column:
def copy(self) -> Column:
def set_table_name(self, table_name: str, copy=False) -> Column:
def sql(self, **kwargs) -> str:
def alias(self, name: str) -> Column:
def asc(self) -> Column:
def desc(self) -> Column:
def asc_nulls_last(self) -> Column:
def desc_nulls_first(self) -> Column:
def when(self, condition: Column, value: t.Any) -> Column:
def otherwise(self, value: t.Any) -> Column:
def isNull(self) -> Column:
def isNotNull(self) -> Column:
def cast(self, dataType: t.Union[str, DataType]) -> Column:
def startswith(self, value: t.Union[str, Column]) -> Column:
def endswith(self, value: t.Union[str, Column]) -> Column:
def rlike(self, regexp: str) -> Column:
def like(self, other: str):
def ilike(self, other: str):
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
def over(self, window: WindowSpec) -> Column:
ColumnOrName = t.Union[Column, str]
def map_from_entries(col: ColumnOrName) -> Column:
return Column.invoke_expression_over_column(col, expression.MapFromEntries) | null |
153,191 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def lit(value: t.Optional[t.Any] = None) -> Column:
if isinstance(value, str):
return Column(expression.Literal.string(str(value)))
return Column(value)
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def array_repeat(col: ColumnOrName, count: t.Union[ColumnOrName, int]) -> Column:
count_col = count if isinstance(count, Column) else lit(count)
return Column.invoke_anonymous_function(col, "ARRAY_REPEAT", count_col) | null |
153,192 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def array_zip(*cols: ColumnOrName) -> Column:
if len(cols) == 1:
return Column.invoke_anonymous_function(cols[0], "ARRAY_ZIP")
return Column.invoke_anonymous_function(cols[0], "ARRAY_ZIP", *cols[1:]) | null |
153,193 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def flatten(col: ColumnOrName) -> Column:
return Column.invoke_expression_over_column(col, expression.Flatten)
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
def flatten(values: t.Iterable[t.Iterable[t.Any] | t.Any]) -> t.Iterator[t.Any]:
"""
Flattens an iterable that can contain both iterable and non-iterable elements. Objects of
type `str` and `bytes` are not regarded as iterables.
Examples:
>>> list(flatten([[1, 2], 3, {4}, (5, "bla")]))
[1, 2, 3, 4, 5, 'bla']
>>> list(flatten([1, 2, 3]))
[1, 2, 3]
Args:
values: The value to be flattened.
Yields:
Non-iterable elements in `values`.
"""
for value in values:
if is_iterable(value):
yield from flatten(value)
else:
yield value
ColumnOrName = t.Union[Column, str]
def map_concat(*cols: t.Union[ColumnOrName, t.Iterable[ColumnOrName]]) -> Column:
columns = list(flatten(cols)) if not isinstance(cols[0], (str, Column)) else cols # type: ignore
if len(columns) == 1:
return Column.invoke_anonymous_function(columns[0], "MAP_CONCAT")
return Column.invoke_anonymous_function(columns[0], "MAP_CONCAT", *columns[1:]) | null |
153,194 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def sequence(
start: ColumnOrName, stop: ColumnOrName, step: t.Optional[ColumnOrName] = None
) -> Column:
if step is not None:
return Column.invoke_anonymous_function(start, "SEQUENCE", stop, step)
return Column.invoke_anonymous_function(start, "SEQUENCE", stop) | null |
153,195 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def lit(value: t.Optional[t.Any] = None) -> Column:
if isinstance(value, str):
return Column(expression.Literal.string(str(value)))
return Column(value)
def create_map(*cols: t.Union[ColumnOrName, t.Iterable[ColumnOrName]]) -> Column:
cols = list(_flatten(cols)) if not isinstance(cols[0], (str, Column)) else cols # type: ignore
return Column.invoke_expression_over_column(
None,
expression.VarMap,
keys=array(*cols[::2]).expression,
values=array(*cols[1::2]).expression,
)
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
    """Ascending sort order with NULLs first."""
    ordered = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
    return Column(ordered)
def desc(self) -> Column:
    """Descending sort order with NULLs last."""
    ordered = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
    return Column(ordered)

# asc already places NULLs first, so the explicit variant is an alias.
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
    """Ascending sort order with NULLs last."""
    ordered = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
    return Column(ordered)
def desc_nulls_first(self) -> Column:
    """Descending sort order with NULLs first."""
    ordered = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
    return Column(ordered)

# desc already places NULLs last, so the explicit variant is an alias.
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
    """Append a WHEN branch; starts a fresh CASE if this isn't one already."""
    from sqlglot.dataframe.sql.functions import when

    branch = when(condition, value)
    if not isinstance(self.expression, exp.Case):
        return branch
    merged = self.copy()
    merged.expression.args["ifs"].extend(branch.expression.args["ifs"])
    return merged
def otherwise(self, value: t.Any) -> Column:
    """Set the CASE expression's ELSE (default) branch."""
    from sqlglot.dataframe.sql.functions import lit

    default = value if isinstance(value, Column) else lit(value)
    result = self.copy()
    result.expression.set("default", default.column_expression)
    return result
def isNull(self) -> Column:
    """SQL ``IS NULL`` predicate for this column."""
    return Column(exp.Is(this=self.column_expression, expression=exp.Null()))
def isNotNull(self) -> Column:
    """SQL ``IS NOT NULL`` predicate for this column."""
    is_null = exp.Is(this=self.column_expression, expression=exp.Null())
    return Column(exp.Not(this=is_null))
def cast(self, dataType: t.Union[str, DataType]) -> Column:
    """Cast this column to *dataType*.

    Functionality difference: PySpark's cast accepts DataType instances;
    sqlglot only replicates the string form, so a DataType is converted
    through ``simpleString()`` first.
    """
    from sqlglot.dataframe.sql.session import SparkSession

    type_name = dataType.simpleString() if isinstance(dataType, DataType) else dataType
    return Column(exp.cast(self.column_expression, type_name, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
    """STARTSWITH(self, value); non-Column values become literals."""
    operand = value if isinstance(value, Column) else self._lit(value)
    return self.invoke_anonymous_function(self, "STARTSWITH", operand)
def endswith(self, value: t.Union[str, Column]) -> Column:
    """ENDSWITH(self, value); non-Column values become literals."""
    operand = value if isinstance(value, Column) else self._lit(value)
    return self.invoke_anonymous_function(self, "ENDSWITH", operand)
def rlike(self, regexp: str) -> Column:
    """Regular-expression match (Spark RLIKE) against *regexp*."""
    pattern = self._lit(regexp).expression
    return self.invoke_expression_over_column(
        column=self, callable_expression=exp.RegexpLike, expression=pattern
    )
def like(self, other: str):
    """SQL ``LIKE`` pattern match (case-sensitive)."""
    pattern = self._lit(other).expression
    return self.invoke_expression_over_column(self, exp.Like, expression=pattern)
def ilike(self, other: str):
    """SQL ``ILIKE`` pattern match (case-insensitive)."""
    pattern = self._lit(other).expression
    return self.invoke_expression_over_column(self, exp.ILike, expression=pattern)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
    """SUBSTRING(self, startPos, length); plain ints become literals."""
    start = startPos if isinstance(startPos, Column) else self._lit(startPos)
    size = length if isinstance(length, Column) else self._lit(length)
    return Column.invoke_expression_over_column(
        self, exp.Substring, start=start.expression, length=size.expression
    )
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
    """SQL ``IN`` predicate; accepts values or a single iterable of values.

    Fixes: the helper is imported at the top of this module as ``_flatten``
    (``flatten as _flatten``), so the bare name ``flatten`` raised NameError;
    also guards the zero-argument call so ``col.isin()`` doesn't crash on
    ``cols[0]``.
    """
    values = _flatten(cols) if cols and isinstance(cols[0], (list, set, tuple)) else cols  # type: ignore
    expressions = [self._lit(x).expression for x in values]
    return Column.invoke_expression_over_column(self, exp.In, expressions=expressions)  # type: ignore
def between(
    self,
    lowerBound: t.Union[ColumnOrLiteral],
    upperBound: t.Union[ColumnOrLiteral],
) -> Column:
    """SQL ``BETWEEN lowerBound AND upperBound`` (inclusive on both ends)."""
    low = lowerBound if isinstance(lowerBound, Column) else self._lit(lowerBound)
    high = upperBound if isinstance(upperBound, Column) else self._lit(upperBound)
    node = exp.Between(
        this=self.column_expression,
        low=low.expression,
        high=high.expression,
    )
    return Column(node)
def over(self, window: WindowSpec) -> Column:
    """Apply this expression over *window* (the window spec is copied)."""
    spec = window.expression.copy()
    spec.set("this", self.column_expression)
    return Column(spec)
# Type alias: anything accepted where a column is expected — a Column or its name.
ColumnOrName = t.Union[Column, str]
def from_csv(
    col: ColumnOrName,
    schema: t.Union[Column, str],
    options: t.Optional[t.Dict[str, str]] = None,
) -> Column:
    """FROM_CSV(col, schema[, options]): parse a CSV string column.

    *options*, when given, is flattened into alternating key/value literals
    and passed as a MAP argument.
    """
    schema_col = schema if isinstance(schema, Column) else lit(schema)
    if options is None:
        return Column.invoke_anonymous_function(col, "FROM_CSV", schema_col)
    option_map = create_map([lit(item) for item in _flatten(options.items())])
    return Column.invoke_anonymous_function(col, "FROM_CSV", schema_col, option_map)
153,196 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def _get_lambda_from_func(lambda_expression: t.Callable):
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
def __repr__(self):
def __hash__(self):
def __eq__(self, other: ColumnOrLiteral) -> Column:
def __ne__(self, other: ColumnOrLiteral) -> Column:
def __gt__(self, other: ColumnOrLiteral) -> Column:
def __ge__(self, other: ColumnOrLiteral) -> Column:
def __lt__(self, other: ColumnOrLiteral) -> Column:
def __le__(self, other: ColumnOrLiteral) -> Column:
def __and__(self, other: ColumnOrLiteral) -> Column:
def __or__(self, other: ColumnOrLiteral) -> Column:
def __mod__(self, other: ColumnOrLiteral) -> Column:
def __add__(self, other: ColumnOrLiteral) -> Column:
def __sub__(self, other: ColumnOrLiteral) -> Column:
def __mul__(self, other: ColumnOrLiteral) -> Column:
def __truediv__(self, other: ColumnOrLiteral) -> Column:
def __div__(self, other: ColumnOrLiteral) -> Column:
def __neg__(self) -> Column:
def __radd__(self, other: ColumnOrLiteral) -> Column:
def __rsub__(self, other: ColumnOrLiteral) -> Column:
def __rmul__(self, other: ColumnOrLiteral) -> Column:
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
def __rmod__(self, other: ColumnOrLiteral) -> Column:
def __pow__(self, power: ColumnOrLiteral, modulo=None):
def __rpow__(self, power: ColumnOrLiteral):
def __invert__(self):
def __rand__(self, other: ColumnOrLiteral) -> Column:
def __ror__(self, other: ColumnOrLiteral) -> Column:
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
def _lit(cls, value: ColumnOrLiteral) -> Column:
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
def is_alias(self):
def is_column(self):
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
def alias_or_name(self) -> str:
def ensure_literal(cls, value) -> Column:
def copy(self) -> Column:
def set_table_name(self, table_name: str, copy=False) -> Column:
def sql(self, **kwargs) -> str:
def alias(self, name: str) -> Column:
def asc(self) -> Column:
def desc(self) -> Column:
def asc_nulls_last(self) -> Column:
def desc_nulls_first(self) -> Column:
def when(self, condition: Column, value: t.Any) -> Column:
def otherwise(self, value: t.Any) -> Column:
def isNull(self) -> Column:
def isNotNull(self) -> Column:
def cast(self, dataType: t.Union[str, DataType]) -> Column:
def startswith(self, value: t.Union[str, Column]) -> Column:
def endswith(self, value: t.Union[str, Column]) -> Column:
def rlike(self, regexp: str) -> Column:
def like(self, other: str):
def ilike(self, other: str):
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
def over(self, window: WindowSpec) -> Column:
ColumnOrName = t.Union[Column, str]
def aggregate(
    col: ColumnOrName,
    initialValue: ColumnOrName,
    merge: t.Callable[[Column, Column], Column],
    finish: t.Optional[t.Callable[[Column], Column]] = None,
) -> Column:
    """REDUCE over an array column: fold with *merge* from *initialValue*,
    optionally post-processing the accumulator with *finish*."""
    merge_lambda = Column(_get_lambda_from_func(merge))
    if finish is None:
        return Column.invoke_expression_over_column(
            col, expression.Reduce, initial=initialValue, merge=merge_lambda
        )
    finish_lambda = Column(_get_lambda_from_func(finish))
    return Column.invoke_expression_over_column(
        col,
        expression.Reduce,
        initial=initialValue,
        merge=merge_lambda,
        finish=finish_lambda,
    )
153,197 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def _get_lambda_from_func(lambda_expression: t.Callable):
    """Convert a Python callable into a sqlglot Lambda node.

    Each parameter name becomes an identifier; the callable is invoked with
    Column-wrapped identifiers to capture the body expression.
    """
    params = [
        expression.to_identifier(name, quoted=_lambda_quoted(name))
        for name in lambda_expression.__code__.co_varnames
    ]
    body = lambda_expression(*[Column(p) for p in params]).expression
    return expression.Lambda(this=body, expressions=params)
class Column:
    """PySpark-style Column facade over a sqlglot expression tree.

    Reconstruction notes (this chunk's formatting was flattened):
    - Indentation restored; methods re-nested under the class.
    - ``@classmethod`` restored on the ``ensure_*`` / ``_lit`` /
      ``invoke_*`` helpers: their bodies use ``cls`` as the class itself
      (``cls(value)``, ``isinstance(value, cls)``) and call sites invoke
      them as ``Column.ensure_col(x)`` with a single argument.
    - ``@property`` restored on ``column_expression`` / ``alias_or_name``:
      internal uses read them without calling (``self.column_expression``).
    - ``isin`` used the bare name ``flatten``; this module imports the
      helper as ``_flatten``.
    NOTE(review): ``exp``, ``sqlglot``, ``is_iterable``, ``DataType`` and
    ``WindowSpec`` are not bound by the imports visible in this chunk —
    they come from the full module's import block; confirm there.
    """

    def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
        from sqlglot.dataframe.sql.session import SparkSession

        if isinstance(expression, Column):
            expression = expression.expression  # type: ignore
        elif expression is None or not isinstance(expression, (str, exp.Expression)):
            # Non-string, non-expression values become literals.
            expression = self._lit(expression).expression  # type: ignore
        elif not isinstance(expression, exp.Column):
            # Parse strings/other expressions and normalize identifier casing.
            expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
                SparkSession().dialect.normalize_identifier, copy=False
            )
        if expression is None:
            raise ValueError(f"Could not parse {expression}")
        self.expression: exp.Expression = expression  # type: ignore

    def __repr__(self):
        return repr(self.expression)

    def __hash__(self):
        return hash(self.expression)

    def __eq__(self, other: ColumnOrLiteral) -> Column:  # type: ignore
        return self.binary_op(exp.EQ, other)

    def __ne__(self, other: ColumnOrLiteral) -> Column:  # type: ignore
        return self.binary_op(exp.NEQ, other)

    def __gt__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.GT, other)

    def __ge__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.GTE, other)

    def __lt__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.LT, other)

    def __le__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.LTE, other)

    def __and__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.And, other)

    def __or__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Or, other)

    def __mod__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Mod, other)

    def __add__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Add, other)

    def __sub__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Sub, other)

    def __mul__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Mul, other)

    def __truediv__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Div, other)

    def __div__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Div, other)

    def __neg__(self) -> Column:
        return self.unary_op(exp.Neg)

    def __radd__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Add, other)

    def __rsub__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Sub, other)

    def __rmul__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Mul, other)

    def __rdiv__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Div, other)

    def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Div, other)

    def __rmod__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Mod, other)

    def __pow__(self, power: ColumnOrLiteral, modulo=None):
        return Column(exp.Pow(this=self.expression, expression=Column(power).expression))

    def __rpow__(self, power: ColumnOrLiteral):
        return Column(exp.Pow(this=Column(power).expression, expression=self.expression))

    def __invert__(self):
        return self.unary_op(exp.Not)

    def __rand__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.And, other)

    def __ror__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Or, other)

    @classmethod
    def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
        """Coerce *value* into a Column."""
        return cls(value)

    @classmethod
    def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
        """Coerce every element of *args* into a Column."""
        return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]

    @classmethod
    def _lit(cls, value: ColumnOrLiteral) -> Column:
        """Build a literal Column; dicts become STRUCTs with aliased fields."""
        if isinstance(value, dict):
            columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
            return cls(exp.Struct(expressions=columns))
        return cls(exp.convert(value))

    @classmethod
    def invoke_anonymous_function(
        cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
    ) -> Column:
        """Call an arbitrary SQL function *func_name* over column + args."""
        columns = [] if column is None else [cls.ensure_col(column)]
        column_args = [cls.ensure_col(arg) for arg in args]
        expressions = [x.expression for x in columns + column_args]
        new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
        return Column(new_expression)

    @classmethod
    def invoke_expression_over_column(
        cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
    ) -> Column:
        """Instantiate a sqlglot node over *column*, coercing kwargs.

        None-valued kwargs are dropped; iterable values are coerced per element.
        """
        ensured_column = None if column is None else cls.ensure_col(column)
        ensure_expression_values = {
            k: (
                [Column.ensure_col(x).expression for x in v]
                if is_iterable(v)
                else Column.ensure_col(v).expression
            )
            for k, v in kwargs.items()
            if v is not None
        }
        new_expression = (
            callable_expression(**ensure_expression_values)
            if ensured_column is None
            else callable_expression(
                this=ensured_column.column_expression, **ensure_expression_values
            )
        )
        return Column(new_expression)

    def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
        return Column(
            klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
        )

    def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
        return Column(
            klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
        )

    def unary_op(self, klass: t.Callable, **kwargs) -> Column:
        return Column(klass(this=self.column_expression, **kwargs))

    def is_alias(self):
        return isinstance(self.expression, exp.Alias)

    def is_column(self):
        return isinstance(self.expression, exp.Column)

    @property
    def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
        """The wrapped expression with any alias stripped."""
        return self.expression.unalias()

    @property
    def alias_or_name(self) -> str:
        """The alias if one is set, otherwise the bare name."""
        return self.expression.alias_or_name

    @classmethod
    def ensure_literal(cls, value) -> Column:
        """Coerce *value* into a literal-backed Column."""
        from sqlglot.dataframe.sql.functions import lit

        if isinstance(value, cls):
            value = value.expression
        if not isinstance(value, exp.Literal):
            return lit(value)
        return Column(value)

    def copy(self) -> Column:
        return Column(self.expression.copy())

    def set_table_name(self, table_name: str, copy=False) -> Column:
        """Qualify with *table_name*; mutates in place unless copy=True."""
        expression = self.expression.copy() if copy else self.expression
        expression.set("table", exp.to_identifier(table_name))
        return Column(expression)

    def sql(self, **kwargs) -> str:
        from sqlglot.dataframe.sql.session import SparkSession

        return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})

    def alias(self, name: str) -> Column:
        """Alias this column as *name*, parsed with the session dialect."""
        from sqlglot.dataframe.sql.session import SparkSession

        dialect = SparkSession().dialect
        alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
        new_expression = exp.alias_(
            self.column_expression,
            alias.this if isinstance(alias, exp.Column) else name,
            dialect=dialect,
        )
        return Column(new_expression)

    def asc(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
        return Column(new_expression)

    def desc(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
        return Column(new_expression)

    # asc/desc already use the matching NULL placement, so these alias.
    asc_nulls_first = asc

    def asc_nulls_last(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
        return Column(new_expression)

    def desc_nulls_first(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
        return Column(new_expression)

    desc_nulls_last = desc

    def when(self, condition: Column, value: t.Any) -> Column:
        """Append a WHEN branch; starts a fresh CASE if needed."""
        from sqlglot.dataframe.sql.functions import when

        column_with_if = when(condition, value)
        if not isinstance(self.expression, exp.Case):
            return column_with_if
        new_column = self.copy()
        new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
        return new_column

    def otherwise(self, value: t.Any) -> Column:
        """Set the CASE default (ELSE) branch."""
        from sqlglot.dataframe.sql.functions import lit

        true_value = value if isinstance(value, Column) else lit(value)
        new_column = self.copy()
        new_column.expression.set("default", true_value.column_expression)
        return new_column

    def isNull(self) -> Column:
        new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
        return Column(new_expression)

    def isNotNull(self) -> Column:
        new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
        return Column(new_expression)

    def cast(self, dataType: t.Union[str, DataType]) -> Column:
        """
        Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
        Sqlglot doesn't currently replicate this class so it only accepts a string
        """
        from sqlglot.dataframe.sql.session import SparkSession

        if isinstance(dataType, DataType):
            dataType = dataType.simpleString()
        return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))

    def startswith(self, value: t.Union[str, Column]) -> Column:
        value = self._lit(value) if not isinstance(value, Column) else value
        return self.invoke_anonymous_function(self, "STARTSWITH", value)

    def endswith(self, value: t.Union[str, Column]) -> Column:
        value = self._lit(value) if not isinstance(value, Column) else value
        return self.invoke_anonymous_function(self, "ENDSWITH", value)

    def rlike(self, regexp: str) -> Column:
        return self.invoke_expression_over_column(
            column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
        )

    def like(self, other: str):
        return self.invoke_expression_over_column(
            self, exp.Like, expression=self._lit(other).expression
        )

    def ilike(self, other: str):
        return self.invoke_expression_over_column(
            self, exp.ILike, expression=self._lit(other).expression
        )

    def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
        startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
        length = self._lit(length) if not isinstance(length, Column) else length
        return Column.invoke_expression_over_column(
            self, exp.Substring, start=startPos.expression, length=length.expression
        )

    def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
        # Fixed: the helper is imported as _flatten here (bare `flatten` was a
        # NameError); also guard the zero-argument call.
        columns = _flatten(cols) if cols and isinstance(cols[0], (list, set, tuple)) else cols  # type: ignore
        expressions = [self._lit(x).expression for x in columns]
        return Column.invoke_expression_over_column(self, exp.In, expressions=expressions)  # type: ignore

    def between(
        self,
        lowerBound: t.Union[ColumnOrLiteral],
        upperBound: t.Union[ColumnOrLiteral],
    ) -> Column:
        lower_bound_exp = (
            self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
        )
        upper_bound_exp = (
            self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
        )
        return Column(
            exp.Between(
                this=self.column_expression,
                low=lower_bound_exp.expression,
                high=upper_bound_exp.expression,
            )
        )

    def over(self, window: WindowSpec) -> Column:
        window_expression = window.expression.copy()
        window_expression.set("this", self.column_expression)
        return Column(window_expression)
# Type alias: anything accepted where a column is expected — a Column or its name.
ColumnOrName = t.Union[Column, str]
def transform(
    col: ColumnOrName,
    f: t.Union[t.Callable[[Column], Column], t.Callable[[Column, Column], Column]],
) -> Column:
    """TRANSFORM(col, f): map *f* (value or value+index lambda) over an array column."""
    lambda_node = _get_lambda_from_func(f)
    return Column.invoke_expression_over_column(
        col, expression.Transform, expression=Column(lambda_node)
    )
153,198 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def _get_lambda_from_func(lambda_expression: t.Callable):
    """Turn a Python callable into a sqlglot Lambda node by invoking it
    with Column-wrapped identifiers named after its parameters."""
    identifiers = [
        expression.to_identifier(var, quoted=_lambda_quoted(var))
        for var in lambda_expression.__code__.co_varnames
    ]
    result = lambda_expression(*[Column(ident) for ident in identifiers])
    return expression.Lambda(this=result.expression, expressions=identifiers)
class Column:
    """PySpark-style Column facade over a sqlglot expression tree.

    Reconstruction notes (this chunk's formatting was flattened):
    - Indentation restored; methods re-nested under the class.
    - ``@classmethod`` restored on the ``ensure_*`` / ``_lit`` /
      ``invoke_*`` helpers: their bodies use ``cls`` as the class itself
      (``cls(value)``, ``isinstance(value, cls)``) and call sites invoke
      them as ``Column.ensure_col(x)`` with a single argument.
    - ``@property`` restored on ``column_expression`` / ``alias_or_name``:
      internal uses read them without calling (``self.column_expression``).
    - ``isin`` used the bare name ``flatten``; this module imports the
      helper as ``_flatten``.
    NOTE(review): ``exp``, ``sqlglot``, ``is_iterable``, ``DataType`` and
    ``WindowSpec`` are not bound by the imports visible in this chunk —
    they come from the full module's import block; confirm there.
    """

    def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
        from sqlglot.dataframe.sql.session import SparkSession

        if isinstance(expression, Column):
            expression = expression.expression  # type: ignore
        elif expression is None or not isinstance(expression, (str, exp.Expression)):
            # Non-string, non-expression values become literals.
            expression = self._lit(expression).expression  # type: ignore
        elif not isinstance(expression, exp.Column):
            # Parse strings/other expressions and normalize identifier casing.
            expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
                SparkSession().dialect.normalize_identifier, copy=False
            )
        if expression is None:
            raise ValueError(f"Could not parse {expression}")
        self.expression: exp.Expression = expression  # type: ignore

    def __repr__(self):
        return repr(self.expression)

    def __hash__(self):
        return hash(self.expression)

    def __eq__(self, other: ColumnOrLiteral) -> Column:  # type: ignore
        return self.binary_op(exp.EQ, other)

    def __ne__(self, other: ColumnOrLiteral) -> Column:  # type: ignore
        return self.binary_op(exp.NEQ, other)

    def __gt__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.GT, other)

    def __ge__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.GTE, other)

    def __lt__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.LT, other)

    def __le__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.LTE, other)

    def __and__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.And, other)

    def __or__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Or, other)

    def __mod__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Mod, other)

    def __add__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Add, other)

    def __sub__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Sub, other)

    def __mul__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Mul, other)

    def __truediv__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Div, other)

    def __div__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Div, other)

    def __neg__(self) -> Column:
        return self.unary_op(exp.Neg)

    def __radd__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Add, other)

    def __rsub__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Sub, other)

    def __rmul__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Mul, other)

    def __rdiv__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Div, other)

    def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Div, other)

    def __rmod__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Mod, other)

    def __pow__(self, power: ColumnOrLiteral, modulo=None):
        return Column(exp.Pow(this=self.expression, expression=Column(power).expression))

    def __rpow__(self, power: ColumnOrLiteral):
        return Column(exp.Pow(this=Column(power).expression, expression=self.expression))

    def __invert__(self):
        return self.unary_op(exp.Not)

    def __rand__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.And, other)

    def __ror__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Or, other)

    @classmethod
    def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
        """Coerce *value* into a Column."""
        return cls(value)

    @classmethod
    def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
        """Coerce every element of *args* into a Column."""
        return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]

    @classmethod
    def _lit(cls, value: ColumnOrLiteral) -> Column:
        """Build a literal Column; dicts become STRUCTs with aliased fields."""
        if isinstance(value, dict):
            columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
            return cls(exp.Struct(expressions=columns))
        return cls(exp.convert(value))

    @classmethod
    def invoke_anonymous_function(
        cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
    ) -> Column:
        """Call an arbitrary SQL function *func_name* over column + args."""
        columns = [] if column is None else [cls.ensure_col(column)]
        column_args = [cls.ensure_col(arg) for arg in args]
        expressions = [x.expression for x in columns + column_args]
        new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
        return Column(new_expression)

    @classmethod
    def invoke_expression_over_column(
        cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
    ) -> Column:
        """Instantiate a sqlglot node over *column*, coercing kwargs.

        None-valued kwargs are dropped; iterable values are coerced per element.
        """
        ensured_column = None if column is None else cls.ensure_col(column)
        ensure_expression_values = {
            k: (
                [Column.ensure_col(x).expression for x in v]
                if is_iterable(v)
                else Column.ensure_col(v).expression
            )
            for k, v in kwargs.items()
            if v is not None
        }
        new_expression = (
            callable_expression(**ensure_expression_values)
            if ensured_column is None
            else callable_expression(
                this=ensured_column.column_expression, **ensure_expression_values
            )
        )
        return Column(new_expression)

    def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
        return Column(
            klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
        )

    def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
        return Column(
            klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
        )

    def unary_op(self, klass: t.Callable, **kwargs) -> Column:
        return Column(klass(this=self.column_expression, **kwargs))

    def is_alias(self):
        return isinstance(self.expression, exp.Alias)

    def is_column(self):
        return isinstance(self.expression, exp.Column)

    @property
    def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
        """The wrapped expression with any alias stripped."""
        return self.expression.unalias()

    @property
    def alias_or_name(self) -> str:
        """The alias if one is set, otherwise the bare name."""
        return self.expression.alias_or_name

    @classmethod
    def ensure_literal(cls, value) -> Column:
        """Coerce *value* into a literal-backed Column."""
        from sqlglot.dataframe.sql.functions import lit

        if isinstance(value, cls):
            value = value.expression
        if not isinstance(value, exp.Literal):
            return lit(value)
        return Column(value)

    def copy(self) -> Column:
        return Column(self.expression.copy())

    def set_table_name(self, table_name: str, copy=False) -> Column:
        """Qualify with *table_name*; mutates in place unless copy=True."""
        expression = self.expression.copy() if copy else self.expression
        expression.set("table", exp.to_identifier(table_name))
        return Column(expression)

    def sql(self, **kwargs) -> str:
        from sqlglot.dataframe.sql.session import SparkSession

        return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})

    def alias(self, name: str) -> Column:
        """Alias this column as *name*, parsed with the session dialect."""
        from sqlglot.dataframe.sql.session import SparkSession

        dialect = SparkSession().dialect
        alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
        new_expression = exp.alias_(
            self.column_expression,
            alias.this if isinstance(alias, exp.Column) else name,
            dialect=dialect,
        )
        return Column(new_expression)

    def asc(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
        return Column(new_expression)

    def desc(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
        return Column(new_expression)

    # asc/desc already use the matching NULL placement, so these alias.
    asc_nulls_first = asc

    def asc_nulls_last(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
        return Column(new_expression)

    def desc_nulls_first(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
        return Column(new_expression)

    desc_nulls_last = desc

    def when(self, condition: Column, value: t.Any) -> Column:
        """Append a WHEN branch; starts a fresh CASE if needed."""
        from sqlglot.dataframe.sql.functions import when

        column_with_if = when(condition, value)
        if not isinstance(self.expression, exp.Case):
            return column_with_if
        new_column = self.copy()
        new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
        return new_column

    def otherwise(self, value: t.Any) -> Column:
        """Set the CASE default (ELSE) branch."""
        from sqlglot.dataframe.sql.functions import lit

        true_value = value if isinstance(value, Column) else lit(value)
        new_column = self.copy()
        new_column.expression.set("default", true_value.column_expression)
        return new_column

    def isNull(self) -> Column:
        new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
        return Column(new_expression)

    def isNotNull(self) -> Column:
        new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
        return Column(new_expression)

    def cast(self, dataType: t.Union[str, DataType]) -> Column:
        """
        Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
        Sqlglot doesn't currently replicate this class so it only accepts a string
        """
        from sqlglot.dataframe.sql.session import SparkSession

        if isinstance(dataType, DataType):
            dataType = dataType.simpleString()
        return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))

    def startswith(self, value: t.Union[str, Column]) -> Column:
        value = self._lit(value) if not isinstance(value, Column) else value
        return self.invoke_anonymous_function(self, "STARTSWITH", value)

    def endswith(self, value: t.Union[str, Column]) -> Column:
        value = self._lit(value) if not isinstance(value, Column) else value
        return self.invoke_anonymous_function(self, "ENDSWITH", value)

    def rlike(self, regexp: str) -> Column:
        return self.invoke_expression_over_column(
            column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
        )

    def like(self, other: str):
        return self.invoke_expression_over_column(
            self, exp.Like, expression=self._lit(other).expression
        )

    def ilike(self, other: str):
        return self.invoke_expression_over_column(
            self, exp.ILike, expression=self._lit(other).expression
        )

    def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
        startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
        length = self._lit(length) if not isinstance(length, Column) else length
        return Column.invoke_expression_over_column(
            self, exp.Substring, start=startPos.expression, length=length.expression
        )

    def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
        # Fixed: the helper is imported as _flatten here (bare `flatten` was a
        # NameError); also guard the zero-argument call.
        columns = _flatten(cols) if cols and isinstance(cols[0], (list, set, tuple)) else cols  # type: ignore
        expressions = [self._lit(x).expression for x in columns]
        return Column.invoke_expression_over_column(self, exp.In, expressions=expressions)  # type: ignore

    def between(
        self,
        lowerBound: t.Union[ColumnOrLiteral],
        upperBound: t.Union[ColumnOrLiteral],
    ) -> Column:
        lower_bound_exp = (
            self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
        )
        upper_bound_exp = (
            self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
        )
        return Column(
            exp.Between(
                this=self.column_expression,
                low=lower_bound_exp.expression,
                high=upper_bound_exp.expression,
            )
        )

    def over(self, window: WindowSpec) -> Column:
        window_expression = window.expression.copy()
        window_expression.set("this", self.column_expression)
        return Column(window_expression)
# Type alias: anything accepted where a column is expected — a Column or its name.
ColumnOrName = t.Union[Column, str]
def exists(col: ColumnOrName, f: t.Callable[[Column], Column]) -> Column:
f_expression = _get_lambda_from_func(f)
return Column.invoke_anonymous_function(col, "EXISTS", Column(f_expression)) | null |
153,199 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def _get_lambda_from_func(lambda_expression: t.Callable):
    """Convert a Python callable into a sqlglot ``Lambda`` expression.

    The callable's variable names (``__code__.co_varnames``) become the lambda's
    variables; the callable is invoked once with ``Column`` placeholders to
    capture its body expression.
    """
    identifiers = []
    for name in lambda_expression.__code__.co_varnames:
        identifiers.append(expression.to_identifier(name, quoted=_lambda_quoted(name)))
    body = lambda_expression(*(Column(i) for i in identifiers)).expression
    return expression.Lambda(this=body, expressions=identifiers)
class Column:
    """Lazy wrapper around a sqlglot expression mimicking PySpark's ``Column`` API.

    Every operator or method constructs a new sqlglot expression node and returns
    a fresh ``Column``; nothing is evaluated until the query is rendered to SQL.

    NOTE(review): this class relies on module-level names (``sqlglot``, ``exp``,
    ``DataType``, ``WindowSpec``, ``flatten``, ``is_iterable``) imported elsewhere
    in the module and not visible in this chunk.
    """

    def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
        from sqlglot.dataframe.sql.session import SparkSession

        if isinstance(expression, Column):
            expression = expression.expression  # type: ignore
        elif expression is None or not isinstance(expression, (str, exp.Expression)):
            # Plain Python values (numbers, dicts, ...) become literal expressions.
            expression = self._lit(expression).expression  # type: ignore
        elif not isinstance(expression, exp.Column):
            # Parse SQL strings / non-column expressions with the session dialect and
            # normalize identifier casing to that dialect's rules.
            expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
                SparkSession().dialect.normalize_identifier, copy=False
            )
        if expression is None:
            raise ValueError(f"Could not parse {expression}")
        self.expression: exp.Expression = expression  # type: ignore

    def __repr__(self):
        return repr(self.expression)

    def __hash__(self):
        return hash(self.expression)

    # Operator overloads: each wraps both operands in the corresponding sqlglot
    # binary/unary node and returns a new Column.

    def __eq__(self, other: ColumnOrLiteral) -> Column:  # type: ignore
        return self.binary_op(exp.EQ, other)

    def __ne__(self, other: ColumnOrLiteral) -> Column:  # type: ignore
        return self.binary_op(exp.NEQ, other)

    def __gt__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.GT, other)

    def __ge__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.GTE, other)

    def __lt__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.LT, other)

    def __le__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.LTE, other)

    def __and__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.And, other)

    def __or__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Or, other)

    def __mod__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Mod, other)

    def __add__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Add, other)

    def __sub__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Sub, other)

    def __mul__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Mul, other)

    def __truediv__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Div, other)

    def __div__(self, other: ColumnOrLiteral) -> Column:
        # Python 2 relic, kept for API parity.
        return self.binary_op(exp.Div, other)

    def __neg__(self) -> Column:
        return self.unary_op(exp.Neg)

    def __radd__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Add, other)

    def __rsub__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Sub, other)

    def __rmul__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Mul, other)

    def __rdiv__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Div, other)

    def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Div, other)

    def __rmod__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Mod, other)

    def __pow__(self, power: ColumnOrLiteral, modulo=None):
        return Column(exp.Pow(this=self.expression, expression=Column(power).expression))

    def __rpow__(self, power: ColumnOrLiteral):
        return Column(exp.Pow(this=Column(power).expression, expression=self.expression))

    def __invert__(self):
        return self.unary_op(exp.Not)

    def __rand__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.And, other)

    def __ror__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Or, other)

    # FIX: the helpers below take ``cls`` and are invoked both as
    # ``Column.helper(...)`` and ``self.helper(...)`` with an explicit first
    # argument (see e.g. ``Column.invoke_expression_over_column(self, ...)`` in
    # ``substr``/``isin``); without @classmethod, ``cls`` binds to the wrong
    # object and ``cls(value)`` raises TypeError.

    @classmethod
    def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
        """Coerce *value* to a ``Column``."""
        return cls(value)

    @classmethod
    def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
        """Coerce every element of *args* to a ``Column``."""
        return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]

    @classmethod
    def _lit(cls, value: ColumnOrLiteral) -> Column:
        """Wrap a plain Python value as a literal; dicts become STRUCT expressions."""
        if isinstance(value, dict):
            columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
            return cls(exp.Struct(expressions=columns))
        return cls(exp.convert(value))

    @classmethod
    def invoke_anonymous_function(
        cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
    ) -> Column:
        """Build ``FUNC_NAME(column, *args)`` as an anonymous (unparsed) function call."""
        columns = [] if column is None else [cls.ensure_col(column)]
        column_args = [cls.ensure_col(arg) for arg in args]
        expressions = [x.expression for x in columns + column_args]
        new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
        return Column(new_expression)

    @classmethod
    def invoke_expression_over_column(
        cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
    ) -> Column:
        """Instantiate a sqlglot expression class over *column*.

        Non-None kwargs are coerced to expressions (element-wise for iterables);
        when *column* is given, its unaliased expression is passed as ``this``.
        """
        ensured_column = None if column is None else cls.ensure_col(column)
        ensure_expression_values = {
            k: (
                [Column.ensure_col(x).expression for x in v]
                if is_iterable(v)
                else Column.ensure_col(v).expression
            )
            for k, v in kwargs.items()
            if v is not None
        }
        new_expression = (
            callable_expression(**ensure_expression_values)
            if ensured_column is None
            else callable_expression(
                this=ensured_column.column_expression, **ensure_expression_values
            )
        )
        return Column(new_expression)

    def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
        return Column(
            klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
        )

    def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
        return Column(
            klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
        )

    def unary_op(self, klass: t.Callable, **kwargs) -> Column:
        return Column(klass(this=self.column_expression, **kwargs))

    # FIX: the accessors below are read as attributes throughout this class
    # (e.g. ``this=self.column_expression``); they must be properties, otherwise
    # the raw bound-method object would be embedded in the expression tree.

    @property
    def is_alias(self):
        return isinstance(self.expression, exp.Alias)

    @property
    def is_column(self):
        return isinstance(self.expression, exp.Column)

    @property
    def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
        """The wrapped expression with any alias stripped."""
        return self.expression.unalias()

    @property
    def alias_or_name(self) -> str:
        return self.expression.alias_or_name

    @classmethod
    def ensure_literal(cls, value) -> Column:
        """Coerce *value* to a literal Column (unwrapping an existing Column first)."""
        from sqlglot.dataframe.sql.functions import lit

        if isinstance(value, cls):
            value = value.expression
        if not isinstance(value, exp.Literal):
            return lit(value)
        return Column(value)

    def copy(self) -> Column:
        return Column(self.expression.copy())

    def set_table_name(self, table_name: str, copy=False) -> Column:
        """Qualify this column with *table_name*; mutates in place unless copy=True."""
        expression = self.expression.copy() if copy else self.expression
        expression.set("table", exp.to_identifier(table_name))
        return Column(expression)

    def sql(self, **kwargs) -> str:
        """Render this expression as SQL in the session dialect (overridable via kwargs)."""
        from sqlglot.dataframe.sql.session import SparkSession

        return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})

    def alias(self, name: str) -> Column:
        """Return this column aliased as *name* (parsed with the session dialect)."""
        from sqlglot.dataframe.sql.session import SparkSession

        dialect = SparkSession().dialect
        alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
        new_expression = exp.alias_(
            self.column_expression,
            alias.this if isinstance(alias, exp.Column) else name,
            dialect=dialect,
        )
        return Column(new_expression)

    def asc(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
        return Column(new_expression)

    def desc(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
        return Column(new_expression)

    asc_nulls_first = asc

    def asc_nulls_last(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
        return Column(new_expression)

    def desc_nulls_first(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
        return Column(new_expression)

    desc_nulls_last = desc

    def when(self, condition: Column, value: t.Any) -> Column:
        """Append a WHEN clause; starts a new CASE if this column is not one already."""
        from sqlglot.dataframe.sql.functions import when

        column_with_if = when(condition, value)
        if not isinstance(self.expression, exp.Case):
            return column_with_if
        new_column = self.copy()
        new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
        return new_column

    def otherwise(self, value: t.Any) -> Column:
        """Set the ELSE branch of this CASE expression."""
        from sqlglot.dataframe.sql.functions import lit

        true_value = value if isinstance(value, Column) else lit(value)
        new_column = self.copy()
        new_column.expression.set("default", true_value.column_expression)
        return new_column

    def isNull(self) -> Column:
        new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
        return Column(new_expression)

    def isNotNull(self) -> Column:
        new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
        return Column(new_expression)

    def cast(self, dataType: t.Union[str, DataType]) -> Column:
        """Cast this column to *dataType*.

        Functionality difference: PySpark's cast accepts DataType instances; here a
        DataType is converted via ``simpleString()`` and otherwise only strings are
        supported.
        """
        from sqlglot.dataframe.sql.session import SparkSession

        if isinstance(dataType, DataType):
            dataType = dataType.simpleString()
        return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))

    def startswith(self, value: t.Union[str, Column]) -> Column:
        value = self._lit(value) if not isinstance(value, Column) else value
        return self.invoke_anonymous_function(self, "STARTSWITH", value)

    def endswith(self, value: t.Union[str, Column]) -> Column:
        value = self._lit(value) if not isinstance(value, Column) else value
        return self.invoke_anonymous_function(self, "ENDSWITH", value)

    def rlike(self, regexp: str) -> Column:
        return self.invoke_expression_over_column(
            column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
        )

    def like(self, other: str):
        return self.invoke_expression_over_column(
            self, exp.Like, expression=self._lit(other).expression
        )

    def ilike(self, other: str):
        return self.invoke_expression_over_column(
            self, exp.ILike, expression=self._lit(other).expression
        )

    def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
        startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
        length = self._lit(length) if not isinstance(length, Column) else length
        return Column.invoke_expression_over_column(
            self, exp.Substring, start=startPos.expression, length=length.expression
        )

    def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
        columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols  # type: ignore
        expressions = [self._lit(x).expression for x in columns]
        return Column.invoke_expression_over_column(self, exp.In, expressions=expressions)  # type: ignore

    def between(
        self,
        lowerBound: t.Union[ColumnOrLiteral],
        upperBound: t.Union[ColumnOrLiteral],
    ) -> Column:
        lower_bound_exp = (
            self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
        )
        upper_bound_exp = (
            self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
        )
        return Column(
            exp.Between(
                this=self.column_expression,
                low=lower_bound_exp.expression,
                high=upper_bound_exp.expression,
            )
        )

    def over(self, window: WindowSpec) -> Column:
        window_expression = window.expression.copy()
        window_expression.set("this", self.column_expression)
        return Column(window_expression)
ColumnOrName = t.Union[Column, str]


def forall(col: ColumnOrName, f: t.Callable[[Column], Column]) -> Column:
    """Build a FORALL(col, <lambda>) function call from the Python callable *f*."""
    predicate = Column(_get_lambda_from_func(f))
    return Column.invoke_anonymous_function(col, "FORALL", predicate)
153,200 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def _get_lambda_from_func(lambda_expression: t.Callable):
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
def __repr__(self):
def __hash__(self):
def __eq__(self, other: ColumnOrLiteral) -> Column:
def __ne__(self, other: ColumnOrLiteral) -> Column:
def __gt__(self, other: ColumnOrLiteral) -> Column:
def __ge__(self, other: ColumnOrLiteral) -> Column:
def __lt__(self, other: ColumnOrLiteral) -> Column:
def __le__(self, other: ColumnOrLiteral) -> Column:
def __and__(self, other: ColumnOrLiteral) -> Column:
def __or__(self, other: ColumnOrLiteral) -> Column:
def __mod__(self, other: ColumnOrLiteral) -> Column:
def __add__(self, other: ColumnOrLiteral) -> Column:
def __sub__(self, other: ColumnOrLiteral) -> Column:
def __mul__(self, other: ColumnOrLiteral) -> Column:
def __truediv__(self, other: ColumnOrLiteral) -> Column:
def __div__(self, other: ColumnOrLiteral) -> Column:
def __neg__(self) -> Column:
def __radd__(self, other: ColumnOrLiteral) -> Column:
def __rsub__(self, other: ColumnOrLiteral) -> Column:
def __rmul__(self, other: ColumnOrLiteral) -> Column:
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
def __rmod__(self, other: ColumnOrLiteral) -> Column:
def __pow__(self, power: ColumnOrLiteral, modulo=None):
def __rpow__(self, power: ColumnOrLiteral):
def __invert__(self):
def __rand__(self, other: ColumnOrLiteral) -> Column:
def __ror__(self, other: ColumnOrLiteral) -> Column:
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
def _lit(cls, value: ColumnOrLiteral) -> Column:
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
def is_alias(self):
def is_column(self):
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
def alias_or_name(self) -> str:
def ensure_literal(cls, value) -> Column:
def copy(self) -> Column:
def set_table_name(self, table_name: str, copy=False) -> Column:
def sql(self, **kwargs) -> str:
def alias(self, name: str) -> Column:
def asc(self) -> Column:
def desc(self) -> Column:
def asc_nulls_last(self) -> Column:
def desc_nulls_first(self) -> Column:
def when(self, condition: Column, value: t.Any) -> Column:
def otherwise(self, value: t.Any) -> Column:
def isNull(self) -> Column:
def isNotNull(self) -> Column:
def cast(self, dataType: t.Union[str, DataType]) -> Column:
def startswith(self, value: t.Union[str, Column]) -> Column:
def endswith(self, value: t.Union[str, Column]) -> Column:
def rlike(self, regexp: str) -> Column:
def like(self, other: str):
def ilike(self, other: str):
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
def over(self, window: WindowSpec) -> Column:
ColumnOrName = t.Union[Column, str]
def filter(
    col: ColumnOrName,
    f: t.Union[t.Callable[[Column], Column], t.Callable[[Column, Column], Column]],
) -> Column:
    """Build an ArrayFilter expression over *col* from the lambda derived from *f*."""
    return Column.invoke_expression_over_column(
        col,
        expression.ArrayFilter,
        expression=_get_lambda_from_func(f),
    )
153,201 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def _get_lambda_from_func(lambda_expression: t.Callable):
    """Turn a Python callable into a sqlglot ``Lambda`` node.

    Variable names from ``__code__.co_varnames`` become the lambda's variables;
    the callable is invoked once with ``Column`` placeholders to obtain its body
    expression.
    """
    variables = [
        expression.to_identifier(x, quoted=_lambda_quoted(x))
        for x in lambda_expression.__code__.co_varnames
    ]
    return expression.Lambda(
        this=lambda_expression(*[Column(x) for x in variables]).expression,
        expressions=variables,
    )
class Column:
    """Lazy wrapper around a sqlglot expression mimicking PySpark's ``Column`` API.

    Every operator or method builds a new sqlglot expression node and returns a
    fresh ``Column``; nothing is evaluated until the query is rendered to SQL.

    NOTE(review): this class relies on module-level names (``sqlglot``, ``exp``,
    ``DataType``, ``WindowSpec``, ``flatten``, ``is_iterable``) imported elsewhere
    in the module and not visible in this chunk. Also, the ``cls``-taking helpers
    and the attribute-style accessors below look like they lost their
    ``@classmethod`` / ``@property`` decorators in extraction — confirm against
    the upstream file.
    """

    def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
        from sqlglot.dataframe.sql.session import SparkSession

        if isinstance(expression, Column):
            expression = expression.expression  # type: ignore
        elif expression is None or not isinstance(expression, (str, exp.Expression)):
            # Plain Python values (numbers, dicts, ...) become literal expressions.
            expression = self._lit(expression).expression  # type: ignore
        elif not isinstance(expression, exp.Column):
            # Parse SQL strings / non-column expressions with the session dialect
            # and normalize identifier casing to that dialect's rules.
            expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
                SparkSession().dialect.normalize_identifier, copy=False
            )
        if expression is None:
            # NOTE(review): at this point the message always renders "None";
            # the original input has already been overwritten.
            raise ValueError(f"Could not parse {expression}")
        self.expression: exp.Expression = expression  # type: ignore

    def __repr__(self):
        return repr(self.expression)

    def __hash__(self):
        return hash(self.expression)

    # Operator overloads: each wraps both operands in the corresponding sqlglot
    # node (via binary_op / inverse_binary_op / unary_op) and returns a new Column.

    def __eq__(self, other: ColumnOrLiteral) -> Column:  # type: ignore
        return self.binary_op(exp.EQ, other)

    def __ne__(self, other: ColumnOrLiteral) -> Column:  # type: ignore
        return self.binary_op(exp.NEQ, other)

    def __gt__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.GT, other)

    def __ge__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.GTE, other)

    def __lt__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.LT, other)

    def __le__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.LTE, other)

    def __and__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.And, other)

    def __or__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Or, other)

    def __mod__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Mod, other)

    def __add__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Add, other)

    def __sub__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Sub, other)

    def __mul__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Mul, other)

    def __truediv__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Div, other)

    def __div__(self, other: ColumnOrLiteral) -> Column:
        # Python 2 relic, kept for API parity.
        return self.binary_op(exp.Div, other)

    def __neg__(self) -> Column:
        return self.unary_op(exp.Neg)

    def __radd__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Add, other)

    def __rsub__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Sub, other)

    def __rmul__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Mul, other)

    def __rdiv__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Div, other)

    def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Div, other)

    def __rmod__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Mod, other)

    def __pow__(self, power: ColumnOrLiteral, modulo=None):
        return Column(exp.Pow(this=self.expression, expression=Column(power).expression))

    def __rpow__(self, power: ColumnOrLiteral):
        return Column(exp.Pow(this=Column(power).expression, expression=self.expression))

    def __invert__(self):
        return self.unary_op(exp.Not)

    def __rand__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.And, other)

    def __ror__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Or, other)

    # NOTE(review): the helpers below take ``cls`` and are called as
    # ``Column.helper(self, ...)`` elsewhere in this class — they appear to be
    # missing @classmethod decorators (stripped in extraction); verify upstream.

    def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
        # Coerce *value* to a Column.
        return cls(value)

    def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
        # Coerce every element of *args* to a Column.
        return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]

    def _lit(cls, value: ColumnOrLiteral) -> Column:
        # Wrap a plain Python value as a literal; dicts become STRUCT expressions.
        if isinstance(value, dict):
            columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
            return cls(exp.Struct(expressions=columns))
        return cls(exp.convert(value))

    def invoke_anonymous_function(
        cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
    ) -> Column:
        # Build FUNC_NAME(column, *args) as an anonymous (unparsed) function call.
        columns = [] if column is None else [cls.ensure_col(column)]
        column_args = [cls.ensure_col(arg) for arg in args]
        expressions = [x.expression for x in columns + column_args]
        new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
        return Column(new_expression)

    def invoke_expression_over_column(
        cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
    ) -> Column:
        # Instantiate a sqlglot expression class over *column*; non-None kwargs
        # are coerced to expressions (element-wise for iterables).
        ensured_column = None if column is None else cls.ensure_col(column)
        ensure_expression_values = {
            k: (
                [Column.ensure_col(x).expression for x in v]
                if is_iterable(v)
                else Column.ensure_col(v).expression
            )
            for k, v in kwargs.items()
            if v is not None
        }
        new_expression = (
            callable_expression(**ensure_expression_values)
            if ensured_column is None
            else callable_expression(
                this=ensured_column.column_expression, **ensure_expression_values
            )
        )
        return Column(new_expression)

    def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
        return Column(
            klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
        )

    def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
        return Column(
            klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
        )

    def unary_op(self, klass: t.Callable, **kwargs) -> Column:
        return Column(klass(this=self.column_expression, **kwargs))

    # NOTE(review): the accessors below are read as attributes elsewhere in this
    # class (e.g. ``this=self.column_expression``) — they appear to be missing
    # @property decorators (stripped in extraction); verify upstream.

    def is_alias(self):
        return isinstance(self.expression, exp.Alias)

    def is_column(self):
        return isinstance(self.expression, exp.Column)

    def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
        # The wrapped expression with any alias stripped.
        return self.expression.unalias()

    def alias_or_name(self) -> str:
        return self.expression.alias_or_name

    def ensure_literal(cls, value) -> Column:
        # Coerce *value* to a literal Column (unwrapping an existing Column first).
        from sqlglot.dataframe.sql.functions import lit

        if isinstance(value, cls):
            value = value.expression
        if not isinstance(value, exp.Literal):
            return lit(value)
        return Column(value)

    def copy(self) -> Column:
        return Column(self.expression.copy())

    def set_table_name(self, table_name: str, copy=False) -> Column:
        # Qualify this column with *table_name*; mutates in place unless copy=True.
        expression = self.expression.copy() if copy else self.expression
        expression.set("table", exp.to_identifier(table_name))
        return Column(expression)

    def sql(self, **kwargs) -> str:
        # Render as SQL in the session dialect (overridable via kwargs).
        from sqlglot.dataframe.sql.session import SparkSession

        return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})

    def alias(self, name: str) -> Column:
        # Alias this column as *name*, parsing *name* with the session dialect.
        from sqlglot.dataframe.sql.session import SparkSession

        dialect = SparkSession().dialect
        alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
        new_expression = exp.alias_(
            self.column_expression,
            alias.this if isinstance(alias, exp.Column) else name,
            dialect=dialect,
        )
        return Column(new_expression)

    def asc(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
        return Column(new_expression)

    def desc(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
        return Column(new_expression)

    asc_nulls_first = asc

    def asc_nulls_last(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
        return Column(new_expression)

    def desc_nulls_first(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
        return Column(new_expression)

    desc_nulls_last = desc

    def when(self, condition: Column, value: t.Any) -> Column:
        # Append a WHEN clause; starts a new CASE if this column is not one already.
        from sqlglot.dataframe.sql.functions import when

        column_with_if = when(condition, value)
        if not isinstance(self.expression, exp.Case):
            return column_with_if
        new_column = self.copy()
        new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
        return new_column

    def otherwise(self, value: t.Any) -> Column:
        # Set the ELSE branch of this CASE expression.
        from sqlglot.dataframe.sql.functions import lit

        true_value = value if isinstance(value, Column) else lit(value)
        new_column = self.copy()
        new_column.expression.set("default", true_value.column_expression)
        return new_column

    def isNull(self) -> Column:
        new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
        return Column(new_expression)

    def isNotNull(self) -> Column:
        new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
        return Column(new_expression)

    def cast(self, dataType: t.Union[str, DataType]) -> Column:
        """
        Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
        Sqlglot doesn't currently replicate this class so it only accepts a string
        """
        from sqlglot.dataframe.sql.session import SparkSession

        if isinstance(dataType, DataType):
            dataType = dataType.simpleString()
        return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))

    def startswith(self, value: t.Union[str, Column]) -> Column:
        value = self._lit(value) if not isinstance(value, Column) else value
        return self.invoke_anonymous_function(self, "STARTSWITH", value)

    def endswith(self, value: t.Union[str, Column]) -> Column:
        value = self._lit(value) if not isinstance(value, Column) else value
        return self.invoke_anonymous_function(self, "ENDSWITH", value)

    def rlike(self, regexp: str) -> Column:
        return self.invoke_expression_over_column(
            column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
        )

    def like(self, other: str):
        return self.invoke_expression_over_column(
            self, exp.Like, expression=self._lit(other).expression
        )

    def ilike(self, other: str):
        return self.invoke_expression_over_column(
            self, exp.ILike, expression=self._lit(other).expression
        )

    def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
        startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
        length = self._lit(length) if not isinstance(length, Column) else length
        return Column.invoke_expression_over_column(
            self, exp.Substring, start=startPos.expression, length=length.expression
        )

    def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
        columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols  # type: ignore
        expressions = [self._lit(x).expression for x in columns]
        return Column.invoke_expression_over_column(self, exp.In, expressions=expressions)  # type: ignore

    def between(
        self,
        lowerBound: t.Union[ColumnOrLiteral],
        upperBound: t.Union[ColumnOrLiteral],
    ) -> Column:
        lower_bound_exp = (
            self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
        )
        upper_bound_exp = (
            self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
        )
        return Column(
            exp.Between(
                this=self.column_expression,
                low=lower_bound_exp.expression,
                high=upper_bound_exp.expression,
            )
        )

    def over(self, window: WindowSpec) -> Column:
        # Attach this column expression to a copy of *window*'s window expression.
        window_expression = window.expression.copy()
        window_expression.set("this", self.column_expression)
        return Column(window_expression)
ColumnOrName = t.Union[Column, str]


def zip_with(
    left: ColumnOrName, right: ColumnOrName, f: t.Callable[[Column, Column], Column]
) -> Column:
    """Build a ZIP_WITH(left, right, <lambda>) call from the two-argument callable *f*."""
    combiner = Column(_get_lambda_from_func(f))
    return Column.invoke_anonymous_function(left, "ZIP_WITH", right, combiner)
153,202 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def _get_lambda_from_func(lambda_expression: t.Callable):
    """Convert a Python callable into a sqlglot ``Lambda`` expression.

    Identifiers are built from ``__code__.co_varnames``; the callable is invoked
    once with ``Column`` placeholders so its return value supplies the lambda body.
    """
    variables = [
        expression.to_identifier(x, quoted=_lambda_quoted(x))
        for x in lambda_expression.__code__.co_varnames
    ]
    return expression.Lambda(
        this=lambda_expression(*[Column(x) for x in variables]).expression,
        expressions=variables,
    )
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def transform_keys(col: ColumnOrName, f: t.Union[t.Callable[[Column, Column], Column]]) -> Column:
f_expression = _get_lambda_from_func(f)
return Column.invoke_anonymous_function(col, "TRANSFORM_KEYS", Column(f_expression)) | null |
153,203 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def _get_lambda_from_func(lambda_expression: t.Callable):
variables = [
expression.to_identifier(x, quoted=_lambda_quoted(x))
for x in lambda_expression.__code__.co_varnames
]
return expression.Lambda(
this=lambda_expression(*[Column(x) for x in variables]).expression,
expressions=variables,
)
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def transform_values(col: ColumnOrName, f: t.Union[t.Callable[[Column, Column], Column]]) -> Column:
f_expression = _get_lambda_from_func(f)
return Column.invoke_anonymous_function(col, "TRANSFORM_VALUES", Column(f_expression)) | null |
153,204 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def _get_lambda_from_func(lambda_expression: t.Callable):
variables = [
expression.to_identifier(x, quoted=_lambda_quoted(x))
for x in lambda_expression.__code__.co_varnames
]
return expression.Lambda(
this=lambda_expression(*[Column(x) for x in variables]).expression,
expressions=variables,
)
class Column:
def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(expression, Column):
expression = expression.expression # type: ignore
elif expression is None or not isinstance(expression, (str, exp.Expression)):
expression = self._lit(expression).expression # type: ignore
elif not isinstance(expression, exp.Column):
expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
SparkSession().dialect.normalize_identifier, copy=False
)
if expression is None:
raise ValueError(f"Could not parse {expression}")
self.expression: exp.Expression = expression # type: ignore
def __repr__(self):
return repr(self.expression)
def __hash__(self):
return hash(self.expression)
def __eq__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.EQ, other)
def __ne__(self, other: ColumnOrLiteral) -> Column: # type: ignore
return self.binary_op(exp.NEQ, other)
def __gt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GT, other)
def __ge__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.GTE, other)
def __lt__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LT, other)
def __le__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.LTE, other)
def __and__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.And, other)
def __or__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Or, other)
def __mod__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mod, other)
def __add__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Add, other)
def __sub__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Sub, other)
def __mul__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Mul, other)
def __truediv__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __div__(self, other: ColumnOrLiteral) -> Column:
return self.binary_op(exp.Div, other)
def __neg__(self) -> Column:
return self.unary_op(exp.Neg)
def __radd__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Add, other)
def __rsub__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Sub, other)
def __rmul__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mul, other)
def __rdiv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Div, other)
def __rmod__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Mod, other)
def __pow__(self, power: ColumnOrLiteral, modulo=None):
return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
def __rpow__(self, power: ColumnOrLiteral):
return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
def __invert__(self):
return self.unary_op(exp.Not)
def __rand__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.And, other)
def __ror__(self, other: ColumnOrLiteral) -> Column:
return self.inverse_binary_op(exp.Or, other)
def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
return cls(value)
def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
def _lit(cls, value: ColumnOrLiteral) -> Column:
if isinstance(value, dict):
columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
return cls(exp.Struct(expressions=columns))
return cls(exp.convert(value))
def invoke_anonymous_function(
cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
) -> Column:
columns = [] if column is None else [cls.ensure_col(column)]
column_args = [cls.ensure_col(arg) for arg in args]
expressions = [x.expression for x in columns + column_args]
new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
return Column(new_expression)
def invoke_expression_over_column(
cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
) -> Column:
ensured_column = None if column is None else cls.ensure_col(column)
ensure_expression_values = {
k: (
[Column.ensure_col(x).expression for x in v]
if is_iterable(v)
else Column.ensure_col(v).expression
)
for k, v in kwargs.items()
if v is not None
}
new_expression = (
callable_expression(**ensure_expression_values)
if ensured_column is None
else callable_expression(
this=ensured_column.column_expression, **ensure_expression_values
)
)
return Column(new_expression)
def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
)
def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
return Column(
klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
)
def unary_op(self, klass: t.Callable, **kwargs) -> Column:
return Column(klass(this=self.column_expression, **kwargs))
def is_alias(self):
return isinstance(self.expression, exp.Alias)
def is_column(self):
return isinstance(self.expression, exp.Column)
def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
return self.expression.unalias()
def alias_or_name(self) -> str:
return self.expression.alias_or_name
def ensure_literal(cls, value) -> Column:
from sqlglot.dataframe.sql.functions import lit
if isinstance(value, cls):
value = value.expression
if not isinstance(value, exp.Literal):
return lit(value)
return Column(value)
def copy(self) -> Column:
return Column(self.expression.copy())
def set_table_name(self, table_name: str, copy=False) -> Column:
expression = self.expression.copy() if copy else self.expression
expression.set("table", exp.to_identifier(table_name))
return Column(expression)
def sql(self, **kwargs) -> str:
from sqlglot.dataframe.sql.session import SparkSession
return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})
def alias(self, name: str) -> Column:
from sqlglot.dataframe.sql.session import SparkSession
dialect = SparkSession().dialect
alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
new_expression = exp.alias_(
self.column_expression,
alias.this if isinstance(alias, exp.Column) else name,
dialect=dialect,
)
return Column(new_expression)
def asc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
return Column(new_expression)
def desc(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
return Column(new_expression)
asc_nulls_first = asc
def asc_nulls_last(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
return Column(new_expression)
def desc_nulls_first(self) -> Column:
new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
return Column(new_expression)
desc_nulls_last = desc
def when(self, condition: Column, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import when
column_with_if = when(condition, value)
if not isinstance(self.expression, exp.Case):
return column_with_if
new_column = self.copy()
new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
return new_column
def otherwise(self, value: t.Any) -> Column:
from sqlglot.dataframe.sql.functions import lit
true_value = value if isinstance(value, Column) else lit(value)
new_column = self.copy()
new_column.expression.set("default", true_value.column_expression)
return new_column
def isNull(self) -> Column:
new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
return Column(new_expression)
def isNotNull(self) -> Column:
new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
return Column(new_expression)
def cast(self, dataType: t.Union[str, DataType]) -> Column:
"""
Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
Sqlglot doesn't currently replicate this class so it only accepts a string
"""
from sqlglot.dataframe.sql.session import SparkSession
if isinstance(dataType, DataType):
dataType = dataType.simpleString()
return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))
def startswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "STARTSWITH", value)
def endswith(self, value: t.Union[str, Column]) -> Column:
value = self._lit(value) if not isinstance(value, Column) else value
return self.invoke_anonymous_function(self, "ENDSWITH", value)
def rlike(self, regexp: str) -> Column:
return self.invoke_expression_over_column(
column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
)
def like(self, other: str):
return self.invoke_expression_over_column(
self, exp.Like, expression=self._lit(other).expression
)
def ilike(self, other: str):
return self.invoke_expression_over_column(
self, exp.ILike, expression=self._lit(other).expression
)
def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
length = self._lit(length) if not isinstance(length, Column) else length
return Column.invoke_expression_over_column(
self, exp.Substring, start=startPos.expression, length=length.expression
)
def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols # type: ignore
expressions = [self._lit(x).expression for x in columns]
return Column.invoke_expression_over_column(self, exp.In, expressions=expressions) # type: ignore
def between(
self,
lowerBound: t.Union[ColumnOrLiteral],
upperBound: t.Union[ColumnOrLiteral],
) -> Column:
lower_bound_exp = (
self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
)
upper_bound_exp = (
self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
)
return Column(
exp.Between(
this=self.column_expression,
low=lower_bound_exp.expression,
high=upper_bound_exp.expression,
)
)
def over(self, window: WindowSpec) -> Column:
window_expression = window.expression.copy()
window_expression.set("this", self.column_expression)
return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def map_filter(col: ColumnOrName, f: t.Union[t.Callable[[Column, Column], Column]]) -> Column:
f_expression = _get_lambda_from_func(f)
return Column.invoke_anonymous_function(col, "MAP_FILTER", Column(f_expression)) | null |
153,205 | from __future__ import annotations
import typing as t
from sqlglot import exp as expression
from sqlglot.dataframe.sql.column import Column
from sqlglot.helper import ensure_list, flatten as _flatten
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnOrLiteral, ColumnOrName
from sqlglot.dataframe.sql.dataframe import DataFrame
def _get_lambda_from_func(lambda_expression: t.Callable):
variables = [
expression.to_identifier(x, quoted=_lambda_quoted(x))
for x in lambda_expression.__code__.co_varnames
]
return expression.Lambda(
this=lambda_expression(*[Column(x) for x in variables]).expression,
expressions=variables,
)
class Column:
    """Wrapper around a sqlglot expression mimicking PySpark's Column API.

    Operator dunders build sqlglot binary/unary expressions via ``binary_op`` /
    ``unary_op``; helper methods coerce literals and names into Columns.

    NOTE(review): methods written with a ``cls`` first parameter
    (``ensure_col``, ``_lit``, ``invoke_anonymous_function`` ...) and the
    accessors (``column_expression``, ``alias_or_name`` ...) read like
    @classmethod / @property definitions whose decorators were stripped from
    this dump — confirm against upstream before changing call sites.
    """

    def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
        from sqlglot.dataframe.sql.session import SparkSession

        if isinstance(expression, Column):
            # Unwrap an existing Column.
            expression = expression.expression  # type: ignore
        elif expression is None or not isinstance(expression, (str, exp.Expression)):
            # Any non-string/non-expression value (including None) becomes a literal.
            expression = self._lit(expression).expression  # type: ignore
        elif not isinstance(expression, exp.Column):
            # Parse strings / other expressions and normalize identifiers
            # according to the active session dialect.
            expression = sqlglot.maybe_parse(expression, dialect=SparkSession().dialect).transform(
                SparkSession().dialect.normalize_identifier, copy=False
            )
            if expression is None:
                raise ValueError(f"Could not parse {expression}")

        self.expression: exp.Expression = expression  # type: ignore

    def __repr__(self):
        return repr(self.expression)

    def __hash__(self):
        return hash(self.expression)

    # --- comparison operators (each builds the corresponding sqlglot node) ---

    def __eq__(self, other: ColumnOrLiteral) -> Column:  # type: ignore
        return self.binary_op(exp.EQ, other)

    def __ne__(self, other: ColumnOrLiteral) -> Column:  # type: ignore
        return self.binary_op(exp.NEQ, other)

    def __gt__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.GT, other)

    def __ge__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.GTE, other)

    def __lt__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.LT, other)

    def __le__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.LTE, other)

    # --- boolean / arithmetic operators --------------------------------------

    def __and__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.And, other)

    def __or__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Or, other)

    def __mod__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Mod, other)

    def __add__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Add, other)

    def __sub__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Sub, other)

    def __mul__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Mul, other)

    def __truediv__(self, other: ColumnOrLiteral) -> Column:
        return self.binary_op(exp.Div, other)

    def __div__(self, other: ColumnOrLiteral) -> Column:
        # Python 2-style division name; kept for API parity.
        return self.binary_op(exp.Div, other)

    def __neg__(self) -> Column:
        return self.unary_op(exp.Neg)

    # Reflected variants: the other operand becomes the left-hand side.

    def __radd__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Add, other)

    def __rsub__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Sub, other)

    def __rmul__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Mul, other)

    def __rdiv__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Div, other)

    def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Div, other)

    def __rmod__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Mod, other)

    def __pow__(self, power: ColumnOrLiteral, modulo=None):
        # `modulo` is accepted for the numeric protocol but not used.
        return Column(exp.Pow(this=self.expression, expression=Column(power).expression))

    def __rpow__(self, power: ColumnOrLiteral):
        return Column(exp.Pow(this=Column(power).expression, expression=self.expression))

    def __invert__(self):
        return self.unary_op(exp.Not)

    def __rand__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.And, other)

    def __ror__(self, other: ColumnOrLiteral) -> Column:
        return self.inverse_binary_op(exp.Or, other)

    # --- coercion helpers (written against ``cls``; see class NOTE) ----------

    def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]) -> Column:
        # Coerce any supported value into a Column.
        return cls(value)

    def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
        return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]

    def _lit(cls, value: ColumnOrLiteral) -> Column:
        # Dicts become STRUCT(value AS key, ...); everything else a plain literal.
        if isinstance(value, dict):
            columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
            return cls(exp.Struct(expressions=columns))
        return cls(exp.convert(value))

    def invoke_anonymous_function(
        cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
    ) -> Column:
        # Build FUNC_NAME(column, *args) as an uninterpreted (anonymous) SQL call.
        columns = [] if column is None else [cls.ensure_col(column)]
        column_args = [cls.ensure_col(arg) for arg in args]
        expressions = [x.expression for x in columns + column_args]
        new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
        return Column(new_expression)

    def invoke_expression_over_column(
        cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
    ) -> Column:
        """
        Invoke a sqlglot expression constructor, first coercing the optional
        leading column and every kwarg into expressions. Iterable kwargs are
        coerced element-wise; kwargs that are None are dropped entirely.
        """
        ensured_column = None if column is None else cls.ensure_col(column)
        ensure_expression_values = {
            k: (
                [Column.ensure_col(x).expression for x in v]
                if is_iterable(v)
                else Column.ensure_col(v).expression
            )
            for k, v in kwargs.items()
            if v is not None
        }
        new_expression = (
            callable_expression(**ensure_expression_values)
            if ensured_column is None
            else callable_expression(
                this=ensured_column.column_expression, **ensure_expression_values
            )
        )
        return Column(new_expression)

    def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
        # self <op> other
        return Column(
            klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
        )

    def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
        # other <op> self (used by the reflected dunders)
        return Column(
            klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
        )

    def unary_op(self, klass: t.Callable, **kwargs) -> Column:
        return Column(klass(this=self.column_expression, **kwargs))

    def is_alias(self):
        return isinstance(self.expression, exp.Alias)

    def is_column(self):
        return isinstance(self.expression, exp.Column)

    def column_expression(self) -> t.Union[exp.Column, exp.Literal]:
        # The underlying expression with any alias stripped.
        return self.expression.unalias()

    def alias_or_name(self) -> str:
        return self.expression.alias_or_name

    def ensure_literal(cls, value) -> Column:
        from sqlglot.dataframe.sql.functions import lit

        # Unwrap Columns, then force the result to be a literal expression.
        if isinstance(value, cls):
            value = value.expression
        if not isinstance(value, exp.Literal):
            return lit(value)
        return Column(value)

    def copy(self) -> Column:
        return Column(self.expression.copy())

    def set_table_name(self, table_name: str, copy=False) -> Column:
        # Qualify the column with a table identifier; mutates in place unless copy=True.
        expression = self.expression.copy() if copy else self.expression
        expression.set("table", exp.to_identifier(table_name))
        return Column(expression)

    def sql(self, **kwargs) -> str:
        from sqlglot.dataframe.sql.session import SparkSession

        # Render with the session dialect unless the caller overrides it.
        return self.expression.sql(**{"dialect": SparkSession().dialect, **kwargs})

    def alias(self, name: str) -> Column:
        from sqlglot.dataframe.sql.session import SparkSession

        dialect = SparkSession().dialect
        alias: exp.Expression = sqlglot.maybe_parse(name, dialect=dialect)
        # If `name` parsed to a column, use its identifier; otherwise the raw string.
        new_expression = exp.alias_(
            self.column_expression,
            alias.this if isinstance(alias, exp.Column) else name,
            dialect=dialect,
        )
        return Column(new_expression)

    # --- ordering helpers ----------------------------------------------------

    def asc(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
        return Column(new_expression)

    def desc(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
        return Column(new_expression)

    # asc defaults to nulls-first, so it doubles as asc_nulls_first.
    asc_nulls_first = asc

    def asc_nulls_last(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
        return Column(new_expression)

    def desc_nulls_first(self) -> Column:
        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
        return Column(new_expression)

    # desc defaults to nulls-last, so it doubles as desc_nulls_last.
    desc_nulls_last = desc

    def when(self, condition: Column, value: t.Any) -> Column:
        from sqlglot.dataframe.sql.functions import when

        # Chain onto an existing CASE expression if present; otherwise start one.
        column_with_if = when(condition, value)
        if not isinstance(self.expression, exp.Case):
            return column_with_if
        new_column = self.copy()
        new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
        return new_column

    def otherwise(self, value: t.Any) -> Column:
        from sqlglot.dataframe.sql.functions import lit

        # Set the ELSE branch of the CASE expression.
        true_value = value if isinstance(value, Column) else lit(value)
        new_column = self.copy()
        new_column.expression.set("default", true_value.column_expression)
        return new_column

    def isNull(self) -> Column:
        new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
        return Column(new_expression)

    def isNotNull(self) -> Column:
        new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
        return Column(new_expression)

    def cast(self, dataType: t.Union[str, DataType]) -> Column:
        """
        Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
        Sqlglot doesn't currently replicate this class so it only accepts a string
        """
        from sqlglot.dataframe.sql.session import SparkSession

        if isinstance(dataType, DataType):
            dataType = dataType.simpleString()
        return Column(exp.cast(self.column_expression, dataType, dialect=SparkSession().dialect))

    def startswith(self, value: t.Union[str, Column]) -> Column:
        value = self._lit(value) if not isinstance(value, Column) else value
        return self.invoke_anonymous_function(self, "STARTSWITH", value)

    def endswith(self, value: t.Union[str, Column]) -> Column:
        value = self._lit(value) if not isinstance(value, Column) else value
        return self.invoke_anonymous_function(self, "ENDSWITH", value)

    def rlike(self, regexp: str) -> Column:
        return self.invoke_expression_over_column(
            column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
        )

    def like(self, other: str):
        return self.invoke_expression_over_column(
            self, exp.Like, expression=self._lit(other).expression
        )

    def ilike(self, other: str):
        return self.invoke_expression_over_column(
            self, exp.ILike, expression=self._lit(other).expression
        )

    def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
        startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
        length = self._lit(length) if not isinstance(length, Column) else length
        return Column.invoke_expression_over_column(
            self, exp.Substring, start=startPos.expression, length=length.expression
        )

    def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
        # Accept either varargs of literals or a single iterable of them.
        columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols  # type: ignore
        expressions = [self._lit(x).expression for x in columns]
        return Column.invoke_expression_over_column(self, exp.In, expressions=expressions)  # type: ignore

    def between(
        self,
        lowerBound: t.Union[ColumnOrLiteral],
        upperBound: t.Union[ColumnOrLiteral],
    ) -> Column:
        # Coerce non-Column bounds to literals before building BETWEEN.
        lower_bound_exp = (
            self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
        )
        upper_bound_exp = (
            self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
        )
        return Column(
            exp.Between(
                this=self.column_expression,
                low=lower_bound_exp.expression,
                high=upper_bound_exp.expression,
            )
        )

    def over(self, window: WindowSpec) -> Column:
        # Attach this column as the function of the given window specification.
        window_expression = window.expression.copy()
        window_expression.set("this", self.column_expression)
        return Column(window_expression)
ColumnOrName = t.Union[Column, str]
def map_zip_with(
    col1: ColumnOrName,
    col2: ColumnOrName,
    f: t.Callable[[Column, Column, Column], Column],
) -> Column:
    """Merge two maps key-wise: `f` receives (key, value1, value2) and returns the merged value.

    Mirrors pyspark's ``map_zip_with``. (The original annotation wrapped the
    callable in a single-member ``t.Union``, which is a no-op.)
    """
    f_expression = _get_lambda_from_func(f)
    return Column.invoke_anonymous_function(col1, "MAP_ZIP_WITH", col2, Column(f_expression))
from __future__ import annotations
import functools
import typing as t
from enum import IntEnum
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql.dataframe import DataFrame
from sqlglot.dataframe.sql.group import GroupedData
class Operation(IntEnum):
    """Ordered stages of SELECT statement construction.

    The integer ordering mirrors the logical order of clauses in a SELECT;
    comparing a DataFrame's last operation against these values determines
    whether the next operation can extend the current expression.
    """

    INIT = -1
    NO_OP = 0
    FROM = 1
    WHERE = 2
    GROUP_BY = 3
    HAVING = 4
    SELECT = 5
    ORDER_BY = 6
    LIMIT = 7
class DataFrame:
def __init__(
    self,
    spark: SparkSession,
    expression: exp.Select,
    branch_id: t.Optional[str] = None,
    sequence_id: t.Optional[str] = None,
    last_op: Operation = Operation.INIT,
    pending_hints: t.Optional[t.List[exp.Expression]] = None,
    output_expression_container: t.Optional[OutputExpressionContainer] = None,
    **kwargs,
):
    """Wrap a sqlglot SELECT expression with DataFrame bookkeeping.

    branch_id / sequence_id default to fresh random ids from the session;
    they are used to qualify column references and match join hints.
    """
    self.spark = spark
    self.expression = expression
    self.branch_id = branch_id or self.spark._random_branch_id
    self.sequence_id = sequence_id or self.spark._random_sequence_id
    self.last_op = last_op
    self.pending_hints = pending_hints or []
    # Defaults to a bare SELECT; writes use an exp.Create/exp.Insert container.
    self.output_expression_container = output_expression_container or exp.Select()
def __getattr__(self, column_name: str) -> Column:
    """Allow `df.some_col` attribute access as an alias for `df["some_col"]`."""
    return self.__getitem__(column_name)
def __getitem__(self, column_name: str) -> Column:
    """Return a Column reference qualified with this DataFrame's branch id."""
    return Column(f"{self.branch_id}.{column_name}")
def __copy__(self):
    # Delegate shallow copies (copy.copy) to the richer copy() helper.
    return self.copy()
def sparkSession(self):
    # Accessor for the owning session.
    # NOTE(review): likely @property upstream (decorators appear stripped
    # from this dump) — confirm before relying on call style.
    return self.spark
def write(self):
    # Entry point for write operations on this DataFrame.
    # NOTE(review): likely @property upstream (decorators appear stripped).
    return DataFrameWriter(self)
def latest_cte_name(self) -> str:
    """Name of the most recently added CTE, or the FROM alias when no CTEs exist.

    Raises:
        RuntimeError: if no alias can be determined for the expression.
    """
    if not self.expression.ctes:
        from_exp = self.expression.args["from"]
        if from_exp.alias_or_name:
            return from_exp.alias_or_name
        table_alias = from_exp.find(exp.TableAlias)
        if not table_alias:
            raise RuntimeError(
                f"Could not find an alias name for this expression: {self.expression}"
            )
        return table_alias.alias_or_name
    return self.expression.ctes[-1].alias
def pending_join_hints(self):
    """Subset of the pending hints that are join hints."""
    return [h for h in self.pending_hints if isinstance(h, exp.JoinHint)]
def pending_partition_hints(self):
    """Subset of the pending hints that are anonymous (partition-style) hints."""
    return [h for h in self.pending_hints if isinstance(h, exp.Anonymous)]
def columns(self) -> t.List[str]:
    # Output column names of the current SELECT.
    return self.expression.named_selects
def na(self) -> DataFrameNaFunctions:
    # Null-handling helpers (PySpark `df.na` parity).
    return DataFrameNaFunctions(self)
def _replace_cte_names_with_hashes(self, expression: exp.Select):
    """Rename every CTE to a deterministic hash of its SELECT, rewriting all references."""
    replacement_mapping = {}
    for cte in expression.ctes:
        old_name_id = cte.args["alias"].this
        # Preserve the original identifier's quoting on the new hashed name.
        new_hashed_id = exp.to_identifier(
            self._create_hash_from_expression(cte.this), quoted=old_name_id.args["quoted"]
        )
        replacement_mapping[old_name_id] = new_hashed_id
    expression = expression.transform(replace_id_value, replacement_mapping).assert_is(
        exp.Select
    )
    return expression
def _create_cte_from_expression(
    self,
    expression: exp.Expression,
    branch_id: t.Optional[str] = None,
    sequence_id: t.Optional[str] = None,
    **kwargs,
) -> t.Tuple[exp.CTE, str]:
    """Wrap `expression` (minus its own WITH clause) in a CTE named by its hash.

    The branch/sequence ids are recorded on the CTE node so join hints can
    later be matched back to the DataFrame that produced it.

    Returns:
        The (cte, name) pair.
    """
    name = self._create_hash_from_expression(expression)
    expression_to_cte = expression.copy()
    expression_to_cte.set("with", None)
    cte = exp.Select().with_(name, as_=expression_to_cte, **kwargs).ctes[0]
    cte.set("branch_id", branch_id or self.branch_id)
    cte.set("sequence_id", sequence_id or self.sequence_id)
    return cte, name
# NOTE(review): the two `...` stubs read like @t.overload signatures whose
# decorators were stripped from this dump — confirm against upstream.
def _ensure_list_of_columns(self, cols: t.Collection[ColumnOrLiteral]) -> t.List[Column]: ...
def _ensure_list_of_columns(self, cols: ColumnOrLiteral) -> t.List[Column]: ...
def _ensure_list_of_columns(self, cols):
    # Normalize a single value or a collection into a list of Columns.
    return Column.ensure_cols(ensure_list(cols))
def _ensure_and_normalize_cols(self, cols, expression: t.Optional[exp.Select] = None):
    # Coerce to Columns, then normalize identifiers against the given
    # (or this DataFrame's own) expression. Normalization mutates `cols`.
    cols = self._ensure_list_of_columns(cols)
    normalize(self.spark, expression or self.expression, cols)
    return cols
def _ensure_and_normalize_col(self, col):
    # Single-column variant of _ensure_and_normalize_cols.
    col = Column.ensure_col(col)
    normalize(self.spark, self.expression, col)
    return col
def _convert_leaf_to_cte(self, sequence_id: t.Optional[str] = None) -> DataFrame:
    """Push the current SELECT down into a CTE and re-select its columns from it.

    Used to 'freeze' the current state before operations that need a stable
    named relation (joins, set operations, aliasing, caching).
    """
    df = self._resolve_pending_hints()
    sequence_id = sequence_id or df.sequence_id
    expression = df.expression.copy()
    cte_expression, cte_name = df._create_cte_from_expression(
        expression=expression, sequence_id=sequence_id
    )
    # Carry over the existing CTEs plus the newly created one.
    new_expression = df._add_ctes_to_expression(
        exp.Select(), expression.ctes + [cte_expression]
    )
    sel_columns = df._get_outer_select_columns(cte_expression)
    new_expression = new_expression.from_(cte_name).select(
        *[x.alias_or_name for x in sel_columns]
    )
    return df.copy(expression=new_expression, sequence_id=sequence_id)
def _resolve_pending_hints(self) -> DataFrame:
    """Attach queued hints to the current expression.

    Partition (anonymous) hints attach unconditionally. Join hints attach
    only once a CTE matching their sequence id participates in a join, at
    which point the hint target is rewritten to that CTE's alias. Attached
    hints are removed from the pending list.
    """
    df = self.copy()
    if not self.pending_hints:
        return df
    expression = df.expression
    hint_expression = expression.args.get("hint") or exp.Hint(expressions=[])
    for hint in df.pending_partition_hints:
        hint_expression.append("expressions", hint)
        df.pending_hints.remove(hint)
    join_aliases = {
        join_table.alias_or_name
        for join_table in get_tables_from_expression_with_join(expression)
    }
    if join_aliases:
        for hint in df.pending_join_hints:
            for sequence_id_expression in hint.expressions:
                sequence_id_or_name = sequence_id_expression.alias_or_name
                sequence_ids_to_match = [sequence_id_or_name]
                # A user-facing alias may map to several sequence ids.
                if sequence_id_or_name in df.spark.name_to_sequence_id_mapping:
                    sequence_ids_to_match = df.spark.name_to_sequence_id_mapping[
                        sequence_id_or_name
                    ]
                # Search newest-first for a CTE with a matching sequence id.
                matching_ctes = [
                    cte
                    for cte in reversed(expression.ctes)
                    if cte.args["sequence_id"] in sequence_ids_to_match
                ]
                for matching_cte in matching_ctes:
                    if matching_cte.alias_or_name in join_aliases:
                        sequence_id_expression.set("this", matching_cte.args["alias"].this)
                        df.pending_hints.remove(hint)
                        break
            hint_expression.append("expressions", hint)
    if hint_expression.expressions:
        expression.set("hint", hint_expression)
    return df
def _hint(self, hint_name: str, args: t.List[Column]) -> DataFrame:
    """Queue a hint for later resolution.

    Known join hints become exp.JoinHint nodes (targets referenced as
    tables); anything else becomes an exp.Anonymous hint.
    """
    hint_name = hint_name.upper()
    hint_expression = (
        exp.JoinHint(
            this=hint_name,
            expressions=[exp.to_table(parameter.alias_or_name) for parameter in args],
        )
        if hint_name in JOIN_HINTS
        else exp.Anonymous(
            this=hint_name, expressions=[parameter.expression for parameter in args]
        )
    )
    new_df = self.copy()
    new_df.pending_hints.append(hint_expression)
    return new_df
def _set_operation(self, klass: t.Callable, other: DataFrame, distinct: bool):
    """Combine with `other` via a set operation node (Union/Intersect/Except).

    Both sides' CTEs are hoisted onto the combined node so the operand
    selects themselves are CTE-free.
    """
    other_df = other._convert_leaf_to_cte()
    base_expression = self.expression.copy()
    base_expression = self._add_ctes_to_expression(base_expression, other_df.expression.ctes)
    all_ctes = base_expression.ctes
    other_df.expression.set("with", None)
    base_expression.set("with", None)
    operation = klass(this=base_expression, distinct=distinct, expression=other_df.expression)
    operation.set("with", exp.With(expressions=all_ctes))
    return self.copy(expression=operation)._convert_leaf_to_cte()
def _cache(self, storage_level: str):
    # Mark the latest CTE with the requested storage level; sql() later
    # turns such CTEs into CACHE TABLE statements.
    df = self._convert_leaf_to_cte()
    df.expression.ctes[-1].set("cache_storage_level", storage_level)
    return df
def _add_ctes_to_expression(cls, expression: exp.Select, ctes: t.List[exp.CTE]) -> exp.Select:
    """Merge `ctes` into a copy of `expression`'s WITH clause, skipping aliases already present."""
    expression = expression.copy()
    current_with = expression.args.get("with")
    if current_with:
        merged_ctes = current_with.expressions
        known_aliases = {existing.alias_or_name for existing in merged_ctes}
        for cte in ctes:
            if cte.alias_or_name not in known_aliases:
                merged_ctes.append(cte)
    else:
        merged_ctes = ctes
    expression.set("with", exp.With(expressions=merged_ctes))
    return expression
def _get_outer_select_columns(cls, item: t.Union[exp.Expression, DataFrame]) -> t.List[Column]:
    """Columns projected by the outermost SELECT of `item` (a DataFrame or expression)."""
    expression = item.expression if isinstance(item, DataFrame) else item
    outer_select = expression.find(exp.Select) or exp.Select()
    return [Column(projection) for projection in outer_select.expressions]
def _create_hash_from_expression(cls, expression: exp.Expression) -> str:
    """Deterministic short name ('t' + crc32 digits, truncated to 6 chars) for an expression's SQL."""
    from sqlglot.dataframe.sql.session import SparkSession

    sql_bytes = expression.sql(dialect=SparkSession().dialect).encode("utf-8")
    return ("t" + str(zlib.crc32(sql_bytes)))[:6]
def _get_select_expressions(
    self,
) -> t.List[t.Tuple[t.Union[t.Type[exp.Cache], OutputExpressionContainer], exp.Select]]:
    """Split the expression into cache statements plus the main select.

    Each CTE carrying a `cache_storage_level` becomes its own
    (exp.Cache, select) pair, taking along the non-cached CTEs seen so
    far; the remaining CTEs stay on the final
    (output-container-type, select) pair.
    """
    select_expressions: t.List[
        t.Tuple[t.Union[t.Type[exp.Cache], OutputExpressionContainer], exp.Select]
    ] = []
    main_select_ctes: t.List[exp.CTE] = []
    for cte in self.expression.ctes:
        cache_storage_level = cte.args.get("cache_storage_level")
        if cache_storage_level:
            select_expression = cte.this.copy()
            select_expression.set("with", exp.With(expressions=copy(main_select_ctes)))
            select_expression.set("cte_alias_name", cte.alias_or_name)
            select_expression.set("cache_storage_level", cache_storage_level)
            select_expressions.append((exp.Cache, select_expression))
        else:
            main_select_ctes.append(cte)
    main_select = self.expression.copy()
    if main_select_ctes:
        main_select.set("with", exp.With(expressions=main_select_ctes))
    expression_select_pair = (type(self.output_expression_container), main_select)
    select_expressions.append(expression_select_pair)  # type: ignore
    return select_expressions
def sql(self, dialect: DialectType = None, optimize: bool = True, **kwargs) -> t.List[str]:
    """Generate the SQL statement(s) this DataFrame represents.

    Returns one string per output expression: a DROP VIEW + CACHE TABLE pair
    for each cached CTE, followed by the final SELECT/CREATE/INSERT. Cached
    sub-selects are registered in the sqlglot schema under hashed table
    names, and later references are rewritten via `replacement_mapping`.

    Raises:
        ValueError: on an unrecognized output expression type.
    """
    from sqlglot.dataframe.sql.session import SparkSession

    dialect = Dialect.get_or_raise(dialect or SparkSession().dialect)
    df = self._resolve_pending_hints()
    select_expressions = df._get_select_expressions()
    output_expressions: t.List[t.Union[exp.Select, exp.Cache, exp.Drop]] = []
    replacement_mapping: t.Dict[exp.Identifier, exp.Identifier] = {}
    for expression_type, select_expression in select_expressions:
        # Apply renames accumulated from earlier cache statements.
        select_expression = select_expression.transform(
            replace_id_value, replacement_mapping
        ).assert_is(exp.Select)
        if optimize:
            select_expression = t.cast(
                exp.Select, self.spark._optimize(select_expression, dialect=dialect)
            )
        select_expression = df._replace_cte_names_with_hashes(select_expression)
        expression: t.Union[exp.Select, exp.Cache, exp.Drop]
        if expression_type == exp.Cache:
            cache_table_name = df._create_hash_from_expression(select_expression)
            cache_table = exp.to_table(cache_table_name)
            original_alias_name = select_expression.args["cte_alias_name"]
            replacement_mapping[exp.to_identifier(original_alias_name)] = exp.to_identifier(  # type: ignore
                cache_table_name
            )
            # Register the cached table's schema so later optimization can use it.
            sqlglot.schema.add_table(
                cache_table_name,
                {
                    expression.alias_or_name: expression.type.sql(dialect=dialect)
                    for expression in select_expression.expressions
                },
                dialect=dialect,
            )
            cache_storage_level = select_expression.args["cache_storage_level"]
            options = [
                exp.Literal.string("storageLevel"),
                exp.Literal.string(cache_storage_level),
            ]
            expression = exp.Cache(
                this=cache_table, expression=select_expression, lazy=True, options=options
            )
            # We will drop the "view" if it exists before running the cache table
            output_expressions.append(exp.Drop(this=cache_table, exists=True, kind="VIEW"))
        elif expression_type == exp.Create:
            expression = df.output_expression_container.copy()
            expression.set("expression", select_expression)
        elif expression_type == exp.Insert:
            expression = df.output_expression_container.copy()
            # INSERT carries the WITH clause itself rather than on the inner select.
            select_without_ctes = select_expression.copy()
            select_without_ctes.set("with", None)
            expression.set("expression", select_without_ctes)
            if select_expression.ctes:
                expression.set("with", exp.With(expressions=select_expression.ctes))
        elif expression_type == exp.Select:
            expression = select_expression
        else:
            raise ValueError(f"Invalid expression type: {expression_type}")
        output_expressions.append(expression)
    return [expression.sql(dialect=dialect, **kwargs) for expression in output_expressions]
def copy(self, **kwargs) -> DataFrame:
    # Rebuild from this instance's attributes, with kwargs overriding individual ones.
    return DataFrame(**object_to_dict(self, **kwargs))
def select(self, *cols, **kwargs) -> DataFrame:
    """Project the given columns.

    When joins are present, unqualified column references are resolved
    left-to-right against the joined CTEs, permitting duplicate names the
    way Spark does.
    """
    cols = self._ensure_and_normalize_cols(cols)
    kwargs["append"] = kwargs.get("append", False)
    if self.expression.args.get("joins"):
        # Columns without a table qualifier are ambiguous under a join.
        ambiguous_cols = [
            col
            for col in cols
            if isinstance(col.column_expression, exp.Column) and not col.column_expression.table
        ]
        if ambiguous_cols:
            join_table_identifiers = [
                x.this for x in get_tables_from_expression_with_join(self.expression)
            ]
            cte_names_in_join = [x.this for x in join_table_identifiers]
            # If we have columns that resolve to multiple CTE expressions then we want to use each CTE left-to-right
            # and therefore we allow multiple columns with the same name in the result. This matches the behavior
            # of Spark.
            resolved_column_position: t.Dict[Column, int] = {col: -1 for col in ambiguous_cols}
            for ambiguous_col in ambiguous_cols:
                ctes_with_column = [
                    cte
                    for cte in self.expression.ctes
                    if cte.alias_or_name in cte_names_in_join
                    and ambiguous_col.alias_or_name in cte.this.named_selects
                ]
                # Check if there is a CTE with this column that we haven't used before. If so, use it. Otherwise,
                # use the same CTE we used before
                cte = seq_get(ctes_with_column, resolved_column_position[ambiguous_col] + 1)
                if cte:
                    resolved_column_position[ambiguous_col] += 1
                else:
                    cte = ctes_with_column[resolved_column_position[ambiguous_col]]
                ambiguous_col.expression.set("table", cte.alias_or_name)
    return self.copy(
        expression=self.expression.select(*[x.expression for x in cols], **kwargs), **kwargs
    )
def alias(self, name: str, **kwargs) -> DataFrame:
    """Register `name` as an alias for this DataFrame and start a new sequence.

    Pending join hints that referenced the old sequence id are retargeted
    to the new one before the expression is frozen into a CTE.
    """
    new_sequence_id = self.spark._random_sequence_id
    df = self.copy()
    for join_hint in df.pending_join_hints:
        for expression in join_hint.expressions:
            if expression.alias_or_name == self.sequence_id:
                expression.set("this", Column.ensure_col(new_sequence_id).expression)
    df.spark._add_alias_to_mapping(name, new_sequence_id)
    return df._convert_leaf_to_cte(sequence_id=new_sequence_id)
def where(self, column: t.Union[Column, bool], **kwargs) -> DataFrame:
    # Normalize the condition against this expression, then add a WHERE clause.
    col = self._ensure_and_normalize_col(column)
    return self.copy(expression=self.expression.where(col.expression))

# PySpark-compatible alias.
filter = where
def groupBy(self, *cols, **kwargs) -> GroupedData:
    # Normalize the grouping columns and defer aggregation to GroupedData.
    columns = self._ensure_and_normalize_cols(cols)
    return GroupedData(self, columns, self.last_op)
def agg(self, *exprs, **kwargs) -> DataFrame:
    # Aggregate over the whole frame (group by nothing).
    cols = self._ensure_and_normalize_cols(exprs)
    return self.groupBy().agg(*cols)
def join(
    self,
    other_df: DataFrame,
    on: t.Union[str, t.List[str], Column, t.List[Column]],
    how: str = "inner",
    **kwargs,
) -> DataFrame:
    """Join with `other_df` on column names or on boolean expressions.

    Name-based joins dedupe the join columns and move them to the front of
    the select list (Spark behavior); expression-based joins keep left
    columns first, then right columns, unchanged.

    Raises:
        ValueError: for name-based joins when a join column is ambiguous
            across the left-side tables or missing from all of them.
    """
    other_df = other_df._convert_leaf_to_cte()
    join_columns = self._ensure_list_of_columns(on)
    # We will determine actual "join on" expression later so we don't provide it at first
    join_expression = self.expression.join(
        other_df.latest_cte_name, join_type=how.replace("_", " ")
    )
    join_expression = self._add_ctes_to_expression(join_expression, other_df.expression.ctes)
    self_columns = self._get_outer_select_columns(join_expression)
    other_columns = self._get_outer_select_columns(other_df)
    # Determines the join clause and select columns to be used passed on what type of columns were provided for
    # the join. The columns returned changes based on how the on expression is provided.
    if isinstance(join_columns[0].expression, exp.Column):
        """
        Unique characteristics of join on column names only:
        * The column names are put at the front of the select list
        * The column names are deduplicated across the entire select list and only the column names (other dups are allowed)
        """
        table_names = [
            table.alias_or_name
            for table in get_tables_from_expression_with_join(join_expression)
        ]
        potential_ctes = [
            cte
            for cte in join_expression.ctes
            if cte.alias_or_name in table_names
            and cte.alias_or_name != other_df.latest_cte_name
        ]
        # Determine the table to reference for the left side of the join by checking each of the left side
        # tables and see if they have the column being referenced.
        join_column_pairs = []
        for join_column in join_columns:
            num_matching_ctes = 0
            for cte in potential_ctes:
                if join_column.alias_or_name in cte.this.named_selects:
                    left_column = join_column.copy().set_table_name(cte.alias_or_name)
                    right_column = join_column.copy().set_table_name(other_df.latest_cte_name)
                    join_column_pairs.append((left_column, right_column))
                    num_matching_ctes += 1
            if num_matching_ctes > 1:
                raise ValueError(
                    f"Column {join_column.alias_or_name} is ambiguous. Please specify the table name."
                )
            elif num_matching_ctes == 0:
                raise ValueError(
                    f"Column {join_column.alias_or_name} does not exist in any of the tables."
                )
        # AND the per-column equalities together into one ON clause.
        join_clause = functools.reduce(
            lambda x, y: x & y,
            [left_column == right_column for left_column, right_column in join_column_pairs],
        )
        join_column_names = [left_col.alias_or_name for left_col, _ in join_column_pairs]
        # To match spark behavior only the join clause gets deduplicated and it gets put in the front of the column list
        select_column_names = [
            (
                column.alias_or_name
                if not isinstance(column.expression.this, exp.Star)
                else column.sql()
            )
            for column in self_columns + other_columns
        ]
        select_column_names = [
            column_name
            for column_name in select_column_names
            if column_name not in join_column_names
        ]
        select_column_names = join_column_names + select_column_names
    else:
        """
        Unique characteristics of join on expressions:
        * There is no deduplication of the results.
        * The left join dataframe columns go first and right come after. No sort preference is given to join columns
        """
        join_columns = self._ensure_and_normalize_cols(join_columns, join_expression)
        if len(join_columns) > 1:
            join_columns = [functools.reduce(lambda x, y: x & y, join_columns)]
        join_clause = join_columns[0]
        select_column_names = [column.alias_or_name for column in self_columns + other_columns]
    # Update the on expression with the actual join clause to replace the dummy one from before
    join_expression.args["joins"][-1].set("on", join_clause.expression)
    new_df = self.copy(expression=join_expression)
    new_df.pending_join_hints.extend(self.pending_join_hints)
    new_df.pending_hints.extend(other_df.pending_hints)
    # NOTE(review): `.__wrapped__` implies `select` is decorated (e.g. with an
    # @operation wrapper) upstream — decorators appear stripped from this dump.
    new_df = new_df.select.__wrapped__(new_df, *select_column_names)
    return new_df
def orderBy(
    self,
    *cols: t.Union[str, Column],
    ascending: t.Optional[t.Union[t.Any, t.List[t.Any]]] = None,
) -> DataFrame:
    """
    This implementation lets any ordered columns take priority over whatever is provided in `ascending`. Spark
    has irregular behavior and can result in runtime errors. Users shouldn't be mixing the two anyways so this
    is unlikely to come up.
    """
    columns = self._ensure_and_normalize_cols(cols)
    # Columns already wrapped in exp.Ordered keep their own direction.
    pre_ordered_col_indexes = [
        i for i, col in enumerate(columns) if isinstance(col.expression, exp.Ordered)
    ]
    if ascending is None:
        ascending = [True] * len(columns)
    elif not isinstance(ascending, list):
        ascending = [ascending] * len(columns)
    # Coerce each flag to bool (the original comprehension's enumerate index was unused).
    ascending = [bool(x) for x in ascending]
    assert len(columns) == len(
        ascending
    ), "The length of items in ascending must equal the number of columns provided"
    col_and_ascending = list(zip(columns, ascending))
    order_by_columns = [
        (
            exp.Ordered(this=col.expression, desc=not asc)
            if i not in pre_ordered_col_indexes
            else columns[i].column_expression
        )
        for i, (col, asc) in enumerate(col_and_ascending)
    ]
    return self.copy(expression=self.expression.order_by(*order_by_columns))

# PySpark-compatible alias.
sort = orderBy
def union(self, other: DataFrame) -> DataFrame:
    # distinct=False, matching Spark's union (no deduplication).
    return self._set_operation(exp.Union, other, False)

# PySpark-compatible alias.
unionAll = union
def unionByName(self, other: DataFrame, allowMissingColumns: bool = False):
    """Union with `other`, matching columns by name instead of position.

    With allowMissingColumns=True, columns present on only one side are
    filled with NULL on the other; otherwise the right side is simply
    re-projected in the left side's column order.
    """
    l_columns = self.columns
    r_columns = other.columns
    if not allowMissingColumns:
        l_expressions = l_columns
        r_expressions = l_columns
    else:
        l_expressions = []
        r_expressions = []
        r_columns_unused = copy(r_columns)
        for l_column in l_columns:
            l_expressions.append(l_column)
            if l_column in r_columns:
                r_expressions.append(l_column)
                r_columns_unused.remove(l_column)
            else:
                # Left-only column: right side selects NULL under the same name.
                r_expressions.append(exp.alias_(exp.Null(), l_column, copy=False))
        # Right-only columns: left side selects NULL under those names.
        for r_column in r_columns_unused:
            l_expressions.append(exp.alias_(exp.Null(), r_column, copy=False))
            r_expressions.append(r_column)
    r_df = (
        other.copy()._convert_leaf_to_cte().select(*self._ensure_list_of_columns(r_expressions))
    )
    l_df = self.copy()
    if allowMissingColumns:
        l_df = l_df._convert_leaf_to_cte().select(*self._ensure_list_of_columns(l_expressions))
    return l_df._set_operation(exp.Union, r_df, False)
def intersect(self, other: DataFrame) -> DataFrame:
    # distinct=True: INTERSECT (deduplicated).
    return self._set_operation(exp.Intersect, other, True)
def intersectAll(self, other: DataFrame) -> DataFrame:
    # distinct=False: INTERSECT ALL (keeps duplicates).
    return self._set_operation(exp.Intersect, other, False)
def exceptAll(self, other: DataFrame) -> DataFrame:
    # distinct=False: EXCEPT ALL (keeps duplicates).
    return self._set_operation(exp.Except, other, False)
def distinct(self) -> DataFrame:
    # SELECT DISTINCT over the current expression.
    return self.copy(expression=self.expression.distinct())
def dropDuplicates(self, subset: t.Optional[t.List[str]] = None):
    """Drop duplicate rows; with `subset`, keep one row per distinct subset value.

    The subset case is implemented with a ROW_NUMBER window partitioned by
    the subset columns, keeping only row_num == 1.
    """
    if not subset:
        return self.distinct()
    column_names = ensure_list(subset)
    window = Window.partitionBy(*column_names).orderBy(*column_names)
    return (
        self.copy()
        .withColumn("row_num", F.row_number().over(window))
        .where(F.col("row_num") == F.lit(1))
        .drop("row_num")
    )
def dropna(
    self,
    how: str = "any",
    thresh: t.Optional[int] = None,
    subset: t.Optional[t.Union[str, t.Tuple[str, ...], t.List[str]]] = None,
) -> DataFrame:
    """Drop rows containing null values.

    how="any" drops rows with at least one null in the checked columns;
    how="all" only rows where every checked column is null. `thresh`
    (minimum non-null count) overrides `how`. Implemented by summing
    per-column null indicators and filtering on the total.

    Raises:
        RuntimeError: if the computed null threshold exceeds the number of
            checked columns.
    """
    minimum_non_null = thresh or 0  # will be determined later if thresh is null
    new_df = self.copy()
    all_columns = self._get_outer_select_columns(new_df.expression)
    if subset:
        null_check_columns = self._ensure_and_normalize_cols(subset)
    else:
        null_check_columns = all_columns
    if thresh is None:
        minimum_num_nulls = 1 if how == "any" else len(null_check_columns)
    else:
        minimum_num_nulls = len(null_check_columns) - minimum_non_null + 1
    if minimum_num_nulls > len(null_check_columns):
        raise RuntimeError(
            f"The minimum num nulls for dropna must be less than or equal to the number of columns. "
            f"Minimum num nulls: {minimum_num_nulls}, Num Columns: {len(null_check_columns)}"
        )
    # 1 when the column is null, 0 otherwise; summed into a per-row null count.
    if_null_checks = [
        F.when(column.isNull(), F.lit(1)).otherwise(F.lit(0)) for column in null_check_columns
    ]
    nulls_added_together = functools.reduce(lambda x, y: x + y, if_null_checks)
    num_nulls = nulls_added_together.alias("num_nulls")
    new_df = new_df.select(num_nulls, append=True)
    filtered_df = new_df.where(F.col("num_nulls") < F.lit(minimum_num_nulls))
    # Re-project the original columns to drop the helper num_nulls column.
    final_df = filtered_df.select(*all_columns)
    return final_df
def fillna(
    self,
    value: t.Union[ColumnLiterals],
    subset: t.Optional[t.Union[str, t.Tuple[str, ...], t.List[str]]] = None,
) -> DataFrame:
    """
    Functionality Difference: If you provide a value to replace a null and that type conflicts
    with the type of the column then PySpark will just ignore your replacement.
    This will try to cast them to be the same in some cases. So they won't always match.
    Best to not mix types so make sure replacement is the same type as the column
    Possibility for improvement: Use `typeof` function to get the type of the column
    and check if it matches the type of the value provided. If not then make it null.
    """
    from sqlglot.dataframe.sql.functions import lit

    values = None
    columns = None
    new_df = self.copy()
    all_columns = self._get_outer_select_columns(new_df.expression)
    all_column_mapping = {column.alias_or_name: column for column in all_columns}
    if isinstance(value, dict):
        # Dict form: keys are the target columns, values are the fill values.
        values = list(value.values())
        columns = self._ensure_and_normalize_cols(list(value))
    if not columns:
        columns = self._ensure_and_normalize_cols(subset) if subset else all_columns
    if not values:
        values = [value] * len(columns)
    value_columns = [lit(value) for value in values]
    # Replace each targeted column with CASE WHEN col IS NULL THEN fill ELSE col END.
    null_replacement_mapping = {
        column.alias_or_name: (
            F.when(column.isNull(), value).otherwise(column).alias(column.alias_or_name)
        )
        for column, value in zip(columns, value_columns)
    }
    null_replacement_mapping = {**all_column_mapping, **null_replacement_mapping}
    # Re-project every column in its original order, substituting the replacements.
    null_replacement_columns = [
        null_replacement_mapping[column.alias_or_name] for column in all_columns
    ]
    new_df = new_df.select(*null_replacement_columns)
    return new_df
def replace(
    self,
    to_replace: t.Union[bool, int, float, str, t.List, t.Dict],
    value: t.Optional[t.Union[bool, int, float, str, t.List]] = None,
    subset: t.Optional[t.Collection[ColumnOrName] | ColumnOrName] = None,
) -> DataFrame:
    """Replace matching values, mirroring PySpark's ``DataFrame.replace``.

    `to_replace` may be a scalar (paired with scalar `value`), a list
    (paired element-wise with list `value`), or a dict of old -> new.
    `subset` limits which columns are rewritten.
    """
    from sqlglot.dataframe.sql.functions import lit

    new_df = self.copy()
    all_columns = self._get_outer_select_columns(new_df.expression)
    all_column_mapping = {column.alias_or_name: column for column in all_columns}
    columns = self._ensure_and_normalize_cols(subset) if subset else all_columns
    if isinstance(to_replace, dict):
        old_values = list(to_replace)
        new_values = list(to_replace.values())
    # Fix: the original guarded this branch with `not old_values and`, which
    # is always true here (old_values was just initialized to None) — dead code.
    elif isinstance(to_replace, list):
        assert isinstance(value, list), "value must be a list since the replacements are a list"
        assert len(to_replace) == len(
            value
        ), "the replacements and values must be the same length"
        old_values = to_replace
        new_values = value
    else:
        old_values = [to_replace] * len(columns)
        new_values = [value] * len(columns)
    old_values = [lit(v) for v in old_values]
    new_values = [lit(v) for v in new_values]

    replacement_mapping = {}
    for column in columns:
        # Placeholder so `.otherwise` below is safe even when the replacement
        # lists are empty (e.g. an empty dict was passed).
        expression = Column(None)
        for i, (old_value, new_value) in enumerate(zip(old_values, new_values)):
            if i == 0:
                expression = F.when(column == old_value, new_value)
            else:
                expression = expression.when(column == old_value, new_value)  # type: ignore
        # Consistency fix: use `column.alias_or_name` like everywhere else in
        # this method (was `column.expression.alias_or_name` — same value,
        # reached through the wrapped expression).
        replacement_mapping[column.alias_or_name] = expression.otherwise(column).alias(
            column.alias_or_name
        )
    replacement_mapping = {**all_column_mapping, **replacement_mapping}
    replacement_columns = [replacement_mapping[column.alias_or_name] for column in all_columns]
    return new_df.select(*replacement_columns)
def withColumn(self, colName: str, col: Column) -> DataFrame:
    """Return a new DataFrame with column `colName` added, or replaced if it
    already exists in the current select list."""
    col = self._ensure_and_normalize_col(col)
    existing_col_names = self.expression.named_selects
    existing_col_index = (
        existing_col_names.index(colName) if colName in existing_col_names else None
    )
    # Bug fix: `if existing_col_index:` treated index 0 (the FIRST column) as
    # "not found" and appended a duplicate column instead of replacing it.
    if existing_col_index is not None:
        expression = self.expression.copy()
        expression.expressions[existing_col_index] = col.expression
        return self.copy(expression=expression)
    return self.copy().select(col.alias(colName), append=True)
def withColumnRenamed(self, existing: str, new: str):
    """Rename every output column whose alias-or-name equals `existing` to `new`.

    Raises:
        ValueError: when no output column matches `existing`.
    """
    expression = self.expression.copy()
    matches = [
        select_expr
        for select_expr in expression.expressions
        if select_expr.alias_or_name == existing
    ]
    if not matches:
        raise ValueError("Tried to rename a column that doesn't exist")
    for match in matches:
        if isinstance(match, exp.Column):
            # A bare column reference gets wrapped in an alias node.
            match.replace(exp.alias_(match, new))
        else:
            # An already-aliased expression just has its alias swapped.
            match.set("alias", exp.to_identifier(new))
    return self.copy(expression=expression)
def drop(self, *cols: t.Union[str, Column]) -> DataFrame:
    """Return a new DataFrame without the given columns."""
    all_columns = self._get_outer_select_columns(self.expression)
    drop_cols = self._ensure_and_normalize_cols(cols)
    # Perf fix: the original rebuilt the dropped-name list inside the
    # comprehension for every column (O(n*m)); a set makes membership O(1).
    dropped_names = {drop_column.alias_or_name for drop_column in drop_cols}
    new_columns = [col for col in all_columns if col.alias_or_name not in dropped_names]
    return self.copy().select(*new_columns, append=False)
def limit(self, num: int) -> DataFrame:
    """Restrict the result to at most `num` rows."""
    limited = self.expression.limit(num)
    return self.copy(expression=limited)
def hint(self, name: str, *parameters: t.Optional[t.Union[str, int]]) -> DataFrame:
    """Attach a SQL hint; when no parameters are given, the DataFrame's
    sequence id is used as the hint argument."""
    if parameters:
        hint_columns = self._ensure_list_of_columns(ensure_list(parameters))
    else:
        hint_columns = Column.ensure_cols([self.sequence_id])
    return self._hint(name, hint_columns)
def repartition(
    self, numPartitions: t.Union[int, ColumnOrName], *cols: ColumnOrName
) -> DataFrame:
    """Emit a `repartition` hint over `numPartitions` plus any partitioning columns."""
    partition_args = self._ensure_list_of_columns(numPartitions)
    partition_args = partition_args + self._ensure_and_normalize_cols(cols)
    return self._hint("repartition", partition_args)
def coalesce(self, numPartitions: int) -> DataFrame:
    """Emit a `coalesce` hint with the target partition count."""
    return self._hint("coalesce", Column.ensure_cols([numPartitions]))
def cache(self) -> DataFrame:
    """Cache using Spark's default MEMORY_AND_DISK storage level."""
    return self._cache(storage_level="MEMORY_AND_DISK")
def persist(self, storageLevel: str = "MEMORY_AND_DISK_SER") -> DataFrame:
    """
    Storage Level Options: https://spark.apache.org/docs/3.0.0-preview/sql-ref-syntax-aux-cache-cache-table.html
    """
    return self._cache(storage_level=storageLevel)
class GroupedData:
    """Result of a groupBy: builds aggregates over `group_by_cols` of the
    wrapped DataFrame, mirroring PySpark's GroupedData API."""

    def __init__(self, df: DataFrame, group_by_cols: t.List[Column], last_op: Operation):
        self._df = df.copy()
        self.spark = df.spark
        self.last_op = last_op
        self.group_by_cols = group_by_cols

    def _get_function_applied_columns(
        self, func_name: str, cols: t.Tuple[str, ...]
    ) -> t.List[Column]:
        # Mirrors PySpark's generated aggregate aliases, e.g. "avg(price)".
        func_name = func_name.lower()
        return [getattr(F, func_name)(name).alias(f"{func_name}({name})") for name in cols]

    def agg(self, *exprs: t.Union[Column, t.Dict[str, str]]) -> DataFrame:
        """Aggregate using Columns or a single dict of column name -> function name."""
        columns = (
            [Column(f"{agg_func}({column_name})") for column_name, agg_func in exprs[0].items()]
            if isinstance(exprs[0], dict)
            else exprs
        )
        cols = self._df._ensure_and_normalize_cols(columns)

        expression = self._df.expression.group_by(
            *[x.expression for x in self.group_by_cols]
        ).select(*[x.expression for x in self.group_by_cols + cols], append=False)
        return self._df.copy(expression=expression)

    def count(self) -> DataFrame:
        return self.agg(F.count("*").alias("count"))

    def mean(self, *cols: str) -> DataFrame:
        return self.avg(*cols)

    def avg(self, *cols: str) -> DataFrame:
        return self.agg(*self._get_function_applied_columns("avg", cols))

    def max(self, *cols: str) -> DataFrame:
        return self.agg(*self._get_function_applied_columns("max", cols))

    def min(self, *cols: str) -> DataFrame:
        return self.agg(*self._get_function_applied_columns("min", cols))

    def sum(self, *cols: str) -> DataFrame:
        return self.agg(*self._get_function_applied_columns("sum", cols))

    def pivot(self, *cols: str) -> DataFrame:
        # Bug fix: the message previously read "Sum distinct is not currently
        # implemented" — a copy/paste error from another stub.
        raise NotImplementedError("Pivot is not currently implemented")
The provided code snippet includes necessary dependencies for implementing the `operation` function. Write a Python function `def operation(op: Operation)` to solve the following problem:
Decorator used around DataFrame methods to indicate what type of operation is being performed from the ordered Operation enums. This is used to determine which operations should be performed on a CTE vs. included with the previous operation. Ex: After a user does a join we want to allow them to select which columns for the different tables that they want to carry through to the following operation. If we put that join in a CTE preemptively then the user would not have a chance to select which column they want in cases where there is overlap in names.
Here is the function:
def operation(op: Operation):
    """
    Decorator used around DataFrame methods to indicate what type of operation is being performed from the
    ordered Operation enums. This is used to determine which operations should be performed on a CTE vs.
    included with the previous operation.

    Ex: After a user does a join we want to allow them to select which columns for the different
    tables that they want to carry through to the following operation. If we put that join in
    a CTE preemptively then the user would not have a chance to select which column they want
    in cases where there is overlap in names.
    """

    def decorator(func: t.Callable):
        @functools.wraps(func)
        def wrapper(self: DataFrame, *args, **kwargs):
            # A freshly-initialized DataFrame is first isolated into a CTE.
            if self.last_op == Operation.INIT:
                self = self._convert_leaf_to_cte()
                self.last_op = Operation.NO_OP
            prior_op = self.last_op
            current_op = prior_op if op == Operation.NO_OP else op
            # Moving "backwards" in the operation ordering, or chaining two
            # SELECTs, requires snapshotting the previous state in a CTE.
            if current_op < prior_op or (prior_op == current_op == Operation.SELECT):
                self = self._convert_leaf_to_cte()
            result: t.Union[DataFrame, GroupedData] = func(self, *args, **kwargs)
            result.last_op = current_op  # type: ignore
            return result

        wrapper.__wrapped__ = func  # type: ignore
        return wrapper

    return decorator
153,207 | import sys
import cv2
import torch
import numpy as np
import os
from os.path import join
from pysot.core.config import cfg
from pysot.models.model_builder import ModelBuilder
from pysot.tracker.tracker_builder import build_tracker
from pysot.utils.bbox import get_axis_aligned_bbox
from pysot.utils.model_load import load_pretrain
from toolkit.datasets import DatasetFactory
from toolkit.utils.region import vot_overlap, vot_float2str
from . import vot
from .vot import Rectangle, Polygon, Point
# Paths to the tracker checkpoint and its experiment config.
# NOTE(review): `cfg_root` is presumably defined earlier in the original
# script (not visible here) — confirm before running standalone.
model_file = join(cfg_root, 'model.pth')
cfg_file = join(cfg_root, 'config.yaml')
def warmup(model):
    """Run several dummy template passes so CUDA kernels get warmed up
    before timing/tracking begins."""
    for _ in range(10):
        dummy = torch.FloatTensor(1, 3, 127, 127).cuda()
        model.template(dummy)
# Module-level driver statements from the original script: build the tracker
# and initialize it on the first frame.
# NOTE(review): `im` and `gt_bbox_` are presumably provided by surrounding
# code that is not visible here — verify against the caller.
tracker = setup_tracker()
tracker.init(im, gt_bbox_)
# Alias the config singleton (`__C` defined elsewhere in the original module).
cfg = __C
class ModelBuilder(nn.Module):
    """Assembles the tracking network from the global cfg: a backbone, an
    optional adjust/neck layer, an RPN head, and (for mask-enabled configs)
    mask and refine heads."""

    def __init__(self):
        super(ModelBuilder, self).__init__()

        # build backbone
        self.backbone = get_backbone(cfg.BACKBONE.TYPE,
                                     **cfg.BACKBONE.KWARGS)

        # build adjust layer (adapts backbone features for the heads)
        if cfg.ADJUST.ADJUST:
            self.neck = get_neck(cfg.ADJUST.TYPE,
                                 **cfg.ADJUST.KWARGS)

        # build rpn head
        self.rpn_head = get_rpn_head(cfg.RPN.TYPE,
                                     **cfg.RPN.KWARGS)

        # build mask head
        # NOTE(review): indentation was lost in this copy; REFINE is nested
        # under MASK here to match the upstream pysot layout — confirm.
        if cfg.MASK.MASK:
            self.mask_head = get_mask_head(cfg.MASK.TYPE,
                                           **cfg.MASK.KWARGS)

            if cfg.REFINE.REFINE:
                self.refine_head = get_refine_head(cfg.REFINE.TYPE)

    def template(self, z):
        """Embed the exemplar patch `z` once and cache it for track() calls."""
        zf = self.backbone(z)
        if cfg.MASK.MASK:
            zf = zf[-1]
        if cfg.ADJUST.ADJUST:
            zf = self.neck(zf)
        self.zf = zf

    def track(self, x):
        """Run one tracking step on search patch `x` against the cached template.

        Returns a dict with 'cls', 'loc', and 'mask' (None unless MASK is on).
        """
        xf = self.backbone(x)
        if cfg.MASK.MASK:
            # Keep the shallower feature maps for the refine head.
            self.xf = xf[:-1]
            xf = xf[-1]
        if cfg.ADJUST.ADJUST:
            xf = self.neck(xf)
        cls, loc = self.rpn_head(self.zf, xf)
        if cfg.MASK.MASK:
            mask, self.mask_corr_feature = self.mask_head(self.zf, xf)
        return {
            'cls': cls,
            'loc': loc,
            'mask': mask if cfg.MASK.MASK else None
        }

    def mask_refine(self, pos):
        # Refine the coarse mask at position `pos` using features cached by track().
        return self.refine_head(self.xf, self.mask_corr_feature, pos)

    def log_softmax(self, cls):
        # Reshape (B, 2*A, H, W) -> (B, A, H, W, 2) and take log-probabilities
        # over the final (background/foreground) axis.
        b, a2, h, w = cls.size()
        cls = cls.view(b, 2, a2 // 2, h, w)
        cls = cls.permute(0, 2, 3, 4, 1).contiguous()
        cls = F.log_softmax(cls, dim=4)
        return cls

    def forward(self, data):
        """ only used in training
        """
        template = data['template'].cuda()
        search = data['search'].cuda()
        label_cls = data['label_cls'].cuda()
        label_loc = data['label_loc'].cuda()
        label_loc_weight = data['label_loc_weight'].cuda()

        # get feature
        zf = self.backbone(template)
        xf = self.backbone(search)
        if cfg.MASK.MASK:
            zf = zf[-1]
            self.xf_refine = xf[:-1]
            xf = xf[-1]
        if cfg.ADJUST.ADJUST:
            zf = self.neck(zf)
            xf = self.neck(xf)
        cls, loc = self.rpn_head(zf, xf)

        # get loss
        cls = self.log_softmax(cls)
        cls_loss = select_cross_entropy_loss(cls, label_cls)
        loc_loss = weight_l1_loss(loc, label_loc, label_loc_weight)

        outputs = {}
        outputs['total_loss'] = cfg.TRAIN.CLS_WEIGHT * cls_loss + \
            cfg.TRAIN.LOC_WEIGHT * loc_loss
        outputs['cls_loss'] = cls_loss
        outputs['loc_loss'] = loc_loss

        if cfg.MASK.MASK:
            # TODO
            # NOTE(review): mask_loss is None here, so the `+=` below raises a
            # TypeError if MASK training is enabled — the mask loss was never
            # implemented in this copy.
            mask, self.mask_corr_feature = self.mask_head(zf, xf)
            mask_loss = None
            outputs['total_loss'] += cfg.TRAIN.MASK_WEIGHT * mask_loss
            outputs['mask_loss'] = mask_loss
        return outputs
def build_tracker(model):
    """Instantiate the tracker class selected by cfg.TRACK.TYPE."""
    tracker_cls = TRACKS[cfg.TRACK.TYPE]
    return tracker_cls(model)
def load_pretrain(model, pretrained_path):
    """Load a pretrained checkpoint into `model` and return it.

    Handles checkpoints saved with a top-level "state_dict" key and strips a
    DataParallel "module." prefix. If the checkpoint keys do not line up with
    the model, retries with a "features." prefix (backbone-only checkpoints).
    Loads with strict=False, so extra/missing keys are tolerated.
    """
    logger.info('load pretrained model from {}'.format(pretrained_path))
    device = torch.cuda.current_device()
    pretrained_dict = torch.load(pretrained_path,
        map_location=lambda storage, loc: storage.cuda(device))
    if "state_dict" in pretrained_dict.keys():
        pretrained_dict = remove_prefix(pretrained_dict['state_dict'],
                                        'module.')
    else:
        pretrained_dict = remove_prefix(pretrained_dict, 'module.')
    try:
        check_keys(model, pretrained_dict)
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only checkpoint-mismatch errors should be caught.
        logger.info('[Warning]: using pretrain as features.\
                Adding "features." as prefix')
        new_dict = {}
        for k, v in pretrained_dict.items():
            k = 'features.' + k
            new_dict[k] = v
        pretrained_dict = new_dict
        check_keys(model, pretrained_dict)
    model.load_state_dict(pretrained_dict, strict=False)
    return model
def setup_tracker():
    # Merge the YAML experiment config into the global cfg, build and load the
    # model onto the GPU in eval mode, wrap it in a tracker, and warm up CUDA.
    cfg.merge_from_file(cfg_file)
    model = ModelBuilder()
    model = load_pretrain(model, model_file).cuda().eval()
    tracker = build_tracker(model)
    warmup(model)
    return tracker
153,208 | import matplotlib.pyplot as plt
import numpy as np
import pickle
from matplotlib import rc
from .draw_utils import COLOR, MARKER_STYLE
# Per-tracker plot colors (RGB tuples in [0, 1]) and marker styles.
# NOTE(review): these shadow COLOR/MARKER_STYLE imported from .draw_utils
# above — confirm which definition is intended.
COLOR = ((1, 0, 0),
         (0, 1, 0),
         (1, 0, 1),
         (1, 1, 0),
         (0, 162/255, 232/255),
         (0.5, 0.5, 0.5),
         (0, 0, 1),
         (0, 1, 1),
         (136/255, 0, 21/255),
         (255/255, 127/255, 39/255),
         (0, 0, 0))
MARKER_STYLE = ['o', 'v', '<', '*', 'D', 'x', '.', 'x', '<', '.']
def draw_eao(result):
    """Radar (polar) plot of per-attribute EAO for each tracker.

    Args:
        result: dict mapping tracker_name -> dict of attribute -> EAO value;
            values are plotted on a 7-axis radar, with the first value
            repeated so each polygon closes.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='polar')
    angles = np.linspace(0, 2*np.pi, 8, endpoint=True)

    # First pass: collect all values to find per-attribute min/max.
    attr2value = []
    for i, (tracker_name, ret) in enumerate(result.items()):
        value = list(ret.values())
        attr2value.append(value)
        value.append(value[0])  # close the polygon (list is shared with attr2value)
    attr2value = np.array(attr2value)
    max_value = np.max(attr2value, axis=0)
    min_value = np.min(attr2value, axis=0)
    # Second pass: plot each tracker normalized by the best score per axis.
    for i, (tracker_name, ret) in enumerate(result.items()):
        value = list(ret.values())
        value.append(value[0])
        value = np.array(value)
        value *= (1 / max_value)
        plt.plot(angles, value, linestyle='-', color=COLOR[i], marker=MARKER_STYLE[i],
                 label=tracker_name, linewidth=1.5, markersize=6)

    attrs = ["Overall", "Camera motion",
             "Illumination change", "Motion Change",
             "Size change", "Occlusion",
             "Unassigned"]
    # Annotate each axis label with the raw (min, max) EAO on that attribute.
    attr_value = []
    for attr, maxv, minv in zip(attrs, max_value, min_value):
        attr_value.append(attr + "\n({:.3f},{:.3f})".format(minv, maxv))
    ax.set_thetagrids(angles[:-1] * 180/np.pi, attr_value)
    ax.spines['polar'].set_visible(False)
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.07), frameon=False, ncol=5)
    # NOTE(review): the `b=` keyword was removed in Matplotlib >= 3.6
    # (renamed to `visible`); confirm the pinned Matplotlib version.
    ax.grid(b=False)
    ax.set_ylim(0, 1.18)
    ax.set_yticks([])
    plt.show()
153,209 | import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rc
from .draw_utils import COLOR, LINE_STYLE
# Per-tracker plot colors (RGB tuples in [0, 1]).
# NOTE(review): this shadows COLOR imported from .draw_utils above —
# confirm which definition is intended.
COLOR = ((1, 0, 0),
         (0, 1, 0),
         (1, 0, 1),
         (1, 1, 0),
         (0, 162/255, 232/255),
         (0.5, 0.5, 0.5),
         (0, 0, 1),
         (0, 1, 1),
         (136/255, 0, 21/255),
         (255/255, 127/255, 39/255),
         (0, 0, 0))
def draw_f1(result, bold_name=None):
    """Precision-recall plot with iso-F1 contours for VOT-LT results.

    Args:
        result: dict tracker_name -> {'precision': {video: curve},
            'recall': {video: curve}}; curves are averaged across videos.
        bold_name: tracker to emphasize (rendered bold as "Ours").
    """
    # drawing f1 contour: f1 = 2pr/(p+r) solved for p given r, plotted in
    # both orientations to cover the whole symmetric contour.
    fig, ax = plt.subplots()
    for f1 in np.arange(0.1, 1, 0.1):
        recall = np.arange(f1, 1+0.01, 0.01)
        precision = f1 * recall / (2 * recall - f1)
        ax.plot(recall, precision, color=[0, 1, 0], linestyle='-', linewidth=0.5)
        ax.plot(precision, recall, color=[0, 1, 0], linestyle='-', linewidth=0.5)
    ax.grid(b=True)
    ax.set_aspect(1)
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.axis([0, 1, 0, 1])
    plt.title(r'\textbf{VOT2018-LT Precision vs Recall}')

    # draw result line: per-tracker mean curves plus the best-F1 point.
    all_precision = {}
    all_recall = {}
    best_f1 = {}
    best_idx = {}
    for tracker_name, ret in result.items():
        precision = np.mean(list(ret['precision'].values()), axis=0)
        recall = np.mean(list(ret['recall'].values()), axis=0)
        f1 = 2 * precision * recall / (precision + recall)
        max_idx = np.argmax(f1)
        all_precision[tracker_name] = precision
        all_recall[tracker_name] = recall
        best_f1[tracker_name] = f1[max_idx]
        best_idx[tracker_name] = max_idx

    # Idiom fix: the loop variable previously rebound `best_f1` — the very
    # dict being iterated — shadowing it with a float; use a distinct name.
    for idx, (tracker_name, f1_score) in \
            enumerate(sorted(best_f1.items(), key=lambda x: x[1], reverse=True)):
        if tracker_name == bold_name:
            label = r"\textbf{[%.3f] Ours}" % (f1_score)
        else:
            label = "[%.3f] " % (f1_score) + tracker_name
        recall = all_recall[tracker_name][:-1]
        precision = all_precision[tracker_name][:-1]
        ax.plot(recall, precision, color=COLOR[idx], linestyle='-',
                label=label)
        f1_idx = best_idx[tracker_name]
        ax.plot(recall[f1_idx], precision[f1_idx], color=[0, 0, 0], marker='o',
                markerfacecolor=COLOR[idx], markersize=5)
    ax.legend(loc='lower right', labelspacing=0.2)
    plt.xticks(np.arange(0, 1+0.1, 0.1))
    plt.yticks(np.arange(0, 1+0.1, 0.1))
    plt.show()
153,210 | import matplotlib.pyplot as plt
import numpy as np
from .draw_utils import COLOR, LINE_STYLE
# Per-tracker plot colors (RGB tuples in [0, 1]) and line styles.
# NOTE(review): these shadow COLOR/LINE_STYLE imported from .draw_utils
# above — confirm which definition is intended.
COLOR = ((1, 0, 0),
         (0, 1, 0),
         (1, 0, 1),
         (1, 1, 0),
         (0, 162/255, 232/255),
         (0.5, 0.5, 0.5),
         (0, 0, 1),
         (0, 1, 1),
         (136/255, 0, 21/255),
         (255/255, 127/255, 39/255),
         (0, 0, 0))
LINE_STYLE = ['-', '--', ':', '-', '--', ':', '-', '--', ':', '-']
def draw_success_precision(success_ret, name, videos, attr, precision_ret=None,
                           norm_precision_ret=None, bold_name=None, axis=[0, 1]):
    """Draw OPE success (and optionally precision / normalized precision) plots.

    Args:
        success_ret: tracker -> video -> success curve over IoU thresholds.
        name: dataset name used in plot titles.
        videos: iterable of video names to include.
        attr: attribute name, or 'ALL' for the whole dataset.
        precision_ret: tracker -> video -> precision curve (optional).
        norm_precision_ret: tracker -> video -> normalized precision (optional).
        bold_name: tracker rendered in bold in the legend.
        axis: y-limits for the success plot (mutable default — do not mutate).
    """
    # success plot (trackers ranked by AUC of the mean success curve)
    fig, ax = plt.subplots()
    ax.grid(b=True)
    ax.set_aspect(1)
    plt.xlabel('Overlap threshold')
    plt.ylabel('Success rate')
    if attr == 'ALL':
        plt.title(r'\textbf{Success plots of OPE on %s}' % (name))
    else:
        plt.title(r'\textbf{Success plots of OPE - %s}' % (attr))
    plt.axis([0, 1] + axis)
    success = {}
    thresholds = np.arange(0, 1.05, 0.05)
    for tracker_name in success_ret.keys():
        value = [v for k, v in success_ret[tracker_name].items() if k in videos]
        success[tracker_name] = np.mean(value)
    for idx, (tracker_name, auc) in \
            enumerate(sorted(success.items(), key=lambda x: x[1], reverse=True)):
        if tracker_name == bold_name:
            label = r"\textbf{[%.3f] %s}" % (auc, tracker_name)
        else:
            label = "[%.3f] " % (auc) + tracker_name
        value = [v for k, v in success_ret[tracker_name].items() if k in videos]
        plt.plot(thresholds, np.mean(value, axis=0),
                 color=COLOR[idx], linestyle=LINE_STYLE[idx], label=label, linewidth=2)
    ax.legend(loc='lower left', labelspacing=0.2)
    ax.autoscale(enable=True, axis='both', tight=True)
    xmin, xmax, ymin, ymax = plt.axis()
    ax.autoscale(enable=False)
    ymax += 0.03
    plt.axis([xmin, xmax, ymin, ymax])
    plt.xticks(np.arange(xmin, xmax + 0.01, 0.1))
    plt.yticks(np.arange(ymin, ymax, 0.1))
    ax.set_aspect((xmax - xmin) / (ymax - ymin))
    plt.show()

    if precision_ret:
        # precision plot (ranked by precision at the 20-pixel threshold)
        fig, ax = plt.subplots()
        ax.grid(b=True)
        ax.set_aspect(50)
        plt.xlabel('Location error threshold')
        plt.ylabel('Precision')
        if attr == 'ALL':
            plt.title(r'\textbf{Precision plots of OPE on %s}' % (name))
        else:
            plt.title(r'\textbf{Precision plots of OPE - %s}' % (attr))
        plt.axis([0, 50] + axis)
        precision = {}
        thresholds = np.arange(0, 51, 1)
        for tracker_name in precision_ret.keys():
            value = [v for k, v in precision_ret[tracker_name].items() if k in videos]
            precision[tracker_name] = np.mean(value, axis=0)[20]
        for idx, (tracker_name, pre) in \
                enumerate(sorted(precision.items(), key=lambda x: x[1], reverse=True)):
            if tracker_name == bold_name:
                label = r"\textbf{[%.3f] %s}" % (pre, tracker_name)
            else:
                label = "[%.3f] " % (pre) + tracker_name
            value = [v for k, v in precision_ret[tracker_name].items() if k in videos]
            plt.plot(thresholds, np.mean(value, axis=0),
                     color=COLOR[idx], linestyle=LINE_STYLE[idx], label=label, linewidth=2)
        ax.legend(loc='lower right', labelspacing=0.2)
        ax.autoscale(enable=True, axis='both', tight=True)
        xmin, xmax, ymin, ymax = plt.axis()
        ax.autoscale(enable=False)
        ymax += 0.03
        plt.axis([xmin, xmax, ymin, ymax])
        plt.xticks(np.arange(xmin, xmax + 0.01, 5))
        plt.yticks(np.arange(ymin, ymax, 0.1))
        ax.set_aspect((xmax - xmin) / (ymax - ymin))
        plt.show()

    # norm precision plot
    if norm_precision_ret:
        fig, ax = plt.subplots()
        ax.grid(b=True)
        plt.xlabel('Location error threshold')
        plt.ylabel('Precision')
        if attr == 'ALL':
            plt.title(r'\textbf{Normalized Precision plots of OPE on %s}' % (name))
        else:
            plt.title(r'\textbf{Normalized Precision plots of OPE - %s}' % (attr))
        norm_precision = {}
        thresholds = np.arange(0, 51, 1) / 100
        # Bug fix: this loop previously iterated `precision_ret.keys()`, which
        # crashes with AttributeError when only `norm_precision_ret` is given
        # and silently skips trackers present only in the normalized results.
        for tracker_name in norm_precision_ret.keys():
            value = [v for k, v in norm_precision_ret[tracker_name].items() if k in videos]
            norm_precision[tracker_name] = np.mean(value, axis=0)[20]
        for idx, (tracker_name, pre) in \
                enumerate(sorted(norm_precision.items(), key=lambda x: x[1], reverse=True)):
            if tracker_name == bold_name:
                label = r"\textbf{[%.3f] %s}" % (pre, tracker_name)
            else:
                label = "[%.3f] " % (pre) + tracker_name
            value = [v for k, v in norm_precision_ret[tracker_name].items() if k in videos]
            plt.plot(thresholds, np.mean(value, axis=0),
                     color=COLOR[idx], linestyle=LINE_STYLE[idx], label=label, linewidth=2)
        ax.legend(loc='lower right', labelspacing=0.2)
        ax.autoscale(enable=True, axis='both', tight=True)
        xmin, xmax, ymin, ymax = plt.axis()
        ax.autoscale(enable=False)
        ymax += 0.03
        plt.axis([xmin, xmax, ymin, ymax])
        plt.xticks(np.arange(xmin, xmax + 0.01, 0.05))
        plt.yticks(np.arange(ymin, ymax, 0.1))
        ax.set_aspect((xmax - xmin) / (ymax - ymin))
        plt.show()
153,212 | import numpy as np
from . import region
The provided code snippet includes necessary dependencies for implementing the `calculate_failures` function. Write a Python function `def calculate_failures(trajectory)` to solve the following problem:
Calculate number of failures Args: trajectory: list of bbox Returns: num_failures: number of failures failures: failures point in trajectory, start with 0
Here is the function:
def calculate_failures(trajectory):
    """ Calculate number of failures
    Args:
        trajectory: list of bbox
    Returns:
        num_failures: number of failures
        failures: failures point in trajectory, start with 0
    """
    # A failure frame is encoded as the single-element special region [2].
    # Idiom fix: use enumerate instead of zip(range(len(...)), ...).
    failures = [i for i, x in enumerate(trajectory)
                if len(x) == 1 and x[0] == 2]
    return len(failures), failures
153,213 | import numpy as np
from . import region
The provided code snippet includes necessary dependencies for implementing the `calculate_accuracy` function. Write a Python function `def calculate_accuracy(pred_trajectory, gt_trajectory, burnin=0, ignore_unknown=True, bound=None)` to solve the following problem:
Caculate accuracy socre as average overlap over the entire sequence Args: trajectory: list of bbox gt_trajectory: list of bbox burnin: number of frames that have to be ignored after the failure ignore_unknown: ignore frames where the overlap is unknown bound: bounding region Return: acc: average overlap overlaps: per frame overlaps
Here is the function:
def calculate_accuracy(pred_trajectory, gt_trajectory,
                       burnin=0, ignore_unknown=True, bound=None):
    """Calculate accuracy score as average overlap over the entire sequence
    Args:
        trajectory: list of bbox
        gt_trajectory: list of bbox
        burnin: number of frames that have to be ignored after the failure
        ignore_unknown: ignore frames where the overlap is unknown
        bound: bounding region
    Return:
        acc: average overlap
        overlaps: per frame overlaps
    """
    pred_trajectory_ = pred_trajectory
    if not ignore_unknown:
        # Flag frames whose prediction is the special "unknown" region [0].
        unkown = [len(x) == 1 and x[0] == 0 for x in pred_trajectory]

    if burnin > 0:
        # Mask out `burnin` frames following each re-initialization frame
        # (encoded as the special region [1]) by marking them unknown ([0]).
        pred_trajectory_ = pred_trajectory[:]
        mask = [len(x) == 1 and x[0] == 1 for x in pred_trajectory]
        for i in range(len(mask)):
            if mask[i]:
                for j in range(burnin):
                    if i + j < len(mask):
                        pred_trajectory_[i+j] = [0]
    min_len = min(len(pred_trajectory_), len(gt_trajectory))
    overlaps = region.vot_overlap_traj(pred_trajectory_[:min_len],
                                       gt_trajectory[:min_len], bound)
    if not ignore_unknown:
        # NOTE(review): this REPLACES the computed overlaps with the 0/1
        # "unknown" flags rather than masking unknown frames out — looks
        # suspicious; confirm intent against the VOT toolkit reference.
        overlaps = [u if u else 0 for u in unkown]
    acc = 0
    if len(overlaps) > 0:
        # nanmean: frames with undefined overlap do not drag the mean down.
        acc = np.nanmean(overlaps)
    return acc, overlaps
153,214 | import numpy as np
from . import region
def overlap_ratio(rect1, rect2):
    '''Compute overlap ratio (IoU) between two aligned sets of rects.
    Args
        rect1, rect2: 2d array of N x [x,y,w,h]
    Return:
        iou: 1d array of N IoU values, clipped to [0, 1]
    '''
    # (Removed dead commented-out ndim-promotion code from the original.)
    left = np.maximum(rect1[:, 0], rect2[:, 0])
    right = np.minimum(rect1[:, 0] + rect1[:, 2], rect2[:, 0] + rect2[:, 2])
    top = np.maximum(rect1[:, 1], rect2[:, 1])
    bottom = np.minimum(rect1[:, 1] + rect1[:, 3], rect2[:, 1] + rect2[:, 3])

    intersect = np.maximum(0, right - left) * np.maximum(0, bottom - top)
    union = rect1[:, 2] * rect1[:, 3] + rect2[:, 2] * rect2[:, 3] - intersect
    iou = intersect / union
    iou = np.maximum(np.minimum(1, iou), 0)
    return iou
def success_overlap(gt_bb, result_bb, n_frame):
    """Success curve: fraction of frames whose IoU exceeds each of 21
    evenly spaced overlap thresholds in [0, 1]."""
    thresholds_overlap = np.arange(0, 1.05, 0.05)
    # Frames without a valid (positive width and height) ground truth keep
    # IoU = -1 so they fail every threshold.
    iou = np.ones(len(gt_bb)) * (-1)
    mask = np.sum(gt_bb[:, 2:] > 0, axis=1) == 2
    iou[mask] = overlap_ratio(gt_bb[mask], result_bb[mask])
    success = np.zeros(len(thresholds_overlap))
    for idx, threshold in enumerate(thresholds_overlap):
        success[idx] = np.sum(iou > threshold) / float(n_frame)
    return success
153,215 | import numpy as np
from . import region
def success_error(gt_center, result_center, thresholds, n_frame):
    """Precision curve: fraction of frames whose center-location error is
    within each threshold (invalid GT centers count as never-matching)."""
    success = np.zeros(len(thresholds))
    # Frames with a non-positive ground-truth center keep dist = -1 ... but
    # note -1 <= threshold for thresholds >= 0 never holds only when
    # thresholds are < -1; valid frames get their Euclidean error.
    dist = np.ones(len(gt_center)) * (-1)
    mask = np.sum(gt_center > 0, axis=1) == 2
    diffs = gt_center[mask] - result_center[mask]
    dist[mask] = np.sqrt(np.sum(np.power(diffs, 2), axis=1))
    for idx, threshold in enumerate(thresholds):
        success[idx] = np.sum(dist <= threshold) / float(n_frame)
    return success
153,216 | import numpy as np
from . import region
The provided code snippet includes necessary dependencies for implementing the `determine_thresholds` function. Write a Python function `def determine_thresholds(scores, resolution=100)` to solve the following problem:
Args: scores: 1d array of score
Here is the function:
def determine_thresholds(scores, resolution=100):
    """
    Args:
        scores: 1d array of score
    Returns:
        `resolution` thresholds spanning -inf .. +inf, with the interior
        values sampled evenly from the sorted finite scores.
    """
    # Drop NaNs, then sort ascending so evenly spaced indices give quantiles.
    finite = np.sort(scores[np.logical_not(np.isnan(scores))])
    delta = np.floor(len(finite) / (resolution - 2))
    positions = np.linspace(delta - 1, len(finite) - delta, resolution - 2)
    idxs = np.floor(positions + 0.5).astype(np.int32)
    thresholds = np.zeros((resolution))
    thresholds[0] = -np.inf
    thresholds[-1] = np.inf
    thresholds[1:-1] = finite[idxs]
    return thresholds
153,217 | import numpy as np
from . import region
def calculate_f1(overlaps, score, bound, thresholds, N):
    """F1 / precision / recall curves over confidence thresholds.

    NaNs in overlaps and scores are treated as 0. At the -inf threshold,
    frames with strictly positive score are selected; otherwise score >= th.
    `bound` is accepted for interface compatibility but unused.
    """
    overlaps = np.array(overlaps)
    overlaps[np.isnan(overlaps)] = 0
    score = np.array(score)
    score[np.isnan(score)] = 0
    precision = np.zeros(len(thresholds))
    recall = np.zeros(len(thresholds))
    for i, th in enumerate(thresholds):
        idx = (score > 0) if th == -np.inf else (score >= th)
        if np.sum(idx) == 0:
            # No predictions above threshold: perfect precision, zero recall.
            precision[i] = 1
            recall[i] = 0
        else:
            precision[i] = np.mean(overlaps[idx])
            recall[i] = np.sum(overlaps[idx]) / N
    f1 = 2 * precision * recall / (precision + recall)
    return f1, precision, recall
153,218 | import numpy as np
from . import region
def calculate_expected_overlap(fragments, fweights):
    """Expected average overlap per sequence length.

    `fragments` is (num_fragments, max_len), NaN-padded past each fragment's
    end; `fweights` weights each fragment's contribution.
    """
    max_len = fragments.shape[1]
    expected_overlaps = np.zeros((max_len), np.float32)
    expected_overlaps[0] = 1
    # TODO Speed Up
    for length in range(1, max_len):
        # Only fragments still alive at this length contribute.
        alive = np.logical_not(np.isnan(fragments[:, length]))
        if np.any(alive):
            window = fragments[alive, 1:length + 1]
            seq_mean = np.sum(window, 1) / window.shape[1]
            weighted_sum = np.sum(seq_mean * fweights[alive])
            expected_overlaps[length] = weighted_sum / np.sum(fweights[alive])
    return expected_overlaps
153,219 | from pycocotools.coco import COCO
import cv2
import numpy as np
from os.path import join, isdir
from os import mkdir, makedirs
from concurrent import futures
import sys
import time
The provided code snippet includes necessary dependencies for implementing the `printProgress` function. Write a Python function `def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100)` to solve the following problem:
Call in a loop to create terminal progress bar @params: iteration - Required : current iteration (Int) total - Required : total iterations (Int) prefix - Optional : prefix string (Str) suffix - Optional : suffix string (Str) decimals - Optional : positive number of decimals in percent complete (Int) barLength - Optional : character length of bar (Int)
Here is the function:
def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        barLength   - Optional  : character length of bar (Int)
    """
    formatStr = "{0:." + str(decimals) + "f}"
    percents = formatStr.format(100 * (iteration / float(total)))
    filledLength = int(round(barLength * iteration / float(total)))
    # Bug fix: the fill character was an EMPTY string (''), rendering an
    # invisible bar — presumably a lost '█' from an encoding mishap.
    bar = '█' * filledLength + '-' * (barLength - filledLength)
    # (Also dropped a stray trailing comma that made this line a tuple expr.)
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
    if iteration == total:
        # Erase the finished bar and return the carriage.
        sys.stdout.write('\x1b[2K\r')
    sys.stdout.flush()
153,220 | from pycocotools.coco import COCO
import cv2
import numpy as np
from os.path import join, isdir
from os import mkdir, makedirs
from concurrent import futures
import sys
import time
def crop_like_SiamFC(image, bbox, context_amount=0.5, exemplar_size=127, instanc_size=255, padding=(0, 0, 0)):
    """Crop SiamFC-style exemplar (z) and search (x) patches centered on `bbox`.

    `bbox` is [x1, y1, x2, y2]; `padding` fills out-of-image regions.
    Returns (z_patch, x_patch).
    """
    center = [(bbox[2] + bbox[0]) / 2., (bbox[3] + bbox[1]) / 2.]
    size = [bbox[2] - bbox[0], bbox[3] - bbox[1]]
    # Context-padded square side for the exemplar crop.
    wc_z = size[1] + context_amount * sum(size)
    hc_z = size[0] + context_amount * sum(size)
    s_z = np.sqrt(wc_z * hc_z)
    scale_z = exemplar_size / s_z
    # Extra margin so the search crop covers instanc_size at exemplar scale.
    d_search = (instanc_size - exemplar_size) / 2
    s_x = s_z + 2 * (d_search / scale_z)
    z = crop_hwc(image, pos_s_2_bbox(center, s_z), exemplar_size, padding)
    x = crop_hwc(image, pos_s_2_bbox(center, s_x), instanc_size, padding)
    return z, x
def crop_img(img, anns, set_crop_base_path, set_img_base_path, instanc_size=511):
    """Crop z/x patches for every annotation of one COCO image and save them
    as numbered JPEGs under a per-image directory."""
    stem = img['file_name'].split('/')[-1].split('.')[0]
    frame_crop_base_path = join(set_crop_base_path, stem)
    if not isdir(frame_crop_base_path):
        makedirs(frame_crop_base_path)
    im = cv2.imread('{}/{}'.format(set_img_base_path, img['file_name']))
    # Mean image color used as padding for out-of-frame crop regions.
    avg_chans = np.mean(im, axis=(0, 1))
    for trackid, ann in enumerate(anns):
        rect = ann['bbox']
        # Skip degenerate (zero/negative sized) annotations.
        if rect[2] <= 0 or rect[3] <= 0:
            continue
        bbox = [rect[0], rect[1], rect[0] + rect[2], rect[1] + rect[3]]
        z, x = crop_like_SiamFC(im, bbox, instanc_size=instanc_size, padding=avg_chans)
        cv2.imwrite(join(frame_crop_base_path, '{:06d}.{:02d}.z.jpg'.format(0, trackid)), z)
        cv2.imwrite(join(frame_crop_base_path, '{:06d}.{:02d}.x.jpg'.format(0, trackid)), x)
153,221 | from . import _mask
def encode(bimask):
    """RLE-encode binary mask(s); a 2-D mask is promoted to H x W x 1 first.

    Returns a list of RLEs for a 3-D stack, or a single RLE for a 2-D mask.
    """
    ndim = len(bimask.shape)
    if ndim == 3:
        return _mask.encode(bimask)
    if ndim == 2:
        h, w = bimask.shape
        # Fortran order matches the C implementation's column-major layout.
        return _mask.encode(bimask.reshape((h, w, 1), order='F'))[0]
153,222 | from . import _mask
def decode(rleObjs):
    """Decode COCO RLE(s) into binary mask array(s); a single RLE yields a 2-D mask."""
    if type(rleObjs) == list:
        return _mask.decode(rleObjs)
    # Single RLE: wrap, decode, and strip the channel axis.
    return _mask.decode([rleObjs])[:, :, 0]
153,223 | from . import _mask
def area(rleObjs):
    """Compute the pixel area of RLE mask(s); scalar for a single RLE."""
    if type(rleObjs) == list:
        return _mask.area(rleObjs)
    return _mask.area([rleObjs])[0]
153,224 | from . import _mask
def toBbox(rleObjs):
    """Convert COCO RLE object(s) to [x, y, w, h] bounding box(es).

    A list of RLE dicts yields an (N, 4) array; a single dict yields one box.
    """
    # isinstance is the idiomatic type test and also accepts list subclasses
    if isinstance(rleObjs, list):
        return _mask.toBbox(rleObjs)
    return _mask.toBbox([rleObjs])[0]
153,226 | from os.path import join
from os import listdir
import json
import numpy as np
def check_size(frame_sz, bbox):
    """Accept only targets whose sqrt area ratio lies strictly between
    10% and 75% of the whole frame."""
    min_ratio, max_ratio = 0.1, 0.75
    w = bbox[2] - bbox[0]
    h = bbox[3] - bbox[1]
    area_ratio = np.sqrt(w * h / float(np.prod(frame_sz)))
    return min_ratio < area_ratio < max_ratio
153,227 | from os.path import join
from os import listdir
import json
import numpy as np
def check_borders(frame_sz, bbox):
    """Reject targets closer to any frame edge than 5% of their mean side length."""
    margin = 0.05 * (bbox[2] - bbox[0] + bbox[3] - bbox[1]) / 2
    clear_of_left_top = (bbox[0] > margin) and (bbox[1] > margin)
    clear_of_right_bottom = ((frame_sz[0] - bbox[2]) > margin) and \
                            ((frame_sz[1] - bbox[3]) > margin)
    return clear_of_left_top and clear_of_right_bottom
153,228 | from os.path import join, isdir
from os import listdir, mkdir, makedirs
import cv2
import numpy as np
import glob
import xml.etree.ElementTree as ET
from concurrent import futures
import sys
import time
The provided code snippet includes necessary dependencies for implementing the `printProgress` function. Write a Python function `def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100)` to solve the following problem:
Call in a loop to create terminal progress bar @params: iteration - Required : current iteration (Int) total - Required : total iterations (Int) prefix - Optional : prefix string (Str) suffix - Optional : suffix string (Str) decimals - Optional : positive number of decimals in percent complete (Int) barLength - Optional : character length of bar (Int)
Here is the function:
def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        barLength   - Optional  : character length of bar (Int)
    """
    formatStr = "{0:." + str(decimals) + "f}"
    percents = formatStr.format(100 * (iteration / float(total)))
    filledLength = int(round(barLength * iteration / float(total)))
    # BUG FIX: the filled segment was '' (empty string), so the bar never grew;
    # use the solid block character like the working copy of this helper.
    bar = '█' * filledLength + '-' * (barLength - filledLength)
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
    if iteration == total:
        sys.stdout.write('\x1b[2K\r')
    # flush on every call so the bar is visible while the loop runs,
    # not only on completion
    sys.stdout.flush()
153,229 | from os.path import join, isdir
from os import listdir, mkdir, makedirs
import cv2
import numpy as np
import glob
import xml.etree.ElementTree as ET
from concurrent import futures
import sys
import time
# Root of the ImageNet-VID annotation tree.
# NOTE(review): VID_base_path is not defined in this file chunk — it must be
# assigned before this module-level line runs; confirm against the full script.
ann_base_path = join(VID_base_path, 'Annotations/VID/train/')
def crop_like_SiamFC(image, bbox, context_amount=0.5, exemplar_size=127, instanc_size=255, padding=(0, 0, 0)):
    """Cut SiamFC-style exemplar (z) and search (x) patches centred on *bbox*.

    Returns the (exemplar, instance) image pair produced by crop_hwc.
    """
    center = [(bbox[2] + bbox[0]) / 2., (bbox[3] + bbox[1]) / 2.]
    size = [bbox[2] - bbox[0], bbox[3] - bbox[1]]
    # context margin added around the target, as in the SiamFC paper
    context = context_amount * (size[0] + size[1])
    s_z = np.sqrt((size[1] + context) * (size[0] + context))
    # grow the exemplar region so the instance crop covers instanc_size pixels
    extra = ((instanc_size - exemplar_size) / 2) / (exemplar_size / s_z)
    s_x = s_z + 2 * extra
    z = crop_hwc(image, pos_s_2_bbox(center, s_z), exemplar_size, padding)
    x = crop_hwc(image, pos_s_2_bbox(center, s_x), instanc_size, padding)
    return z, x
def crop_video(sub_set, video, crop_path, instanc_size):
    """Crop every annotated frame of one ImageNet-VID video into z/x patches.

    Output files are written as
    <crop_path>/<sub_set>/<video>/<frame>.<trackid>.{z,x}.jpg.
    (Removed an unused local list and commented-out dead code.)
    """
    video_crop_base_path = join(crop_path, sub_set, video)
    if not isdir(video_crop_base_path):
        makedirs(video_crop_base_path)
    sub_set_base_path = join(ann_base_path, sub_set)
    xmls = sorted(glob.glob(join(sub_set_base_path, video, '*.xml')))
    for xml in xmls:
        xmltree = ET.parse(xml)
        objects = xmltree.findall('object')
        filename = xmltree.findall('filename')[0].text
        # the JPEG frame lives in the parallel Data/ tree next to Annotations/
        im = cv2.imread(xml.replace('xml', 'JPEG').replace('Annotations', 'Data'))
        avg_chans = np.mean(im, axis=(0, 1))  # padding colour for out-of-frame crops
        for object_iter in objects:
            trackid = int(object_iter.find('trackid').text)
            bndbox = object_iter.find('bndbox')
            bbox = [int(bndbox.find('xmin').text), int(bndbox.find('ymin').text),
                    int(bndbox.find('xmax').text), int(bndbox.find('ymax').text)]
            z, x = crop_like_SiamFC(im, bbox, instanc_size=instanc_size, padding=avg_chans)
            cv2.imwrite(join(video_crop_base_path,
                             '{:06d}.{:02d}.z.jpg'.format(int(filename), trackid)), z)
            cv2.imwrite(join(video_crop_base_path,
                             '{:06d}.{:02d}.x.jpg'.format(int(filename), trackid)), x)
153,230 | from __future__ import unicode_literals
import json
from os.path import join, exists
import pandas as pd
# The two YouTube-BB detection splits processed by this script.
d_sets = ['yt_bb_detection_validation', 'yt_bb_detection_train']
# CSV column layout of the YouTube-BB annotation files.
col_names = ['youtube_id', 'timestamp_ms','class_id','class_name',
             'object_id','object_presence','xmin','xmax','ymin','ymax']
# NOTE(review): instanc_size is not defined in this chunk — it must be set
# before this module-level line executes; confirm against the full script.
crop_path = './crop{:d}'.format(instanc_size)
def parse_and_sched(dl_dir='.'):
    """Build train.json / val.json clip indexes from the YouTube-BB CSVs.

    For every annotated frame whose crop already exists under crop_path,
    record its box as js[video][object_id][frame] = [x1, y1, x2, y2].
    """
    # For each of the two datasets
    js = {}
    for d_set in d_sets:
        # Make the directory for this dataset
        d_set_dir = dl_dir+'/'+d_set+'/'
        # Parse csv data using pandas
        print(d_set+': Parsing annotations into clip data...')
        # BUG FIX: DataFrame.from_csv was removed in pandas 0.25 — use read_csv
        df = pd.read_csv(d_set+'.csv', header=None, index_col=False)
        df.columns = col_names
        # Get list of unique video files
        vids = df['youtube_id'].unique()
        for vid in vids:
            data = df[df['youtube_id'] == vid]
            for index, row in data.iterrows():
                youtube_id, timestamp_ms, class_id, class_name, \
                    object_id, object_presence, x1, x2, y1, y2 = row
                if object_presence == 'absent':
                    continue
                # drop malformed / out-of-range boxes
                if x1 < 0 or x2 < 0 or y1 < 0 or y2 < 0 or y2 < y1 or x2 < x1:
                    continue
                bbox = [x1, y1, x2, y2]
                frame = '%06d' % (int(timestamp_ms) / 1000)
                obj = '%02d' % (int(object_id))
                video = join(d_set_dir + str(class_id), youtube_id)
                # only keep frames whose crop was actually produced
                if not exists(join(crop_path, video, '{}.{}.x.jpg'.format(frame, obj))):
                    continue
                js.setdefault(video, {}).setdefault(obj, {})[frame] = bbox
        # FIX: close the output file deterministically (was an unclosed open())
        out_name = 'train.json' if d_set == 'yt_bb_detection_train' else 'val.json'
        with open(out_name, 'w') as f:
            json.dump(js, f, indent=4, sort_keys=True)
        js = {}
        print(d_set+': All videos downloaded')
153,231 | from __future__ import unicode_literals
from subprocess import check_call
from concurrent import futures
import os
from os.path import join
import sys
import cv2
import pandas as pd
import numpy as np
# The two YouTube-BB detection splits processed by this script.
d_sets = ['yt_bb_detection_validation', 'yt_bb_detection_train']
# CSV column layout of the YouTube-BB annotation files.
col_names = ['youtube_id', 'timestamp_ms','class_id','class_name',
             'object_id','object_presence','xmin','xmax','ymin','ymax']
def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):
    """Render a one-line terminal progress bar.

    iteration/total drive the fill ratio; prefix and suffix are free text;
    decimals sets the percentage precision and barLength the bar width.
    """
    percents = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
    filled = int(round(barLength * iteration / float(total)))
    bar = '█' * filled + '-' * (barLength - filled)
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
    if iteration == total:
        # wipe the line once the loop is done
        sys.stdout.write('\x1b[2K\r')
        sys.stdout.flush()
def dl_and_cut(vid, data, d_set_dir):
    """Crop all annotated frames of one YouTube-BB video into z/x patches.

    Relies on module-level crop_path / instanc_size / crop_like_SiamFC.
    NOTE(review): those names are not defined in this chunk — confirm they are
    provided by the enclosing script.
    """
    for index, row in data.iterrows():
        youtube_id, timestamp_ms, class_id, class_name, \
            object_id, object_presence, xmin, xmax, ymin, ymax = row
        if object_presence == 'absent':
            continue
        class_dir = d_set_dir + str(class_id)
        frame_path = class_dir + '/' + youtube_id + '_' + str(timestamp_ms) + \
            '_' + str(class_id) + '_' + str(object_id) + '.jpg'
        # Verify that the frame has been downloaded. Skip otherwise
        if not os.path.exists(frame_path):
            continue
        image = cv2.imread(frame_path)
        avg_chans = np.mean(image, axis=(0, 1))
        # annotations store normalised coordinates — scale to pixels
        h, w = image.shape[:2]
        x1 = xmin * w
        x2 = xmax * w
        y1 = ymin * h
        y2 = ymax * h
        if x1 < 0 or x2 < 0 or y1 < 0 or y2 < 0 or y2 < y1 or x2 < x1:
            continue
        # Make the class directory if it doesn't exist yet
        crop_class_dir = join(crop_path, d_set_dir + str(class_id), youtube_id)
        check_call(['mkdir', '-p', crop_class_dir])
        bbox = [x1, y1, x2, y2]
        z, x = crop_like_SiamFC(image, bbox, instanc_size=instanc_size, padding=avg_chans)
        # BUG FIX: '/' yields a float in Python 3 and '{:06d}' rejects floats —
        # use integer division for the frame index.
        frame_idx = int(timestamp_ms) // 1000
        cv2.imwrite(join(crop_class_dir, '{:06d}.{:02d}.z.jpg'.format(frame_idx, int(object_id))), z)
        cv2.imwrite(join(crop_class_dir, '{:06d}.{:02d}.x.jpg'.format(frame_idx, int(object_id))), x)
    return True
The provided code snippet includes necessary dependencies for implementing the `parse_and_sched` function. Write a Python function `def parse_and_sched(dl_dir='.', num_threads=24)` to solve the following problem:
Crop the entire youtube-bb data set into `crop_path`.
Here is the function:
def parse_and_sched(dl_dir='.', num_threads=24):
    """Crop the entire youtube-bb data set into `crop_path`.

    Each video is handed to dl_and_cut in its own worker process; progress is
    printed as the futures complete.
    """
    # For each of the two datasets
    for d_set in d_sets:
        # Make the directory for this dataset
        d_set_dir = dl_dir+'/'+d_set+'/'
        # Parse csv data using pandas
        print(d_set+': Parsing annotations into clip data...')
        # BUG FIX: DataFrame.from_csv was removed in pandas 0.25 — use read_csv
        df = pd.read_csv(d_set+'.csv', header=None, index_col=False)
        df.columns = col_names
        # Get list of unique video files
        vids = df['youtube_id'].unique()
        # Crop videos in parallel worker processes
        with futures.ProcessPoolExecutor(max_workers=num_threads) as executor:
            fs = [executor.submit(dl_and_cut, vid, df[df['youtube_id'] == vid], d_set_dir)
                  for vid in vids]
            for i, f in enumerate(futures.as_completed(fs)):
                # Write progress so that it can be seen
                printProgress(i, len(vids),
                              prefix=d_set,
                              suffix='Done',
                              barLength=40)
        print(d_set+': All videos Crop Done')
153,232 | from os.path import join, isdir
from os import mkdir, makedirs
import cv2
import numpy as np
import glob
import xml.etree.ElementTree as ET
from concurrent import futures
import time
import sys
The provided code snippet includes necessary dependencies for implementing the `printProgress` function. Write a Python function `def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100)` to solve the following problem:
Call in a loop to create terminal progress bar @params: iteration - Required : current iteration (Int) total - Required : total iterations (Int) prefix - Optional : prefix string (Str) suffix - Optional : suffix string (Str) decimals - Optional : positive number of decimals in percent complete (Int) barLength - Optional : character length of bar (Int)
Here is the function:
def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        barLength   - Optional  : character length of bar (Int)
    """
    formatStr = "{0:." + str(decimals) + "f}"
    percents = formatStr.format(100 * (iteration / float(total)))
    filledLength = int(round(barLength * iteration / float(total)))
    # BUG FIX: the filled segment was '' (empty string), so the bar never grew;
    # use the solid block character like the working copy of this helper.
    bar = '█' * filledLength + '-' * (barLength - filledLength)
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
    if iteration == total:
        sys.stdout.write('\x1b[2K\r')
    # flush on every call so the bar is visible while the loop runs,
    # not only on completion
    sys.stdout.flush()
153,233 | from os.path import join, isdir
from os import mkdir, makedirs
import cv2
import numpy as np
import glob
import xml.etree.ElementTree as ET
from concurrent import futures
import time
import sys
def crop_like_SiamFC(image, bbox, context_amount=0.5, exemplar_size=127, instanc_size=255, padding=(0, 0, 0)):
    """Cut SiamFC-style exemplar (z) and search (x) patches centred on *bbox*.

    Returns the (exemplar, instance) image pair produced by crop_hwc.
    """
    center = [(bbox[2] + bbox[0]) / 2., (bbox[3] + bbox[1]) / 2.]
    size = [bbox[2] - bbox[0], bbox[3] - bbox[1]]
    # context margin added around the target, as in the SiamFC paper
    context = context_amount * (size[0] + size[1])
    s_z = np.sqrt((size[1] + context) * (size[0] + context))
    # grow the exemplar region so the instance crop covers instanc_size pixels
    extra = ((instanc_size - exemplar_size) / 2) / (exemplar_size / s_z)
    s_x = s_z + 2 * extra
    z = crop_hwc(image, pos_s_2_bbox(center, s_z), exemplar_size, padding)
    x = crop_hwc(image, pos_s_2_bbox(center, s_x), instanc_size, padding)
    return z, x
def crop_xml(xml, sub_set_crop_path, instanc_size=511):
    """Crop every object of one ImageNet-DET annotation file into z/x patches."""
    xmltree = ET.parse(xml)
    objects = xmltree.findall('object')
    frame_crop_base_path = join(sub_set_crop_path, xml.split('/')[-1].split('.')[0])
    if not isdir(frame_crop_base_path):
        makedirs(frame_crop_base_path)
    # the JPEG image lives in the parallel Data/ tree next to Annotations/
    img_path = xml.replace('xml', 'JPEG').replace('Annotations', 'Data')
    im = cv2.imread(img_path)
    avg_chans = np.mean(im, axis=(0, 1))  # padding colour for out-of-frame crops
    # IDIOM FIX: loop variable renamed from `id`, which shadowed the builtin
    for obj_id, object_iter in enumerate(objects):
        bndbox = object_iter.find('bndbox')
        bbox = [int(bndbox.find('xmin').text), int(bndbox.find('ymin').text),
                int(bndbox.find('xmax').text), int(bndbox.find('ymax').text)]
        z, x = crop_like_SiamFC(im, bbox, instanc_size=instanc_size, padding=avg_chans)
        cv2.imwrite(join(frame_crop_base_path, '{:06d}.{:02d}.z.jpg'.format(0, obj_id)), z)
        cv2.imwrite(join(frame_crop_base_path, '{:06d}.{:02d}.x.jpg'.format(0, obj_id)), x)
153,234 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `xcorr_slow` function. Write a Python function `def xcorr_slow(x, kernel)` to solve the following problem:
for loop to calculate cross correlation, slow version
Here is the function:
def xcorr_slow(x, kernel):
    """Per-sample cross correlation via a Python loop (reference/slow path).

    Each x[i] is correlated with kernel[i]; results are stacked along dim 0.
    """
    responses = []
    for sample, weights in zip(x, kernel):
        sample = sample.view(1, -1, sample.size()[1], sample.size()[2])
        weights = weights.view(1, -1, weights.size()[1], weights.size()[2])
        responses.append(F.conv2d(sample, weights))
    return torch.cat(responses, 0)
153,235 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `xcorr_fast` function. Write a Python function `def xcorr_fast(x, kernel)` to solve the following problem:
group conv2d to calculate cross correlation, fast version
Here is the function:
def xcorr_fast(x, kernel):
    """Batched cross correlation via one grouped conv2d (fast path)."""
    n = kernel.size(0)
    # fold the batch into the channel dimension and use one grouped conv
    weights = kernel.view(-1, x.size(1), kernel.size(2), kernel.size(3))
    merged = x.view(1, -1, x.size(2), x.size(3))
    response = F.conv2d(merged, weights, groups=n)
    return response.view(n, -1, response.size(2), response.size(3))
153,236 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `xcorr_depthwise` function. Write a Python function `def xcorr_depthwise(x, kernel)` to solve the following problem:
depthwise cross correlation
Here is the function:
def xcorr_depthwise(x, kernel):
    """Depthwise cross correlation: every channel of every sample is
    correlated with its own kernel channel."""
    batch, channel = kernel.size(0), kernel.size(1)
    # flatten (batch, channel) into one grouped-conv channel axis
    flat_x = x.view(1, batch * channel, x.size(2), x.size(3))
    flat_k = kernel.view(batch * channel, 1, kernel.size(2), kernel.size(3))
    response = F.conv2d(flat_x, flat_k, groups=batch * channel)
    return response.view(batch, channel, response.size(2), response.size(3))
153,237 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch.nn as nn
class AlexNetLegacy(nn.Module):
    """AlexNet backbone packed into a single nn.Sequential (legacy checkpoint
    layout). Outputs the conv5 feature map; no classifier head."""
    # per-stage channel counts: [rgb_in, conv1..conv5 out]
    configs = [3, 96, 256, 384, 384, 256]

    def __init__(self, width_mult=1):
        """width_mult scales every channel count except the RGB input."""
        # BUG FIX: read the channel table from this class, not from AlexNet,
        # which is a separate class and may not be defined alongside this one.
        configs = list(map(lambda x: 3 if x == 3 else
                           int(x*width_mult), AlexNetLegacy.configs))
        super(AlexNetLegacy, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(configs[0], configs[1], kernel_size=11, stride=2),
            nn.BatchNorm2d(configs[1]),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(configs[1], configs[2], kernel_size=5),
            nn.BatchNorm2d(configs[2]),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(configs[2], configs[3], kernel_size=3),
            nn.BatchNorm2d(configs[3]),
            nn.ReLU(inplace=True),
            nn.Conv2d(configs[3], configs[4], kernel_size=3),
            nn.BatchNorm2d(configs[4]),
            nn.ReLU(inplace=True),
            nn.Conv2d(configs[4], configs[5], kernel_size=3),
            nn.BatchNorm2d(configs[5]),
        )
        # number of output channels, for downstream heads
        self.feature_size = configs[5]

    def forward(self, x):
        """Map an image batch to its conv5 feature map."""
        x = self.features(x)
        return x
def alexnetlegacy(**kwargs):
    """Factory returning an AlexNetLegacy backbone."""
    model = AlexNetLegacy(**kwargs)
    return model
153,238 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch.nn as nn
class AlexNet(nn.Module):
    """AlexNet backbone split into five named stages (layer1..layer5).

    Outputs the conv5 feature map; no classifier head.
    """
    # per-stage channel counts: [rgb_in, conv1..conv5 out]
    configs = [3, 96, 256, 384, 384, 256]

    def __init__(self, width_mult=1):
        # scale every channel count except the 3-channel RGB input
        configs = [3 if c == 3 else int(c * width_mult)
                   for c in AlexNet.configs]
        super(AlexNet, self).__init__()

        def stage(cin, cout, k, stride=1, pool=False, relu=True):
            # Conv -> BN [-> MaxPool] [-> ReLU], matching the stage recipe
            mods = [nn.Conv2d(cin, cout, kernel_size=k, stride=stride),
                    nn.BatchNorm2d(cout)]
            if pool:
                mods.append(nn.MaxPool2d(kernel_size=3, stride=2))
            if relu:
                mods.append(nn.ReLU(inplace=True))
            return nn.Sequential(*mods)

        self.layer1 = stage(configs[0], configs[1], 11, stride=2, pool=True)
        self.layer2 = stage(configs[1], configs[2], 5, pool=True)
        self.layer3 = stage(configs[2], configs[3], 3)
        self.layer4 = stage(configs[3], configs[4], 3)
        self.layer5 = stage(configs[4], configs[5], 3, relu=False)
        # number of output channels, for downstream heads
        self.feature_size = configs[5]

    def forward(self, x):
        """Run the five stages in order and return the conv5 feature map."""
        for blk in (self.layer1, self.layer2, self.layer3,
                    self.layer4, self.layer5):
            x = blk(x)
        return x
def alexnet(**kwargs):
    """Factory returning an AlexNet backbone."""
    model = AlexNet(**kwargs)
    return model
153,239 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
def conv_bn(inp, oup, stride, padding=1):
    """3x3 Conv -> BatchNorm -> ReLU6 block (MobileNet-style stem)."""
    modules = (
        nn.Conv2d(inp, oup, 3, stride, padding, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    )
    return nn.Sequential(*modules)
153,240 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
def conv_1x1_bn(inp, oup):
    """Pointwise 1x1 Conv -> BatchNorm -> ReLU6 block."""
    modules = (
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    )
    return nn.Sequential(*modules)
153,241 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
class MobileNetV2(nn.Sequential):
    """MobileNetV2 backbone where late-stage strides are replaced by dilation
    to preserve spatial resolution for tracking.

    used_layers selects which intermediate feature maps forward() returns.
    Requires the InvertedResidual block (defined elsewhere in this project).
    """

    def __init__(self, width_mult=1.0, used_layers=[3, 5, 7]):
        super(MobileNetV2, self).__init__()
        # (expansion t, channels c, repeats n, stride s, dilation d) per stage.
        # FIX: a duplicate copy of this table that was immediately overwritten
        # has been removed — only the dilated variant was ever used.
        self.interverted_residual_setting = [
            # t, c, n, s, d
            [1, 16, 1, 1, 1],
            [6, 24, 2, 2, 1],
            [6, 32, 3, 2, 1],
            [6, 64, 4, 1, 2],
            [6, 96, 3, 1, 2],
            [6, 160, 3, 1, 4],
            [6, 320, 1, 1, 4],
        ]
        # output channels of the feature maps exposed to downstream heads
        self.channels = [int(c * width_mult) for c in [24, 32, 96, 320]]
        input_channel = int(32 * width_mult)
        self.last_channel = int(1280 * width_mult) \
            if width_mult > 1.0 else 1280
        self.add_module('layer0', conv_bn(3, input_channel, 2, 0))
        last_dilation = 1
        self.used_layers = used_layers
        for idx, (t, c, n, s, d) in \
                enumerate(self.interverted_residual_setting, start=1):
            output_channel = int(c * width_mult)
            layers = []
            for i in range(n):
                if i == 0:
                    # on the first block of a stage whose dilation just grew,
                    # use half the dilation so the receptive field ramps up
                    if d == last_dilation:
                        dd = d
                    else:
                        dd = max(d // 2, 1)
                    layers.append(InvertedResidual(input_channel,
                                                   output_channel, s, t, dd))
                else:
                    layers.append(InvertedResidual(input_channel,
                                                   output_channel, 1, t, d))
                input_channel = output_channel
            last_dilation = d
            self.add_module('layer%d' % (idx), nn.Sequential(*layers))

    def forward(self, x):
        """Run layer0..layer7 and return the maps picked by used_layers."""
        outputs = []
        for idx in range(8):
            name = "layer%d" % idx
            x = getattr(self, name)(x)
            outputs.append(x)
        # FIX: removed an unused unpacking of intermediate maps (p0..p4)
        out = [outputs[i] for i in self.used_layers]
        if len(out) == 1:
            return out[0]
        return out
def mobilenetv2(**kwargs):
    """Factory returning a MobileNetV2 backbone."""
    return MobileNetV2(**kwargs)
153,242 | import math
import torch.nn as nn
import torch
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1, dilation=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    """3x3 convolution whose padding equals the dilation
    (spatial size is preserved at stride 1)."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3,
                     stride=stride,
                     padding=dilation,
                     dilation=dilation,
                     bias=False)
153,243 | import math
import torch.nn as nn
import torch
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style) with dilation support."""
    # ratio of output channels to `planes`
    expansion = 1
    def __init__(self, inplanes, planes, stride=1,
                 downsample=None, dilation=1):
        super(BasicBlock, self).__init__()
        # stride 1 -> pad 1 (size kept); stride 2 -> pad 0
        padding = 2 - stride
        if dilation > 1:
            padding = dilation
        dd = dilation
        pad = padding
        # first block of a dilated stage (downsample present): halve the
        # dilation of conv1 so the receptive field grows gradually
        if downsample is not None and dilation > 1:
            dd = dilation // 2
            pad = dd
        self.conv1 = nn.Conv2d(inplanes, planes,
                               stride=stride, dilation=dd, bias=False,
                               kernel_size=3, padding=pad)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """conv-bn-relu-conv-bn plus (optionally projected) identity, then relu."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet trunk for tracking: layer3/layer4 trade stride for dilation so
    the output keeps a larger spatial size; forward() returns the feature
    maps selected by used_layers (indices into [stem, layer1..layer4])."""
    def __init__(self, block, layers, used_layers):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, # 3
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.feature_size = 128 * block.expansion
        self.used_layers = used_layers
        layer3 = True if 3 in used_layers else False
        layer4 = True if 4 in used_layers else False
        if layer3:
            # dilated instead of strided, so resolution is preserved
            self.layer3 = self._make_layer(block, 256, layers[2],
                                           stride=1, dilation=2) # 15x15, 7x7
            self.feature_size = (256 + 128) * block.expansion
        else:
            self.layer3 = lambda x: x # identity
        if layer4:
            self.layer4 = self._make_layer(block, 512, layers[3],
                                           stride=1, dilation=4) # 7x7, 3x3
            self.feature_size = 512 * block.expansion
        else:
            self.layer4 = lambda x: x # identity
        # He-style weight init for convs; BN starts at identity affine
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Stack `blocks` residual units; the first may get a projection
        shortcut (1x1 conv, or 3x3 when the stage is strided/dilated)."""
        downsample = None
        dd = dilation
        if stride != 1 or self.inplanes != planes * block.expansion:
            if stride == 1 and dilation == 1:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(planes * block.expansion),
                )
            else:
                # dilated/strided stage: 3x3 projection with halved dilation
                if dilation > 1:
                    dd = dilation // 2
                    padding = dd
                else:
                    dd = 1
                    padding = 0
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=3, stride=stride, bias=False,
                              padding=padding, dilation=dd),
                    nn.BatchNorm2d(planes * block.expansion),
                )
        layers = []
        layers.append(block(self.inplanes, planes, stride,
                            downsample, dilation=dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return the feature map(s) chosen by used_layers (a single tensor
        when only one layer is selected, else a list)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x_ = self.relu(x)
        x = self.maxpool(x_)
        p1 = self.layer1(x)
        p2 = self.layer2(p1)
        p3 = self.layer3(p2)
        p4 = self.layer4(p3)
        out = [x_, p1, p2, p3, p4]
        out = [out[i] for i in self.used_layers]
        if len(out) == 1:
            return out[0]
        else:
            return out
The provided code snippet includes necessary dependencies for implementing the `resnet18` function. Write a Python function `def resnet18(**kwargs)` to solve the following problem:
Constructs a ResNet-18 model.
Here is the function:
def resnet18(**kwargs):
    """Build a ResNet-18 trunk (BasicBlock with layer counts [2, 2, 2, 2])."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
153,244 | import math
import torch.nn as nn
import torch
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style) with dilation support."""
    # ratio of output channels to `planes`
    expansion = 1
    def __init__(self, inplanes, planes, stride=1,
                 downsample=None, dilation=1):
        super(BasicBlock, self).__init__()
        # stride 1 -> pad 1 (size kept); stride 2 -> pad 0
        padding = 2 - stride
        if dilation > 1:
            padding = dilation
        dd = dilation
        pad = padding
        # first block of a dilated stage (downsample present): halve the
        # dilation of conv1 so the receptive field grows gradually
        if downsample is not None and dilation > 1:
            dd = dilation // 2
            pad = dd
        self.conv1 = nn.Conv2d(inplanes, planes,
                               stride=stride, dilation=dd, bias=False,
                               kernel_size=3, padding=pad)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """conv-bn-relu-conv-bn plus (optionally projected) identity, then relu."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet trunk for tracking: layer3/layer4 trade stride for dilation so
    the output keeps a larger spatial size; forward() returns the feature
    maps selected by used_layers (indices into [stem, layer1..layer4])."""
    def __init__(self, block, layers, used_layers):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, # 3
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.feature_size = 128 * block.expansion
        self.used_layers = used_layers
        layer3 = True if 3 in used_layers else False
        layer4 = True if 4 in used_layers else False
        if layer3:
            # dilated instead of strided, so resolution is preserved
            self.layer3 = self._make_layer(block, 256, layers[2],
                                           stride=1, dilation=2) # 15x15, 7x7
            self.feature_size = (256 + 128) * block.expansion
        else:
            self.layer3 = lambda x: x # identity
        if layer4:
            self.layer4 = self._make_layer(block, 512, layers[3],
                                           stride=1, dilation=4) # 7x7, 3x3
            self.feature_size = 512 * block.expansion
        else:
            self.layer4 = lambda x: x # identity
        # He-style weight init for convs; BN starts at identity affine
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Stack `blocks` residual units; the first may get a projection
        shortcut (1x1 conv, or 3x3 when the stage is strided/dilated)."""
        downsample = None
        dd = dilation
        if stride != 1 or self.inplanes != planes * block.expansion:
            if stride == 1 and dilation == 1:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(planes * block.expansion),
                )
            else:
                # dilated/strided stage: 3x3 projection with halved dilation
                if dilation > 1:
                    dd = dilation // 2
                    padding = dd
                else:
                    dd = 1
                    padding = 0
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=3, stride=stride, bias=False,
                              padding=padding, dilation=dd),
                    nn.BatchNorm2d(planes * block.expansion),
                )
        layers = []
        layers.append(block(self.inplanes, planes, stride,
                            downsample, dilation=dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return the feature map(s) chosen by used_layers (a single tensor
        when only one layer is selected, else a list)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x_ = self.relu(x)
        x = self.maxpool(x_)
        p1 = self.layer1(x)
        p2 = self.layer2(p1)
        p3 = self.layer3(p2)
        p4 = self.layer4(p3)
        out = [x_, p1, p2, p3, p4]
        out = [out[i] for i in self.used_layers]
        if len(out) == 1:
            return out[0]
        else:
            return out
The provided code snippet includes necessary dependencies for implementing the `resnet34` function. Write a Python function `def resnet34(**kwargs)` to solve the following problem:
Constructs a ResNet-34 model.
Here is the function:
def resnet34(**kwargs):
    """Build a ResNet-34 trunk (BasicBlock with layer counts [3, 4, 6, 3])."""
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
153,245 | import math
import torch.nn as nn
import torch
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck (ResNet-50+) with dilation support."""
    # output channels = planes * expansion
    expansion = 4
    def __init__(self, inplanes, planes, stride=1,
                 downsample=None, dilation=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # stride 1 -> pad 1 (size kept); stride 2 -> pad 0
        padding = 2 - stride
        # first block of a dilated stage (downsample present): halve the
        # dilation so the receptive field grows gradually
        if downsample is not None and dilation > 1:
            dilation = dilation // 2
            padding = dilation
        assert stride == 1 or dilation == 1, \
            "stride and dilation must have one equals to zero at least"
        if dilation > 1:
            padding = dilation
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=padding, bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Bottleneck transform plus (optionally projected) identity, then relu."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet trunk for tracking: layer3/layer4 trade stride for dilation so
    the output keeps a larger spatial size; forward() returns the feature
    maps selected by used_layers (indices into [stem, layer1..layer4])."""
    def __init__(self, block, layers, used_layers):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, # 3
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.feature_size = 128 * block.expansion
        self.used_layers = used_layers
        layer3 = True if 3 in used_layers else False
        layer4 = True if 4 in used_layers else False
        if layer3:
            # dilated instead of strided, so resolution is preserved
            self.layer3 = self._make_layer(block, 256, layers[2],
                                           stride=1, dilation=2) # 15x15, 7x7
            self.feature_size = (256 + 128) * block.expansion
        else:
            self.layer3 = lambda x: x # identity
        if layer4:
            self.layer4 = self._make_layer(block, 512, layers[3],
                                           stride=1, dilation=4) # 7x7, 3x3
            self.feature_size = 512 * block.expansion
        else:
            self.layer4 = lambda x: x # identity
        # He-style weight init for convs; BN starts at identity affine
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Stack `blocks` residual units; the first may get a projection
        shortcut (1x1 conv, or 3x3 when the stage is strided/dilated)."""
        downsample = None
        dd = dilation
        if stride != 1 or self.inplanes != planes * block.expansion:
            if stride == 1 and dilation == 1:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(planes * block.expansion),
                )
            else:
                # dilated/strided stage: 3x3 projection with halved dilation
                if dilation > 1:
                    dd = dilation // 2
                    padding = dd
                else:
                    dd = 1
                    padding = 0
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=3, stride=stride, bias=False,
                              padding=padding, dilation=dd),
                    nn.BatchNorm2d(planes * block.expansion),
                )
        layers = []
        layers.append(block(self.inplanes, planes, stride,
                            downsample, dilation=dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return the feature map(s) chosen by used_layers (a single tensor
        when only one layer is selected, else a list)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x_ = self.relu(x)
        x = self.maxpool(x_)
        p1 = self.layer1(x)
        p2 = self.layer2(p1)
        p3 = self.layer3(p2)
        p4 = self.layer4(p3)
        out = [x_, p1, p2, p3, p4]
        out = [out[i] for i in self.used_layers]
        if len(out) == 1:
            return out[0]
        else:
            return out
The provided code snippet includes necessary dependencies for implementing the `resnet50` function. Write a Python function `def resnet50(**kwargs)` to solve the following problem:
Constructs a ResNet-50 model.
Here is the function:
def resnet50(**kwargs):
    """Construct a ResNet-50 backbone (Bottleneck blocks with [3, 4, 6, 3] stages).

    Extra keyword arguments are forwarded to the ResNet constructor.
    """
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
153,246 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn.functional as F
def get_cls_loss(pred, label, select):
    """NLL loss over the rows of `pred`/`label` indexed by `select`; 0 when `select` is empty."""
    if len(select.size()) == 0 or select.size() == torch.Size([0]):
        return 0
    chosen_pred = torch.index_select(pred, 0, select)
    chosen_label = torch.index_select(label, 0, select)
    return F.nll_loss(chosen_pred, chosen_label)
def select_cross_entropy_loss(pred, label):
    """Class-balanced classification loss: average the NLL over positive (label==1) and negative (label==0) anchors with equal weight."""
    flat_pred = pred.view(-1, 2)
    flat_label = label.view(-1)
    pos_idx = flat_label.data.eq(1).nonzero().squeeze().cuda()
    neg_idx = flat_label.data.eq(0).nonzero().squeeze().cuda()
    pos_loss = get_cls_loss(flat_pred, flat_label, pos_idx)
    neg_loss = get_cls_loss(flat_pred, flat_label, neg_idx)
    return pos_loss * 0.5 + neg_loss * 0.5
153,247 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn.functional as F
def weight_l1_loss(pred_loc, label_loc, loss_weight):
    """Weighted L1 localization loss, summed over coordinates/positions and averaged over the batch."""
    batch, _, sh, sw = pred_loc.size()
    per_coord = pred_loc.view(batch, 4, -1, sh, sw)
    abs_err = (per_coord - label_loc).abs().sum(dim=1).view(batch, -1, sh, sw)
    weighted = abs_err * loss_weight
    return weighted.sum().div(batch)
153,248 | import torch.nn as nn
def init_weights(model):
    """Kaiming-initialize every Conv2d; reset every BatchNorm2d to identity (weight=1, bias=0)."""
    for module in model.modules():
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight.data,
                                    mode='fan_out',
                                    nonlinearity='relu')
        elif isinstance(module, nn.BatchNorm2d):
            module.weight.data.fill_(1)
            module.bias.data.zero_()
153,249 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import torch
def check_keys(model, pretrained_state_dict):
    """Log how checkpoint keys overlap with the model's keys; assert at least one key is usable."""
    ckpt_keys = set(pretrained_state_dict.keys())
    model_keys = set(model.state_dict().keys())
    used_pretrained_keys = model_keys & ckpt_keys
    unused_pretrained_keys = ckpt_keys - model_keys
    # BatchNorm's 'num_batches_tracked' is bookkeeping, not a learned
    # parameter, so its absence from a checkpoint is not worth warning about
    missing_keys = [k for k in (model_keys - ckpt_keys)
                    if not k.endswith('num_batches_tracked')]
    if len(missing_keys) > 0:
        logger.info('[Warning] missing keys: {}'.format(missing_keys))
        logger.info('missing keys:{}'.format(len(missing_keys)))
    if len(unused_pretrained_keys) > 0:
        logger.info('[Warning] unused_pretrained_keys: {}'.format(
            unused_pretrained_keys))
        logger.info('unused checkpoint keys:{}'.format(
            len(unused_pretrained_keys)))
    logger.info('used keys:{}'.format(len(used_pretrained_keys)))
    assert len(used_pretrained_keys) > 0, \
        'load NONE from pretrained checkpoint'
    return True
def remove_prefix(state_dict, prefix):
    ''' Old style model is stored with all names of parameters
    share common prefix 'module.' '''
    logger.info('remove prefix \'{}\''.format(prefix))

    def strip(name):
        # only strip a genuine prefix; leave other keys untouched
        return name.split(prefix, 1)[-1] if name.startswith(prefix) else name

    return {strip(key): value for key, value in state_dict.items()}
def restore_from(model, optimizer, ckpt_path):
    """Resume training state from `ckpt_path`; returns (model, optimizer, epoch).

    The checkpoint is mapped onto the current CUDA device and 'module.'
    prefixes (from DataParallel) are stripped before loading.
    """
    device = torch.cuda.current_device()
    ckpt = torch.load(ckpt_path,
                      map_location=lambda storage, loc: storage.cuda(device))
    epoch = ckpt['epoch']
    model_state = remove_prefix(ckpt['state_dict'], 'module.')
    check_keys(model, model_state)
    model.load_state_dict(model_state, strict=False)
    check_keys(optimizer, ckpt['optimizer'])
    optimizer.load_state_dict(ckpt['optimizer'])
    return model, optimizer, epoch
153,250 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import numpy as np
Corner = namedtuple('Corner', 'x1 y1 x2 y2')
Center = namedtuple('Center', 'x y w h')
The provided code snippet includes necessary dependencies for implementing the `corner2center` function. Write a Python function `def corner2center(corner)` to solve the following problem:
convert (x1, y1, x2, y2) to (cx, cy, w, h) Args: corner: Corner or np.array (4*N) Return: Center or np.array (4 * N)
Here is the function:
def corner2center(corner):
    """ convert (x1, y1, x2, y2) to (cx, cy, w, h)
    Args:
        corner: Corner or np.array (4*N)
    Return:
        Center or np.array (4 * N)
    """
    if isinstance(corner, Corner):
        x1, y1, x2, y2 = corner
        return Center((x1 + x2) * 0.5, (y1 + y2) * 0.5, (x2 - x1), (y2 - y1))
    # array form: operate element-wise on the four rows
    x1, y1, x2, y2 = corner[0], corner[1], corner[2], corner[3]
    return (x1 + x2) * 0.5, (y1 + y2) * 0.5, x2 - x1, y2 - y1
153,251 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import numpy as np
Corner = namedtuple('Corner', 'x1 y1 x2 y2')
Center = namedtuple('Center', 'x y w h')
The provided code snippet includes necessary dependencies for implementing the `center2corner` function. Write a Python function `def center2corner(center)` to solve the following problem:
convert (cx, cy, w, h) to (x1, y1, x2, y2) Args: center: Center or np.array (4 * N) Return: center or np.array (4 * N)
Here is the function:
def center2corner(center):
    """ convert (cx, cy, w, h) to (x1, y1, x2, y2)
    Args:
        center: Center or np.array (4 * N)
    Return:
        Corner or np.array (4 * N)
    """
    if isinstance(center, Center):
        x, y, w, h = center
        return Corner(x - w * 0.5, y - h * 0.5, x + w * 0.5, y + h * 0.5)
    # array form: operate element-wise on the four rows
    x, y, w, h = center[0], center[1], center[2], center[3]
    half_w = w * 0.5
    half_h = h * 0.5
    return x - half_w, y - half_h, x + half_w, y + half_h
153,252 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `IoU` function. Write a Python function `def IoU(rect1, rect2)` to solve the following problem:
calculate intersection over union Args: rect1: (x1, y1, x2, y2) rect2: (x1, y1, x2, y2) Returns: iou
Here is the function:
def IoU(rect1, rect2):
    """ calculate intersection over union
    Args:
        rect1: (x1, y1, x2, y2)
        rect2: (x1, y1, x2, y2)
    Returns:
        iou
    """
    x1, y1, x2, y2 = rect1[0], rect1[1], rect1[2], rect1[3]
    tx1, ty1, tx2, ty2 = rect2[0], rect2[1], rect2[2], rect2[3]
    # intersection rectangle, clamped to zero extent for disjoint boxes
    ix1 = np.maximum(tx1, x1)
    iy1 = np.maximum(ty1, y1)
    ix2 = np.minimum(tx2, x2)
    iy2 = np.minimum(ty2, y2)
    inter = np.maximum(0, ix2 - ix1) * np.maximum(0, iy2 - iy1)
    area1 = (x2 - x1) * (y2 - y1)
    area2 = (tx2 - tx1) * (ty2 - ty1)
    return inter / (area1 + area2 - inter)
153,253 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `cxy_wh_2_rect` function. Write a Python function `def cxy_wh_2_rect(pos, sz)` to solve the following problem:
convert (cx, cy, w, h) to (x1, y1, w, h), 0-index
Here is the function:
def cxy_wh_2_rect(pos, sz):
    """ convert (cx, cy, w, h) to (x1, y1, w, h), 0-index
    """
    cx, cy = pos[0], pos[1]
    w, h = sz[0], sz[1]
    return np.array([cx - w / 2, cy - h / 2, w, h])
153,254 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `rect_2_cxy_wh` function. Write a Python function `def rect_2_cxy_wh(rect)` to solve the following problem:
convert (x1, y1, w, h) to (cx, cy, w, h), 0-index
Here is the function:
def rect_2_cxy_wh(rect):
    """ convert (x1, y1, w, h) to (cx, cy, w, h), 0-index
    """
    x1, y1, w, h = rect[0], rect[1], rect[2], rect[3]
    center = np.array([x1 + w / 2, y1 + h / 2])
    size = np.array([w, h])
    return center, size
153,255 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `cxy_wh_2_rect1` function. Write a Python function `def cxy_wh_2_rect1(pos, sz)` to solve the following problem:
convert (cx, cy, w, h) to (x1, y1, w, h), 1-index
Here is the function:
def cxy_wh_2_rect1(pos, sz):
    """ convert (cx, cy, w, h) to (x1, y1, w, h), 1-index
    """
    cx, cy = pos[0], pos[1]
    w, h = sz[0], sz[1]
    return np.array([cx - w / 2 + 1, cy - h / 2 + 1, w, h])
153,256 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `rect1_2_cxy_wh` function. Write a Python function `def rect1_2_cxy_wh(rect)` to solve the following problem:
convert (x1, y1, w, h) to (cx, cy, w, h), 1-index
Here is the function:
def rect1_2_cxy_wh(rect):
    """ convert (x1, y1, w, h) to (cx, cy, w, h), 1-index
    """
    x1, y1, w, h = rect[0], rect[1], rect[2], rect[3]
    center = np.array([x1 + w / 2 - 1, y1 + h / 2 - 1])
    size = np.array([w, h])
    return center, size
153,257 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `get_min_max_bbox` function. Write a Python function `def get_min_max_bbox(region)` to solve the following problem:
convert region to the (cx, cy, w, h) represented by its min-max box
Here is the function:
def get_min_max_bbox(region):
    """ convert region (8-value polygon or 4-value rect) to the (cx, cy, w, h)
    of its min-max bounding box
    """
    if region.size == 8:
        # polygon: center is the vertex mean, extent the min-max box
        xs = region[0::2]
        ys = region[1::2]
        cx = np.mean(xs)
        cy = np.mean(ys)
        w = max(xs) - min(xs)
        h = max(ys) - min(ys)
    else:
        x, y, w, h = region[0], region[1], region[2], region[3]
        cx = x + w / 2
        cy = y + h / 2
    return cx, cy, w, h
153,258 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from colorama import Fore, Style
def _bold(s):
    """Wrap `s` in ANSI escape codes so terminals render it in bold."""
    return "\033[1m%s\033[0m" % s
153,259 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from colorama import Fore, Style
def _exec(cmd):
    """Run `cmd` through a shell and return its stripped stdout.

    Fix: the popen stream was never closed, leaking a file descriptor per
    call; `os.popen` objects support the context-manager protocol, so use it.
    """
    with os.popen(cmd, 'r', 1) as stream:
        return stream.read().strip()
def commit():
    """Return the repository's current commit hash and one-line log message (run provenance)."""
    root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
    sha = _exec("cd {}; git log | head -n1 | awk '{{print $2}}'".format(root))
    oneline = _exec("cd {}; git log --oneline | head -n1".format(root))
    return "commit : {}\n  log  : {}".format(sha, oneline)
153,260 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import socket
import logging
import torch
import torch.nn as nn
import torch.distributed as dist
from pysot.utils.log_helper import log_once
The provided code snippet includes necessary dependencies for implementing the `broadcast_params` function. Write a Python function `def broadcast_params(model)` to solve the following problem:
broadcast model parameters
Here is the function:
def broadcast_params(model):
    """ broadcast model parameters

    Pushes every tensor in the model's state dict from rank 0 to all ranks.
    """
    for tensor in model.state_dict().values():
        dist.broadcast(tensor, 0)
153,261 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import socket
import logging
import torch
import torch.nn as nn
import torch.distributed as dist
from pysot.utils.log_helper import log_once
def get_world_size():
    """Return the number of processes in the distributed group; requires dist_init() to have run."""
    if not inited:
        raise Exception('dist not inited')
    return world_size
The provided code snippet includes necessary dependencies for implementing the `broadcast_buffers` function. Write a Python function `def broadcast_buffers(model, method=0)` to solve the following problem:
broadcast model buffers
Here is the function:
def broadcast_buffers(model, method=0):
    """ broadcast model buffers

    method 0: no-op; 1: broadcast from rank 0; 2: all-reduce and average.
    """
    if method == 0:
        return
    n = get_world_size()
    for buf in model._all_buffers():
        if method == 1:  # broadcast from the main process
            dist.broadcast(buf, 0)
        elif method == 2:  # average across ranks
            dist.all_reduce(buf)
            buf /= n
        else:
            raise Exception('Invalid buffer broadcast code {}'.format(method))
153,262 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import socket
import logging
import torch
import torch.nn as nn
import torch.distributed as dist
from pysot.utils.log_helper import log_once
def _get_local_ip():
    """Best-effort discovery of this machine's outbound IP address.

    "Connecting" a UDP socket to a public address makes the OS pick the
    local endpoint from its routing table; no packet is actually sent.

    Fix: the socket was created inside the try block, so if socket()
    itself raised, the `finally` clause hit a NameError on `s`. Create
    the socket before entering the try.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))
        ip = s.getsockname()[0]
    finally:
        s.close()
    return ip
153,263 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import socket
import logging
import torch
import torch.nn as nn
import torch.distributed as dist
from pysot.utils.log_helper import log_once
logger = logging.getLogger('global')
inited = False
def _dist_init():
    '''
    if guess right:
        ntasks: world_size (process num)
        proc_id: rank
    '''
    # Rank is provided by the launcher's environment; each process pins
    # itself to one GPU (round-robin when processes outnumber GPUs).
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend='nccl')
    world_size = dist.get_world_size()
    return rank, world_size
def dist_init():
    """Initialize distributed training and set the module globals
    rank/world_size/inited; falls back to a single process when NCCL
    init fails with a network-related ('public') error.
    """
    global rank, world_size, inited
    try:
        rank, world_size = _dist_init()
    except RuntimeError as e:
        # heuristic: error text containing 'public' indicates no usable
        # network interface for rendezvous -- degrade to single-process
        if 'public' in e.args[0]:
            logger.info(e)
            logger.info('Warning: use single process')
            rank, world_size = 0, 1
        else:
            raise RuntimeError(*e.args)
    inited = True
    return rank, world_size
153,264 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
import math
import sys
class Filter:
    """Logging filter with a fixed verdict: passes every record iff `flag` is truthy."""

    def __init__(self, flag):
        self.flag = flag

    def filter(self, x):
        # the record itself is ignored; the decision was made at construction
        return self.flag
def get_format_custom(logger, level):
    """Build a formatter that tags each message with the distributed rank.

    When RANK is set and the level is INFO, non-zero ranks get a filter
    that drops every record, so only rank 0 logs at INFO.
    """
    rank = int(os.environ['RANK']) if 'RANK' in os.environ else 0
    if 'RANK' in os.environ and level == logging.INFO:
        logger.addFilter(Filter(rank == 0))
    return logging.Formatter('[%(asctime)s-rk{}-%(message)s'.format(rank))
153,265 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
import math
import sys
logs = set()
def get_format(logger, level):
    """Build a rank-tagged formatter that also shows the source file and line.

    When RANK is set and the level is INFO, non-zero ranks get a
    drop-everything filter so only rank 0 logs at INFO.
    """
    if 'RANK' in os.environ:
        rank = int(os.environ['RANK'])
        if level == logging.INFO:
            logger.addFilter(Filter(rank == 0))
    else:
        rank = 0
    fmt = '[%(asctime)s-rk{}-%(filename)s#%(lineno)3d] %(message)s'.format(rank)
    return logging.Formatter(fmt)
def init_log(name, level=logging.INFO, format_func=get_format):
    """Configure logger `name` with a stream handler exactly once.

    Repeated calls with the same (name, level) pair are no-ops; the
    module-level `logs` set records what has already been configured.
    """
    if (name, level) in logs:
        return
    logs.add((name, level))
    logger = logging.getLogger(name)
    logger.setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(format_func(logger, level))
    logger.addHandler(handler)
    return logger
153,266 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
import math
import sys
def get_format(logger, level):
    """Return a formatter prefixing records with timestamp, rank, file and line.

    On INFO level with RANK set, non-zero ranks receive a filter that
    silences all their records.
    """
    rank = 0
    if 'RANK' in os.environ:
        rank = int(os.environ['RANK'])
        if level == logging.INFO:
            logger.addFilter(Filter(rank == 0))
    format_str = '[%(asctime)s-rk{}-%(filename)s#%(lineno)3d] %(message)s'.format(rank)
    return logging.Formatter(format_str)
def add_file_handler(name, log_file, level=logging.INFO):
    """Attach a file handler writing to `log_file` onto logger `name`."""
    logger = logging.getLogger(name)
    handler = logging.FileHandler(log_file)
    handler.setFormatter(get_format(logger, level))
    logger.addHandler(handler)
153,267 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
import math
import sys
# Resolve this module's own (normalized) filename so that stack walking can
# skip frames originating from this helper file -- same trick as the stdlib
# logging module uses.
if hasattr(sys, 'frozen'):  # support for py2exe
    _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif __file__[-4:].lower() in ['.pyc', '.pyo']:
    _srcfile = __file__[:-4] + '.py'
else:
    _srcfile = __file__
_srcfile = os.path.normcase(_srcfile)


def find_caller():
    """Return (basename, lineno, funcname) of the nearest stack frame outside this module."""
    def current_frame():
        # raising and catching an exception exposes the current frame
        # via the traceback, portably across interpreters
        try:
            raise Exception
        except:
            return sys.exc_info()[2].tb_frame.f_back
    f = current_frame()
    if f is not None:
        f = f.f_back
    rv = "(unknown file)", 0, "(unknown function)"
    while hasattr(f, "f_code"):
        co = f.f_code
        filename = os.path.normcase(co.co_filename)
        rv = (co.co_filename, f.f_lineno, co.co_name)
        if filename == _srcfile:
            # still inside this module -- keep walking up the stack
            f = f.f_back
            continue
        break
    rv = list(rv)
    rv[0] = os.path.basename(rv[0])
    return rv
153,268 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import os
import time
import math
import json
import random
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from torch.nn.utils import clip_grad_norm_
from torch.utils.data.distributed import DistributedSampler
from pysot.utils.lr_scheduler import build_lr_scheduler
from pysot.utils.log_helper import init_log, print_speed, add_file_handler
from pysot.utils.distributed import dist_init, DistModule, reduce_gradients,\
average_reduce, get_rank, get_world_size
from pysot.utils.model_load import load_pretrain, restore_from
from pysot.utils.average_meter import AverageMeter
from pysot.utils.misc import describe, commit
from pysot.models.model_builder import ModelBuilder
from pysot.datasets.dataset import TrkDataset
from pysot.core.config import cfg
def seed_torch(seed=0):
    """Seed Python, NumPy and PyTorch RNGs and force deterministic cuDNN.

    Fix: `torch.cuda.manual_seed` only seeds the current GPU; added
    `torch.cuda.manual_seed_all` so multi-GPU runs are reproducible too
    (a harmless no-op without CUDA).
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # trade cuDNN autotuning speed for bitwise-reproducible kernels
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
153,269 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import os
import time
import math
import json
import random
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from torch.nn.utils import clip_grad_norm_
from torch.utils.data.distributed import DistributedSampler
from pysot.utils.lr_scheduler import build_lr_scheduler
from pysot.utils.log_helper import init_log, print_speed, add_file_handler
from pysot.utils.distributed import dist_init, DistModule, reduce_gradients,\
average_reduce, get_rank, get_world_size
from pysot.utils.model_load import load_pretrain, restore_from
from pysot.utils.average_meter import AverageMeter
from pysot.utils.misc import describe, commit
from pysot.models.model_builder import ModelBuilder
from pysot.datasets.dataset import TrkDataset
from pysot.core.config import cfg
logger = logging.getLogger('global')
def get_world_size():
    """Size of the distributed process group; raises unless dist_init() ran first."""
    if inited:
        return world_size
    raise Exception('dist not inited')
class TrkDataset(Dataset):
    """Training dataset for Siamese tracking.

    Aggregates several sub-datasets, samples (template, search) image pairs
    (or negative pairs across datasets), augments them, and produces
    anchor-based classification / regression targets via AnchorTarget.
    """
    def __init__(self,):
        super(TrkDataset, self).__init__()

        # sanity check: the score-map size implied by search/exemplar sizes
        # and the anchor stride must match the configured OUTPUT_SIZE
        desired_size = (cfg.TRAIN.SEARCH_SIZE - cfg.TRAIN.EXEMPLAR_SIZE) / \
            cfg.ANCHOR.STRIDE + 1 + cfg.TRAIN.BASE_SIZE
        if desired_size != cfg.TRAIN.OUTPUT_SIZE:
            raise Exception('size not match!')

        # create anchor target
        self.anchor_target = AnchorTarget()

        # create sub dataset; `start` gives each one a disjoint global
        # index range so a flat index maps to exactly one sub-dataset
        self.all_dataset = []
        start = 0
        self.num = 0
        for name in cfg.DATASET.NAMES:
            subdata_cfg = getattr(cfg.DATASET, name)
            sub_dataset = SubDataset(
                name,
                subdata_cfg.ROOT,
                subdata_cfg.ANNO,
                subdata_cfg.FRAME_RANGE,
                subdata_cfg.NUM_USE,
                start
            )
            start += sub_dataset.num
            self.num += sub_dataset.num_use
            sub_dataset.log()
            self.all_dataset.append(sub_dataset)

        # data augmentation (separate policies for template and search crops)
        self.template_aug = Augmentation(
            cfg.DATASET.TEMPLATE.SHIFT,
            cfg.DATASET.TEMPLATE.SCALE,
            cfg.DATASET.TEMPLATE.BLUR,
            cfg.DATASET.TEMPLATE.FLIP,
            cfg.DATASET.TEMPLATE.COLOR
        )
        self.search_aug = Augmentation(
            cfg.DATASET.SEARCH.SHIFT,
            cfg.DATASET.SEARCH.SCALE,
            cfg.DATASET.SEARCH.BLUR,
            cfg.DATASET.SEARCH.FLIP,
            cfg.DATASET.SEARCH.COLOR
        )
        # the epoch length can be capped by config; total length spans
        # all training epochs (the sampler indexes into self.pick)
        videos_per_epoch = cfg.DATASET.VIDEOS_PER_EPOCH
        self.num = videos_per_epoch if videos_per_epoch > 0 else self.num
        self.num *= cfg.TRAIN.EPOCH
        self.pick = self.shuffle()

    def shuffle(self):
        """Build a shuffled list of global sample indices of length self.num."""
        pick = []
        m = 0
        # keep concatenating shuffled rounds until enough indices exist
        while m < self.num:
            p = []
            for sub_dataset in self.all_dataset:
                sub_p = sub_dataset.pick
                p += sub_p
            np.random.shuffle(p)
            pick += p
            m = len(pick)
        logger.info("shuffle done!")
        logger.info("dataset length {}".format(self.num))
        return pick[:self.num]

    def _find_dataset(self, index):
        """Map a global index to (sub_dataset, local_index)."""
        for dataset in self.all_dataset:
            if dataset.start_idx + dataset.num > index:
                return dataset, index - dataset.start_idx

    def _get_bbox(self, image, shape):
        """Compute the exemplar-scaled, image-centered corner bbox for a crop.

        `shape` is either a 4-value corner box or a (w, h) pair.
        """
        imh, imw = image.shape[:2]
        if len(shape) == 4:
            w, h = shape[2]-shape[0], shape[3]-shape[1]
        else:
            w, h = shape
        # SiamFC-style context padding around the target
        context_amount = 0.5
        exemplar_size = cfg.TRAIN.EXEMPLAR_SIZE
        wc_z = w + context_amount * (w+h)
        hc_z = h + context_amount * (w+h)
        s_z = np.sqrt(wc_z * hc_z)
        scale_z = exemplar_size / s_z
        w = w*scale_z
        h = h*scale_z
        cx, cy = imw//2, imh//2
        bbox = center2corner(Center(cx, cy, w, h))
        return bbox

    def __len__(self):
        return self.num

    def __getitem__(self, index):
        index = self.pick[index]
        dataset, index = self._find_dataset(index)

        # per-sample coin flips for grayscale conversion and negative pairs
        gray = cfg.DATASET.GRAY and cfg.DATASET.GRAY > np.random.random()
        neg = cfg.DATASET.NEG and cfg.DATASET.NEG > np.random.random()

        # get one dataset
        if neg:
            # negative pair: template and search come from unrelated targets
            template = dataset.get_random_target(index)
            search = np.random.choice(self.all_dataset).get_random_target()
        else:
            template, search = dataset.get_positive_pair(index)

        # get image
        template_image = cv2.imread(template[0])
        search_image = cv2.imread(search[0])

        # get bounding box
        template_box = self._get_bbox(template_image, template[1])
        search_box = self._get_bbox(search_image, search[1])

        # augmentation
        template, _ = self.template_aug(template_image,
                                        template_box,
                                        cfg.TRAIN.EXEMPLAR_SIZE,
                                        gray=gray)

        search, bbox = self.search_aug(search_image,
                                       search_box,
                                       cfg.TRAIN.SEARCH_SIZE,
                                       gray=gray)

        # get labels (anchor cls/reg targets; negatives get no positives)
        cls, delta, delta_weight, overlap = self.anchor_target(
            bbox, cfg.TRAIN.OUTPUT_SIZE, neg)
        # HWC -> CHW float tensors for the network
        template = template.transpose((2, 0, 1)).astype(np.float32)
        search = search.transpose((2, 0, 1)).astype(np.float32)
        return {
            'template': template,
            'search': search,
            'label_cls': cls,
            'label_loc': delta,
            'label_loc_weight': delta_weight,
            'bbox': np.array(bbox)
        }
cfg = __C
def build_data_loader():
    """Construct the training DataLoader, using a DistributedSampler when running with more than one process."""
    logger.info("build train dataset")
    train_dataset = TrkDataset()
    logger.info("build dataset done")

    sampler = DistributedSampler(train_dataset) if get_world_size() > 1 else None
    return DataLoader(train_dataset,
                      batch_size=cfg.TRAIN.BATCH_SIZE,
                      num_workers=cfg.TRAIN.NUM_WORKERS,
                      pin_memory=True,
                      sampler=sampler)
153,270 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import argparse
import cv2
import torch
import numpy as np
from glob import glob
from pysot.core.config import cfg
from pysot.models.model_builder import ModelBuilder
from pysot.tracker.tracker_builder import build_tracker
args = parser.parse_args()
def get_frames(video_name):
    """Yield frames from the webcam (falsy `video_name`), a .avi/.mp4 file,
    or a directory of frame images named by frame number.

    Fix: the video-file branch opened `args.video_name` (a module-level
    global) instead of the `video_name` parameter, so calling this with a
    path different from the CLI argument read the wrong file.
    """
    if not video_name:
        cap = cv2.VideoCapture(0)
        # warmup: discard the first few frames while the camera stabilizes
        for i in range(5):
            cap.read()
        while True:
            ret, frame = cap.read()
            if ret:
                yield frame
            else:
                break
    elif video_name.endswith('avi') or \
            video_name.endswith('mp4'):
        cap = cv2.VideoCapture(video_name)  # was: args.video_name
        while True:
            ret, frame = cap.read()
            if ret:
                yield frame
            else:
                break
    else:
        # directory of images; sort numerically by the frame number stem
        images = glob(os.path.join(video_name, '*.jp*'))
        images = sorted(images,
                        key=lambda x: int(x.split('/')[-1].split('.')[0]))
        for img in images:
            frame = cv2.imread(img)
            yield frame
153,271 | from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
import numpy as np
import torch
from toolkit.datasets import DatasetFactory
from toolkit.utils.region import vot_overlap, vot_float2str
from pysot.models.model_builder import ModelBuilder
from pysot.tracker.tracker_builder import build_tracker
from pysot.utils.bbox import get_axis_aligned_bbox
from pysot.utils.model_load import load_pretrain
from pysot.core.config import cfg
def parse_range(range_str):
    """Parse a comma-separated 'start,stop[,step]' string into an np.arange of floats."""
    bounds = [float(v) for v in range_str.split(',')]
    return np.arange(*bounds)
153,272 | from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
import numpy as np
import torch
from toolkit.datasets import DatasetFactory
from toolkit.utils.region import vot_overlap, vot_float2str
from pysot.models.model_builder import ModelBuilder
from pysot.tracker.tracker_builder import build_tracker
from pysot.utils.bbox import get_axis_aligned_bbox
from pysot.utils.model_load import load_pretrain
from pysot.core.config import cfg
def parse_range_int(range_str):
    """Parse a comma-separated 'start,stop[,step]' string into an integer np.arange."""
    bounds = [int(v) for v in range_str.split(',')]
    return np.arange(*bounds)
153,273 | from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
import numpy as np
import torch
from toolkit.datasets import DatasetFactory
from toolkit.utils.region import vot_overlap, vot_float2str
from pysot.models.model_builder import ModelBuilder
from pysot.tracker.tracker_builder import build_tracker
from pysot.utils.bbox import get_axis_aligned_bbox
from pysot.utils.model_load import load_pretrain
from pysot.core.config import cfg
def get_axis_aligned_bbox(region):
    """ convert region to (cx, cy, w, h) that represent by axis aligned box

    For an 8-value polygon, the min-max box is rescaled so its area matches
    the polygon's (parallelogram) area; a 4-value rect passes through.
    """
    if region.size == 8:
        xs = region[0::2]
        ys = region[1::2]
        cx = np.mean(xs)
        cy = np.mean(ys)
        x1, x2 = min(xs), max(xs)
        y1, y2 = min(ys), max(ys)
        # area of the polygon from two adjacent edge lengths
        poly_area = np.linalg.norm(region[0:2] - region[2:4]) * \
            np.linalg.norm(region[2:4] - region[4:6])
        rect_area = (x2 - x1) * (y2 - y1)
        s = np.sqrt(poly_area / rect_area)
        w = s * (x2 - x1) + 1
        h = s * (y2 - y1) + 1
    else:
        x, y, w, h = region[0], region[1], region[2], region[3]
        cx = x + w / 2
        cy = y + h / 2
    return cx, cy, w, h
def run_tracker(tracker, img, gt, video_name, restart=True):
    """Run `tracker` over a sequence of (frame, gt_bbox) pairs.

    restart=True (VOT2016/2018 protocol): re-initialize 5 frames after each
    loss and return pred_bboxes (1 = init frame, 2 = lost, 0 = skipped).
    restart=False (OPE): init on frame 0 only and return
    (pred_bboxes, scores, track_times).

    BUG FIX: the body iterated over a name `video` that was never defined
    (the second positional argument is named `img`); as written the function
    raised NameError immediately. The second argument is the frame iterable,
    so it is aliased to `video` below without changing the call signature.
    NOTE(review): `gt` is unused -- confirm against callers.
    """
    video = img  # the second positional argument is the (frame, gt_bbox) iterable
    frame_counter = 0
    lost_number = 0
    toc = 0
    pred_bboxes = []
    if restart:  # VOT2016 and VOT 2018
        for idx, (img, gt_bbox) in enumerate(video):
            if len(gt_bbox) == 4:
                # expand (x, y, w, h) to the 8-value polygon representation
                gt_bbox = [gt_bbox[0], gt_bbox[1],
                           gt_bbox[0], gt_bbox[1]+gt_bbox[3]-1,
                           gt_bbox[0]+gt_bbox[2]-1, gt_bbox[1]+gt_bbox[3]-1,
                           gt_bbox[0]+gt_bbox[2]-1, gt_bbox[1]]
            tic = cv2.getTickCount()
            if idx == frame_counter:
                # (re-)initialize the tracker on this frame
                cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                gt_bbox_ = [cx-(w-1)/2, cy-(h-1)/2, w, h]
                tracker.init(img, gt_bbox_)
                pred_bbox = gt_bbox_
                pred_bboxes.append(1)
            elif idx > frame_counter:
                outputs = tracker.track(img)
                pred_bbox = outputs['bbox']
                overlap = vot_overlap(pred_bbox, gt_bbox,
                                      (img.shape[1], img.shape[0]))
                if overlap > 0:
                    # not lost
                    pred_bboxes.append(pred_bbox)
                else:
                    # lost object
                    pred_bboxes.append(2)
                    frame_counter = idx + 5  # skip 5 frames
                    lost_number += 1
            else:
                pred_bboxes.append(0)
            toc += cv2.getTickCount() - tic
        toc /= cv2.getTickFrequency()
        print('Video: {:12s} Time: {:4.1f}s Speed: {:3.1f}fps Lost: {:d}'.format(
            video_name, toc, idx / toc, lost_number))
        return pred_bboxes
    else:
        toc = 0
        pred_bboxes = []
        scores = []
        track_times = []
        for idx, (img, gt_bbox) in enumerate(video):
            tic = cv2.getTickCount()
            if idx == 0:
                cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                gt_bbox_ = [cx-(w-1)/2, cy-(h-1)/2, w, h]
                tracker.init(img, gt_bbox_)
                pred_bbox = gt_bbox_
                scores.append(None)
                pred_bboxes.append(pred_bbox)
            else:
                outputs = tracker.track(img)
                pred_bbox = outputs['bbox']
                pred_bboxes.append(pred_bbox)
                scores.append(outputs['best_score'])
            toc += cv2.getTickCount() - tic
            track_times.append((cv2.getTickCount() - tic)/cv2.getTickFrequency())
        toc /= cv2.getTickFrequency()
        print('Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(
            video_name, toc, idx / toc))
        return pred_bboxes, scores, track_times
153,274 | from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
import numpy as np
import torch
from toolkit.datasets import DatasetFactory
from toolkit.utils.region import vot_overlap, vot_float2str
from pysot.models.model_builder import ModelBuilder
from pysot.tracker.tracker_builder import build_tracker
from pysot.utils.bbox import get_axis_aligned_bbox
from pysot.utils.model_load import load_pretrain
from pysot.core.config import cfg
def _check_and_occupation(video_path, result_path):
    """Return True if `result_path` already exists (another worker claimed
    this video); otherwise create `video_path` if needed, write a
    placeholder result file to claim the slot, and return False.
    """
    if os.path.isfile(result_path):
        return True
    try:
        if not os.path.isdir(video_path):
            os.makedirs(video_path)
    except OSError as err:
        # best-effort: a concurrent worker may have created the directory
        print(err)
    with open(result_path, 'w') as f:
        f.write('Occ')
    return False
153,275 | import jsonlines
import json
import numpy as np
import os
from tqdm import tqdm
from langdetect import detect
import argparse
import random
def load_jsonlines(file):
    """Read a JSON-lines file and return its records as a list."""
    with jsonlines.open(file, 'r') as reader:
        return [record for record in reader]
def process_tulu_dataset(fp, single_turn_only=True, max_n=1000, dataset_name=False):
    """Load a tulu-style jsonl file and convert it to instruction/output records.

    Filters: multi-turn conversations (when single_turn_only), overly long
    instructions/outputs (> 500 whitespace tokens), empty outputs, and --
    for the sharegpt/oasst1 datasets -- non-English pairs. The result is
    subsampled to at most `max_n` records.

    BUG FIX: the original condition was
        `if dataset_name == "sharegpt" or "oasst1":`
    which is always truthy (the non-empty string "oasst1"), so language
    detection ran for EVERY dataset. It now tests membership properly.
    Also narrowed the bare `except` to Exception.
    """
    data = load_jsonlines(fp)
    processed_data = []
    for item in tqdm(data):
        messages = item["messages"]
        if single_turn_only is True and len(messages) > 2:
            continue
        # currently only support single turn
        instruction = messages[0]["content"].replace("\nOutput:", "")
        if len(instruction.split(" ")) > 500:
            continue
        # strip a single trailing blank line / newline
        if instruction[-2:] == "\n\n":
            instruction = instruction[:-2]
        if instruction[-1:] == "\n":
            instruction = instruction[:-1]
        output = messages[1]["content"]
        if len(output) == 0:
            continue
        if output[0] == "\n":
            output = output[1:]
        if len(output.split(" ")) > 500:
            continue
        if dataset_name in ("sharegpt", "oasst1"):
            try:
                if detect(instruction) != "en" or detect(output) != "en":
                    # print("multilingual input")
                    continue
            except Exception:
                # langdetect raises on non-text input; keep the record anyway
                print("non text input")
        example_id = item["id"]
        processed_data.append({"input": "", "instruction": instruction,
                               "output": output, "id": example_id,
                               "dataset_name": dataset_name})
    if max_n is not None and max_n < len(processed_data):
        processed_data = random.sample(processed_data, k=max_n)
    return processed_data
153,276 | import jsonlines
import argparse
import json
import os
import spacy
nlp = spacy.load("en_core_web_sm")
def split_sentences(paragraph):
    """Split `paragraph` into sentence strings using the module-level spaCy pipeline."""
    return [sent.text for sent in nlp(paragraph).sents]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.