id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
158,921 | from sqlalchemy import sql
from siuba.siu import FunctionLookupBound, FunctionLookupError
import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy.sql import ColumnCollection
from sqlalchemy.sql.elements import Over
# Over clause for plain aggregates (min, max, avg) used as window functions.
class AggOver(CustomOverClause):
"""Over clause for uses of functions min, max, avg, that return one value.
Note that this class does not set order by, which is how these functions
generally become their cumulative versions.
E.g. mean(x) -> AVG(x) OVER (partition_by <group vars>)
"""
# Attach the verb's grouping columns. order_by is accepted but ignored,
# since a plain aggregate needs no ordering.
def set_over(self, group_by, order_by = None):
self.partition_by = group_by
return self
# Factory: build a translator wrapping sql.func.<name>(col, ...) in this
# over clause. NOTE(review): takes `cls`, so presumably a @classmethod in
# the upstream source -- the decorator is not visible in this extract.
def func(cls, name):
sa_func = getattr(sql.func, name)
def f(codata, col, *args, **kwargs) -> AggOver:
return cls(sa_func(col, *args, **kwargs))
return f
from siuba.ops.translate import create_pandas_translator
from functools import singledispatch
The provided code snippet includes necessary dependencies for implementing the `sql_ordered_set` function. Write a Python function `def sql_ordered_set(name, is_analytic=False)` to solve the following problem:
Generate function for ordered and hypothetical set aggregates. Hypothetical-set aggregates take an argument, and return a value for each element of the argument. For example: rank(2) WITHIN GROUP (order by x). In this case, the hypothetical ranks 2 relative to x. Ordered set aggregates are like percentile_cont(.5) WITHIN GROUP (order by x), which calculates the median of x.
Here is the function:
def sql_ordered_set(name, is_analytic=False):
"""Generate function for ordered and hypothetical set aggregates.
Hypothetical-set aggregates take an argument, and return a value for each
element of the argument. For example: rank(2) WITHIN GROUP (order by x).
In this case, the hypothetical ranks 2 relative to x.
Ordered set aggregates are like percentile_cont(.5) WITHIN GROUP (order by x),
which calculates the median of x.
"""
# Resolve the SQL function lazily by name (e.g. percentile_cont, rank).
sa_func = getattr(sql.func, name)
if is_analytic:
# Analytic form: wrap in AggOver so the verb's group_by becomes the
# window PARTITION BY clause.
return lambda codata, col, *args: AggOver(
sa_func(*args).within_group(col)
)
# Plain aggregate form: <func>(*args) WITHIN GROUP (ORDER BY col).
return lambda codata, col, *args: sa_func(*args).within_group(col) | Generate function for ordered and hypothetical set aggregates. Hypothetical-set aggregates take an argument, and return a value for each element of the argument. For example: rank(2) WITHIN GROUP (order by x). In this case, the hypothetical ranks 2 relative to x. Ordered set aggregates are like percentil_cont(.5) WITHIN GROUP (order by x), which calculates the median of x. |
158,922 | from sqlalchemy import sql
from siuba.siu import FunctionLookupBound, FunctionLookupError
import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy.sql import ColumnCollection
from sqlalchemy.sql.elements import Over
from siuba.ops.translate import create_pandas_translator
from functools import singledispatch
def sql_not_impl(msg = ""):
return FunctionLookupBound(msg or "function not implemented") | null |
158,923 | from sqlalchemy import sql
from siuba.siu import FunctionLookupBound, FunctionLookupError
import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy.sql import ColumnCollection
from sqlalchemy.sql.elements import Over
from siuba.ops.translate import create_pandas_translator
from functools import singledispatch
def win_absent(name):
"""Return a FunctionLookupBound marking a window function as unsupported."""
# Return an error, that is picked up by the translator.
# this allows us to report errors at translation, rather than call time.
return FunctionLookupBound("SQL dialect does not support window function {}.".format(name)) | null |
158,924 | from sqlalchemy import sql
from siuba.siu import FunctionLookupBound, FunctionLookupError
import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy.sql import ColumnCollection
from sqlalchemy.sql.elements import Over
from siuba.ops.translate import create_pandas_translator
from functools import singledispatch
The provided code snippet includes necessary dependencies for implementing the `extend_base` function. Write a Python function `def extend_base(cls, **kwargs)` to solve the following problem:
Register concrete methods onto generic functions for pandas Series methods.
Here is the function:
def extend_base(cls, **kwargs):
"""Register concrete methods onto generic functions for pandas Series methods."""
# Imported here (not at module top) presumably to avoid a circular import.
from siuba.ops import ALL_OPS
# kwargs maps operation name -> implementation; register each on the
# singledispatch generic for the given column class.
for meth_name, f in kwargs.items():
ALL_OPS[meth_name].register(cls, f) | Register concrete methods onto generic functions for pandas Series methods. |
158,925 | from sqlalchemy import sql
from siuba.siu import FunctionLookupBound, FunctionLookupError
import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy.sql import ColumnCollection
from sqlalchemy.sql.elements import Over
from siuba.ops.translate import create_pandas_translator
from functools import singledispatch
# Wrap a plain Python value as a SQL literal (bind parameter).
def convert_literal(codata, lit):
return sql.literal(lit) | null |
158,926 | import warnings
from .translate import CustomOverClause, ColumnCollection
from .utils import (
get_dialect_translator,
_sql_column_collection,
_sql_add_columns,
)
from sqlalchemy import sql
import sqlalchemy
from siuba.siu import FunctionLookupError
from functools import singledispatch
# HTML repr helper for grouped data: wraps the selected frame's HTML output
# in a "(grouped data frame)" banner.
def _repr_grouped_df_html_(self):
return "<div><p>(grouped data frame)</p>" + self._selected_obj._repr_html_() + "</div>" | null |
158,927 | from siuba.dply.across import across, _get_name_template, _across_setup_fns, ctx_verb_data, ctx_verb_window
from siuba.dply.tidyselect import var_select, var_create
from siuba.siu import FormulaContext, Call, FormulaArg
from siuba.siu.calls import str_to_getitem_call
from siuba.siu.visitors import CallListener
from .backend import LazyTbl
from .utils import _sql_select, _sql_column_collection
from .translate import ColumnCollection
from sqlalchemy import sql
# Lazy representation of a SQL table plus a chain of pending operations.
class LazyTbl:
def __init__(
self, source, tbl, columns = None,
ops = None, group_by = tuple(), order_by = tuple(),
translator = None
):
"""Create a representation of a SQL table.
Args:
source: a sqlalchemy.Engine or sqlalchemy.Connection instance.
tbl: table of form 'schema_name.table_name', 'table_name', or sqlalchemy.Table.
columns: if specified, a listlike of column names.
Examples
--------
::
from sqlalchemy import create_engine
from siuba.data import mtcars
# create database and table
engine = create_engine("sqlite:///:memory:")
mtcars.to_sql('mtcars', engine)
tbl_mtcars = LazyTbl(engine, 'mtcars')
"""
# connection and dialect specific functions
# A connection string is accepted for convenience and turned into an engine.
self.source = sqlalchemy.create_engine(source) if isinstance(source, str) else source
# get dialect name
dialect = self.source.dialect.name
self.translator = get_dialect_translator(dialect)
self.tbl = self._create_table(tbl, columns, self.source)
# important states the query can be in (e.g. grouped)
# ops is the chain of selectables built up so far; starts at the raw table.
self.ops = [self.tbl] if ops is None else ops
self.group_by = group_by
self.order_by = order_by
# Return a copy with one more operation appended; never mutates self.
def append_op(self, op, **kwargs):
cpy = self.copy(**kwargs)
cpy.ops = cpy.ops + [op]
return cpy
# Shallow copy via __dict__, with keyword overrides.
def copy(self, **kwargs):
return self.__class__(**{**self.__dict__, **kwargs})
def shape_call(
self,
call, window = True, str_accessors = False,
verb_name = None, arg_name = None,
):
# Delegate call normalization to the dialect-specific translator.
return self.translator.shape_call(call, window, str_accessors, verb_name, arg_name)
def track_call_windows(self, call, columns = None, window_cte = None):
"""Returns tuple of (new column expression, list of window exprs)"""
from .verbs.arrange import _eval_arrange_args
# NOTE(review): last_op is read attribute-style here, so it is
# presumably a @property upstream -- decorator not visible in this extract.
columns = self.last_op.columns if columns is None else columns
order_by = _eval_arrange_args(self, self.order_by, columns)
return track_call_windows(call, columns, self.group_by, order_by, window_cte)
def get_ordered_col_names(self):
"""Return columns from current select, with grouping columns first."""
ungrouped = [k for k in self.last_op.columns.keys() if k not in self.group_by]
return list(self.group_by) + ungrouped
#def label_breaks_order_by(self, name):
# """Returns True if a new column label would break the order by vars."""
# # TODO: arrange currently allows literals, which breaks this. it seems
# # better to only allow calls in arrange.
# order_by_vars = {c.op_vars(attr_calls=False) for c in self.order_by}
# Most recent selectable in the op chain (presumably a @property upstream).
def last_op(self) -> "sql.Table | sql.Select":
last_op = self.ops[-1]
if last_op is None:
raise TypeError()
return last_op
# Promote the last op to a SELECT if it is still a raw table.
def last_select(self):
last_op = self.last_op
if not isinstance(last_op, sql.selectable.SelectBase):
return last_op.select()
return last_op
def _create_table(tbl, columns = None, source = None):
"""Return a sqlalchemy.Table, autoloading column info if needed.
Arguments:
tbl: a sqlalchemy.Table or string of form 'table_name' or 'schema_name.table_name'.
columns: a tuple of column names for the table. Overrides source argument.
source: a sqlalchemy engine, used to autoload columns.
"""
# Already a table-like object: use as-is.
if isinstance(tbl, sql.selectable.FromClause):
return tbl
if not isinstance(tbl, str):
raise ValueError("tbl must be a sqlalchemy Table or string, but was %s" %type(tbl))
if columns is None and source is None:
raise ValueError("One of columns or source must be specified")
schema, table_name = tbl.split('.') if '.' in tbl else [None, tbl]
# NOTE(review): map objects are always truthy, so `not columns` below is
# True only when columns was None (empty tuple). An explicit empty list
# would NOT trigger autoload -- confirm this edge case is intended.
columns = map(sqlalchemy.Column, columns) if columns is not None else tuple()
# TODO: pybigquery uses schema to mean project_id, so we cannot use
# siuba's classic breakdown "{schema}.{table_name}". Basically
# pybigquery uses "{schema=project_id}.{dataset_dot_table_name}" in its internal
# logic. An important side effect is that bigquery errors for
# `dataset`.`table`, but not `dataset.table`.
if source and source.dialect.name == "bigquery":
table_name = tbl
schema = None
return sqlalchemy.Table(
table_name,
sqlalchemy.MetaData(),
*columns,
schema = schema,
autoload_with = source if not columns else None
)
# Collect the first 5 rows of the current query for repr display.
def _get_preview(self):
# need to make prev op a cte, so we don't override any previous limit
from siuba.dply.verbs import collect
# NOTE(review): last_select is read attribute-style, so presumably a
# @property upstream -- decorator not visible in this extract.
new_sel = self.last_select.limit(5)
tbl_small = self.append_op(new_sel)
return collect(tbl_small)
def __repr__(self):
template = (
"# Source: lazy query\n"
"# DB Conn: {}\n"
"# Preview:\n{}\n"
"# .. may have more rows"
)
return template.format(repr(self.source.engine), repr(self._get_preview()))
def _repr_html_(self):
template = (
"<div>"
"<pre>"
"# Source: lazy query\n"
"# DB Conn: {}\n"
"# Preview:\n"
"</pre>"
"{}"
"<p># .. may have more rows</p>"
"</div>"
)
data = self._get_preview()
# _repr_html_ can not exist or return None, to signify that repr should be used
if not hasattr(data, '_repr_html_'):
return None
html_data = data._repr_html_()
if html_data is None:
return None
return template.format(self.source.engine, html_data)
def _across_lazy_tbl(__data: LazyTbl, cols, fns, names: "str | None" = None) -> LazyTbl:
raise NotImplementedError(
"across() cannot called directly on a LazyTbl. Please use it inside a verb, "
"like mutate(), summarize(), filter(), arrange(), group_by(), etc.."
)
#selectable = __data.last_op
#
#columns = selectable.alias().columns
#if not isinstance(columns, ImmutableColumnCollection):
# raise TypeError(str(type(columns)))
#res_cols = across(columns, cols, fns, names)
#return __data.append_op(_sql_select(res_cols)) | null |
158,928 | from siuba.dply.across import across, _get_name_template, _across_setup_fns, ctx_verb_data, ctx_verb_window
from siuba.dply.tidyselect import var_select, var_create
from siuba.siu import FormulaContext, Call, FormulaArg
from siuba.siu.calls import str_to_getitem_call
from siuba.siu.visitors import CallListener
from .backend import LazyTbl
from .utils import _sql_select, _sql_column_collection
from .translate import ColumnCollection
from sqlalchemy import sql
class ReplaceFx(CallListener):
def __init__(self, replacement):
self.replacement = replacement
def exit(self, node):
res = super().exit(node)
if isinstance(res, FormulaArg):
return str_to_getitem_call(self.replacement)
return res
# Context variables used to pass verb state (the backing data, and whether the
# translation is windowed) down into across() during evaluation.
ctx_verb_data = ContextVar("data")
ctx_verb_window = ContextVar("window")
def _across_setup_fns(fns) -> "dict[str, Callable[[FormulaContext], Any]]":
"""Normalize the `fns` argument of across() into a {name: Call} mapping."""
final_calls = {}
# lists/tuples are explicitly rejected: names would be ambiguous
if isinstance(fns, (list, tuple)):
raise NotImplementedError(
"Specifying functions as a list or tuple is not supported. "
"Please use a dictionary to define multiple functions to apply. \n\n"
"E.g. across(_[:], {'round': Fx.round(), 'round2': Fx.round() + 1})"
)
elif isinstance(fns, dict):
for name, fn_call_raw in fns.items():
# symbolics get stripped by default for arguments to verbs, but
# these are inside a dictionary, so need to strip manually.
fn_call = strip_symbolic(fn_call_raw)
if isinstance(fn_call, Call):
final_calls[name] = fn_call
elif callable(fn_call):
# bare callables are wrapped so they receive Fx (the column)
final_calls[name] = create_eager_pipe_call(FuncArg(fn_call), Fx)
else:
raise TypeError(
"All functions to be applied in across must be a siuba.siu.Call, "
f"but received a function of type {type(fn_call)}"
)
elif isinstance(fns, Call):
# a single call gets the default name "fn1"
final_calls["fn1"] = fns
elif callable(fns):
final_calls["fn1"] = create_eager_pipe_call(FuncArg(fns), Fx)
else:
raise NotImplementedError(f"Unsupported function type in across: {type(fns)}")
return final_calls
def _get_name_template(fns, names: "str | None") -> str:
if names is not None:
return names
if callable(fns):
return DEFAULT_SINGLE_FUNC_TEMPLATE
return DEFAULT_MULTI_FUNC_TEMPLATE
def var_select(colnames, *args, data=None):
"""Resolve tidyselect-style arguments into an ordered {name: rename} dict."""
# TODO: don't erase named column if included again
colnames = colnames if isinstance(colnames, pd.Series) else pd.Series(colnames)
cols = OrderedDict()
#flat_args = var_flatten(args)
all_vars = chain(*map(flatten_var, args))
# Add entries in pandas.rename style {"orig_name": "new_name"}
for ii, arg in enumerate(all_vars):
# strings are added directly
if isinstance(arg, str):
cols[arg] = None
# integers add colname at corresponding index
elif isinstance(arg, int):
cols[colnames.iloc[arg]] = None
# general var handling
elif isinstance(arg, Var):
# remove negated Vars, otherwise include them
if ii == 0 and arg.negated:
# if negation used as first arg apply an implicit everything
cols.update((k, None) for k in colnames)
# slicing can refer to single, or range of columns
if isinstance(arg.name, slice):
start, stop = var_slice(colnames, arg.name)
# NOTE(review): the inner loop reuses `ii`; harmless here since
# enumerate rebinds it next iteration, but worth renaming upstream.
for ii in range(start, stop):
var_put_cols(colnames[ii], arg, cols)
# method calls like endswith()
elif callable(arg.name):
# TODO: not sure if this is a good idea...
# basically proxies to pandas str methods (they must return bool array)
indx = arg.name(colnames.str)
var_put_cols(colnames[indx].tolist(), arg, cols)
#cols.update((x, None) for x in set(colnames[indx]) - set(cols))
elif isinstance(arg.name, int):
var_put_cols(colnames.iloc[arg.name], arg, cols)
else:
var_put_cols(arg.name, arg, cols)
elif callable(arg) and data is not None:
# TODO: call on the data
col_mask = colwise_eval(data, arg)
for name in colnames[col_mask]:
cols[name] = None
else:
raise Exception("variable must be either a string or Var instance")
return cols
def var_create(*args) -> "tuple[Var]":
vl = VarList()
all_vars = []
for arg in args:
if isinstance(arg, Call):
res = arg(vl)
if isinstance(res, VarList):
raise ValueError("Must select specific column. Did you pass `_` to select?")
all_vars.append(res)
elif isinstance(arg, Var):
all_vars.append(arg)
elif callable(arg):
all_vars.append(arg)
else:
all_vars.append(Var(arg))
return tuple(all_vars)
def _sql_column_collection(columns):
# This function largely handles the removal of ImmutableColumnCollection in
# sqlalchemy, in favor of ColumnCollection being immutable.
data = {col.key: col for col in columns}
if is_sqla_12() or is_sqla_13():
from sqlalchemy.sql.base import ImmutableColumnCollection
return ImmutableColumnCollection(data, columns)
elif is_sqla_14():
from sqlalchemy.sql.base import ColumnCollection
return ColumnCollection(list(data.items())).as_immutable()
else:
from sqlalchemy.sql.base import ColumnCollection
return ColumnCollection(list(data.items()))
def _across_sql_cols(
__data: ColumnCollection,
cols,
fns,
names: "str | None" = None
) -> ColumnCollection:
"""Apply across() over SQL columns, returning labeled result expressions."""
# verb state injected by the enclosing verb via context variables
lazy_tbl = ctx_verb_data.get()
window = ctx_verb_window.get()
column_names = list(__data.keys())
name_template = _get_name_template(fns, names)
selected_cols = var_select(column_names, *var_create(cols), data=__data)
fns_map = _across_setup_fns(fns)
results = []
# iterate over columns ----
for new_name, old_name in selected_cols.items():
if old_name is None:
old_name = new_name
# NOTE(review): crnt_col is currently unused; legacy of the
# commented-out FormulaContext approach below.
crnt_col = __data[old_name]
#context = FormulaContext(Fx=crnt_col, _=__data)
# iterate over functions ----
for fn_name, fn in fns_map.items():
fmt_pars = {"fn": fn_name, "col": new_name}
# substitute Fx with a getitem call for the current column
fn_replaced = ReplaceFx(old_name).enter(fn)
new_call = lazy_tbl.shape_call(
fn_replaced,
window,
verb_name="Across",
arg_name = f"function {fn_name} of {len(fns_map)}"
)
res, windows, _ = lazy_tbl.track_call_windows(new_call, __data)
#res = new_call(context)
res_name = name_template.format(**fmt_pars)
results.append(res.label(res_name))
return _sql_column_collection(results)
158,929 | import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy import sql
from ..translate import (
SqlColumn, SqlColumnAgg,
win_cumul, AggOver, CumlOver, RankOver, warn_arg_default, win_absent
)
from ..dialects.sqlite import SqliteColumn
from ..dialects.mysql import MysqlColumn
from ..dialects.bigquery import BigqueryColumn
from ..dialects.duckdb import DuckdbColumn
from siuba.dply.vector import (
#cumall, cumany, cummean,
desc,
dense_rank, percent_rank, min_rank, cume_dist,
row_number,
#ntile,
between,
coalesce,
lead, lag,
n,
n_distinct,
na_if,
#near,
nth, first, last
)
from ..translate import SiubaSqlRuntimeWarning
# Marker codata class for SQL (non-aggregate) column translations.
class SqlColumn(SqlBase): pass
def desc(x):
"""Return array sorted in descending order."""
return x.sort_values(ascending = False).reset_index(drop = True)
The provided code snippet includes necessary dependencies for implementing the `_desc_sql` function. Write a Python function `def _desc_sql(codata: SqlColumn, x: ClauseElement) -> ClauseElement` to solve the following problem:
Example: >>> print(desc(SqlColumn(), sql.column('a'))) a DESC
Here is the function:
def _desc_sql(codata: SqlColumn, x: ClauseElement) -> ClauseElement:
"""
Example:
>>> print(desc(SqlColumn(), sql.column('a')))
a DESC
"""
# Delegate to sqlalchemy's DESC modifier.
return x.desc() | Example: >>> print(desc(SqlColumn(), sql.column('a'))) a DESC |
158,930 | import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy import sql
from ..translate import (
SqlColumn, SqlColumnAgg,
win_cumul, AggOver, CumlOver, RankOver, warn_arg_default, win_absent
)
from ..dialects.sqlite import SqliteColumn
from ..dialects.mysql import MysqlColumn
from ..dialects.bigquery import BigqueryColumn
from ..dialects.duckdb import DuckdbColumn
from siuba.dply.vector import (
#cumall, cumany, cummean,
desc,
dense_rank, percent_rank, min_rank, cume_dist,
row_number,
#ntile,
between,
coalesce,
lead, lag,
n,
n_distinct,
na_if,
#near,
nth, first, last
)
from ..translate import SiubaSqlRuntimeWarning
def _sql_rank_over(rank_func, col, partition, nulls_last):
# partitioning ensures aggregates that use total length are correct,
# e.g. percent rank, cume_dist and friends, by separating NULLs into their
# own partition
over_clause = RankOver(
rank_func(),
order_by = col if not nulls_last else col.nullslast(),
partition_by = col.isnot(None) if partition else None
)
return sql.case({col.isnot(None): over_clause})
def warn_arg_default(func_name, arg_name, arg, correct):
warnings.warn(
"\n{func_name} sql translation defaults "
"{arg_name} to {arg}. To return identical result as pandas, use "
"{arg_name} = {correct}.\n\n"
"This warning only displays once per function".format(
func_name = func_name, arg_name = arg_name, arg = repr(arg), correct = repr(correct)
),
SiubaSqlRuntimeWarning
)
class RankOver(CustomOverClause):
"""Over clause for ranking functions.
Note that in python we might call rank(col), but in SQL the ranking column
is defined using order by.
E.g. rank(y) -> rank() OVER (partition by <group vars> order by y)
"""
# Merge the verb's grouping columns into any existing partition clause;
# order_by is accepted but ignored (ordering is set at construction).
def set_over(self, group_by, order_by = None):
crnt_partition = getattr(self.partition_by, 'clauses', tuple())
self.partition_by = sql.elements.ClauseList(*crnt_partition, *group_by.clauses)
return self
# Factory: translator for sql.func.<name>() ordered by the given column.
# NOTE(review): takes `cls`, so presumably a @classmethod upstream --
# decorator not visible in this extract.
def func(cls, name):
sa_func = getattr(sql.func, name)
def f(codata, col) -> RankOver:
return cls(sa_func(), order_by = col)
return f
def _sql_rank(func_name, partition = False, nulls_last = False):
# partition controls whether to make partition by NOT NULL
rank_func = getattr(sql.func, func_name)
def f(_, col, na_option = None) -> RankOver:
if na_option == "keep":
return _sql_rank_over(rank_func, col, partition, nulls_last)
warn_arg_default(func_name, 'na_option', None, "keep")
return RankOver(rank_func(), order_by = col)
return f | null |
158,931 | import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy import sql
from ..translate import (
SqlColumn, SqlColumnAgg,
win_cumul, AggOver, CumlOver, RankOver, warn_arg_default, win_absent
)
from ..dialects.sqlite import SqliteColumn
from ..dialects.mysql import MysqlColumn
from ..dialects.bigquery import BigqueryColumn
from ..dialects.duckdb import DuckdbColumn
from siuba.dply.vector import (
#cumall, cumany, cummean,
desc,
dense_rank, percent_rank, min_rank, cume_dist,
row_number,
#ntile,
between,
coalesce,
lead, lag,
n,
n_distinct,
na_if,
#near,
nth, first, last
)
from ..translate import SiubaSqlRuntimeWarning
# Marker codata class for SQL (non-aggregate) column translations.
class SqlColumn(SqlBase): pass
class CumlOver(CustomOverClause):
"""Over clause for cumulative versions of functions like sum, min, max.
Note that this class is also currently used for aggregates that might require
ordering, like nth, first, etc..
e.g. cumsum(x) -> SUM(x) OVER (partition by <group vars> order by <order vars>)
e.g. nth(0) -> NTH_VALUE(1) OVER (partition by <group vars> order by <order vars>)
"""
def set_over(self, group_by, order_by):
self.partition_by = group_by
# do not override order by if it was set by the user. this might happen
# in functions like nth, which gives the option to set it.
if self.order_by is None or not len(self.order_by):
if not len(order_by):
warnings.warn(
"No order by columns explicitly set in window function. SQL engine"
"does not guarantee a row ordering. Recommend using an arrange beforehand.",
RuntimeWarning
)
self.order_by = order_by
return self
def func(cls, name, rows=(None, 0)):
sa_func = getattr(sql.func, name)
def f(codata, col, *args, **kwargs) -> CumlOver:
return cls(sa_func(col, *args, **kwargs), rows = rows)
return f
def row_number(x):
"""Return the row number (position) for each value in x, beginning with 1.
Example:
>>> ser = pd.Series([7,8])
>>> row_number(ser)
0 1
1 2
dtype: int64
>>> row_number(pd.DataFrame({'a': ser}))
0 1
1 2
dtype: int64
>>> row_number(pd.Series([7,8], index = [3, 4]))
3 1
4 2
dtype: int64
"""
if isinstance(x, pd.DataFrame):
n = x.shape[0]
else:
n = len(x)
arr = np.arange(1, n + 1)
# could use single dispatch, but for now ensure output data type matches input
if isinstance(x, pd.Series):
return x._constructor(arr, x.index, fastpath = True)
return pd.Series(arr, x.index, fastpath = True)
The provided code snippet includes necessary dependencies for implementing the `_row_number_sql` function. Write a Python function `def _row_number_sql(codata: SqlColumn, col: ClauseElement) -> CumlOver` to solve the following problem:
Example: >>> print(row_number(SqlColumn(), sql.column('a'))) row_number() OVER ()
Here is the function:
def _row_number_sql(codata: SqlColumn, col: ClauseElement) -> CumlOver:
"""
Example:
>>> print(row_number(SqlColumn(), sql.column('a')))
row_number() OVER ()
"""
# row_number() takes no argument; ordering is attached later via set_over.
return CumlOver(sql.func.row_number()) | Example: >>> print(row_number(SqlColumn(), sql.column('a'))) row_number() OVER () |
158,932 | import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy import sql
from ..translate import (
SqlColumn, SqlColumnAgg,
win_cumul, AggOver, CumlOver, RankOver, warn_arg_default, win_absent
)
from ..dialects.sqlite import SqliteColumn
from ..dialects.mysql import MysqlColumn
from ..dialects.bigquery import BigqueryColumn
from ..dialects.duckdb import DuckdbColumn
from siuba.dply.vector import (
#cumall, cumany, cummean,
desc,
dense_rank, percent_rank, min_rank, cume_dist,
row_number,
#ntile,
between,
coalesce,
lead, lag,
n,
n_distinct,
na_if,
#near,
nth, first, last
)
from ..translate import SiubaSqlRuntimeWarning
# Marker codata class for SQL (non-aggregate) column translations.
class SqlColumn(SqlBase): pass
def between(x, left, right, default = False):
"""Return whether a value is between left and right (including either side).
Example:
>>> between(pd.Series([1,2,3]), 0, 2)
0 True
1 True
2 False
dtype: bool
Note:
This is a thin wrapper around pd.Series.between(left, right)
"""
# note: NA -> False, in tidyverse NA -> NA
if default is not False:
raise TypeError("between function must use default = False for pandas Series")
return x.between(left, right)
def coalesce(x, *args):
"""Returns a copy of x, with NaN values filled in from \*args. Ignores indexes.
Arguments:
x: a pandas Series object
*args: other Series that are the same length as x, or a scalar
Examples:
>>> x = pd.Series([1.1, None, None])
>>> abc = pd.Series(['a', 'b', None])
>>> xyz = pd.Series(['x', 'y', 'z'])
>>> coalesce(x, abc)
0 1.1
1 b
2 None
dtype: object
>>> coalesce(x, abc, xyz)
0 1.1
1 b
2 z
dtype: object
"""
crnt = x.reset_index(drop = True)
for other in args:
if isinstance(other, pd.Series):
other = other.reset_index(drop = True)
crnt = crnt.where(crnt.notna(), other)
crnt.index = x.index
return crnt
The provided code snippet includes necessary dependencies for implementing the `_between_sql` function. Write a Python function `def _between_sql(codata: SqlColumn, x, left, right, default = None) -> ClauseElement` to solve the following problem:
Example: >>> print(between(SqlColumn(), sql.column('a'), 1, 2)) a BETWEEN :a_1 AND :a_2 >>> print(between(SqlColumn(), sql.column('a'), 1, 2, default = False)) coalesce(a BETWEEN :a_1 AND :a_2, :coalesce_1)
Here is the function:
def _between_sql(codata: SqlColumn, x, left, right, default = None) -> ClauseElement:
"""
Example:
>>> print(between(SqlColumn(), sql.column('a'), 1, 2))
a BETWEEN :a_1 AND :a_2
>>> print(between(SqlColumn(), sql.column('a'), 1, 2, default = False))
coalesce(a BETWEEN :a_1 AND :a_2, :coalesce_1)
"""
if default is not False:
# TODO: warn
pass
if default is None:
# SQL BETWEEN yields NULL for NULL input; no default substitution here
return x.between(left, right)
# substitute the default for NULL results
return sql.functions.coalesce(x.between(left, right), default) | Example: >>> print(between(SqlColumn(), sql.column('a'), 1, 2)) a BETWEEN :a_1 AND :a_2 >>> print(between(SqlColumn(), sql.column('a'), 1, 2, default = False)) coalesce(a BETWEEN :a_1 AND :a_2, :coalesce_1) |
158,933 | import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy import sql
from ..translate import (
SqlColumn, SqlColumnAgg,
win_cumul, AggOver, CumlOver, RankOver, warn_arg_default, win_absent
)
from ..dialects.sqlite import SqliteColumn
from ..dialects.mysql import MysqlColumn
from ..dialects.bigquery import BigqueryColumn
from ..dialects.duckdb import DuckdbColumn
from siuba.dply.vector import (
#cumall, cumany, cummean,
desc,
dense_rank, percent_rank, min_rank, cume_dist,
row_number,
#ntile,
between,
coalesce,
lead, lag,
n,
n_distinct,
na_if,
#near,
nth, first, last
)
from ..translate import SiubaSqlRuntimeWarning
# Marker codata class for SQL (non-aggregate) column translations.
class SqlColumn(SqlBase): pass
def coalesce(x, *args):
"""Returns a copy of x, with NaN values filled in from \*args. Ignores indexes.
Arguments:
x: a pandas Series object
*args: other Series that are the same length as x, or a scalar
Examples:
>>> x = pd.Series([1.1, None, None])
>>> abc = pd.Series(['a', 'b', None])
>>> xyz = pd.Series(['x', 'y', 'z'])
>>> coalesce(x, abc)
0 1.1
1 b
2 None
dtype: object
>>> coalesce(x, abc, xyz)
0 1.1
1 b
2 z
dtype: object
"""
# align positionally (indexes ignored), fill NAs left to right
crnt = x.reset_index(drop = True)
for other in args:
if isinstance(other, pd.Series):
other = other.reset_index(drop = True)
crnt = crnt.where(crnt.notna(), other)
# restore the caller's index on the result
crnt.index = x.index
return crnt
The provided code snippet includes necessary dependencies for implementing the `_coalesce_sql` function. Write a Python function `def _coalesce_sql(codata: SqlColumn, x, *args) -> ClauseElement` to solve the following problem:
Example: >>> print(coalesce(SqlColumn(), sql.column('a'), sql.column('b'))) coalesce(a, b) >>> print(coalesce(SqlColumn(), 1, sql.column('a'))) coalesce(:coalesce_1, a)
Here is the function:
def _coalesce_sql(codata: SqlColumn, x, *args) -> ClauseElement:
"""
Example:
>>> print(coalesce(SqlColumn(), sql.column('a'), sql.column('b')))
coalesce(a, b)
>>> print(coalesce(SqlColumn(), 1, sql.column('a')))
coalesce(:coalesce_1, a)
"""
# Direct passthrough to SQL's variadic COALESCE.
return sql.functions.coalesce(x, *args) | Example: >>> print(coalesce(SqlColumn(), sql.column('a'), sql.column('b'))) coalesce(a, b) >>> print(coalesce(SqlColumn(), 1, sql.column('a'))) coalesce(:coalesce_1, a) |
158,934 | import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy import sql
from ..translate import (
SqlColumn, SqlColumnAgg,
win_cumul, AggOver, CumlOver, RankOver, warn_arg_default, win_absent
)
from ..dialects.sqlite import SqliteColumn
from ..dialects.mysql import MysqlColumn
from ..dialects.bigquery import BigqueryColumn
from ..dialects.duckdb import DuckdbColumn
from siuba.dply.vector import (
#cumall, cumany, cummean,
desc,
dense_rank, percent_rank, min_rank, cume_dist,
row_number,
#ntile,
between,
coalesce,
lead, lag,
n,
n_distinct,
na_if,
#near,
nth, first, last
)
from ..translate import SiubaSqlRuntimeWarning
# Marker codata class for SQL (non-aggregate) column translations.
class SqlColumn(SqlBase): pass
# alias: CumlOver.func builds cumulative window translators
win_cumul = CumlOver.func
The provided code snippet includes necessary dependencies for implementing the `_lead_sql` function. Write a Python function `def _lead_sql(codata: SqlColumn, x, n = 1, default = None) -> ClauseElement` to solve the following problem:
Example: >>> print(lead(SqlColumn(), sql.column('a'), 2, 99)) lead(a, :lead_1, :lead_2) OVER ()
Here is the function:
def _lead_sql(codata: SqlColumn, x, n = 1, default = None) -> ClauseElement:
"""
Example:
>>> print(lead(SqlColumn(), sql.column('a'), 2, 99))
lead(a, :lead_1, :lead_2) OVER ()
"""
# rows=None: lead needs no cumulative frame, only partition/order
f = win_cumul("lead", rows=None)
return f(codata, x, n, default) | Example: >>> print(lead(SqlColumn(), sql.column('a'), 2, 99)) lead(a, :lead_1, :lead_2) OVER () |
158,935 | import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy import sql
from ..translate import (
SqlColumn, SqlColumnAgg,
win_cumul, AggOver, CumlOver, RankOver, warn_arg_default, win_absent
)
from ..dialects.sqlite import SqliteColumn
from ..dialects.mysql import MysqlColumn
from ..dialects.bigquery import BigqueryColumn
from ..dialects.duckdb import DuckdbColumn
from siuba.dply.vector import (
#cumall, cumany, cummean,
desc,
dense_rank, percent_rank, min_rank, cume_dist,
row_number,
#ntile,
between,
coalesce,
lead, lag,
n,
n_distinct,
na_if,
#near,
nth, first, last
)
from ..translate import SiubaSqlRuntimeWarning
# Marker codata class for SQL (non-aggregate) column translations.
class SqlColumn(SqlBase): pass
# alias: CumlOver.func builds cumulative window translators
win_cumul = CumlOver.func
The provided code snippet includes necessary dependencies for implementing the `_lag_sql` function. Write a Python function `def _lag_sql(codata: SqlColumn, x, n = 1, default = None) -> ClauseElement` to solve the following problem:
Example: >>> print(lag(SqlColumn(), sql.column('a'), 2, 99)) lag(a, :lag_1, :lag_2) OVER ()
Here is the function:
def _lag_sql(codata: SqlColumn, x, n = 1, default = None) -> ClauseElement:
"""
Example:
>>> print(lag(SqlColumn(), sql.column('a'), 2, 99))
lag(a, :lag_1, :lag_2) OVER ()
"""
# rows=None: lag needs no cumulative frame, only partition/order
f = win_cumul("lag", rows=None)
return f(codata, x, n , default) | Example: >>> print(lag(SqlColumn(), sql.column('a'), 2, 99)) lag(a, :lag_1, :lag_2) OVER () |
158,936 | import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy import sql
from ..translate import (
SqlColumn, SqlColumnAgg,
win_cumul, AggOver, CumlOver, RankOver, warn_arg_default, win_absent
)
from ..dialects.sqlite import SqliteColumn
from ..dialects.mysql import MysqlColumn
from ..dialects.bigquery import BigqueryColumn
from ..dialects.duckdb import DuckdbColumn
from siuba.dply.vector import (
#cumall, cumany, cummean,
desc,
dense_rank, percent_rank, min_rank, cume_dist,
row_number,
#ntile,
between,
coalesce,
lead, lag,
n,
n_distinct,
na_if,
#near,
nth, first, last
)
from ..translate import SiubaSqlRuntimeWarning
class SqlColumn(SqlBase): pass
class AggOver(CustomOverClause):
"""Over clause for uses of functions min, max, avg, that return one value.
Note that this class does not set order by, which is how these functions
generally become their cumulative versions.
E.g. mean(x) -> AVG(x) OVER (partition_by <group vars>)
"""
def set_over(self, group_by, order_by = None):
self.partition_by = group_by
return self
def func(cls, name):
sa_func = getattr(sql.func, name)
def f(codata, col, *args, **kwargs) -> AggOver:
return cls(sa_func(col, *args, **kwargs))
return f
The provided code snippet includes necessary dependencies for implementing the `_n_sql` function. Write a Python function `def _n_sql(codata: SqlColumn, x) -> ClauseElement` to solve the following problem:
Example: >>> print(n(SqlColumn(), sql.column('a'))) count(*) OVER ()
Here is the function:
def _n_sql(codata: SqlColumn, x) -> ClauseElement:
"""
Example:
>>> print(n(SqlColumn(), sql.column('a')))
count(*) OVER ()
"""
return AggOver(sql.func.count()) | Example: >>> print(n(SqlColumn(), sql.column('a'))) count(*) OVER () |
158,937 | import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy import sql
from ..translate import (
SqlColumn, SqlColumnAgg,
win_cumul, AggOver, CumlOver, RankOver, warn_arg_default, win_absent
)
from ..dialects.sqlite import SqliteColumn
from ..dialects.mysql import MysqlColumn
from ..dialects.bigquery import BigqueryColumn
from ..dialects.duckdb import DuckdbColumn
from siuba.dply.vector import (
#cumall, cumany, cummean,
desc,
dense_rank, percent_rank, min_rank, cume_dist,
row_number,
#ntile,
between,
coalesce,
lead, lag,
n,
n_distinct,
na_if,
#near,
nth, first, last
)
from ..translate import SiubaSqlRuntimeWarning
class SqlColumnAgg(SqlBase): pass
The provided code snippet includes necessary dependencies for implementing the `_n_sql_agg` function. Write a Python function `def _n_sql_agg(codata: SqlColumnAgg, x) -> ClauseElement` to solve the following problem:
Example: >>> >> from siuba.sql.translate import SqlColumnAgg >> print(n(SqlColumnAgg(), None)) count(*)
Here is the function:
def _n_sql_agg(codata: SqlColumnAgg, x) -> ClauseElement:
"""
Example:
>>>
>> from siuba.sql.translate import SqlColumnAgg
>> print(n(SqlColumnAgg(), None))
count(*)
"""
return sql.func.count() | Example: >>> >> from siuba.sql.translate import SqlColumnAgg >> print(n(SqlColumnAgg(), None)) count(*) |
158,938 | import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy import sql
from ..translate import (
SqlColumn, SqlColumnAgg,
win_cumul, AggOver, CumlOver, RankOver, warn_arg_default, win_absent
)
from ..dialects.sqlite import SqliteColumn
from ..dialects.mysql import MysqlColumn
from ..dialects.bigquery import BigqueryColumn
from ..dialects.duckdb import DuckdbColumn
from siuba.dply.vector import (
#cumall, cumany, cummean,
desc,
dense_rank, percent_rank, min_rank, cume_dist,
row_number,
#ntile,
between,
coalesce,
lead, lag,
n,
n_distinct,
na_if,
#near,
nth, first, last
)
from ..translate import SiubaSqlRuntimeWarning
class SqlColumn(SqlBase): pass
The provided code snippet includes necessary dependencies for implementing the `_n_distinct_sql` function. Write a Python function `def _n_distinct_sql(codata: SqlColumn, x: ClauseElement) -> ClauseElement` to solve the following problem:
Example: >>> print(n_distinct(SqlColumn(), sql.column('a')) ) count(distinct(a))
Here is the function:
def _n_distinct_sql(codata: SqlColumn, x: ClauseElement) -> ClauseElement:
"""
Example:
>>> print(n_distinct(SqlColumn(), sql.column('a')) )
count(distinct(a))
"""
return sql.func.count(sql.func.distinct(x)) | Example: >>> print(n_distinct(SqlColumn(), sql.column('a')) ) count(distinct(a)) |
158,939 | import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy import sql
from ..translate import (
SqlColumn, SqlColumnAgg,
win_cumul, AggOver, CumlOver, RankOver, warn_arg_default, win_absent
)
from ..dialects.sqlite import SqliteColumn
from ..dialects.mysql import MysqlColumn
from ..dialects.bigquery import BigqueryColumn
from ..dialects.duckdb import DuckdbColumn
from siuba.dply.vector import (
#cumall, cumany, cummean,
desc,
dense_rank, percent_rank, min_rank, cume_dist,
row_number,
#ntile,
between,
coalesce,
lead, lag,
n,
n_distinct,
na_if,
#near,
nth, first, last
)
from ..translate import SiubaSqlRuntimeWarning
class SqlColumn(SqlBase): pass
The provided code snippet includes necessary dependencies for implementing the `_na_if_sql` function. Write a Python function `def _na_if_sql(codata: SqlColumn, x, y) -> ClauseElement` to solve the following problem:
Example: >>> print(na_if(SqlColumn(), sql.column('x'), 2)) nullif(x, :nullif_1)
Here is the function:
def _na_if_sql(codata: SqlColumn, x, y) -> ClauseElement:
"""
Example:
>>> print(na_if(SqlColumn(), sql.column('x'), 2))
nullif(x, :nullif_1)
"""
return sql.func.nullif(x, y) | Example: >>> print(na_if(SqlColumn(), sql.column('x'), 2)) nullif(x, :nullif_1) |
158,940 | import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy import sql
from ..translate import (
SqlColumn, SqlColumnAgg,
win_cumul, AggOver, CumlOver, RankOver, warn_arg_default, win_absent
)
from ..dialects.sqlite import SqliteColumn
from ..dialects.mysql import MysqlColumn
from ..dialects.bigquery import BigqueryColumn
from ..dialects.duckdb import DuckdbColumn
from siuba.dply.vector import (
#cumall, cumany, cummean,
desc,
dense_rank, percent_rank, min_rank, cume_dist,
row_number,
#ntile,
between,
coalesce,
lead, lag,
n,
n_distinct,
na_if,
#near,
nth, first, last
)
from ..translate import SiubaSqlRuntimeWarning
class SqlColumn(SqlBase):
class CumlOver(CustomOverClause):
def set_over(self, group_by, order_by):
def func(cls, name, rows=(None, 0)):
def f(codata, col, *args, **kwargs) -> CumlOver:
def desc(x):
def _nth_sql(codata: SqlColumn, x, n, order_by = None, default = None) -> ClauseElement:
if default is not None:
raise NotImplementedError("default argument not implemented")
if n < 0 and order_by is None:
raise NotImplementedError(
"must explicitly pass order_by when using last or nth with "
"n < 0 in SQL."
)
if n < 0:
# e.g. -1 in python is 0, -2 is 1.
n = abs(n + 1)
order_by = order_by.desc()
# note the adjustment for 1-based index in SQL
return CumlOver(
sql.func.nth_value(x, n + 1),
order_by = order_by,
rows = (None, None)
) | null |
158,941 | import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy import sql
from ..translate import (
SqlColumn, SqlColumnAgg,
win_cumul, AggOver, CumlOver, RankOver, warn_arg_default, win_absent
)
from ..dialects.sqlite import SqliteColumn
from ..dialects.mysql import MysqlColumn
from ..dialects.bigquery import BigqueryColumn
from ..dialects.duckdb import DuckdbColumn
from siuba.dply.vector import (
#cumall, cumany, cummean,
desc,
dense_rank, percent_rank, min_rank, cume_dist,
row_number,
#ntile,
between,
coalesce,
lead, lag,
n,
n_distinct,
na_if,
#near,
nth, first, last
)
from ..translate import SiubaSqlRuntimeWarning
class SqlColumnAgg(SqlBase): pass
def _nth_sql_agg(codata: SqlColumnAgg, x, n, order_by = None, default = None) -> ClauseElement:
raise NotImplementedError("nth, first, and last not available in summarize") | null |
158,942 | from siuba.dply.string import str_c
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy import sql
The provided code snippet includes necessary dependencies for implementing the `_str_c_sql` function. Write a Python function `def _str_c_sql(x, *args, sep = "", collapse = None) -> ClauseElement` to solve the following problem:
Example:
Here is the function:
def _str_c_sql(x, *args, sep = "", collapse = None) -> ClauseElement:
"""
Example:
"""
if collapse is not None:
raise NotImplementedError("For SQL, collapse argument of str_c not supported")
if sep != "":
raise NotImplementedError('For SQL, sep argument of str_c must be ""')
return sql.func.concat(x, *args) | Example: |
158,943 | from siuba.siu import symbolic_dispatch
from types import SimpleNamespace
def not_implemented(f):
def _raise_not_impl(*args, **kwargs) -> NotImplementedError:
raise NotImplementedError("Method %s not implemented" %f.__name__)
return symbolic_dispatch(_raise_not_impl) | null |
158,944 | from siuba.siu import symbolic_dispatch
from types import SimpleNamespace
def register(namespace, cls, **kwargs):
def operation(name, *args):
def _default_pd_series(__op, self, *args, **kwargs):
def _register_series_default(generic):
import pandas as pd
from functools import partial
generic.register(pd.Series, partial(_default_pd_series, generic.operation)) | null |
158,945 | import json
import yaml
import pkg_resources
import pandas as pd
from siuba.ops import ALL_OPS
from siuba.siu import FunctionLookupBound
from siuba.sql.utils import get_dialect_translator
def read_sql_op(name, backend, translator):
# TODO: MC-NOTE - cleanup this code
from siuba.siu.visitors import CodataVisitor, FunctionLookupError
from siuba.ops.utils import Operation
co_win = CodataVisitor(translator.window.dispatch_cls)
co_agg = CodataVisitor(translator.aggregate.dispatch_cls)
disp_win = translator.window.local[name]
disp_agg = translator.aggregate.local[name]
try:
f_win = co_win.validate_dispatcher(disp_win, strict=False)
if isinstance(f_win, FunctionLookupBound):
win_supported = False
elif disp_win.dispatch(object) is f_win:
win_supported = False
else:
win_supported = True
except FunctionLookupError:
f_win = None
win_supported = False
try:
f_agg = co_agg.validate_dispatcher(disp_agg)
if isinstance(f_agg, FunctionLookupBound):
agg_supported = False
else:
agg_supported = True
except FunctionLookupError:
agg_supported = False
# window functions should be a superset of agg functions
if f_win is None and agg_supported:
raise Exception("agg functions in %s without window funcs: %s" %(backend, name))
if win_supported and not agg_supported:
flags = "no_aggregate"
elif agg_supported and not win_supported:
flags = "no_mutate"
else:
flags = ""
if win_supported or agg_supported:
metadata = getattr(f_win, "operation", {})
if isinstance(metadata, Operation):
metadata = {**vars(metadata)}
meta = {"is_supported": True, "flags": flags, **metadata}
else:
meta = {"is_supported": False, "flags": flags}
return {"full_name": name, "backend": backend, "metadata": meta}
def get_dialect_translator(name):
mod = importlib.import_module('siuba.sql.dialects.{}'.format(name))
return mod.translator
def read_dialect(name):
translator = get_dialect_translator(name)
support = []
for k in ALL_OPS:
support.append(read_sql_op(k, name, translator))
df_support = pd.DataFrame(support)
return df_support | null |
158,946 | import json
import yaml
import pkg_resources
import pandas as pd
from siuba.ops import ALL_OPS
from siuba.siu import FunctionLookupBound
from siuba.sql.utils import get_dialect_translator
def read_pandas_ops():
from siuba.experimental.pd_groups.groupby import SeriesGroupBy
all_meta = []
for k, v in ALL_OPS.items():
has_impl = not isinstance(v.dispatch(SeriesGroupBy), FunctionLookupBound)
all_meta.append({
"full_name": k,
"metadata": dict(is_supported = has_impl),
"backend": "pandas"}
)
return pd.DataFrame(all_meta) | null |
158,947 | import json
import yaml
import pkg_resources
import pandas as pd
from siuba.ops import ALL_OPS
from siuba.siu import FunctionLookupBound
from siuba.sql.utils import get_dialect_translator
def replace_meta_args(call, *args):
_ = Symbolic()
def enrich_spec_entry(entry):
from siuba.siu import _, strip_symbolic
accessors = ['str', 'dt', 'cat', 'sparse']
expr = strip_symbolic(eval(entry["example"], {"_": _}))
accessor = [ameth for ameth in accessors if ameth in expr.op_vars()] + [None]
tmp = {
**entry,
'is_property': expr.func == "__getattr__",
'expr_frame': replace_meta_args(expr, _.x, _.y, _.z),
'expr_series': expr,
'accessor': accessor[0],
}
return tmp | null |
158,948 | import json
import yaml
import pkg_resources
import pandas as pd
from siuba.ops import ALL_OPS
from siuba.siu import FunctionLookupBound
from siuba.sql.utils import get_dialect_translator
def set_default_support(d):
if d.get("is_supported"):
return {**d, "support": "supported"}
return {**d, "is_supported": False, "support": "maydo"} | null |
158,949 | from .generics import ALL_OPS, ALL_ACCESSORS, ALL_PROPERTIES
from siuba.siu import CallTreeLocal
ALL_ACCESSORS = set()
ALL_PROPERTIES = set()
def create_pandas_translator(local, dispatch_cls, result_cls):
return CallTreeLocal(
local,
call_sub_attr = tuple(ALL_ACCESSORS),
chain_sub_attr = True,
dispatch_cls = dispatch_cls,
result_cls = result_cls,
call_props = tuple(ALL_PROPERTIES)
) | null |
158,950 | import pandas as pd
from siuba.siu import Call, MetaArg, BinaryOp
from collections import OrderedDict
from itertools import chain
from functools import singledispatch
from typing import List
def _colwise_eval_pd(data: pd.DataFrame, predicate) -> List[bool]:
mask = []
for col_name in data:
res = predicate(data.loc[:, col_name])
if not pd.api.types.is_bool(res):
raise TypeError("TODO")
mask.append(res)
return mask | null |
158,951 | import pandas as pd
import numpy as np
from siuba.siu import symbolic_dispatch
from collections import defaultdict
def _get_cat_order(x):
if isinstance(x, pd.Series):
arr = x.array
else:
arr = x
if isinstance(arr, pd.Categorical):
return arr.ordered
return None
def fct_inorder(fct, ordered=None):
"""Return a copy of fct, with categories ordered by when they first appear.
Parameters
----------
fct : list-like
A pandas Series, Categorical, or list-like object
ordered : bool
Whether to return an ordered categorical. By default a Categorical inputs'
ordered setting is respected. Use this to override it.
See Also
--------
fct_infreq : Order categories by value frequency count.
Examples
--------
>>> fct = pd.Categorical(["c", "a", "b"])
>>> fct
['c', 'a', 'b']
Categories (3, object): ['a', 'b', 'c']
Note that above the categories are sorted alphabetically. Use fct_inorder
to keep the categories in first-observed order.
>>> fct_inorder(fct)
['c', 'a', 'b']
Categories (3, object): ['c', 'a', 'b']
fct_inorder also accepts pd.Series and list objects:
>>> fct_inorder(["z", "a"])
['z', 'a']
Categories (2, object): ['z', 'a']
By default, the ordered setting of categoricals is respected. Use the ordered
parameter to override it.
>>> fct2 = pd.Categorical(["z", "a", "b"], ordered=True)
>>> fct_inorder(fct2)
['z', 'a', 'b']
Categories (3, object): ['z' < 'a' < 'b']
>>> fct_inorder(fct2, ordered=False)
['z', 'a', 'b']
Categories (3, object): ['z', 'a', 'b']
"""
if ordered is None:
ordered = _get_cat_order(fct)
if isinstance(fct, (pd.Series, pd.Categorical)):
uniq = fct.dropna().unique()
if isinstance(uniq, pd.Categorical):
# the result of .unique for a categorical is a new categorical
# unsurprisingly, it also sorts the categories, so reorder manually
# (note that this also applies to Series[Categorical].unique())
categories = uniq.categories[uniq.dropna().codes]
return pd.Categorical(fct, categories, ordered=ordered)
# series in, so series out
cat = pd.Categorical(fct, uniq, ordered=ordered)
return pd.Series(cat)
ser = pd.Series(fct)
return pd.Categorical(fct, categories = ser.dropna().unique(), ordered=ordered)
The provided code snippet includes necessary dependencies for implementing the `fct_infreq` function. Write a Python function `def fct_infreq(fct, ordered=None)` to solve the following problem:
Return a copy of fct, with categories ordered by frequency (largest first) Parameters ---------- fct : list-like A pandas Series, Categorical, or list-like object ordered : bool Whether to return an ordered categorical. By default a Categorical inputs' ordered setting is respected. Use this to override it. See Also -------- fct_inorder : Order categories by when they're first observed. Examples -------- >>> fct_infreq(["c", "a", "c", "c", "a", "b"]) ['c', 'a', 'c', 'c', 'a', 'b'] Categories (3, object): ['c', 'a', 'b']
Here is the function:
def fct_infreq(fct, ordered=None):
"""Return a copy of fct, with categories ordered by frequency (largest first)
Parameters
----------
fct : list-like
A pandas Series, Categorical, or list-like object
ordered : bool
Whether to return an ordered categorical. By default a Categorical inputs'
ordered setting is respected. Use this to override it.
See Also
--------
fct_inorder : Order categories by when they're first observed.
Examples
--------
>>> fct_infreq(["c", "a", "c", "c", "a", "b"])
['c', 'a', 'c', 'c', 'a', 'b']
Categories (3, object): ['c', 'a', 'b']
"""
if ordered is None:
ordered = _get_cat_order(fct)
# sort and create new categorical ----
if isinstance(fct, pd.Categorical):
# Categorical value counts are sorted in categories order
# So to acheive the exact same result as the Series case below,
# we need to use fct_inorder, so categories is in first-observed order.
# This orders the final result by frequency, and then observed for ties.
freq = fct_inorder(fct).value_counts().sort_values(ascending=False)
# note that freq is a Series, but it has a CategoricalIndex.
# we want the index values as shown, so we need to strip them out of
# this nightmare index situation.
categories = freq.index.categories[freq.index.dropna().codes]
return pd.Categorical(fct, categories=categories, ordered=ordered)
else:
# Series sorts in descending frequency order
ser = pd.Series(fct) if not isinstance(fct, pd.Series) else fct
freq = ser.value_counts()
cat = pd.Categorical(ser, categories=freq.index, ordered=ordered)
if isinstance(fct, pd.Series):
return pd.Series(cat)
return cat | Return a copy of fct, with categories ordered by frequency (largest first) Parameters ---------- fct : list-like A pandas Series, Categorical, or list-like object ordered : bool Whether to return an ordered categorical. By default a Categorical inputs' ordered setting is respected. Use this to override it. See Also -------- fct_inorder : Order categories by when they're first observed. Examples -------- >>> fct_infreq(["c", "a", "c", "c", "a", "b"]) ['c', 'a', 'c', 'c', 'a', 'b'] Categories (3, object): ['c', 'a', 'b'] |
158,952 | import pandas as pd
import numpy as np
from siuba.siu import symbolic_dispatch
from collections import defaultdict
def _maybe_upcast(fct_in, fct_out):
if isinstance(fct_in, pd.Series):
return pd.Series(fct_out)
return fct_out
The provided code snippet includes necessary dependencies for implementing the `fct_reorder` function. Write a Python function `def fct_reorder(fct, x, func = np.median, desc = False) -> pd.Categorical` to solve the following problem:
Return copy of fct, with categories reordered according to values in x. Parameters ---------- fct : A pandas.Categorical, or array(-like) used to create one. x : Values used to reorder categorical. Must be same length as fct. func : Function run over all values within a level of the categorical. desc : Whether to sort in descending order. Notes ----- NaN categories can't be ordered. When func returns NaN, sorting is always done with NaNs last. Examples -------- >>> fct_reorder(['a', 'a', 'b'], [4, 3, 2]) ['a', 'a', 'b'] Categories (2, object): ['b', 'a'] >>> fct_reorder(['a', 'a', 'b'], [4, 3, 2], desc = True) ['a', 'a', 'b'] Categories (2, object): ['a', 'b'] >>> fct_reorder(['x', 'x', 'y'], [4, 0, 2], np.max) ['x', 'x', 'y'] Categories (2, object): ['y', 'x']
Here is the function:
def fct_reorder(fct, x, func = np.median, desc = False) -> pd.Categorical:
"""Return copy of fct, with categories reordered according to values in x.
Parameters
----------
fct :
A pandas.Categorical, or array(-like) used to create one.
x :
Values used to reorder categorical. Must be same length as fct.
func :
Function run over all values within a level of the categorical.
desc :
Whether to sort in descending order.
Notes
-----
NaN categories can't be ordered. When func returns NaN, sorting
is always done with NaNs last.
Examples
--------
>>> fct_reorder(['a', 'a', 'b'], [4, 3, 2])
['a', 'a', 'b']
Categories (2, object): ['b', 'a']
>>> fct_reorder(['a', 'a', 'b'], [4, 3, 2], desc = True)
['a', 'a', 'b']
Categories (2, object): ['a', 'b']
>>> fct_reorder(['x', 'x', 'y'], [4, 0, 2], np.max)
['x', 'x', 'y']
Categories (2, object): ['y', 'x']
"""
x_vals = x.values if isinstance(x, pd.Series) else x
s = pd.Series(x_vals, index = fct)
# sort groups by calculated agg func. note that groupby uses dropna=True by default,
# but that's okay, since pandas categoricals can't order the NA category
ordered = s.groupby(level = 0).agg(func).sort_values(ascending = not desc)
out = pd.Categorical(fct, categories=ordered.index)
return _maybe_upcast(fct, out) | Return copy of fct, with categories reordered according to values in x. Parameters ---------- fct : A pandas.Categorical, or array(-like) used to create one. x : Values used to reorder categorical. Must be same length as fct. func : Function run over all values within a level of the categorical. desc : Whether to sort in descending order. Notes ----- NaN categories can't be ordered. When func returns NaN, sorting is always done with NaNs last. Examples -------- >>> fct_reorder(['a', 'a', 'b'], [4, 3, 2]) ['a', 'a', 'b'] Categories (2, object): ['b', 'a'] >>> fct_reorder(['a', 'a', 'b'], [4, 3, 2], desc = True) ['a', 'a', 'b'] Categories (2, object): ['a', 'b'] >>> fct_reorder(['x', 'x', 'y'], [4, 0, 2], np.max) ['x', 'x', 'y'] Categories (2, object): ['y', 'x'] |
158,953 | import pandas as pd
import numpy as np
from siuba.siu import symbolic_dispatch
from collections import defaultdict
def _maybe_upcast(fct_in, fct_out):
if isinstance(fct_in, pd.Series):
return pd.Series(fct_out)
return fct_out
def fct_collapse(fct, recat, group_other = None) -> pd.Categorical:
"""Return copy of fct with categories renamed. Optionally group all others.
Parameters
----------
fct :
A pandas.Categorical, or array(-like) used to create one.
recat :
Dictionary of form {new_cat_name: old_cat_name}. old_cat_name may be
a list of existing categories, to be given the same name.
group_other :
An optional string, specifying what all other categories should be named.
This will always be the last category level in the result.
Notes
-----
Resulting levels index is ordered according to the earliest level replaced.
If we rename the first and last levels to "c", then "c" is the first level.
Examples
--------
>>> fct_collapse(['a', 'b', 'c'], {'x': 'a'})
['x', 'b', 'c']
Categories (3, object): ['x', 'b', 'c']
>>> fct_collapse(['a', 'b', 'c'], {'x': 'a'}, group_other = 'others')
['x', 'others', 'others']
Categories (2, object): ['x', 'others']
>>> fct_collapse(['a', 'b', 'c'], {'ab': ['a', 'b']})
['ab', 'ab', 'c']
Categories (2, object): ['ab', 'c']
>>> fct_collapse(['a', 'b', None], {'a': ['b']})
['a', 'a', NaN]
Categories (1, object): ['a']
"""
if not isinstance(fct, pd.Categorical):
new_fct = pd.Categorical(fct)
else:
new_fct = fct
# each existing cat will map to a new one ----
# need to know existing to new cat
# need to know new cat to new code
cat_to_new = {k: None for k in new_fct.categories}
for new_name, v in recat.items():
v = [v] if not np.ndim(v) else v
for old_name in v:
if cat_to_new[old_name] is not None:
raise Exception("category %s was already re-assigned"%old_name)
cat_to_new[old_name] = new_name
# collapse all unspecified cats to group_other if specified ----
for k, v in cat_to_new.items():
if v is None:
if group_other is not None:
cat_to_new[k] = group_other
else:
cat_to_new[k] = k
# map from old cat to new code ----
# calculate new codes
ordered_cats = {new: True for old, new in cat_to_new.items()}
# move the other group to last in the ordered set
if group_other is not None:
try:
del ordered_cats[group_other]
ordered_cats[group_other] = True
except KeyError:
pass
# map new category name to code
new_cat_set = {k: ii for ii, k in enumerate(ordered_cats)}
# at this point, we need remap codes to the other category
# make an array, where the index is old code + 1 (so missing val index is 0)
old_code_to_new = np.array(
[-1] + [new_cat_set[new_cat] for new_cat in cat_to_new.values()]
)
# map old cats to new codes
#remap_code = {old: new_cat_set[new] for old, new in cat_to_new.items()}
new_codes = old_code_to_new[new_fct.codes + 1]
new_cats = list(new_cat_set)
out = pd.Categorical.from_codes(new_codes, new_cats)
return _maybe_upcast(fct, out)
The provided code snippet includes necessary dependencies for implementing the `fct_recode` function. Write a Python function `def fct_recode(fct, recat=None, **kwargs) -> pd.Categorical` to solve the following problem:
Return copy of fct with renamed categories. Parameters ---------- fct : A pandas.Categorical, or array(-like) used to create one. **kwargs : Arguments of form new_name = old_name. Examples -------- >>> cat = ['a', 'b', 'c'] >>> fct_recode(cat, z = 'c') ['a', 'b', 'z'] Categories (3, object): ['a', 'b', 'z'] >>> fct_recode(cat, x = ['a', 'b']) ['x', 'x', 'c'] Categories (2, object): ['x', 'c'] >>> fct_recode(cat, {"x": ['a', 'b']}) ['x', 'x', 'c'] Categories (2, object): ['x', 'c']
Here is the function:
def fct_recode(fct, recat=None, **kwargs) -> pd.Categorical:
"""Return copy of fct with renamed categories.
Parameters
----------
fct :
A pandas.Categorical, or array(-like) used to create one.
**kwargs :
Arguments of form new_name = old_name.
Examples
--------
>>> cat = ['a', 'b', 'c']
>>> fct_recode(cat, z = 'c')
['a', 'b', 'z']
Categories (3, object): ['a', 'b', 'z']
>>> fct_recode(cat, x = ['a', 'b'])
['x', 'x', 'c']
Categories (2, object): ['x', 'c']
>>> fct_recode(cat, {"x": ['a', 'b']})
['x', 'x', 'c']
Categories (2, object): ['x', 'c']
"""
if recat and not isinstance(recat, dict):
raise TypeError("fct_recode requires named args or a dict.")
if recat and kwargs:
duplicate_keys = set(recat).intersection(set(kwargs))
if duplicate_keys:
raise ValueError(
"The following recode name(s) were specified more than once: {}" \
.format(duplicate_keys)
)
new_cats = {**recat, **kwargs} if recat else kwargs
return _maybe_upcast(fct, fct_collapse(fct, new_cats)) | Return copy of fct with renamed categories. Parameters ---------- fct : A pandas.Categorical, or array(-like) used to create one. **kwargs : Arguments of form new_name = old_name. Examples -------- >>> cat = ['a', 'b', 'c'] >>> fct_recode(cat, z = 'c') ['a', 'b', 'z'] Categories (3, object): ['a', 'b', 'z'] >>> fct_recode(cat, x = ['a', 'b']) ['x', 'x', 'c'] Categories (2, object): ['x', 'c'] >>> fct_recode(cat, {"x": ['a', 'b']}) ['x', 'x', 'c'] Categories (2, object): ['x', 'c'] |
158,954 | import pandas as pd
import numpy as np
from siuba.siu import symbolic_dispatch
from collections import defaultdict
def _maybe_upcast(fct_in, fct_out):
if isinstance(fct_in, pd.Series):
return pd.Series(fct_out)
return fct_out
def fct_collapse(fct, recat, group_other = None) -> pd.Categorical:
"""Return copy of fct with categories renamed. Optionally group all others.
Parameters
----------
fct :
A pandas.Categorical, or array(-like) used to create one.
recat :
Dictionary of form {new_cat_name: old_cat_name}. old_cat_name may be
a list of existing categories, to be given the same name.
group_other :
An optional string, specifying what all other categories should be named.
This will always be the last category level in the result.
Notes
-----
Resulting levels index is ordered according to the earliest level replaced.
If we rename the first and last levels to "c", then "c" is the first level.
Examples
--------
>>> fct_collapse(['a', 'b', 'c'], {'x': 'a'})
['x', 'b', 'c']
Categories (3, object): ['x', 'b', 'c']
>>> fct_collapse(['a', 'b', 'c'], {'x': 'a'}, group_other = 'others')
['x', 'others', 'others']
Categories (2, object): ['x', 'others']
>>> fct_collapse(['a', 'b', 'c'], {'ab': ['a', 'b']})
['ab', 'ab', 'c']
Categories (2, object): ['ab', 'c']
>>> fct_collapse(['a', 'b', None], {'a': ['b']})
['a', 'a', NaN]
Categories (1, object): ['a']
"""
if not isinstance(fct, pd.Categorical):
new_fct = pd.Categorical(fct)
else:
new_fct = fct
# each existing cat will map to a new one ----
# need to know existing to new cat
# need to know new cat to new code
cat_to_new = {k: None for k in new_fct.categories}
for new_name, v in recat.items():
v = [v] if not np.ndim(v) else v
for old_name in v:
if cat_to_new[old_name] is not None:
raise Exception("category %s was already re-assigned"%old_name)
cat_to_new[old_name] = new_name
# collapse all unspecified cats to group_other if specified ----
for k, v in cat_to_new.items():
if v is None:
if group_other is not None:
cat_to_new[k] = group_other
else:
cat_to_new[k] = k
# map from old cat to new code ----
# calculate new codes
ordered_cats = {new: True for old, new in cat_to_new.items()}
# move the other group to last in the ordered set
if group_other is not None:
try:
del ordered_cats[group_other]
ordered_cats[group_other] = True
except KeyError:
pass
# map new category name to code
new_cat_set = {k: ii for ii, k in enumerate(ordered_cats)}
# at this point, we need remap codes to the other category
# make an array, where the index is old code + 1 (so missing val index is 0)
old_code_to_new = np.array(
[-1] + [new_cat_set[new_cat] for new_cat in cat_to_new.values()]
)
# map old cats to new codes
#remap_code = {old: new_cat_set[new] for old, new in cat_to_new.items()}
new_codes = old_code_to_new[new_fct.codes + 1]
new_cats = list(new_cat_set)
out = pd.Categorical.from_codes(new_codes, new_cats)
return _maybe_upcast(fct, out)
def _fct_lump_n_cats(fct, w, other_level, ties, n = None, prop = None):
# TODO: currently always selects n, even if ties
# weights might be a Series, or array, etc..
arr = _get_values(w) if w is not None else 1
ser = pd.Series(arr, index = fct)
counts = ser.groupby(level = 0).sum()
if n is not None:
ascending = n < 0
sorted_arr = counts.sort_values(ascending = ascending)
res = sorted_arr.iloc[:abs(n)]
elif prop is not None:
sorted_arr = counts.sort_values() / counts.sum()
if prop < 0:
res = sorted_arr.loc[sorted_arr <= abs(prop)]
else:
res = sorted_arr.loc[sorted_arr > prop]
return res.index.values
The provided code snippet includes necessary dependencies for implementing the `fct_lump` function. Write a Python function `def fct_lump(fct, n = None, prop = None, w = None, other_level = "Other", ties = None) -> pd.Categorical` to solve the following problem:
Return a copy of fct with categories lumped together. Parameters ---------- fct : A pandas.Categorical, or array(-like) used to create one. n : Number of categories to keep. prop : (not implemented) keep categories that occur prop proportion of the time. w : Array of weights corresponding to each value in fct. other_level : Name for all lumped together levels. ties : (not implemented) method to use in the case of ties. Notes ----- Currently, one of n and prop must be specified. Examples -------- >>> fct_lump(['a', 'a', 'b', 'c'], n = 1) ['a', 'a', 'Other', 'Other'] Categories (2, object): ['a', 'Other'] >>> fct_lump(['a', 'a', 'b', 'b', 'c', 'd'], prop = .2) ['a', 'a', 'b', 'b', 'Other', 'Other'] Categories (3, object): ['a', 'b', 'Other']
Here is the function:
def fct_lump(fct, n = None, prop = None, w = None, other_level = "Other", ties = None) -> pd.Categorical:
"""Return a copy of fct with categories lumped together.
Parameters
----------
fct :
A pandas.Categorical, or array(-like) used to create one.
n :
Number of categories to keep.
prop :
(not implemented) keep categories that occur prop proportion of the time.
w :
Array of weights corresponding to each value in fct.
other_level :
Name for all lumped together levels.
ties :
(not implemented) method to use in the case of ties.
Notes
-----
Currently, one of n and prop must be specified.
Examples
--------
>>> fct_lump(['a', 'a', 'b', 'c'], n = 1)
['a', 'a', 'Other', 'Other']
Categories (2, object): ['a', 'Other']
>>> fct_lump(['a', 'a', 'b', 'b', 'c', 'd'], prop = .2)
['a', 'a', 'b', 'b', 'Other', 'Other']
Categories (3, object): ['a', 'b', 'Other']
"""
if ties is not None:
raise NotImplementedError("ties is not implemented")
if n is None and prop is None:
raise NotImplementedError("Either n or prop must be specified")
keep_cats = _fct_lump_n_cats(fct, w, other_level, ties, n = n, prop = prop)
out = fct_collapse(fct, {k:k for k in keep_cats}, group_other = other_level)
return _maybe_upcast(fct, out) | Return a copy of fct with categories lumped together. Parameters ---------- fct : A pandas.Categorical, or array(-like) used to create one. n : Number of categories to keep. prop : (not implemented) keep categories that occur prop proportion of the time. w : Array of weights corresponding to each value in fct. other_level : Name for all lumped together levels. ties : (not implemented) method to use in the case of ties. Notes ----- Currently, one of n and prop must be specified. Examples -------- >>> fct_lump(['a', 'a', 'b', 'c'], n = 1) ['a', 'a', 'Other', 'Other'] Categories (2, object): ['a', 'Other'] >>> fct_lump(['a', 'a', 'b', 'b', 'c', 'd'], prop = .2) ['a', 'a', 'b', 'b', 'Other', 'Other'] Categories (3, object): ['a', 'b', 'Other'] |
158,955 | import pandas as pd
import numpy as np
from siuba.siu import symbolic_dispatch
from collections import defaultdict
def _maybe_upcast(fct_in, fct_out):
if isinstance(fct_in, pd.Series):
return pd.Series(fct_out)
return fct_out
The provided code snippet includes necessary dependencies for implementing the `fct_rev` function. Write a Python function `def fct_rev(fct) -> pd.Categorical` to solve the following problem:
Return a copy of fct with category level order reversed. Parameters ---------- fct : A pandas.Categorical, or array(-like) used to create one. Examples -------- >>> fct = pd.Categorical(["a", "b", "c"]) >>> fct ['a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> fct_rev(fct) ['a', 'b', 'c'] Categories (3, object): ['c', 'b', 'a'] Note that this function can also accept a list. >>> fct_rev(["a", "b", "c"]) ['a', 'b', 'c'] Categories (3, object): ['c', 'b', 'a']
Here is the function:
def fct_rev(fct) -> pd.Categorical:
    """Return a copy of fct with category level order reversed.

    Parameters
    ----------
    fct :
        A pandas.Categorical, or array(-like) used to create one.

    Examples
    --------
    >>> fct = pd.Categorical(["a", "b", "c"])
    >>> fct
    ['a', 'b', 'c']
    Categories (3, object): ['a', 'b', 'c']
    >>> fct_rev(fct)
    ['a', 'b', 'c']
    Categories (3, object): ['c', 'b', 'a']
    Note that this function can also accept a list.
    >>> fct_rev(["a", "b", "c"])
    ['a', 'b', 'c']
    Categories (3, object): ['c', 'b', 'a']
    """
    # coerce plain array-likes to a Categorical first
    if not isinstance(fct, pd.Categorical):
        fct = pd.Categorical(fct)

    # only the category *order* is reversed; element order is unchanged
    rev_levels = list(reversed(fct.categories))

    out = fct.reorder_categories(rev_levels)
    return _maybe_upcast(fct, out)
158,956 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
def method_agg_op(name, is_property, accessor):
    """Build an aggregation op that calls method `name` on a SeriesGroupBy.

    The returned function validates its input, applies the (possibly
    property/accessor) method per group, and wraps the result as a GroupByAgg.
    """
    def f(__ser: SeriesGroupBy, *args, **kwargs) -> GroupByAgg:
        _validate_data_args(__ser)
        res = _apply_grouped_method(__ser, name, is_property, accessor, args, kwargs)
        return GroupByAgg.from_result(res, __ser)
    # expose the wrapped method's name for nicer reprs / debugging
    f.__name__ = f.__qualname__ = name
    return f
def alias_series_agg(name):
    """Decorator factory: register a SeriesGroupBy aggregation for `name`."""
    agg_method = method_agg_op(name, is_property = False, accessor = False)

    def register_on(dispatcher):
        # attach the grouped aggregation implementation to the dispatcher
        dispatcher.register(SeriesGroupBy, agg_method)
        return dispatcher

    return register_on
158,957 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
def _expand_bool(x, f):
return x.expanding().apply(f, raw = True).astype(bool)
from functools import wraps
The provided code snippet includes necessary dependencies for implementing the `cumall` function. Write a Python function `def cumall(x)` to solve the following problem:
Return a same-length array. For each entry, indicates whether that entry and all previous are True-like. Example: >>> cumall(pd.Series([True, False, False])) 0 True 1 False 2 False dtype: bool
Here is the function:
def cumall(x):
    """Return a same-length array. For each entry, indicates whether that entry and all previous are True-like.
    Example:
    >>> cumall(pd.Series([True, False, False]))
    0     True
    1    False
    2    False
    dtype: bool
    """
    # np.all reduced over each expanding prefix of x (see _expand_bool)
    return _expand_bool(x, np.all)
158,958 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
def _expand_bool(x, f):
    # Reduce each expanding (prefix) window of x with f, then cast to bool.
    return x.expanding().apply(f, raw = True).astype(bool)
from functools import wraps
The provided code snippet includes necessary dependencies for implementing the `cumany` function. Write a Python function `def cumany(x)` to solve the following problem:
Return a same-length array. For each entry, indicates whether that entry or any previous are True-like. Example: >>> cumany(pd.Series([False, True, False])) 0 False 1 True 2 True dtype: bool
Here is the function:
def cumany(x):
    """Return a same-length array. For each entry, indicates whether that entry or any previous are True-like.
    Example:
    >>> cumany(pd.Series([False, True, False]))
    0    False
    1     True
    2     True
    dtype: bool
    """
    # np.any reduced over each expanding prefix of x (see _expand_bool)
    return _expand_bool(x, np.any)
158,959 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
The provided code snippet includes necessary dependencies for implementing the `cummean` function. Write a Python function `def cummean(x)` to solve the following problem:
Return a same-length array, containing the cumulative mean.
Here is the function:
def cummean(x):
    """Return the running (cumulative) mean of x, same length as x."""
    windows = x.expanding()
    return windows.mean()
158,960 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
def _cummean_grouped(x):
    # Cumulative mean per group: cumulative sum divided by the cumulative
    # count of non-missing entries, then regrouped with the original grouper.
    grouper = x.grouper
    n_entries = x.obj.notna().groupby(grouper).cumsum()

    res = x.cumsum() / n_entries

    return res.groupby(grouper)
158,961 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
The provided code snippet includes necessary dependencies for implementing the `dense_rank` function. Write a Python function `def dense_rank(x, na_option = "keep")` to solve the following problem:
Return the dense rank. This method of ranking returns values ranging from 1 to the number of unique entries. Ties are all given the same ranking. Example: >>> dense_rank(pd.Series([1,3,3,5])) 0 1.0 1 2.0 2 2.0 3 3.0 dtype: float64
Here is the function:
def dense_rank(x, na_option = "keep"):
    """Rank values so that ties share a rank and ranks have no gaps.

    Results range from 1 to the number of unique entries.

    Example:
    >>> dense_rank(pd.Series([1,3,3,5]))
    0    1.0
    1    2.0
    2    2.0
    3    3.0
    dtype: float64
    """
    return x.rank(na_option = na_option, method = "dense")
158,962 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
def min_rank(x, na_option = "keep"):
    """Rank values, assigning ties their lowest possible rank.

    Equivalent to pd.Series.rank with method="min".
    """
    ranks = x.rank(na_option = na_option, method = "min")
    return ranks
from functools import wraps
The provided code snippet includes necessary dependencies for implementing the `percent_rank` function. Write a Python function `def percent_rank(x, na_option = "keep")` to solve the following problem:
Return the percent rank. Note: Uses minimum rank, and reports the proportion of unique ranks each entry is greater than. Examples: >>> percent_rank(pd.Series([1, 2, 3])) 0 0.0 1 0.5 2 1.0 dtype: float64 >>> percent_rank(pd.Series([1, 2, 2])) 0 0.0 1 0.5 2 0.5 dtype: float64 >>> percent_rank(pd.Series([1])) 0 NaN dtype: float64
Here is the function:
def percent_rank(x, na_option = "keep"):
    """Return the percent rank.
    Note:
        Uses minimum rank, and reports the proportion of unique ranks each entry is greater than.
    Examples:
        >>> percent_rank(pd.Series([1, 2, 3]))
        0    0.0
        1    0.5
        2    1.0
        dtype: float64

        >>> percent_rank(pd.Series([1, 2, 2]))
        0    0.0
        1    0.5
        2    0.5
        dtype: float64

        >>> percent_rank(pd.Series([1]))
        0   NaN
        dtype: float64
    """
    # (min rank - 1) scaled by (non-NA count - 1); a length-1 series
    # produces 0/0 -> NaN, matching the last doctest above.
    return (min_rank(x) - 1) / (x.count() - 1)
158,963 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
The provided code snippet includes necessary dependencies for implementing the `cume_dist` function. Write a Python function `def cume_dist(x, na_option = "keep")` to solve the following problem:
Return the cumulative distribution corresponding to each value in x. This reflects the proportion of values that are less than or equal to each value.
Here is the function:
def cume_dist(x, na_option = "keep"):
    """Proportion of values that are less than or equal to each value in x."""
    max_ranks = x.rank(method = "max", na_option = na_option)
    return max_ranks / x.count()
158,964 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
def regroup(groupby, res):
    """Return an instance of type(groupby) from res."""
    # Fallback dispatch: concrete groupby types must register their own method.
    raise TypeError("Not implemented for group by class: %s"% type(groupby))
def _row_number_grouped(g: GroupBy) -> GroupBy:
    # Start from all ones, then fill each group's positions with 1..len(group).
    out = np.ones(len(g.obj), dtype = int)

    indices = g.grouper.indices
    for g_key, inds in indices.items():
        out[inds] = np.arange(1, len(inds) + 1, dtype = int)

    return regroup(g, out)
158,965 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
The provided code snippet includes necessary dependencies for implementing the `ntile` function. Write a Python function `def ntile(x, n)` to solve the following problem:
TODO: Not Implemented
Here is the function:
def ntile(x, n):
    """TODO: Not Implemented"""
    # Placeholder: always raises until an implementation exists.
    raise NotImplementedError("ntile not implemented")
158,966 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
The provided code snippet includes necessary dependencies for implementing the `lead` function. Write a Python function `def lead(x, n = 1, default = None)` to solve the following problem:
Return an array with each value replaced by the next (or further forward) value in the array. Arguments: x: a pandas Series object n: number of next values forward to replace each value with default: what to replace the n final values of the array with Example: >>> lead(pd.Series([1,2,3]), n=1) 0 2.0 1 3.0 2 NaN dtype: float64 >>> lead(pd.Series([1,2,3]), n=1, default = 99) 0 2 1 3 2 99 dtype: int64
Here is the function:
def lead(x, n = 1, default = None):
    """Shift values forward: entry i takes the value at position i + n.

    Arguments:
        x: a pandas Series object
        n: how many positions ahead to look
        default: fill value for the trailing n entries

    Example:
    >>> lead(pd.Series([1,2,3]), n=1)
    0    2.0
    1    3.0
    2    NaN
    dtype: float64

    >>> lead(pd.Series([1,2,3]), n=1, default = 99)
    0     2
    1     3
    2    99
    dtype: int64
    """
    return x.shift(-n, fill_value = default)
158,967 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
def regroup(groupby, res):
    """Return an instance of type(groupby) from res."""
    # Fallback dispatch: concrete groupby types must register their own method.
    raise TypeError("Not implemented for group by class: %s"% type(groupby))
def _lead_grouped(x, n = 1, default = None):
    # Grouped version of lead(): shift forward within the grouped object.
    res = x.shift(-1*n, fill_value = default)

    return regroup(x, res)
158,968 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
The provided code snippet includes necessary dependencies for implementing the `lag` function. Write a Python function `def lag(x, n = 1, default = None)` to solve the following problem:
Return an array with each value replaced by the previous (or further backward) value in the array. Arguments: x: a pandas Series object n: number of next values backward to replace each value with default: what to replace the n final values of the array with Example: >>> lag(pd.Series([1,2,3]), n=1) 0 NaN 1 1.0 2 2.0 dtype: float64 >>> lag(pd.Series([1,2,3]), n=1, default = 99) 0 99.0 1 1.0 2 2.0 dtype: float64
Here is the function:
def lag(x, n = 1, default = None):
    """Shift values backward: entry i takes the value at position i - n.

    Arguments:
        x: a pandas Series object
        n: how many positions back to look
        default: fill value for the leading n entries

    Example:
    >>> lag(pd.Series([1,2,3]), n=1)
    0    NaN
    1    1.0
    2    2.0
    dtype: float64

    >>> lag(pd.Series([1,2,3]), n=1, default = 99)
    0    99.0
    1    1.0
    2    2.0
    dtype: float64
    """
    shifted = x.shift(n)

    if default is None:
        return shifted

    # fill happens after the shift, so the result keeps the shifted dtype
    shifted.iloc[:n] = default
    return shifted
158,969 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
def regroup(groupby, res):
    """Return an instance of type(groupby) from res."""
    # Fallback dispatch: concrete groupby types must register their own method.
    raise TypeError("Not implemented for group by class: %s"% type(groupby))
def _lag_grouped(x, n = 1, default = None):
    # Grouped version of lag(). NOTE(review): fills via shift(fill_value=...),
    # so filled entries keep the original dtype, unlike the ungrouped lag above.
    res = x.shift(n, fill_value = default)

    return regroup(x, res)
158,970 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
class GroupByAgg(SeriesGroupBy):
def __init__(self, *args, orig_grouper, orig_obj, **kwargs):
def from_result(cls, result: Series, src_groupby: SeriesGroupBy):
def _n_grouped(x: GroupBy) -> GroupByAgg:
return GroupByAgg.from_result(x.size(), x) | null |
158,971 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
The provided code snippet includes necessary dependencies for implementing the `n_distinct` function. Write a Python function `def n_distinct(x)` to solve the following problem:
Return the total number of distinct (i.e. unique) elements in an array. Example: >>> n_distinct(pd.Series([1,1,2,2])) 2
Here is the function:
def n_distinct(x):
    """Count the distinct (unique) values in an array.

    Example:
    >>> n_distinct(pd.Series([1,1,2,2]))
    2
    """
    return x.nunique()
158,972 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
The provided code snippet includes necessary dependencies for implementing the `na_if` function. Write a Python function `def na_if(x, y)` to solve the following problem:
Return a array like x, but with values in y replaced by NAs. Examples: >>> na_if(pd.Series([1,2,3]), [1,3]) 0 NaN 1 2.0 2 NaN dtype: float64
Here is the function:
def na_if(x, y):
    """Return a copy of x in which values that appear in y are replaced by NA.

    Examples:
    >>> na_if(pd.Series([1,2,3]), [1,3])
    0    NaN
    1    2.0
    2    NaN
    dtype: float64
    """
    # promote a scalar y to a one-element list so .isin works uniformly
    targets = [y] if not np.ndim(y) else y

    out = x.copy(deep = True)
    out[x.isin(targets)] = np.nan

    return out
158,973 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
from functools import wraps
The provided code snippet includes necessary dependencies for implementing the `near` function. Write a Python function `def near(x)` to solve the following problem:
TODO: Not Implemented
Here is the function:
def near(x):
    """TODO: Not Implemented"""
    # Placeholder: always raises until an implementation exists.
    raise NotImplementedError("near not implemented")
158,974 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
def nth(x, n, order_by = None, default = None):
from functools import wraps
def first(x, order_by = None, default = None):
    # Convenience wrapper: first element of x (nth with n=0).
    return nth(x, 0, order_by, default)
158,975 | import pandas as pd
import numpy as np
from functools import singledispatch
from siuba.siu import symbolic_dispatch
from pandas.core.groupby import SeriesGroupBy, GroupBy
from pandas.core.frame import NDFrame
from pandas import Series
from siuba.experimental.pd_groups.groupby import GroupByAgg, regroup
from siuba.experimental.pd_groups.translate import method_agg_op
def nth(x, n, order_by = None, default = None):
    """Return the entry of x at position n (like x[n], with optional reordering).

    Note:
        first(x) and last(x) are nth(x, 0) and nth(x, -1).

    Arguments:
        x: series to get entry from.
        n: position of entry to get from x (0 indicates first entry).
        order_by: optional Series used to reorder x.
        default: (not implemented) value to return if no entry at n.

    Examples:
        >>> ser = pd.Series(['a', 'b', 'c'])
        >>> nth(ser, 1)
        'b'

        >>> nth(ser, 1, order_by = pd.Series([1, 2, 0]))
        'a'

        >>> nth(ser, 0), nth(ser, -1)
        ('a', 'c')
    """
    if default is not None:
        raise NotImplementedError("default argument not implemented")

    # out-of-range (positive or negative) indexing returns the default (None).
    # TODO: is returning None the correct behavior for an empty Series?
    size = len(x)
    if n >= size or abs(n) > size:
        return default

    if order_by is None:
        return x.iloc[n]

    # n is in range and order_by given: index into x as reordered by order_by
    # TODO: ensure order_by is arraylike
    if not isinstance(order_by, pd.Series):
        raise NotImplementedError(
                "order_by argument is type %s, but currently only"
                "implemented for Series" % type(order_by)
                )
    if size != len(order_by):
        raise ValueError("x and order_by arguments must be same length")

    sorted_positions = order_by.reset_index(drop = True).sort_values().index
    return x.iloc[sorted_positions[n]]
from functools import wraps
def last(x, order_by = None, default = None):
    # Convenience wrapper: last element of x (nth with n=-1).
    return nth(x, -1, order_by, default)
158,976 | import pandas as pd
from pandas.api import types as pd_types
from pandas.core.groupby import DataFrameGroupBy
from .verbs import var_select, var_create
from ..siu import FormulaContext, Call, strip_symbolic, Fx, FuncArg
from ..siu.dispatchers import verb_dispatch, symbolic_dispatch, create_eager_pipe_call
from collections.abc import Mapping
from contextvars import ContextVar
from contextlib import contextmanager
from typing import Callable, Any
def _is_symbolic_operator(f):
# TODO: consolidate these checks, make the result of symbolic_dispatch a class.
return callable(f) and getattr(f, "_siu_symbolic_operator", False) | null |
158,977 | import pandas as pd
from pandas.api import types as pd_types
from pandas.core.groupby import DataFrameGroupBy
from .verbs import var_select, var_create
from ..siu import FormulaContext, Call, strip_symbolic, Fx, FuncArg
from ..siu.dispatchers import verb_dispatch, symbolic_dispatch, create_eager_pipe_call
from collections.abc import Mapping
from contextvars import ContextVar
from contextlib import contextmanager
from typing import Callable, Any
def _across_setup_fns(fns) -> "dict[str, Callable[[FormulaContext], Any]]":
    """Normalize the across() `fns` argument into a name -> Call mapping.

    Accepts a single Call, a bare callable (wrapped so it is applied to Fx),
    or a dict of named Calls/callables. Lists/tuples are rejected with guidance.
    """
    final_calls = {}
    if isinstance(fns, (list, tuple)):
        raise NotImplementedError(
            "Specifying functions as a list or tuple is not supported. "
            "Please use a dictionary to define multiple functions to apply. \n\n"
            "E.g. across(_[:], {'round': Fx.round(), 'round2': Fx.round() + 1})"
        )
    elif isinstance(fns, dict):
        for name, fn_call_raw in fns.items():
            # symbolics get stripped by default for arguments to verbs, but
            # these are inside a dictionary, so need to strip manually.
            fn_call = strip_symbolic(fn_call_raw)
            if isinstance(fn_call, Call):
                final_calls[name] = fn_call
            elif callable(fn_call):
                final_calls[name] = create_eager_pipe_call(FuncArg(fn_call), Fx)
            else:
                raise TypeError(
                    "All functions to be applied in across must be a siuba.siu.Call, "
                    f"but received a function of type {type(fn_call)}"
                )
    elif isinstance(fns, Call):
        # a single Call gets the default name "fn1"
        final_calls["fn1"] = fns
    elif callable(fns):
        # a single plain callable is wrapped to be piped onto Fx
        final_calls["fn1"] = create_eager_pipe_call(FuncArg(fns), Fx)
    else:
        raise NotImplementedError(f"Unsupported function type in across: {type(fns)}")
    return final_calls
def _get_name_template(fns, names: "str | None") -> str:
    # An explicitly supplied names template always wins.
    if names is not None:
        return names
    # A single callable gets the single-function default template; anything
    # else (e.g. a dict of functions) gets the multi-function template.
    if callable(fns):
        return DEFAULT_SINGLE_FUNC_TEMPLATE
    return DEFAULT_MULTI_FUNC_TEMPLATE
Fx = Symbolic(FormulaArg("Fx"))
def across(__data, cols, fns, names: "str | None" = None) -> pd.DataFrame:
    """Apply one or more functions to each selected column, returning a DataFrame.

    Parameters
    ----------
    __data :
        DataFrame whose columns are selected.
    cols :
        A tidyselect-style column specification.
    fns :
        A Call/callable, or a dict of named Calls/callables, applied per column.
    names :
        Optional format template for result column names (fields: fn, col).
    """
    name_template = _get_name_template(fns, names)
    selected_cols = var_select(__data.columns, *var_create(cols), data=__data)
    fns_map = _across_setup_fns(fns)
    results = {}
    for old_name, new_name in selected_cols.items():
        # tidyselect may leave the name unchanged (None => keep old name)
        if new_name is None:
            new_name = old_name
        crnt_ser = __data[old_name]
        # Fx refers to the current column; _ refers to the whole frame
        context = FormulaContext(Fx=crnt_ser, _=__data)
        for fn_name, fn in fns_map.items():
            fmt_pars = {"fn": fn_name, "col": new_name}
            res = fn(context)
            results[name_template.format(**fmt_pars)] = res
    # ensure at least one result is not a scalar, so we don't get the classic
    # pandas error: "If using all scalar values, you must pass an index"
    index = None
    if results:
        _, v = next(iter(results.items()))
        if pd_types.is_scalar(v):
            index = [0]
    return pd.DataFrame(results, index=index)
158,978 | import pandas as pd
import numpy as np
from functools import singledispatch
import itertools
from ..siu import Symbolic, symbolic_dispatch
def _coerce_to_str(x):
if isinstance(x, (pd.Series, np.ndarray)):
return x.astype(str)
elif not np.ndim(x) < 2:
raise ValueError("np.ndim must be less than 2, but is %s" %np.ndim(x))
return pd.Series(x, dtype = str)
def str_c(x, *args, sep = "", collapse = None):
    """Concatenate strings elementwise across x and args.

    Parameters
    ----------
    x :
        A Series, array, or list of values, coerced to strings.
    *args :
        Additional Series/arrays/lists, concatenated elementwise onto x.
    sep :
        Separator inserted between concatenated pieces (default "").
    collapse :
        If given, join the elementwise results into a single string using
        this separator.

    Notes
    -----
    Previously ``sep`` and ``collapse`` were accepted but silently ignored;
    they are now honored (defaults preserve the old behavior).
    """
    all_args = itertools.chain([x], args)
    strings = list(map(_coerce_to_str, all_args))

    out = strings[0]
    for crnt in strings[1:]:
        if sep:
            out = out + sep
        out = out + crnt

    if collapse is not None:
        # fold the elementwise results down to one string
        return collapse.join(out)

    return out
158,979 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
DPLY_FUNCTIONS = (
# Dply ----
"group_by", "ungroup",
"select", "rename",
"mutate", "transmute", "filter", "summarize",
"arrange", "distinct",
"count", "add_count",
"head",
"top_n",
# Tidy ----
"spread", "gather",
"nest", "unnest",
"expand", "complete",
"separate", "unite", "extract",
# Joins ----
"join", "inner_join", "full_join", "left_join", "right_join", "semi_join", "anti_join",
# TODO: move to vectors
"if_else", "case_when",
"collect", "show_query",
"tbl",
)
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
The provided code snippet includes necessary dependencies for implementing the `install_siu_methods` function. Write a Python function `def install_siu_methods(cls)` to solve the following problem:
This function attaches siuba's table verbs on a class, to use as methods.
Here is the function:
def install_siu_methods(cls):
    """This function attaches siuba's table verbs on a class, to use as methods.
    """
    # look up each verb in this module's namespace, then attach it to the
    # class under the name "siu_<verb>" (e.g. siu_mutate)
    func_dict = globals()
    for func_name in DPLY_FUNCTIONS:
        f = func_dict[func_name]
        method_name = "siu_{}".format(func_name)
        setattr(cls, method_name, f)
158,980 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
DPLY_FUNCTIONS = (
# Dply ----
"group_by", "ungroup",
"select", "rename",
"mutate", "transmute", "filter", "summarize",
"arrange", "distinct",
"count", "add_count",
"head",
"top_n",
# Tidy ----
"spread", "gather",
"nest", "unnest",
"expand", "complete",
"separate", "unite", "extract",
# Joins ----
"join", "inner_join", "full_join", "left_join", "right_join", "semi_join", "anti_join",
# TODO: move to vectors
"if_else", "case_when",
"collect", "show_query",
"tbl",
)
def _repr_grouped_df_html_(self):
obj_repr = self.obj._repr_html_()
# user can config pandas not to return html representation, in which case
# the ipython behavior should fall back to repr
if obj_repr is None:
return None
return "<div><p>(grouped data frame)</p>" + self.obj._repr_html_() + "</div>"
def _repr_grouped_df_console_(self):
return "(grouped data frame)\n" + repr(self.obj)
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def install_pd_siu():
    # https://github.com/coursera/pandas-ply/blob/master/pandas_ply/methods.py
    # Attach every siuba verb as a "siu_<verb>" method on both DataFrame and
    # DataFrameGroupBy, and install friendlier reprs for grouped frames.
    func_dict = globals()
    for func_name in DPLY_FUNCTIONS:
        f = func_dict[func_name]
        method_name = "siu_{}".format(func_name)
        setattr(pd.DataFrame, method_name, f)
        setattr(DataFrameGroupBy, method_name, f)
    DataFrameGroupBy._repr_html_ = _repr_grouped_df_html_
    DataFrameGroupBy.__repr__ = _repr_grouped_df_console_
158,981 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _bounce_groupby(f):
@wraps(f)
def wrapper(__data: "pd.DataFrame | DataFrameGroupBy", *args, **kwargs):
if isinstance(__data, pd.DataFrame):
return f(__data, *args, **kwargs)
groupings = __data.grouper.groupings
group_cols = [ping.name for ping in groupings]
res = f(__data.obj, *args, **kwargs)
return res.groupby(group_cols)
return wrapper | null |
158,982 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _regroup(df):
    """Regroup a DataFrame whose group keys were kept on the index."""
    # try to regroup after an apply, when user kept index (e.g. group_keys = True)
    if len(df.index.names) > 1:
        # handle cases where...
        # 1. grouping with named indices (as_index = True)
        # 2. grouping is level 0 (as_index = False)
        grp_levels = [x for x in df.index.names if x is not None] or [0]

    # NOTE(review): grp_levels is only bound inside the branch above, so a
    # single-level index would raise NameError here -- confirm whether callers
    # guarantee a MultiIndex, or whether a fallback was truncated from this chunk.
    return df.groupby(level = grp_levels)
158,983 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
The provided code snippet includes necessary dependencies for implementing the `show_query` function. Write a Python function `def show_query(__data, simplify = False)` to solve the following problem:
Print the details of a query. Parameters ---------- __data: A DataFrame or siuba.sql.LazyTbl. simplify: Whether to attempt to simplify the query. **kwargs: Additional arguments passed to specific implementations.
Here is the function:
def show_query(__data, simplify = False):
    """Print the details of a query.

    Parameters
    ----------
    __data:
        A DataFrame or siuba.sql.LazyTbl.
    simplify:
        Whether to attempt to simplify the query.
    """
    # a plain DataFrame has no SQL query behind it, so there is nothing to show
    print("No query to show for a DataFrame")

    return __data
158,984 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
def _make_groupby_safe(gdf):
return gdf.obj.groupby(gdf.grouper, group_keys=False, dropna=False)
def transmute(__data, *args, **kwargs):
    """Assign new columns to a DataFrame, while dropping previous columns.

    Parameters
    ----------
    __data:
        The input data.
    **kwargs:
        Each keyword argument is the name of a new column, and an expression.

    See Also
    --------
    mutate : Assign new columns, or modify existing ones.

    Examples
    --------
    >>> from siuba import _, transmute, mutate, head
    >>> from siuba.data import cars

    Notice that transmute results in a table with only the new column:

    >>> cars >> transmute(cyl2 = _.cyl + 1) >> head(2)
       cyl2
    0     7
    1     7

    By contrast, mutate adds the new column to the end of the table:

    >>> cars >> mutate(cyl2 = _.cyl + 1) >> head(2)
       cyl   mpg   hp  cyl2
    0    6  21.0  110     7
    1    6  21.0  110     7
    """
    # (removed dead code: `arg_vars = list(map(simple_varname, args))` -- its
    # result was never used)
    col_names, df_res = _mutate_cols(__data, args, kwargs)

    # keep only the freshly computed columns, dropping the originals
    return df_res[col_names]
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _mutate(__data, *args, **kwargs):
    # Grouped mutate: compute new columns within each group, attach them to a
    # copy of the original (ungrouped) data, then regroup.
    out = __data.obj.copy()
    groupings = {ping.name: ping for ping in __data.grouper.groupings}

    f_transmute = transmute.dispatch(pd.DataFrame)

    # group_keys=False (via _make_groupby_safe) keeps the original row index,
    # so the per-group results align with `out` on assignment
    df = _make_groupby_safe(__data).apply(lambda d: f_transmute(d, *args, **kwargs))

    for varname, ser in df.items():
        if varname in groupings:
            # a grouping column was overwritten; regroup by name so group
            # levels are recalculated from the new values
            groupings[varname] = varname

        out[varname] = ser

    return out.groupby(list(groupings.values()))
158,985 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
def filter(__data, *args):
    """Keep rows where conditions are true.

    Parameters
    ----------
    __data:
        The data being filtered.
    *args:
        conditions that must be met to keep a row.

    Examples
    --------
    >>> from siuba import _, filter
    >>> from siuba.data import cars

    Keep rows where cyl is 4 *and* mpg is less than 22.

    >>> cars >> filter(_.cyl == 4, _.mpg < 22)
        cyl   mpg   hp
    20    4  21.5   97
    31    4  21.4  109

    Use `|` to represent an OR condition. For example, the code below keeps
    rows where hp is over 300 *or* mpg is over 32.

    >>> cars >> filter((_.hp > 300) | (_.mpg > 32))
        cyl   mpg   hp
    17    4  32.4   66
    19    4  33.9   65
    30    8  15.0  335
    """
    crnt_indx = True
    for arg in args:
        res = arg(__data) if callable(arg) else arg

        if isinstance(res, pd.DataFrame):
            # a DataFrame condition keeps a row only if all its columns are True
            crnt_indx &= res.all(axis=1)
        else:
            # a boolean Series, plain bool, or np.bool_ combines directly
            # (previously two identical branches -- merged)
            crnt_indx &= res

    # use loc or iloc to subset, depending on crnt_indx ----
    # the main issue here is that loc can't remove all rows using a slice
    # and iloc can't use a boolean series
    if isinstance(crnt_indx, (bool, np.bool_)):
        # iloc can do slice, but not a bool series
        result = __data.iloc[slice(None) if crnt_indx else slice(0), :]
    else:
        result = __data.loc[crnt_indx, :]

    return result
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _filter(__data, *args):
    # Grouped filter: apply the DataFrame filter within each group, then
    # restore the original row order and regroup.
    groupings = __data.grouper.groupings
    df_filter = filter.registry[pd.DataFrame]

    df = __data.apply(df_filter, *args)

    # will drop all but original index, then sort to get original order
    group_by_lvls = list(range(df.index.nlevels - 1))
    ordered = df.reset_index(group_by_lvls, drop = True).sort_index()

    group_cols = [ping.name for ping in groupings]
    return ordered.groupby(group_cols)
158,986 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
def summarize(__data, *args, **kwargs):
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _summarize(__data, *args, **kwargs):
    # Grouped summarize requires dropna=False (so NA groups are kept) and
    # group_keys=True (so group labels land on the result index).
    # NOTE(review): nesting reconstructed from a flattened source -- the
    # regrouping is assumed to happen only inside the warning branch; confirm
    # against upstream siuba.
    if __data.dropna or not __data.group_keys:
        warnings.warn(
            f"Grouped data passed to summarize must have dropna=False and group_keys=True."
            " Regrouping with these arguments set."
        )

        if __data.grouper.dropna:
            # will need to recalculate groupings, otherwise it ignores dropna
            group_cols = [ping.name for ping in __data.grouper.groupings]
        else:
            group_cols = __data.grouper.groupings

        __data = __data.obj.groupby(group_cols, dropna=False, group_keys=True)

    df_summarize = summarize.registry[pd.DataFrame]

    df = __data.apply(df_summarize, *args, **kwargs)

    # group labels sit on the outer index levels; move them back to columns
    group_by_lvls = list(range(df.index.nlevels - 1))
    out = df.reset_index(group_by_lvls)
    out.index = pd.RangeIndex(df.shape[0])

    return out
158,987 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
def _make_groupby_safe(gdf):
def transmute(__data, *args, **kwargs):
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _transmute(__data, *args, **kwargs):
    # Grouped transmute: compute new columns per group, then re-attach any
    # grouping columns the expressions did not recreate.
    groupings = {ping.name: ping for ping in __data.grouper.groupings}

    f_transmute = transmute.dispatch(pd.DataFrame)
    df = _make_groupby_safe(__data).apply(lambda d: f_transmute(d, *args, **kwargs))

    # reversed, so that inserting each at position 0 preserves group order
    for varname in reversed(list(groupings)):
        if varname in df.columns:
            # grouping column was recreated; regroup by name so levels refresh
            groupings[varname] = varname
        else:
            df.insert(0, varname, __data.obj[varname])

    return df.groupby(list(groupings.values()))
158,988 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
def _insert_missing_groups(dst, orig, missing_groups):
if missing_groups:
warnings.warn(f"Adding missing grouping variables: {missing_groups}")
for ii, colname in enumerate(missing_groups):
dst.insert(ii, colname, orig[colname])
def _select_group_renames(selection: dict, group_cols):
"""Returns a 2-tuple: groups missing in the select, new group keys."""
renamed = {k: v for k,v in selection.items() if v is not None}
sel_groups = [
renamed[colname] or colname for colname in group_cols if colname in renamed
]
missing_groups = [colname for colname in group_cols if colname not in selection]
return missing_groups, (*missing_groups, *sel_groups)
def select(__data, *args, **kwargs):
    """Select columns of a table to keep or drop (and optionally rename).

    Parameters
    ----------
    __data:
        The input table.
    *args:
        An expression specifying columns to keep or drop.
    **kwargs:
        Not implemented.

    Examples
    --------
    >>> from siuba import _, select
    >>> from siuba.data import cars
    >>> small_cars = cars.head(1)

    You can refer to columns by name or position:

    >>> small_cars >> select(_.cyl, _[2])
       cyl   hp
    0    6  110

    Use a `~` sign to exclude a column:

    >>> small_cars >> select(~_.cyl)
        mpg   hp
    0  21.0  110

    Use a slice to select a range of columns:

    >>> small_cars >> select(_[0:2])
       cyl   mpg
    0    6  21.0
    """
    if kwargs:
        raise NotImplementedError(
            "Using kwargs in select not currently supported. "
            "Use _.newname == _.oldname instead"
        )

    selection = var_select(__data.columns, *var_create(*args), data=__data)

    # entries whose value is not None are renames (old -> new)
    renames = {old: new for old, new in selection.items() if new is not None}

    kept = list(selection)
    return __data[kept].rename(columns=renames)
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def var_select(colnames, *args, data=None):
    """Resolve selection specs into an OrderedDict of {column: new_name_or_None}.

    Values are None for columns kept under their original name; a non-None
    value records a rename.
    """
    # TODO: don't erase named column if included again
    colnames = colnames if isinstance(colnames, pd.Series) else pd.Series(colnames)
    cols = OrderedDict()

    #flat_args = var_flatten(args)
    all_vars = chain(*map(flatten_var, args))

    # Add entries in pandas.rename style {"orig_name": "new_name"}
    for ii, arg in enumerate(all_vars):
        # strings are added directly
        if isinstance(arg, str):
            cols[arg] = None
        # integers add colname at corresponding index
        elif isinstance(arg, int):
            cols[colnames.iloc[arg]] = None
        # general var handling
        elif isinstance(arg, Var):
            # remove negated Vars, otherwise include them
            if ii == 0 and arg.negated:
                # if negation used as first arg apply an implicit everything
                cols.update((k, None) for k in colnames)

            # slicing can refer to single, or range of columns
            if isinstance(arg.name, slice):
                start, stop = var_slice(colnames, arg.name)
                for ii in range(start, stop):
                    var_put_cols(colnames[ii], arg, cols)
            # method calls like endswith()
            elif callable(arg.name):
                # TODO: not sure if this is a good idea...
                # basically proxies to pandas str methods (they must return bool array)
                indx = arg.name(colnames.str)
                var_put_cols(colnames[indx].tolist(), arg, cols)
                #cols.update((x, None) for x in set(colnames[indx]) - set(cols))
            elif isinstance(arg.name, int):
                var_put_cols(colnames.iloc[arg.name], arg, cols)
            else:
                var_put_cols(arg.name, arg, cols)
        elif callable(arg) and data is not None:
            # TODO: call on the data
            # a bare callable is evaluated column-wise as a predicate
            col_mask = colwise_eval(data, arg)
            for name in colnames[col_mask]:
                cols[name] = None
        else:
            raise Exception("variable must be either a string or Var instance")

    return cols
def var_create(*args) -> "tuple[Var]":
    """Normalize select() arguments into a tuple of variable specifications."""
    vl = VarList()
    normalized = []

    for raw in args:
        if isinstance(raw, Call):
            # evaluate the lazy expression against a VarList to get a Var
            res = raw(vl)
            if isinstance(res, VarList):
                raise ValueError("Must select specific column. Did you pass `_` to select?")
            normalized.append(res)
        elif isinstance(raw, Var) or callable(raw):
            # already a Var, or a bare callable predicate -- pass through
            normalized.append(raw)
        else:
            normalized.append(Var(raw))

    return tuple(normalized)
def _select(__data, *args, **kwargs):
    # tidyselect: compute the selection (old -> new names) so we can work out
    # which grouping columns the user dropped or renamed
    var_list = var_create(*args)
    od = var_select(__data.obj.columns, *var_list)

    group_cols = [ping.name for ping in __data.grouper.groupings]

    res = select(__data.obj, *args, **kwargs)
    missing_groups, group_keys = _select_group_renames(od, group_cols)

    # dropped grouping columns are re-inserted (with a warning), then regroup
    _insert_missing_groups(res, __data.obj, missing_groups)
    return res.groupby(list(group_keys))
158,989 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
def simple_varname(call):
    """Return the column name for a simple expression, otherwise None.

    Handles plain strings, _.some_var, and _["some_var"].
    """
    if isinstance(call, str):
        return call

    # check for expr like _.some_var or _["some_var"]
    is_simple_access = (
        isinstance(call, Call)
        and call.func in {"__getitem__", "__getattr__"}
        and isinstance(call.args[0], MetaArg)
        and isinstance(call.args[1], (str, _SliceOpIndex))
    )

    if is_simple_access:
        name = call.args[1]
        if isinstance(name, _SliceOpIndex):
            return name.args[0]
        return name

    return None
def _select_group_renames(selection: dict, group_cols):
"""Returns a 2-tuple: groups missing in the select, new group keys."""
renamed = {k: v for k,v in selection.items() if v is not None}
sel_groups = [
renamed[colname] or colname for colname in group_cols if colname in renamed
]
missing_groups = [colname for colname in group_cols if colname not in selection]
return missing_groups, (*missing_groups, *sel_groups)
def rename(__data, **kwargs):
    """Rename columns of a table.

    Parameters
    ----------
    __data:
        The input table.
    **kwargs:
        Keyword arguments of the form new_name = _.old_name, or new_name = "old_name".

    Examples
    --------
    >>> import pandas as pd
    >>> from siuba import _, rename
    >>> df = pd.DataFrame({"zzz": [1], "b": [2]})
    >>> df >> rename(a = _.zzz)
       a  b
    0  1  2
    """
    # TODO: allow names with spaces, etc..
    # invert to pandas style: {old_name: new_name}
    col_names = {simple_varname(expr): new for new, expr in kwargs.items()}

    # a None key means some expression was not a simple column reference
    if None in col_names:
        raise ValueError("Rename needs column name (e.g. 'a' or _.a), but received %s"%col_names[None])

    return __data.rename(columns = col_names)
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _rename(__data, **kwargs):
    # invert kwargs to pandas style: {old_name: new_name}
    col_names = {simple_varname(v):k for k,v in kwargs.items()}
    group_cols = [ping.name for ping in __data.grouper.groupings]

    res = rename(__data.obj, **kwargs)

    # missing_groups is intentionally unused here: rename never drops columns,
    # so (unlike _select) no grouping variables need to be re-inserted
    missing_groups, group_keys = _select_group_renames(col_names, group_cols)

    return res.groupby(list(group_keys))
158,990 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
def simple_varname(call):
def _call_strip_ascending(f):
def arrange(__data, *args):
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _arrange(__data, *args):
    # Grouped arrange: validate that every sort key is a bare column, sort the
    # underlying DataFrame, then regroup by the original grouping columns.
    for arg in args:
        # strip any ascending/descending marker (e.g. -_.x) before validating
        f, desc = _call_strip_ascending(arg)

        if not simple_varname(f):
            raise NotImplementedError(
                "Arrange over DataFrameGroupBy only supports simple "
                "column names, not expressions"
            )

    df_sorted = arrange(__data.obj, *args)

    group_cols = [ping.name for ping in __data.grouper.groupings]

    return df_sorted.groupby(group_cols)
158,991 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
def distinct(__data, *args, _keep_all = False, **kwargs):
    """Keep only distinct (unique) rows from a table.

    Parameters
    ----------
    __data:
        The input data.
    *args:
        Columns to use when determining which rows are unique.
    _keep_all:
        Whether to keep all columns of the original data, not just *args.
    **kwargs:
        If specified, arguments passed to the verb mutate(), and then being used
        in distinct().

    See Also
    --------
    count : keep distinct rows, and count their number of observations.

    Examples
    --------
    >>> from siuba import _, distinct
    >>> from siuba.data import penguins
    >>> penguins >> distinct(_.species, _.island)  # doctest: +SKIP
    """
    # no selection given: de-duplicate over entire rows
    if not (args or kwargs):
        return __data.drop_duplicates().reset_index(drop=True)

    new_names, df_res = _mutate_cols(__data, args, kwargs)
    deduped = df_res.drop_duplicates(new_names).reset_index(drop=True)

    if _keep_all:
        return deduped

    return deduped[new_names]
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _distinct(__data, *args, _keep_all = False, **kwargs):
    # Grouped distinct: run DataFrame distinct within each group, then move
    # the group labels off the index and regroup.
    group_names = [ping.name for ping in __data.grouper.groupings]

    f_distinct = distinct.dispatch(type(__data.obj))

    tmp_data = (__data
        .apply(f_distinct, *args, _keep_all=_keep_all, **kwargs)
    )

    # all index levels except the innermost hold the group keys
    index_keys = tmp_data.index.names[:-1]

    # keys already present as columns are dropped from the index; the rest
    # are restored as columns
    keys_to_drop = [k for k in index_keys if k in tmp_data.columns]
    keys_to_keep = [k for k in index_keys if k not in tmp_data.columns]

    final = tmp_data.reset_index(keys_to_drop, drop=True).reset_index(keys_to_keep)

    return final.groupby(group_names)
158,992 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
def if_else(condition, true, false):
    """Return elements from `true` or `false`, depending on `condition`.

    Parameters
    ----------
    condition:
        Logical vector (or lazy expression).
    true:
        Values to be used when condition is True.
    false:
        Values to be used when condition is False.

    See Also
    --------
    case_when : Generalized if_else, for handling many cases.

    Examples
    --------
    >>> ser1 = pd.Series([1,2,3])
    >>> if_else(ser1 > 2, np.nan, ser1)
    0    1.0
    1    2.0
    2    NaN
    dtype: float64

    >>> from siuba import _
    >>> f = if_else(_ < 2, _, 2)
    >>> f(ser1)
    0    1
    1    2
    2    2
    dtype: int64

    >>> import numpy as np
    >>> ser2 = pd.Series(['NA', 'a', 'b'])
    >>> if_else(ser2 == 'NA', np.nan, ser2)
    0    NaN
    1      a
    2      b
    dtype: object
    """
    # generic fallback: no implementation is registered for this condition type
    raise_type_error(condition)
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _if_else(condition, true, false):
    # Call/Symbolic dispatch: defer evaluation by building a symbolic call
    return create_sym_call(if_else, condition, true, false)
158,993 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _if_else(condition, true, false):
result = np.where(condition.fillna(False), true, false)
return pd.Series(result) | null |
158,994 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
def case_when(__data, cases: dict):
    """Generalized, vectorized if statement.

    Parameters
    ----------
    __data:
        The input data.
    cases: dict
        A mapping of condition : value.

    See Also
    --------
    if_else : Handles the special case of two conditions.

    Examples
    --------
    >>> import pandas as pd
    >>> from siuba import _, case_when
    >>> df = pd.DataFrame({"x": [1, 2, 3]})
    >>> case_when(df, {_.x == 1: "one", _.x == 2: "two"})
    0     one
    1     two
    2    None
    dtype: object

    >>> df >> case_when({_.x == 1: "one", _.x == 2: "two", True: "other"})
    0      one
    1      two
    2    other
    dtype: object
    """
    if isinstance(cases, Call):
        cases = cases(__data)
    # TODO: handle when receive list of (k,v) pairs for py < 3.5 compat?

    stripped_cases = {strip_symbolic(k): strip_symbolic(v) for k,v in cases.items()}

    n = len(__data)
    out = np.repeat(None, n)
    # iterate in reverse so earlier (higher-priority) cases overwrite later ones
    for k, v in reversed(list(stripped_cases.items())):
        if callable(k):
            result = _val_call(k, __data, n)
            indx = np.where(result)[0]

            val_res = _val_call(v, __data, n, indx)
            out[indx] = val_res
        elif k:
            # e.g. k is just True, etc..  -- a truthy literal matches every row
            val_res = _val_call(v, __data, n)
            out[:] = val_res

    # by recreating an array, attempts to cast as best dtype
    return pd.Series(list(out))
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _case_when(__data, cases):
    # Call/Symbolic dispatch: validate cases, then build a lazy symbolic call
    if not isinstance(cases, dict):
        raise Exception("Cases must be a dictionary")
    dict_entries = dict((strip_symbolic(k), strip_symbolic(v)) for k,v in cases.items())

    # Lazy wrapper keeps the cases dict from being evaluated as data
    cases_arg = Lazy(DictCall("__call__", dict, dict_entries))

    return create_sym_call(case_when, __data, cases_arg)
158,995 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
def simple_varname(call):
    """Return the column name for a simple expression, otherwise None.

    Handles plain strings, _.some_var, and _["some_var"].
    """
    if isinstance(call, str):
        return call

    # check for expr like _.some_var or _["some_var"]
    is_simple_access = (
        isinstance(call, Call)
        and call.func in {"__getitem__", "__getattr__"}
        and isinstance(call.args[0], MetaArg)
        and isinstance(call.args[1], (str, _SliceOpIndex))
    )

    if is_simple_access:
        name = call.args[1]
        if isinstance(name, _SliceOpIndex):
            return name.args[0]
        return name

    return None
def group_by(__data, *args, add = False, **kwargs):
    """Return a grouped DataFrame, using columns or expressions to define groups.

    Any operations (e.g. summarize, mutate, filter) performed on grouped data
    will be performed "by group". Use `ungroup()` to remove the groupings.

    Parameters
    ----------
    __data:
        The data being grouped.
    *args:
        Lazy expressions used to select the grouping columns. Currently, each
        arg must refer to a single columns (e.g. _.cyl, _.mpg).
    add: bool
        If the data is already grouped, whether to add these groupings on top of those.
    **kwargs:
        Keyword arguments define new columns used to group the data.

    Examples
    --------
    >>> from siuba import _, group_by, summarize, filter, mutate, head
    >>> from siuba.data import cars
    >>> by_cyl = cars >> group_by(_.cyl)
    >>> by_cyl >> summarize(max_mpg = _.mpg.max(), max_hp = _.hp.max())
       cyl  max_mpg  max_hp
    0    4     33.9     113
    1    6     21.4     175
    2    8     19.2     335

    >>> cars >> group_by(cyl2 = _.cyl + 1) >> head(2)
    (grouped data frame)
       cyl   mpg   hp  cyl2
    0    6  21.0  110     7
    1    6  21.0  110     7

    Note that creating the new grouping column is always performed on ungrouped data.
    Use an explicit mutate on the grouped data to perform the operation within groups.
    """
    # always start from the underlying (ungrouped) DataFrame
    if isinstance(__data, DataFrameGroupBy):
        tmp_df = __data.obj.copy()
    else:
        tmp_df = __data.copy()

    # TODO: super inefficient, since it makes multiple copies of data
    # need way to get the by_vars and apply (grouped) computation
    computed = transmute(tmp_df, *args, **kwargs)
    by_vars = list(computed.columns)
    for k in by_vars:
        tmp_df[k] = computed[k]

    if isinstance(__data, DataFrameGroupBy) and add:
        # layer the new groupings on top of the existing ones
        groupings = {el.name: el for el in __data.grouper.groupings}

        for varname in by_vars:
            # ensures group levels are recalculated if varname was in transmute
            groupings[varname] = varname

        return tmp_df.groupby(list(groupings.values()), dropna=False, group_keys=True)

    return tmp_df.groupby(by = by_vars, dropna=False, group_keys=True)
def ungroup(__data):
    """Return an ungrouped DataFrame.

    Parameters
    ----------
    __data:
        The data being ungrouped.

    Examples
    --------
    >>> from siuba import _, group_by, ungroup
    >>> from siuba.data import cars
    >>> g_cyl = cars.groupby("cyl")
    >>> res1 = ungroup(g_cyl)
    >>> res2 = cars >> group_by(_.cyl) >> ungroup()
    """
    # TODO: can we somehow just restore the original df used to construct
    # the groupby?
    if isinstance(__data, DataFrameGroupBy):
        return __data.obj

    if isinstance(__data, pd.DataFrame):
        return __data

    raise TypeError(f"Unsupported type {type(__data)}")
from siuba.siu import DictCall
def _check_name(name, columns):
if name is None:
name = "n"
while name in columns:
name = name + "n"
elif name != "n" and name in columns:
raise ValueError(
f"Column name `{name}` specified for count name, but is already present in data."
)
elif not isinstance(name, str):
raise TypeError("`name` must be a single string.")
return name
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
The provided code snippet includes necessary dependencies for implementing the `add_count` function. Write a Python function `def add_count(__data, *args, wt = None, sort = False, name = None, **kwargs)` to solve the following problem:
Add a column that is the number of observations for each grouping of data. Note that this function is similar to count(), but does not aggregate. It's useful combined with filter(). Parameters ---------- __data: A DataFrame. *args: The names of columns to be used for grouping. Passed to group_by. wt: The name of a column to use as a weighted for each row. sort: Whether to sort the results in descending order. **kwargs: Creates a new named column, and uses for grouping. Passed to group_by. Examples -------- >>> import pandas as pd >>> from siuba import _, add_count, group_by, ungroup, mutate >>> from siuba.data import mtcars >>> df = pd.DataFrame({"x": ["a", "a", "b"], "y": [1, 2, 3]}) >>> df >> add_count(_.x) x y n 0 a 1 2 1 a 2 2 2 b 3 1 This is useful if you want to see data associated with some count: >>> df >> add_count(_.x) >> filter(_.n == 1) x y n 2 b 3 1 Note that add_count is equivalent to a grouped mutate: >>> df >> group_by(_.x) >> mutate(n = _.shape[0]) >> ungroup() x y n 0 a 1 2 1 a 2 2 2 b 3 1
Here is the function:
def add_count(__data, *args, wt = None, sort = False, name = None, **kwargs):
    """Add a column that is the number of observations for each grouping of data.

    Note that this function is similar to count(), but does not aggregate. It's
    useful combined with filter().

    Parameters
    ----------
    __data:
        A DataFrame.
    *args:
        The names of columns to be used for grouping. Passed to group_by.
    wt:
        The name of a column to use as a weight for each row.
    sort:
        Whether to sort the results in descending order.
    name:
        Name for the new count column (defaults to "n", made unique if needed).
    **kwargs:
        Creates a new named column, and uses for grouping. Passed to group_by.

    Examples
    --------
    >>> import pandas as pd
    >>> from siuba import _, add_count, group_by, ungroup, mutate
    >>> from siuba.data import mtcars
    >>> df = pd.DataFrame({"x": ["a", "a", "b"], "y": [1, 2, 3]})
    >>> df >> add_count(_.x)
       x  y  n
    0  a  1  2
    1  a  2  2
    2  b  3  1

    This is useful if you want to see data associated with some count:

    >>> df >> add_count(_.x) >> filter(_.n == 1)
       x  y  n
    2  b  3  1

    Note that add_count is equivalent to a grouped mutate:

    >>> df >> group_by(_.x) >> mutate(n = _.shape[0]) >> ungroup()
       x  y  n
    0  a  1  2
    1  a  2  2
    2  b  3  1
    """
    no_grouping_vars = not args and not kwargs and isinstance(__data, pd.DataFrame)
    if no_grouping_vars:
        out = __data
    else:
        out = group_by(__data, *args, add=True, **kwargs)
    var_names = ungroup(out).columns
    name = _check_name(name, set(var_names))
    if wt is None:
        if no_grouping_vars:
            # no groups, just use number of rows
            counts = __data.copy()
            counts[name] = counts.shape[0]
        else:
            # note that it's easy to transform tally using single grouped column, so
            # we arbitrarily grab the first column..
            counts = out.obj.copy()
            counts[name] = out[var_names[0]].transform("size")
    else:
        wt_col = simple_varname(wt)
        if wt_col is None:
            raise Exception("wt argument has to be simple column name")
        if no_grouping_vars:
            # no groups, sum weights
            counts = __data.copy()
            counts[name] = counts[wt_col].sum()
        else:
            # TODO: should flip topmost if/else so grouped code is together
            # do weighted tally
            counts = out.obj.copy()
            counts[name] = out[wt_col].transform("sum")
    if sort:
        # BUG FIX: previously sorted by the undefined name `out_col`, which
        # raised NameError whenever sort=True. Sort by the new count column.
        return counts.sort_values(name, ascending = False)
    return counts
158,996 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
The provided code snippet includes necessary dependencies for implementing the `_fast_split_df` function. Write a Python function `def _fast_split_df(g_df)` to solve the following problem:
Note ---- splitting does not scale well to many groups (e.g. 50000+). This is due to pandas' (1) use of indexes, (2) some hard coded actions when subsetting. We are currently working on a fix, so that when people aren't using indexes, nesting will be much faster. see https://github.com/machow/siuba/issues/184
Here is the function:
def _fast_split_df(g_df):
    """Yield each group's rows of a pandas groupby as a DataFrame.

    Note
    ----
    splitting does not scale well to many groups (e.g. 50000+). This is due
    to pandas' (1) use of indexes, (2) some hard coded actions when subsetting.
    We are currently working on a fix, so that when people aren't using indexes,
    nesting will be much faster.
    see https://github.com/machow/siuba/issues/184
    """
    # TODO (#184): speed up when user doesn't need an index
    # right now, this is essentially a copy of
    # pandas.core.groupby.ops.DataSplitter.__iter__
    from pandas._libs import lib
    # private pandas splitter: holds the sorted group labels (slabels) and
    # knows how to chop the group-sorted data into per-group frames
    splitter = g_df.grouper._get_splitter(g_df.obj)
    # start/end row offsets of each group within the group-sorted data
    starts, ends = lib.generate_slices(splitter.slabels, splitter.ngroups)
    # TODO: reset index
    sdata = splitter._get_sorted_data()
    # TODO: avoid costly make_block call, and hard-coded BlockManager init actions.
    # neither of these things is necessary when subsetting rows.
    for start, end in zip(starts, ends):
        # one sub-DataFrame per group, in grouper order
        yield splitter._chop(sdata, slice(start, end))
158,997 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
def nest(__data, *args, key = "data"):
    """Nest columns within a DataFrame.

    Parameters
    ----------
    __data:
        A DataFrame.
    *args:
        The names of columns to be nested. May use any syntax used by the
        `select` function.
    key:
        The name of the column that will hold the nested columns.

    Examples
    --------
    >>> from siuba import _, nest
    >>> from siuba.data import cars
    >>> nested_cars = cars >> nest(-_.cyl)

    Note that pandas with nested DataFrames looks okay in juypter notebooks,
    but has a weird representation in the IPython console, so the example below
    shows that each entry in the data column is a DataFrame.

    >>> nested_cars.shape
    (3, 2)
    >>> type(nested_cars.data[0])
    <class 'pandas.core.frame.DataFrame'>
    """
    # TODO: copied from select function
    selection = var_select(__data.columns, *var_create(*args))
    # columns not selected play the role of groupby keys
    selected = set(selection)
    grp_keys = [c for c in __data.columns if c not in selected]
    nest_keys = list(selection)
    # split into sub DataFrames, holding only the nested columns
    g_df = __data.groupby(grp_keys)
    splitter = g_df.grouper._get_splitter(g_df.obj[nest_keys])
    # TODO: iterating over splitter now only produces 1 item (the dataframe)
    # check backwards compat
    def _as_frame(entry):
        # pandas < 1.3 yields (ii, df) tuples from the splitter; >= 1.3
        # yields the frame directly
        return entry[1] if isinstance(entry, tuple) else entry
    sub_dfs = [_as_frame(chunk) for chunk in splitter]
    nested = pd.DataFrame({key: sub_dfs}, index = g_df.grouper.result_index)
    return nested.reset_index()
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
class VarAnd(Var):
    """A Var holding a tuple of Vars, produced by extended slice syntax.

    Negation on a VarAnd distributes over its children when flattened.
    """
    # tuple of the contained Var selections
    name: "tuple[Var]"
    def __init__(self, name: "tuple[Var]", negated=False, alias=None):
        self.name = name
        self.negated = negated
        # BUG FIX: previously used `if any(bad_var):`, which tests the
        # truthiness of the offending entries themselves, so falsy non-Var
        # values (e.g. 0, "", None) silently passed validation.
        bad_var = [x for x in name if not isinstance(x, Var)]
        if bad_var:
            raise TypeError(f"VarAnd expects a tuple of Var, but saw entries: {bad_var}")
        if alias is not None:
            raise TypeError("alias must be none for VarAnd (extended slice syntax)")
        self.alias = None
    def __eq__(self, x):
        # equality is meaningless for a multi-Var selection
        raise NotImplementedError()
    def __call__(self, *args, **kwargs):
        raise NotImplementedError()
    def flatten(self) -> "tuple[Var]":
        """Return a flat tuple of Vars, pushing any negation down to children."""
        res = []
        for var in self.name:
            neg_var = ~var if self.negated else var
            if isinstance(neg_var, VarAnd):
                # nested VarAnds are flattened recursively
                res.extend(neg_var.flatten())
            else:
                res.append(neg_var)
        return tuple(res)
def var_create(*args) -> "tuple[Var]":
    """Coerce selection arguments into a tuple of Vars (or callables).

    Calls are evaluated against a VarList; Vars and callables pass through;
    anything else is wrapped in a Var.
    """
    vl = VarList()
    out = []
    for arg in args:
        if isinstance(arg, Call):
            res = arg(vl)
            # a bare `_` evaluates to the VarList itself, which selects nothing
            if isinstance(res, VarList):
                raise ValueError("Must select specific column. Did you pass `_` to select?")
            out.append(res)
        elif isinstance(arg, Var) or callable(arg):
            # already a Var, or a predicate/callable: pass through unchanged
            out.append(arg)
        else:
            # plain value (e.g. a column name string): wrap it
            out.append(Var(arg))
    return tuple(out)
def _nest(__data, *args, key = "data"):
    # grouped-data variant of nest: the grouping columns are kept as keys,
    # everything else is nested
    from siuba.dply.tidyselect import VarAnd
    grp_names = [ping.name for ping in __data.grouper.groupings]
    if None in grp_names:
        raise NotImplementedError("All groupby variables must be named when using nest")
    # deselect the grouping columns, so they remain as identifier columns
    return nest(__data.obj, -VarAnd(var_create(*grp_names)), *args, key = key)
158,998 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
def _convert_nested_entry(x):
if isinstance(x, (tuple, list)):
return pd.Series(x)
return x
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
def join(left, right, on = None, how = None, *args, by = None, **kwargs):
    """Join two tables together, by matching on specified columns.

    The functions inner_join, left_join, right_join, and full_join are provided
    as wrappers around join, and are used in the examples.

    Parameters
    ----------
    left :
        The left-hand table.
    right :
        The right-hand table.
    on :
        How to match them. Note that the keyword "by" can also be used for this
        parameter, in order to support compatibility with dplyr.
    how :
        The type of join to perform (inner, full, left, right).
    *args:
        Additional postition arguments. Currently not supported.
    **kwargs:
        Additional keyword arguments. Currently not supported.

    Returns
    -------
    pd.DataFrame

    Examples
    --------
    >>> from siuba import _, inner_join, left_join, full_join, right_join
    >>> from siuba.data import band_members, band_instruments, band_instruments2
    >>> band_members >> inner_join(_, band_instruments)
       name    band   plays
    0  John  Beatles  guitar
    1  Paul  Beatles    bass

    A left join ensures all original rows of the left hand data are included;
    a full join ensures all rows of both data are included.

    You can explicitly specify columns to join on using the "by" argument:

    >>> band_members >> inner_join(_, band_instruments, by = "name")
        n...

    Use a dictionary for the by argument, to match up columns with different names:

    >>> band_members >> full_join(_, band_instruments2, {"name": "artist"})
        n...

    Joins create a new row for each pair of matches, and missing values count
    as matches to eachother by default.
    """
    # grouped right-hand tables are joined on their underlying frame
    if isinstance(right, DataFrameGroupBy):
        right = right.obj
    if not isinstance(right, DataFrame):
        raise Exception("right hand table must be a DataFrame")
    if how is None:
        raise Exception("Must specify how argument")
    if args or kwargs:
        raise NotImplementedError("extra arguments to pandas join not currently supported")
    # "by" is a dplyr-compat alias for "on"
    if on is None and by is not None:
        on = by
    # pandas uses outer, but dplyr uses term full
    if how == "full":
        how = "outer"
    if isinstance(on, Mapping):
        # mapping gives {left_col: right_col} pairs
        left_cols, right_cols = zip(*on.items())
        return left.merge(right, how = how, left_on = left_cols, right_on = right_cols)
    return left.merge(right, how = how, on = on)
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
The provided code snippet includes necessary dependencies for implementing the `unnest` function. Write a Python function `def unnest(__data, key = "data")` to solve the following problem:
Unnest a column holding nested data (e.g. Series of lists or DataFrames). Parameters ---------- ___data: A DataFrame. key: The name of the column to be unnested. Examples -------- >>> import pandas as pd >>> df = pd.DataFrame({'id': [1,2], 'data': [['a', 'b'], ['c']]}) >>> df >> unnest() id data 0 1 a 1 1 b 2 2 c
Here is the function:
def unnest(__data, key = "data"):
    """Unnest a column holding nested data (e.g. Series of lists or DataFrames).

    Parameters
    ----------
    ___data:
        A DataFrame.
    key:
        The name of the column to be unnested.

    Examples
    --------
    >>> import pandas as pd
    >>> df = pd.DataFrame({'id': [1,2], 'data': [['a', 'b'], ['c']]})
    >>> df >> unnest()
       id data
    0   1    a
    1   1    b
    2   2    c
    """
    # TODO: currently only takes key, not expressions
    # each row of the id columns is repeated once per nested element
    entry_lengths = __data[key].apply(len, convert_dtype = True)
    repeated_indx = entry_lengths.index.repeat(entry_lengths)
    id_cols = list(__data.columns[__data.columns != key])
    # flatten the nested column into one long Series
    coerced = map(_convert_nested_entry, __data[key])
    flat = pd.concat(coerced, ignore_index = True)
    flat.name = key
    # may be a better approach using a multi-index
    id_part = __data.loc[repeated_indx, id_cols].reset_index(drop = True)
    return id_part.join(flat)
158,999 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _join(left, right, on = None, how = None):
raise Exception("Unsupported type %s" %type(left)) | null |
159,000 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
def rename(__data, **kwargs):
    """Rename columns of a table.

    Parameters
    ----------
    __data:
        The input table.
    **kwargs:
        Keyword arguments of the form new_name = _.old_name, or new_name = "old_name".

    Examples
    --------
    >>> import pandas as pd
    >>> from siuba import _, rename, select
    >>> df = pd.DataFrame({"zzz": [1], "b": [2]})
    >>> df >> rename(a = _.zzz)
       a  b
    0  1  2

    Note that this is equivalent to this select code:

    >>> df >> select(_.a == _.zzz, _.b)
       a  b
    0  1  2
    """
    # TODO: allow names with spaces, etc..
    # invert kwargs into a pandas-style {old: new} mapping
    mapping = {simple_varname(expr): new_name for new_name, expr in kwargs.items()}
    if None in mapping:
        # simple_varname returns None for anything but a plain column reference
        raise ValueError("Rename needs column name (e.g. 'a' or _.a), but received %s"%mapping[None])
    return __data.rename(columns = mapping)
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
The provided code snippet includes necessary dependencies for implementing the `semi_join` function. Write a Python function `def semi_join(left, right = None, on = None, *args, by = None)` to solve the following problem:
Return the left table with every row that would be kept in an inner join. Parameters ---------- left : The left-hand table. right : The right-hand table. on : How to match them. By default it uses matches all columns with the same name across the two tables. Examples -------- >>> import pandas as pd >>> from siuba import _, semi_join, anti_join >>> df1 = pd.DataFrame({"id": [1, 2, 3], "x": ["a", "b", "c"]}) >>> df2 = pd.DataFrame({"id": [2, 3, 3], "y": ["l", "m", "n"]}) >>> df1 >> semi_join(_, df2) id x 1 2 b 2 3 c >>> df1 >> anti_join(_, df2) id x 0 1 a Generally, it's a good idea to explicitly specify the on argument. >>> df1 >> anti_join(_, df2, on="id") id x 0 1 a
Here is the function:
def semi_join(left, right = None, on = None, *args, by = None):
    """Return the left table with every row that would be kept in an inner join.

    Parameters
    ----------
    left :
        The left-hand table.
    right :
        The right-hand table.
    on :
        How to match them. By default it uses matches all columns with the same
        name across the two tables.

    Examples
    --------
    >>> import pandas as pd
    >>> from siuba import _, semi_join, anti_join
    >>> df1 = pd.DataFrame({"id": [1, 2, 3], "x": ["a", "b", "c"]})
    >>> df2 = pd.DataFrame({"id": [2, 3, 3], "y": ["l", "m", "n"]})
    >>> df1 >> semi_join(_, df2)
       id  x
    1   2  b
    2   3  c

    >>> df1 >> anti_join(_, df2)
       id  x
    0   1  a

    Generally, it's a good idea to explicitly specify the on argument.

    >>> df1 >> anti_join(_, df2, on="id")
       id  x
    0   1  a
    """
    if on is None and by is not None:
        on = by
    if isinstance(on, Mapping):
        # coerce colnames to list, to avoid indexing with tuples
        on_cols, right_on = map(list, zip(*on.items()))
        # BUG FIX: rename() with a positional mapper renames the *index*, so
        # the right table's columns never lined up with the left's and
        # indexing below raised KeyError. Rename columns explicitly.
        right = right[right_on].rename(columns = dict(zip(right_on, on_cols)))
    elif on is None:
        warnings.warn(
            "No on column passed to join. "
            "Inferring join columns instead using shared column names."
        )
        on_cols = list(set(left.columns).intersection(set(right.columns)))
        if not len(on_cols):
            raise Exception("No join column specified, and no shared column names")
        warnings.warn("Detected shared columns: %s" % on_cols)
    elif isinstance(on, str):
        on_cols = [on]
    else:
        on_cols = on
    # get our semi join on ----
    if len(on_cols) == 1:
        # single join column: a simple isin membership test suffices
        col_name = on_cols[0]
        indx = left[col_name].isin(right[col_name])
        return left.loc[indx]
    # Not a super efficient approach. Effectively, an inner join with what would
    # be duplicate rows removed.
    merger = _MergeOperation(left, right, left_on = on_cols, right_on = on_cols)
    _, l_indx, _ = merger._get_join_info()
    range_indx = pd.RangeIndex(len(left))
    return left.loc[range_indx.isin(l_indx)]
159,001 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
The provided code snippet includes necessary dependencies for implementing the `anti_join` function. Write a Python function `def anti_join(left, right = None, on = None, *args, by = None)` to solve the following problem:
Return the left table with every row that would *not* be kept in an inner join. Parameters ---------- left : The left-hand table. right : The right-hand table. on : How to match them. By default it uses matches all columns with the same name across the two tables. Examples -------- >>> import pandas as pd >>> from siuba import _, semi_join, anti_join >>> df1 = pd.DataFrame({"id": [1, 2, 3], "x": ["a", "b", "c"]}) >>> df2 = pd.DataFrame({"id": [2, 3, 3], "y": ["l", "m", "n"]}) >>> df1 >> semi_join(_, df2) id x 1 2 b 2 3 c >>> df1 >> anti_join(_, df2) id x 0 1 a Generally, it's a good idea to explicitly specify the on argument. >>> df1 >> anti_join(_, df2, on="id") id x 0 1 a
Here is the function:
def anti_join(left, right = None, on = None, *args, by = None):
    """Return the left table with every row that would *not* be kept in an inner join.

    Parameters
    ----------
    left :
        The left-hand table.
    right :
        The right-hand table.
    on :
        How to match them. By default it uses matches all columns with the same
        name across the two tables.

    Examples
    --------
    >>> import pandas as pd
    >>> from siuba import _, semi_join, anti_join
    >>> df1 = pd.DataFrame({"id": [1, 2, 3], "x": ["a", "b", "c"]})
    >>> df2 = pd.DataFrame({"id": [2, 3, 3], "y": ["l", "m", "n"]})
    >>> df1 >> anti_join(_, df2)
       id  x
    0   1  a

    Generally, it's a good idea to explicitly specify the on argument.

    >>> df1 >> anti_join(_, df2, on="id")
       id  x
    0   1  a
    """
    # "by" is a dplyr-compat alias for "on"
    if on is None and by is not None:
        on = by
    # copied from semi_join
    if isinstance(on, Mapping):
        left_on, right_on = zip(*on.items())
    else:
        left_on = right_on = on
    if isinstance(right, DataFrameGroupBy):
        right = right.obj
    # manually perform merge, up to getting pieces need for indexing
    merger = _MergeOperation(left, right, left_on = left_on, right_on = right_on)
    _, left_indexer, _ = merger._get_join_info()
    # keep only the left rows that had no match in the (inner) join
    keep = pd.RangeIndex(len(left)).difference(left_indexer)
    return left.iloc[keep, :]
159,002 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
def head(__data, n = 5):
    """Return the first n rows of the data.

    Parameters
    ----------
    __data:
        a DataFrame.
    n:
        The number of rows of data to keep.

    Examples
    --------
    >>> from siuba import head
    >>> from siuba.data import cars
    >>> cars >> head(2)
       cyl   mpg   hp
    0    6  21.0  110
    1    6  21.0  110
    """
    # delegate directly to pandas' DataFrame.head
    return __data.head(n)
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _head_gdf(__data, n = 5):
groupings = __data.grouper.groupings
group_cols = [ping.name for ping in groupings]
df_subset = __data.obj.head(n)
return df_subset.groupby(group_cols) | null |
159,003 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
def filter(__data, *args):
    """Keep rows where conditions are true.

    Parameters
    ----------
    __data:
        The data being filtered.
    *args:
        conditions that must be met to keep a column.

    Examples
    --------
    >>> from siuba import _, filter
    >>> from siuba.data import cars

    Keep rows where cyl is 4 *and* mpg is less than 25.

    >>> cars >> filter(_.cyl == 4, _.mpg < 22)
        cyl   mpg   hp
    20    4  21.5   97
    31    4  21.4  109

    Use `|` to represent an OR condition. For example, the code below keeps
    rows where hp is over 250 *or* mpg is over 32.

    >>> cars >> filter((_.hp > 300) | (_.mpg > 32))
        cyl   mpg   hp
    17    4  32.4   66
    19    4  33.9   65
    30    8  15.0  335
    """
    # AND together every condition; callables are evaluated against the data
    mask = True
    for cond in args:
        res = cond(__data) if callable(cond) else cond
        if isinstance(res, pd.DataFrame):
            # a frame condition is true only when every column in a row is true
            mask &= res.all(axis=1)
        else:
            # Series or scalar bool: combine directly
            mask &= res
    # use loc or iloc to subset, depending on the mask's type ----
    # the main issue here is that loc can't remove all rows using a slice
    # and iloc can't use a boolean series
    if isinstance(mask, (bool, np.bool_)):
        # scalar mask: keep everything or nothing
        return __data.iloc[slice(None) if mask else slice(0), :]
    return __data.loc[mask, :]
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def min_rank(x, na_option = "keep"):
    """Return the min rank. See pd.Series.rank with method="min" for details.
    """
    # ties all receive the lowest rank in the group ("min" method)
    return x.rank(na_option = na_option, method = "min")
The provided code snippet includes necessary dependencies for implementing the `top_n` function. Write a Python function `def top_n(__data, n, wt = None)` to solve the following problem:
Filter to keep the top or bottom entries in each group. Parameters ---------- ___data: A DataFrame. n: The number of rows to keep in each group. wt: A column or expression that determines ordering (defaults to last column in data). Examples -------- >>> from siuba import _, top_n >>> df = pd.DataFrame({'x': [3, 1, 2, 4], 'y': [1, 1, 0, 0]}) >>> top_n(df, 2, _.x) x y 0 3 1 3 4 0 >>> top_n(df, -2, _.x) x y 1 1 1 2 2 0 >>> top_n(df, 2, _.x*_.y) x y 0 3 1 1 1 1
Here is the function:
def top_n(__data, n, wt = None):
    """Filter to keep the top or bottom entries in each group.

    Parameters
    ----------
    ___data:
        A DataFrame.
    n:
        The number of rows to keep in each group. A negative n keeps the
        bottom entries instead.
    wt:
        A column or expression that determines ordering (defaults to last column in data).

    Examples
    --------
    >>> from siuba import _, top_n
    >>> df = pd.DataFrame({'x': [3, 1, 2, 4], 'y': [1, 1, 0, 0]})
    >>> top_n(df, 2, _.x)
       x  y
    0  3  1
    3  4  0

    >>> top_n(df, -2, _.x)
       x  y
    1  1  1
    2  2  0

    >>> top_n(df, 2, _.x*_.y)
       x  y
    0  3  1
    1  1  1
    """
    # NOTE: using min_rank, since it can return a lazy expr for min_rank(ing)
    #       but I would rather not have it imported in verbs. will be more
    #       reasonable if each verb were its own file? need abstract verb / vector module.
    #       vector imports experimental right now, so we need to invert deps
    # TODO:
    #   * what if wt is a string? should remove all str -> expr in verbs like group_by etc..
    #   * verbs like filter allow lambdas, but this func breaks with that
    from .vector import min_rank
    if wt is None:
        # default to ordering by the rightmost column
        sym_wt = getattr(Symbolic(MetaArg("_")), __data.columns[-1])
    elif isinstance(wt, Call):
        sym_wt = Symbolic(wt)
    else:
        raise TypeError("wt must be a symbolic expression, eg. _.some_col")
    # positive n: highest values; negative n: lowest values
    if n > 0:
        return filter(__data, min_rank(-sym_wt) <= n)
    return filter(__data, min_rank(sym_wt) <= abs(n))
159,004 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
def gather(__data, key = "key", value = "value", *args, drop_na = False, convert = False):
    """Reshape a DataFrame from wide to long format.

    Parameters
    ----------
    __data:
        The input data.
    key:
        Name for the new column holding the gathered column names.
    value:
        Name for the new column holding the gathered values.
    *args:
        A selection of columns to gather (anything select() accepts).
        All columns are gathered when omitted.
    drop_na: bool
        Remove rows whose value entry is NA.
    convert:
        Not implemented.

    Examples
    --------
    >>> import pandas as pd
    >>> from siuba import _, gather
    >>> df = pd.DataFrame({"id": ["a", "b"], "x": [1, 2], "y": [3, None]})
    >>> gather(df, "key", "value", -_.id)
      id key  value
    0  a   x    1.0
    1  b   x    2.0
    2  a   y    3.0
    3  b   y    NaN
    """
    # TODO: implement var selection over *args
    if convert:
        raise NotImplementedError("convert not yet implemented")

    # resolve which columns get gathered (defaults to every column) ----
    selection = var_select(__data.columns, *var_create(*(args or __data.columns)))
    if not selection:
        # nothing selected: hand back the input untouched
        return __data

    kept = [name for name in __data.columns if name not in selection]
    result = pd.melt(__data, kept, list(selection), key, value)

    if not drop_na:
        return result
    return result[~result[value].isna()].reset_index(drop = True)
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _gather(__data, key = "key", value = "value", *args, **kwargs):
    """Grouped variant: gather the underlying frame, then restore groupings."""
    original_groups = [g.name for g in __data.grouper.groupings]
    gathered = gather(__data.obj, key, value, *args, **kwargs)

    # keep only grouping variables that survived the reshape ----
    remaining = set(gathered.columns) - {key, value}
    survivors = [name for name in original_groups if name in remaining]

    if gathered is __data.obj:
        # special case: nothing was gathered, return the grouped frame as-is
        return __data
    elif survivors:
        return gathered.groupby(survivors)
    return gathered
159,005 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
def spread(__data, key, value, fill = None, reset_index = True):
    """Reshape a DataFrame from long to wide format.

    Parameters
    ----------
    __data:
        The input data.
    key:
        Column whose entries become the new column names.
    value:
        Column whose entries fill the new columns.
    fill:
        Replacement for missing values (kept as NA by default).
    reset_index:
        Whether to reset the index of the result (default True).

    Examples
    --------
    >>> import pandas as pd
    >>> from siuba import _, gather
    >>> df = pd.DataFrame({"id": ["a", "b"], "x": [1, 2], "y": [3, None]})
    >>> long = gather(df, "key", "value", -_.id, drop_na=True)
    >>> spread(long, "key", "value")
      id    x    y
    0  a  1.0  3.0
    1  b  2.0  NaN
    """
    name_col = _get_single_var_select(__data.columns, key)
    data_col = _get_single_var_select(__data.columns, value)

    # every remaining column identifies a row of the wide result ----
    index_cols = [c for c in __data.columns if c not in (name_col, data_col)]

    wide = __data.set_index(index_cols + [name_col]).unstack(level = -1)
    if fill is not None:
        wide.fillna(fill, inplace = True)

    # collapse the column MultiIndex that unstack produced
    wide.columns = wide.columns.droplevel().rename(None)
    if reset_index:
        wide.reset_index(inplace = True)
    return wide
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _spread_gdf(__data, *args, **kwargs):
    """Grouped variant: spread the underlying frame, then regroup by name."""
    names = [grouping.name for grouping in __data.grouper.groupings]

    # dispatch explicitly to the plain DataFrame implementation
    wide = spread.registry[pd.DataFrame](__data.obj, *args, **kwargs)

    # regrouping requires every grouping to carry a usable name ----
    if any(name is None for name in names):
        raise ValueError("spread can only work on grouped DataFrame if all groupings "
                "have names. Groups are: %s" %names)
    return wide.groupby(names)
159,006 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
def separate(__data, col, into, sep = r"[^a-zA-Z0-9]",
remove = True, convert = False,
extra = "warn", fill = "warn"
):
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _separate_gdf(__data, *args, **kwargs):
    """Grouped variant: run separate on the underlying frame, then regroup."""
    original_groupings = __data.grouper.groupings

    # dispatch explicitly to the plain DataFrame implementation
    result = separate.registry[pd.DataFrame](__data.obj, *args, **kwargs)

    return result.groupby(original_groupings)
159,007 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
def unite(__data, col, *args, sep = "_", remove = True):
    """Combine several columns into one string column.

    Parameters
    ----------
    __data:
        A DataFrame.
    col:
        Name of the column to create (string or simple siu expression).
    *args:
        Names of the columns to combine.
    sep:
        Separator placed between each combined value (default "_").
    remove:
        Whether to drop the combined columns from the result (default True).
    """
    source_names = [simple_varname(a) for a in args]
    target_name = simple_varname(col)

    # validations ----
    if None in source_names:
        raise ValueError("*args must be string, or simple column name, e.g. _.col_name")

    missing = set(source_names) - set(__data.columns)
    if missing:
        raise ValueError("columns %s not in DataFrame.columns" %missing)

    str_cols = [_coerce_to_str(__data[name]) for name in source_names]

    if target_name in __data:
        raise ValueError("col argument %s already a column in data" % target_name)

    # perform unite ----
    # TODO: this is probably not very efficient. Maybe try with transform or apply?
    joined = reduce(lambda left, right: left + sep + right, str_cols)

    result = __data.copy()
    result[target_name] = joined
    if not remove:
        return result
    return result.drop(columns = source_names)
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _unite_gdf(__data, *args, **kwargs):
    """Grouped variant: unite columns on the underlying frame, then regroup."""
    # TODO: consolidate these trivial group by dispatched funcs
    original_groupings = __data.grouper.groupings

    result = unite.registry[pd.DataFrame](__data.obj, *args, **kwargs)

    return result.groupby(original_groupings)
159,008 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
def extract(
        __data, col, into, regex = r"(\w+)",
        remove = True, convert = False,
        flags = 0
        ):
    """Split regex capture groups out of a string column.

    Returns a DataFrame with one new column per capture group.

    Parameters
    ----------
    __data:
        A DataFrame.
    col:
        Column to split (string or simple siu expression).
    into:
        Names for the new columns, one per capture group.
    regex:
        Regular expression with capture groups, passed to Series.str.extract.
    remove:
        Whether to drop `col` from the result (default True).
    convert:
        Whether to attempt numeric conversion of the new columns.
    flags:
        `re` module flags, forwarded to Series.str.extract.
    """
    src_name = simple_varname(col)

    pieces = __data[src_name].str.extract(regex, flags)

    n_found = len(pieces.columns)
    if n_found != len(into):
        raise ValueError("Split into %s pieces, but expected %s" % (n_found, len(into)))

    # attempt to convert columns to numeric ----
    if convert:
        # TODO: better strategy here?
        for piece_name in pieces:
            try:
                pieces[piece_name] = pd.to_numeric(pieces[piece_name])
            except ValueError:
                pass

    result = __data.copy()
    for position, new_name in enumerate(into):
        result[new_name] = pieces.iloc[:, position]

    if not remove:
        return result
    return result.drop(columns = src_name)
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def _extract_gdf(__data, *args, **kwargs):
    """Grouped variant: extract on the underlying frame, then regroup."""
    # TODO: consolidate these trivial group by dispatched funcs
    original_groupings = __data.grouper.groupings

    result = extract.registry[pd.DataFrame](__data.obj, *args, **kwargs)

    return result.groupby(original_groupings)
159,009 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
# NOTE(review): backend marker classes. AbstractBackend is not defined in this
# chunk — presumably siuba.siu._databackend.AbstractBackend; confirm upstream.
class SqlaEngine(AbstractBackend): pass
class PlDataFrame(AbstractBackend): pass
class PdDataFrame(AbstractBackend): pass
# Register the concrete class each marker stands in for, so isinstance checks
# work without importing the heavy libraries eagerly.
SqlaEngine.register_backend("sqlalchemy.engine", "Connectable")
PlDataFrame.register_backend("polars", "DataFrame")
PdDataFrame.register_backend("pandas", "DataFrame")
def _tbl_sqla(src: SqlaEngine, table_name, columns=None):
    """Create a LazyTbl from a SQLAlchemy engine or connection.

    NOTE(review): for duckdb, a DataFrame passed as `columns` is registered as
    the table itself rather than treated as a list of column names — confirm
    this double duty of the `columns` argument against the caller docs.
    """
    from siuba.sql import LazyTbl
    # TODO: once we subclass LazyTbl per dialect (e.g. duckdb), we can move out
    # this dialect specific logic.
    if src.dialect.name == "duckdb" and isinstance(columns, (PdDataFrame, PlDataFrame)):
        with src.begin() as conn:
            # register the in-memory frame as a duckdb relation named table_name
            conn.exec_driver_sql("register", (table_name, columns))
        return LazyTbl(src, table_name)
    return LazyTbl(src, table_name, columns=columns)
159,010 | from functools import singledispatch, wraps
from pandas import DataFrame
import pandas as pd
import numpy as np
import warnings
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
from siuba.siu import (
Symbolic, Call, strip_symbolic, create_sym_call,
MetaArg, BinaryOp, _SliceOpIndex, Lazy,
singledispatch2, pipe_no_args, Pipeable, pipe
)
from .tidyselect import var_create, var_select, Var
from siuba.siu import DictCall
from collections.abc import Mapping
from functools import partial
from pandas.core.reshape.merge import _MergeOperation
from pandas.core.reshape.util import cartesian_product
from functools import reduce
from siuba.siu._databackend import SqlaEngine, PlDataFrame, PdDataFrame
def tbl(src, *args, **kwargs):
    """Create a table from a data source.

    Parameters
    ----------
    src:
        A pandas DataFrame, SQLAlchemy Engine, or other registered object.
    *args, **kwargs:
        Additional arguments passed to the individual implementations.

    Returns
    -------
    The source itself for objects that already behave as tables (this default
    implementation), or a backend-specific table object for registered types.

    Examples
    --------
    >>> from siuba.data import cars

    A pandas DataFrame is already a table of data, so trivially returns itself.

    >>> tbl(cars) is cars
    True

    tbl() is useful for quickly connecting to a SQL database table.

    >>> from sqlalchemy import create_engine
    >>> from siuba import count, show_query, collect
    >>> engine = create_engine("sqlite:///:memory:")
    >>> _rows = cars.to_sql("cars", engine, index=False)
    >>> tbl_sql_cars = tbl(engine, "cars")
    >>> tbl_sql_cars >> count()
    # Source: lazy query
    # DB Conn: Engine(sqlite:///:memory:)
    # Preview:
        n
    0  32
    # .. may have more rows

    When using duckdb, pass a DataFrame as the third argument to operate directly on it:

    >>> engine2 = create_engine("duckdb:///:memory:")
    >>> tbl_cars_duck = tbl(engine, "cars", cars.head(2))
    >>> tbl_cars_duck >> count() >> collect()
        n
    0  32

    You can analyze a mock table

    >>> from sqlalchemy import create_mock_engine
    >>> from siuba import _
    >>> mock_engine = create_mock_engine("postgresql:///", lambda *args, **kwargs: None)
    >>> tbl_mock = tbl(mock_engine, "some_table", columns = ["a", "b", "c"])
    >>> q = tbl_mock >> count(_.a) >> show_query()  # doctest: +NORMALIZE_WHITESPACE
    SELECT some_table_1.a, count(*) AS n
    FROM some_table AS some_table_1 GROUP BY some_table_1.a ORDER BY n DESC
    """
    # default: the source (e.g. a DataFrame) is already a table; pass through
    return src
# NOTE(review): duplicate marker-class definition (also defined earlier in
# this chunk); AbstractBackend is not visible here — confirm its import.
class SqlaEngine(AbstractBackend): pass
SqlaEngine.register_backend("sqlalchemy.engine", "Connectable")
def _tbl(__data, *args, **kwargs):
# sqlalchemy v2 does not have MockConnection inherit from anything
# even though it is a mock :/.
try:
from sqlalchemy.engine.mock import MockConnection
if isinstance(__data, MockConnection):
return tbl.dispatch(SqlaEngine)(__data, *args, **kwargs)
except ImportError:
pass
raise NotImplementedError(
f"Unsupported type {type(__data)}. "
"Note that tbl currently can be used at the start of a pipe, but not as "
"a step in the pipe."
) | null |
159,011 | from siuba.siu import FunctionLookupBound
from .groupby import GroupByAgg, SeriesGroupBy, broadcast_group_elements, regroup
import pandas as pd
def not_implemented(name, *args, **kwargs):
    # Placeholder translator: returns a sentinel object that raises a helpful
    # error if the named operation is ever actually used.
    return FunctionLookupBound(name)
159,012 | from siuba.siu import FunctionLookupBound
from .groupby import GroupByAgg, SeriesGroupBy, broadcast_group_elements, regroup
import pandas as pd
def _validate_data_args(x, **kwargs):
# Sanity checks that only SeriesGroupBy passed for ops
# could be removed once this sees more use
if not isinstance(x, SeriesGroupBy):
raise TypeError("First data argument must be a grouped Series object")
for name, other_data in kwargs.items():
if isinstance(other_data, pd.Series):
raise TypeError("{} may not be a Series.".format(name))
def _apply_grouped_method(ser, name, is_property, accessor, args, kwargs):
if accessor:
method = getattr(getattr(ser, accessor), name)
else:
method = getattr(ser, name)
res = method(*args, **kwargs) if not is_property else method
return res
def regroup(groupby, res):
    """Return an instance of type(groupby) from res.

    Base implementation: no handler registered for this group-by type.
    """
    message = "Not implemented for group by class: %s" % type(groupby)
    raise TypeError(message)
def method_el_op(name, is_property, accessor):
    """Build an elementwise (1-argument) grouped translator for method `name`."""
    def translator(__ser: SeriesGroupBy, *args, **kwargs) -> SeriesGroupBy:
        _validate_data_args(__ser)
        # elementwise ops act on the underlying Series, then are regrouped
        result = _apply_grouped_method(__ser.obj, name, is_property, accessor, args, kwargs)
        return regroup(__ser, result)

    translator.__name__ = translator.__qualname__ = name
    return translator
159,013 | from siuba.siu import FunctionLookupBound
from .groupby import GroupByAgg, SeriesGroupBy, broadcast_group_elements, regroup
import pandas as pd
def _validate_data_args(x, **kwargs):
def regroup(groupby, res):
def broadcast_group_elements(x, y):
def method_el_op2(name, is_property, accessor):
    """Build an elementwise (2-argument) grouped translator for method `name`."""
    def translator(x: SeriesGroupBy, y: SeriesGroupBy, *args, **kwargs) -> SeriesGroupBy:
        _validate_data_args(x, y = y)
        # align the two grouped series, then apply the method pairwise
        left, right, template = broadcast_group_elements(x, y)
        result = getattr(left, name)(right, *args, **kwargs)
        return regroup(template, result)

    translator.__name__ = translator.__qualname__ = name
    return translator
159,014 | from siuba.siu import FunctionLookupBound
from .groupby import GroupByAgg, SeriesGroupBy, broadcast_group_elements, regroup
import pandas as pd
def _validate_data_args(x, **kwargs):
# Sanity checks that only SeriesGroupBy passed for ops
# could be removed once this sees more use
if not isinstance(x, SeriesGroupBy):
raise TypeError("First data argument must be a grouped Series object")
for name, other_data in kwargs.items():
if isinstance(other_data, pd.Series):
raise TypeError("{} may not be a Series.".format(name))
def _apply_grouped_method(ser, name, is_property, accessor, args, kwargs):
if accessor:
method = getattr(getattr(ser, accessor), name)
else:
method = getattr(ser, name)
res = method(*args, **kwargs) if not is_property else method
return res
def regroup(groupby, res):
"""Return an instance of type(groupby) from res."""
raise TypeError("Not implemented for group by class: %s"% type(groupby))
def method_win_op(name, is_property, accessor):
    """Build a windowed grouped translator: apply `name` on the grouped object."""
    def translator(__ser: SeriesGroupBy, *args, **kwargs) -> SeriesGroupBy:
        _validate_data_args(__ser)
        # window ops act on the grouped object itself (not the raw Series)
        result = _apply_grouped_method(__ser, name, is_property, accessor, args, kwargs)
        return regroup(__ser, result)

    translator.__name__ = translator.__qualname__ = name
    return translator
159,015 | from siuba.siu import FunctionLookupBound
from .groupby import GroupByAgg, SeriesGroupBy, broadcast_group_elements, regroup
import pandas as pd
def _validate_data_args(x, **kwargs):
# Sanity checks that only SeriesGroupBy passed for ops
# could be removed once this sees more use
if not isinstance(x, SeriesGroupBy):
raise TypeError("First data argument must be a grouped Series object")
for name, other_data in kwargs.items():
if isinstance(other_data, pd.Series):
raise TypeError("{} may not be a Series.".format(name))
def _apply_grouped_method(ser, name, is_property, accessor, args, kwargs):
if accessor:
method = getattr(getattr(ser, accessor), name)
else:
method = getattr(ser, name)
res = method(*args, **kwargs) if not is_property else method
return res
class GroupByAgg(SeriesGroupBy):
    """Class for representing the result of a grouped Series aggregation.

    Imagine that you are trying to add two grouped Series, where one might be
    an aggregate:

    >>> from siuba.data import mtcars
    >>> g_cyl = mtcars.groupby("cyl")
    >>> avg_hp_raw = g_cyl.hp.mean()
    >>> # how can we do: g_cyl.hp - avg_hp_raw ?

    This class is designed to allows operations like minute (``-``) to work under 3 cases:

    * original - aggregate: broadcast to original length; return SeriesGroupBy.
    * aggregate - aggregate: no need to broadcast; return GroupByAgg.
    * unary method over aggregate: no need to broadcast; return GroupByAgg.

    Due to complexities in how pandas creates grouped objects, the easiest way
    to create this class is to use its ``from_result`` class method:

    >>> avg_hp = GroupByAgg.from_result(avg_hp_raw, g_cyl.hp)

    Below are examples of the first two cases:

    >>> # avg_hp plus hp ----
    >>> x, y, grp = broadcast_group_elements(avg_hp, g_cyl.hp)
    >>> res1 = regroup(grp, x + y)  # SeriesGroupBy

    >>> # avg_hp plus avg_hp ----
    >>> x, y, grp = broadcast_group_elements(avg_hp, avg_hp)
    >>> res2 = regroup(grp, x + y)  # GroupByAgg

    You can use `is_compatible` to check whether broadcasting will work:

    >>> is_compatible(g_cyl.hp, avg_hp)
    True

    Lastly, this is a subclass of SeriesGroupBy, where each row is its
    own group, so unary methods can be performed without issue.:

    >>> res3 = regroup(grp, avg_hp.fillna(1))  # GroupByAgg
    >>> ser = broadcast_agg(res3)  # ungroup, same len as original data
    """
    def __init__(self, *args, orig_grouper, orig_obj, **kwargs):
        # Stash the grouper/obj of the *original* (pre-aggregation) data, so
        # per-group results can later be broadcast back to the original length.
        self._orig_grouper = orig_grouper
        self._orig_obj = orig_obj
        super().__init__(*args, **kwargs)
    # NOTE(review): this is invoked as GroupByAgg.from_result(...) in the
    # docstring examples, which requires @classmethod — the decorator appears
    # to have been dropped in this extract. Confirm against upstream source.
    def from_result(cls, result: Series, src_groupby: SeriesGroupBy):
        """GroupByAgg class constructor.
        """
        if not isinstance(result, Series):
            raise TypeError("requires pandas Series")
        # Series.groupby is hard-coded to produce a SeriesGroupBy,
        # but its signature is very large, so use inspect to bind on it.
        sig = inspect.signature(result.groupby)
        bound = sig.bind(by = result.index)
        # fall back to src_groupby's own grouper/obj when it is not already a
        # GroupByAgg carrying originals from an earlier aggregation
        orig_grouper = getattr(src_groupby, "_orig_grouper", src_groupby.grouper)
        orig_obj = getattr(src_groupby, "_orig_obj", src_groupby.obj)
        return cls(
                result,
                *bound.args, **bound.kwargs,
                orig_grouper = orig_grouper,
                orig_obj = orig_obj,
                )
def method_win_op_agg_result(name, is_property, accessor):
    """Build a grouped translator whose result is wrapped as a GroupByAgg."""
    def translator(__ser, *args, **kwargs):
        _validate_data_args(__ser)
        outcome = _apply_grouped_method(__ser, name, is_property, accessor, args, kwargs)
        # wrap as GroupByAgg so the result can later be broadcast back
        return GroupByAgg.from_result(outcome, __ser)

    translator.__name__ = translator.__qualname__ = name
    return translator
159,016 | from siuba.siu import FunctionLookupBound
from .groupby import GroupByAgg, SeriesGroupBy, broadcast_group_elements, regroup
import pandas as pd
def _validate_data_args(x, **kwargs):
# Sanity checks that only SeriesGroupBy passed for ops
# could be removed once this sees more use
if not isinstance(x, SeriesGroupBy):
raise TypeError("First data argument must be a grouped Series object")
for name, other_data in kwargs.items():
if isinstance(other_data, pd.Series):
raise TypeError("{} may not be a Series.".format(name))
class GroupByAgg(SeriesGroupBy):
"""Class for representing the result of a grouped Series aggregation.
Imagine that you are trying to add two grouped Series, where one might be
an aggregate:
>>> from siuba.data import mtcars
>>> g_cyl = mtcars.groupby("cyl")
>>> avg_hp_raw = g_cyl.hp.mean()
>>> # how can we do: g_cyl.hp - avg_hp_raw ?
This class is designed to allows operations like minute (``-``) to work under 3 cases:
* original - aggregate: broadcast to original length; return SeriesGroupBy.
* aggregate - aggregate: no need to broadcast; return GroupByAgg.
* unary method over aggregate: no need to broadcast; return GroupByAgg.
Due to complexities in how pandas creates grouped objects, the easiest way
to create this class is to use its ``from_result`` class method:
>>> avg_hp = GroupByAgg.from_result(avg_hp_raw, g_cyl.hp)
Below are examples of the first two cases:
>>> # avg_hp plus hp ----
>>> x, y, grp = broadcast_group_elements(avg_hp, g_cyl.hp)
>>> res1 = regroup(grp, x + y) # SeriesGroupBy
>>> # avg_hp plus avg_hp ----
>>> x, y, grp = broadcast_group_elements(avg_hp, avg_hp)
>>> res2 = regroup(grp, x + y) # GroupByAgg
You can use `is_compatible` to check whether broadcasting will work:
>>> is_compatible(g_cyl.hp, avg_hp)
True
Lastly, this is a subclass of SeriesGroupBy, where each row is its
own group, so unary methods can be performed without issue.:
>>> res3 = regroup(grp, avg_hp.fillna(1)) # GroupByAgg
>>> ser = broadcast_agg(res3) # ungroup, same len as original data
"""
def __init__(self, *args, orig_grouper, orig_obj, **kwargs):
self._orig_grouper = orig_grouper
self._orig_obj = orig_obj
super().__init__(*args, **kwargs)
def from_result(cls, result: Series, src_groupby: SeriesGroupBy):
"""GroupByAgg class constructor.
"""
if not isinstance(result, Series):
raise TypeError("requires pandas Series")
# Series.groupby is hard-coded to produce a SeriesGroupBy,
# but its signature is very large, so use inspect to bind on it.
sig = inspect.signature(result.groupby)
bound = sig.bind(by = result.index)
orig_grouper = getattr(src_groupby, "_orig_grouper", src_groupby.grouper)
orig_obj = getattr(src_groupby, "_orig_obj", src_groupby.obj)
return cls(
result,
*bound.args, **bound.kwargs,
orig_grouper = orig_grouper,
orig_obj = orig_obj,
)
def method_agg_singleton(name, is_property, accessor):
    """Build a grouped translator for singleton attributes (e.g. dtype).

    The attribute is read once from the underlying Series and replicated per
    group as a GroupByAgg, as if it were the result of an aggregation.
    """
    def f(__ser: SeriesGroupBy, *args, **kwargs) -> SeriesGroupBy:
        _validate_data_args(__ser)

        if accessor is not None:
            # fall back to the Series itself if the accessor is absent
            op_function = getattr(getattr(__ser.obj, accessor, __ser.obj), name)
        else:
            op_function = getattr(__ser.obj, name)

        # cast singleton result to be GroupByAgg, as if we did an aggregation
        # could create a class to for grouped singletons, but seems like overkill
        # for now
        singleton = op_function if is_property else op_function()

        # note that when the value is None, need to explicitly make dtype object
        dtype = 'object' if singleton is None else None
        res = pd.Series(singleton, index = __ser.grouper.levels, dtype = dtype)

        return GroupByAgg.from_result(res, __ser)

    # consistency fix: sibling factories (method_el_op, method_win_op, etc.)
    # all name the returned translator after the operation.
    f.__name__ = f.__qualname__ = name
    return f
159,017 | from siuba.siu import FunctionLookupBound
from .groupby import GroupByAgg, SeriesGroupBy, broadcast_group_elements, regroup
import pandas as pd
# Maps (operation kind, arity) -> factory that builds a grouped (SeriesGroupBy)
# implementation of the operation. Kinds that are unsupported (or not yet
# triaged: Todo/Maydo/Wontdo/None) map to not_implemented, which produces a
# FunctionLookupBound sentinel instead of a working translator.
GROUP_METHODS = {
    ("Elwise", 1): method_el_op,
    ("Elwise", 2): method_el_op2,
    ("Agg", 1): method_agg_op,
    ("Agg", 2): not_implemented,
    ("Window", 1): method_win_op,
    ("Window", 2): method_win_op,
    ("Singleton", 1): method_agg_singleton,
    ("Todo", 1): not_implemented,
    ("Maydo", 1): not_implemented,
    ("Wontdo", 1): not_implemented,
    ("Todo", 2): not_implemented,
    ("Maydo", 2): not_implemented,
    ("Wontdo", 2): not_implemented,
    (None, 1): not_implemented,
    (None, 2): not_implemented,
}
def forward_method(dispatcher, constructor = None, cls = SeriesGroupBy):
    """Register a grouped implementation of dispatcher's operation on cls.

    When no constructor is given, one is selected from GROUP_METHODS using
    the operation's (kind, arity).
    """
    op = dispatcher.operation

    # normalize kind to title case (e.g. "elwise" -> "Elwise")
    kind = op.kind.title() if op.kind is not None else None
    key = (kind, op.arity)
    if constructor is None:
        constructor = GROUP_METHODS[key]

    concrete = constructor(
        name = op.name,
        is_property = op.is_property,
        accessor = op.accessor
    )
    return dispatcher.register(cls, concrete)
159,018 | import inspect
from functools import singledispatch
from pandas import Series
from pandas.api.types import is_scalar
from pandas.core.groupby import SeriesGroupBy, DataFrameGroupBy
def _(groupby, res):
    # NOTE(review): looks like a @regroup.register implementation (decorator
    # apparently lost in extraction) for classes exposing from_result, e.g.
    # GroupByAgg — rebuilds a grouped object of the same type from res.
    # Confirm against upstream.
    return groupby.from_result(res, groupby)
def _(groupby, res):
    # NOTE(review): presumably the @regroup.register(SeriesGroupBy) case.
    # TODO: this will always return SeriesGroupBy, even if groupby is a subclass
    return res.groupby(groupby.grouper)
The provided code snippet includes necessary dependencies for implementing the `_broadcast_agg_gba` function. Write a Python function `def _broadcast_agg_gba(groupby)` to solve the following problem:
>>> import pandas as pd >>> gdf = pd.DataFrame({"g": ['a','a','b'], "x": [4,5,6]}).groupby("g") >>> agg = GroupByAgg.from_result(gdf.x.mean(), gdf.x) >>> len(broadcast_agg(agg)) 3
Here is the function:
def _broadcast_agg_gba(groupby):
    """
    Broadcast a GroupByAgg's one-value-per-group result back to original length.

    >>> import pandas as pd
    >>> gdf = pd.DataFrame({"g": ['a','a','b'], "x": [4,5,6]}).groupby("g")
    >>> agg = GroupByAgg.from_result(gdf.x.mean(), gdf.x)
    >>> len(broadcast_agg(agg))
    3
    """
    src = groupby._orig_obj
    # group_info yields (codes mapping each original row to its group id, ...)
    ids, _, ngroup = groupby._orig_grouper.group_info
    # NOTE(review): take_1d is not defined in this chunk — presumably a pandas
    # take helper imported elsewhere in the module; confirm. It gathers the
    # per-group value for every original row.
    out = take_1d(groupby.obj._values, ids)

    # Note: reductions like siuba.dply.vector.n(_) map DataFrameGroupBy -> GroupByAgg,
    # so the underlying object is a DataFrame, and does not have a .name attribute.
    return Series(out, index=src.index, name=getattr(src, "name", None))
159,019 | import inspect
from functools import singledispatch
from pandas import Series
from pandas.api.types import is_scalar
from pandas.core.groupby import SeriesGroupBy, DataFrameGroupBy
def _broadcast_agg_sgb(groupby):
    # A regular SeriesGroupBy already holds one value per original row, so
    # "broadcasting" is just unwrapping the underlying Series.
    return groupby.obj
159,020 | from siuba.siu import CallTreeLocal, FunctionLookupError, ExecutionValidatorVisitor
from .groupby import SeriesGroupBy
from .translate import (
forward_method,
not_implemented,
method_agg_op,
method_win_op_agg_result
)
from siuba.experimental.pd_groups.groupby import SeriesGroupBy, GroupByAgg, broadcast_agg, is_compatible
from siuba.ops import ALL_OPS
from siuba import ops
from .translate import GroupByAgg, SeriesGroupBy
from siuba.ops.generics import ALL_PROPERTIES, ALL_ACCESSORS
from siuba.siu import Call, singledispatch2
from siuba.dply.verbs import mutate, filter, summarize, DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
import warnings
def register_method(ns, op_name, f, is_property = False, accessor = None):
    """Register a SeriesGroupBy implementation on the generic `ns[op_name]`.

    The implementation is built by calling factory `f` with the operation
    name, property flag, and accessor.
    """
    implementation = f(op_name, is_property, accessor)
    return ns[op_name].register(SeriesGroupBy, implementation)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.