| id (int64, 0–190k) | prompt (string, length 21–13.4M) | docstring (string, length 1–12k, nullable ⌀) |
|---|---|---|
166,712 | from __future__ import annotations
import abc
import asyncio
import functools
import sys
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from typing import ParamSpec, TypeVar
import pathway.internals.expression as expr
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.udfs.caches import CacheStrategy, with_cache_strategy
from pathway.internals.udfs.retries import AsyncRetryStrategy, with_retry_strategy
from pathway.internals.udfs.utils import coerce_async
class Executor(abc.ABC):
    """
    Base class for executors of Pathway UDFs (user-defined functions).
    """

    ...

    # Wrap the raw UDF with executor-specific machinery (noop for sync).
    def _wrap(self, fun: Callable) -> Callable: ...
    # Which ApplyExpression subclass drives evaluation of the wrapped UDF.
    def _apply_expression_type(self) -> type[expr.ApplyExpression]: ...
class SyncExecutor(Executor):
    """Executor that runs UDFs synchronously, one call at a time."""

    def _wrap(self, fun: Callable) -> Callable:
        # Synchronous execution needs no wrapping: return the function as-is.
        return fun

    def _apply_expression_type(self) -> type[expr.ApplyExpression]:
        # Plain (non-async) apply expression evaluates the UDF.
        return expr.ApplyExpression
The provided code snippet includes necessary dependencies for implementing the `sync_executor` function. Write a Python function `def sync_executor() -> Executor` to solve the following problem:
Returns the synchronous executor for Pathway UDFs. Example: >>> import pathway as pw >>> t = pw.debug.table_from_markdown( ... ''' ... a | b ... 1 | 2 ... 3 | 4 ... 5 | 6 ... ''' ... ) >>> >>> @pw.udf(executor=pw.udfs.sync_executor()) ... def mul(a: int, b: int) -> int: ... return a * b ... >>> result = t.select(res=mul(pw.this.a, pw.this.b)) >>> pw.debug.compute_and_print(result, include_id=False) res 2 12 30
Here is the function:
def sync_executor() -> Executor:
    """
    Returns the synchronous executor for Pathway UDFs.

    Example:

    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown(
    ...     '''
    ...     a | b
    ...     1 | 2
    ...     3 | 4
    ...     5 | 6
    ... '''
    ... )
    >>>
    >>> @pw.udf(executor=pw.udfs.sync_executor())
    ... def mul(a: int, b: int) -> int:
    ...     return a * b
    ...
    >>> result = t.select(res=mul(pw.this.a, pw.this.b))
    >>> pw.debug.compute_and_print(result, include_id=False)
    res
    2
    12
    30
    """
    # SyncExecutor is stateless, so a fresh instance carries no configuration.
    return SyncExecutor()
166,713 | from __future__ import annotations
import abc
import asyncio
import functools
import sys
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from typing import ParamSpec, TypeVar
import pathway.internals.expression as expr
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.udfs.caches import CacheStrategy, with_cache_strategy
from pathway.internals.udfs.retries import AsyncRetryStrategy, with_retry_strategy
from pathway.internals.udfs.utils import coerce_async
class Executor(abc.ABC):
    """
    Base class for executors of Pathway UDFs (user-defined functions).
    """

    ...

    # Wrap the raw UDF with executor-specific machinery (concurrency, retries, ...).
    def _wrap(self, fun: Callable) -> Callable: ...
    # Which ApplyExpression subclass drives evaluation of the wrapped UDF.
    def _apply_expression_type(self) -> type[expr.ApplyExpression]: ...
@dataclass
class AsyncExecutor(Executor):
    """Executor running UDFs concurrently within a single batch.

    Attributes:
        capacity: Maximum number of concurrent calls; None means unlimited.
        timeout: Per-attempt time limit in seconds; None means no limit.
        retry_strategy: Retry/backoff policy; None means no retries.
    """

    # Bare annotated fields with defaults require a generated __init__:
    # async_executor() constructs this class with keyword arguments, which
    # fails without @dataclass (the decorator was evidently lost upstream).
    capacity: int | None = None
    timeout: float | None = None
    retry_strategy: AsyncRetryStrategy | None = None

    def _wrap(self, fun: Callable) -> Callable:
        # Layer concurrency limit, timeout and retries around the UDF.
        return async_options(
            capacity=self.capacity,
            timeout=self.timeout,
            retry_strategy=self.retry_strategy,
        )(fun)

    def _apply_expression_type(self) -> type[expr.ApplyExpression]:
        return expr.AsyncApplyExpression
class AsyncRetryStrategy(abc.ABC):
    """Class representing strategy of delays or backoffs for the retries."""

    # Subclasses call ``func`` one or more times, sleeping/backing off between
    # attempts, and return the first successful result.
    async def invoke(
        self, func: Callable[P, Awaitable[T]], /, *args: P.args, **kwargs: P.kwargs
    ) -> T: ...
The provided code snippet includes necessary dependencies for implementing the `async_executor` function. Write a Python function `def async_executor( *, capacity: int | None = None, timeout: float | None = None, retry_strategy: AsyncRetryStrategy | None = None, ) -> Executor` to solve the following problem:
Returns the asynchronous executor for Pathway UDFs. Can be applied to a regular or an asynchronous function. If applied to a regular function, it is executed in ``asyncio`` loop's ``run_in_executor``. The asynchronous UDFs are asynchronous *within a single batch* with batch defined as all entries with equal processing times assigned. The UDFs are started for all entries in the batch and the execution of further batches is blocked until all UDFs for a given batch have finished. Args: capacity: Maximum number of concurrent operations allowed. Defaults to None, indicating no specific limit. timeout: Maximum time (in seconds) to wait for the function result. When both ``timeout`` and ``retry_strategy`` are used, timeout applies to a single retry. Defaults to None, indicating no time limit. retry_strategy: Strategy for handling retries in case of failures. Defaults to None, meaning no retries. Example: >>> import pathway as pw >>> import asyncio >>> import time >>> t = pw.debug.table_from_markdown( ... ''' ... a | b ... 1 | 2 ... 3 | 4 ... 5 | 6 ... ''' ... ) >>> >>> @pw.udf( ... executor=pw.udfs.async_executor( ... capacity=2, retry_strategy=pw.udfs.ExponentialBackoffRetryStrategy() ... ) ... ) ... async def long_running_async_function(a: int, b: int) -> int: ... await asyncio.sleep(0.1) ... return a * b ... >>> result_1 = t.select(res=long_running_async_function(pw.this.a, pw.this.b)) >>> pw.debug.compute_and_print(result_1, include_id=False) res 2 12 30 >>> >>> @pw.udf(executor=pw.udfs.async_executor()) ... def long_running_function(a: int, b: int) -> int: ... time.sleep(0.1) ... return a * b ... >>> result_2 = t.select(res=long_running_function(pw.this.a, pw.this.b)) >>> pw.debug.compute_and_print(result_2, include_id=False) res 2 12 30
Here is the function:
def async_executor(
    *,
    capacity: int | None = None,
    timeout: float | None = None,
    retry_strategy: AsyncRetryStrategy | None = None,
) -> Executor:
    """
    Returns the asynchronous executor for Pathway UDFs.

    Can be applied to a regular or an asynchronous function. If applied to a regular
    function, it is executed in ``asyncio`` loop's ``run_in_executor``.

    The asynchronous UDFs are asynchronous *within a single batch* with batch defined as
    all entries with equal processing times assigned. The UDFs are started for all entries
    in the batch and the execution of further batches is blocked until all UDFs
    for a given batch have finished.

    Args:
        capacity: Maximum number of concurrent operations allowed.
            Defaults to None, indicating no specific limit.
        timeout: Maximum time (in seconds) to wait for the function result. When both
            ``timeout`` and ``retry_strategy`` are used, timeout applies to a single retry.
            Defaults to None, indicating no time limit.
        retry_strategy: Strategy for handling retries in case of failures.
            Defaults to None, meaning no retries.

    Example:

    >>> import pathway as pw
    >>> import asyncio
    >>> import time
    >>> t = pw.debug.table_from_markdown(
    ...     '''
    ...     a | b
    ...     1 | 2
    ...     3 | 4
    ...     5 | 6
    ... '''
    ... )
    >>>
    >>> @pw.udf(
    ...     executor=pw.udfs.async_executor(
    ...         capacity=2, retry_strategy=pw.udfs.ExponentialBackoffRetryStrategy()
    ...     )
    ... )
    ... async def long_running_async_function(a: int, b: int) -> int:
    ...     await asyncio.sleep(0.1)
    ...     return a * b
    ...
    >>> result_1 = t.select(res=long_running_async_function(pw.this.a, pw.this.b))
    >>> pw.debug.compute_and_print(result_1, include_id=False)
    res
    2
    12
    30
    >>>
    >>> @pw.udf(executor=pw.udfs.async_executor())
    ... def long_running_function(a: int, b: int) -> int:
    ...     time.sleep(0.1)
    ...     return a * b
    ...
    >>> result_2 = t.select(res=long_running_function(pw.this.a, pw.this.b))
    >>> pw.debug.compute_and_print(result_2, include_id=False)
    res
    2
    12
    30
    """
    # All configuration is forwarded verbatim; wrapping happens lazily in _wrap.
    return AsyncExecutor(
        capacity=capacity, timeout=timeout, retry_strategy=retry_strategy
    )
166,714 | from __future__ import annotations
import abc
import asyncio
import functools
import sys
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from typing import ParamSpec, TypeVar
import pathway.internals.expression as expr
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.udfs.caches import CacheStrategy, with_cache_strategy
from pathway.internals.udfs.retries import AsyncRetryStrategy, with_retry_strategy
from pathway.internals.udfs.utils import coerce_async
T = TypeVar("T")
P = ParamSpec("P")
def with_capacity(
    func: Callable[P, Awaitable[T]], capacity: int
) -> Callable[P, Awaitable[T]]:
    """
    Limits the number of simultaneous calls of the specified function.

    Regular function will be wrapped to run in async executor.

    Args:
        capacity: Maximum number of concurrent operations.

    Returns:
        Coroutine
    """
    func = coerce_async(func)
    # One shared semaphore gates every concurrent invocation of the wrapper.
    semaphore = asyncio.Semaphore(capacity)

    # functools.wraps preserves the wrapped UDF's __name__/__doc__ so stack
    # traces and introspection still show the user's function, not "wrapper".
    @functools.wraps(func)
    async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
        async with semaphore:
            return await func(*args, **kwargs)

    return wrapper
def with_timeout(
    func: Callable[P, Awaitable[T]], timeout: float
) -> Callable[P, Awaitable[T]]:
    """
    Limits the time spent waiting on the result of the function.

    If the time limit is exceeded, the task is canceled and an Error is raised.

    Regular function will be wrapped to run in async executor.

    Args:
        timeout: Maximum time (in seconds) to wait for the function result.

    Returns:
        Coroutine
    """
    func = coerce_async(func)

    if sys.version_info < (3, 11):
        # asyncio.timeout() is 3.11+; fall back to wait_for on older runtimes.
        @functools.wraps(func)
        async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
            return await asyncio.wait_for(func(*args, **kwargs), timeout=timeout)

    else:

        @functools.wraps(func)
        async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
            async with asyncio.timeout(timeout):
                return await func(*args, **kwargs)

    return wrapper
class CacheStrategy(abc.ABC):
    """Base class used to represent caching strategy."""

    # Wrap a coroutine function so repeated calls can be served from cache.
    def wrap_async(
        self, func: Callable[P, Awaitable[T]]
    ) -> Callable[P, Awaitable[T]]: ...
    # Wrap a plain function so repeated calls can be served from cache.
    def wrap_sync(self, func: Callable[P, T]) -> Callable[P, T]: ...
# NOTE(review): these two bare signatures look like @overload stubs whose
# decorators were lost in this extract — as written, each def merely shadows
# the previous one and both are shadowed by the implementation that follows.
# Confirm against the upstream source before relying on the typing here.
def with_cache_strategy(
    func: Callable[P, T], cache_strategy: CacheStrategy
) -> Callable[P, T]: ...
def with_cache_strategy(
    func: Callable[P, Awaitable[T]], cache_strategy: CacheStrategy
) -> Callable[P, Awaitable[T]]: ...
def with_cache_strategy(
    func: Callable[P, Awaitable[T]] | Callable[P, T], cache_strategy: CacheStrategy
):
    """
    Returns a function with applied cache strategy.

    Dispatches to ``wrap_async`` for coroutine functions and ``wrap_sync``
    for regular ones.

    Args:
        cache_strategy: Defines the caching mechanism.

    Returns:
        Callable/Coroutine
    """
    # Bug fix: the original used inspect.iscoroutinefunction, but `inspect`
    # is never imported in this module. Use asyncio's check instead, which is
    # also what coerce_async uses, keeping coroutine detection consistent.
    if asyncio.iscoroutinefunction(func):
        return cache_strategy.wrap_async(func)
    else:
        return cache_strategy.wrap_sync(func)
def with_retry_strategy(
    func: Callable[P, Awaitable[T]], retry_strategy: AsyncRetryStrategy
) -> Callable[P, Awaitable[T]]:
    """
    Returns an asynchronous function with applied retry strategy.

    Regular function will be wrapped to run in async executor.

    Args:
        retry_strategy: Defines how failures will be handled.

    Returns:
        Coroutine
    """
    coro = coerce_async(func)

    async def retried(*args: P.args, **kwargs: P.kwargs) -> T:
        # The strategy owns the call: it decides how many attempts to make
        # and how long to sleep between them.
        return await retry_strategy.invoke(coro, *args, **kwargs)

    return retried
class AsyncRetryStrategy(abc.ABC):
    """Class representing strategy of delays or backoffs for the retries."""

    # Subclasses call ``func`` one or more times, sleeping/backing off between
    # attempts, and return the first successful result.
    async def invoke(
        self, func: Callable[P, Awaitable[T]], /, *args: P.args, **kwargs: P.kwargs
    ) -> T: ...
def coerce_async(
    func: Callable[P, T] | Callable[P, Awaitable[T]]
) -> Callable[P, Awaitable[T]]:
    """
    Wraps a regular function to be executed in async executor.

    It acts as a noop if the provided function is already a coroutine.
    """
    # Coroutine functions need no adaptation — return them untouched.
    if asyncio.iscoroutinefunction(func):
        return func

    async def run_in_thread_pool(*args: P.args, **kwargs: P.kwargs) -> T:
        loop = asyncio.get_event_loop()
        assert loop.is_running(), "event loop should be running"
        # Bind the arguments now; the default executor runs the call off-loop.
        bound = functools.partial(func, *args, **kwargs)
        return await loop.run_in_executor(None, func=bound)  # type: ignore[arg-type]

    return run_in_thread_pool
The provided code snippet includes necessary dependencies for implementing the `async_options` function. Write a Python function `def async_options( capacity: int | None = None, timeout: float | None = None, retry_strategy: AsyncRetryStrategy | None = None, cache_strategy: CacheStrategy | None = None, ) -> Callable` to solve the following problem:
Decorator applying async options to a provided function. Regular function will be wrapped to run in async executor. Args: capacity: Maximum number of concurrent operations. Defaults to None, indicating no specific limit. timeout: Maximum time (in seconds) to wait for the function result. When both ``timeout`` and ``retry_strategy`` are used, timeout applies to a single retry. Defaults to None, indicating no time limit. retry_strategy: Strategy for handling retries in case of failures. Defaults to None, meaning no retries. cache_strategy: Defines the caching mechanism. If set to None and a persistency is enabled, operations will be cached using the persistence layer. Defaults to None. Returns: Coroutine
Here is the function:
def async_options(
    capacity: int | None = None,
    timeout: float | None = None,
    retry_strategy: AsyncRetryStrategy | None = None,
    cache_strategy: CacheStrategy | None = None,
) -> Callable:
    """
    Decorator applying async options to a provided function.

    Regular function will be wrapped to run in async executor.

    Args:
        capacity: Maximum number of concurrent operations.
            Defaults to None, indicating no specific limit.
        timeout: Maximum time (in seconds) to wait for the function result. When both
            ``timeout`` and ``retry_strategy`` are used, timeout applies to a single retry.
            Defaults to None, indicating no time limit.
        retry_strategy: Strategy for handling retries in case of failures.
            Defaults to None, meaning no retries.
        cache_strategy: Defines the caching mechanism. If set to None
            and a persistency is enabled, operations will be cached using the
            persistence layer. Defaults to None.

    Returns:
        Coroutine
    """

    def decorator(
        f: Callable[P, T] | Callable[P, Awaitable[T]]
    ) -> Callable[P, Awaitable[T]]:
        # Layers are applied innermost-first: timeout guards a single attempt,
        # retries wrap the timed attempt, capacity gates the retried call,
        # and the cache sits outermost.
        wrapped = coerce_async(f)
        layers = (
            (timeout, with_timeout),
            (retry_strategy, with_retry_strategy),
            (capacity, with_capacity),
            (cache_strategy, with_cache_strategy),
        )
        for option, add_layer in layers:
            if option is not None:
                wrapped = add_layer(wrapped, option)
        return wrapped

    return decorator
166,715 | from __future__ import annotations
import functools
import warnings
from collections.abc import Callable, Mapping
from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast, overload
import pathway.internals.column as clmn
import pathway.internals.expression as expr
from pathway.internals import api, dtype as dt, groupbys, thisclass, universes
from pathway.internals.api import Value
from pathway.internals.arg_handlers import (
arg_handler,
groupby_handler,
reduce_args_handler,
select_args_handler,
)
from pathway.internals.decorators import contextualized_operator
from pathway.internals.desugaring import (
RestrictUniverseDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.expression_visitor import collect_tables
from pathway.internals.helpers import SetOnceProperty, StableSet
from pathway.internals.joins import Joinable, JoinResult
from pathway.internals.operator import DebugOperator, OutputHandle
from pathway.internals.operator_input import OperatorInput
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.schema import Schema, schema_from_columns, schema_from_types
from pathway.internals.table_like import TableLike
from pathway.internals.table_slice import TableSlice
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import TypeInterpreterState
from pathway.internals.universe import Universe
class Table(
Joinable,
OperatorInput,
Generic[TSchema],
):
"""Collection of named columns over identical universes.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | dog
... 9 | Bob | dog
... 8 | Alice | cat
... 7 | Bob | dog
... ''')
>>> isinstance(t1, pw.Table)
True
"""
if TYPE_CHECKING:
from pathway.stdlib.ordered import diff # type: ignore[misc]
from pathway.stdlib.statistical import interpolate # type: ignore[misc]
from pathway.stdlib.temporal import ( # type: ignore[misc]
asof_join,
asof_join_left,
asof_join_outer,
asof_join_right,
asof_now_join,
asof_now_join_inner,
asof_now_join_left,
interval_join,
interval_join_inner,
interval_join_left,
interval_join_outer,
interval_join_right,
window_join,
window_join_inner,
window_join_left,
window_join_outer,
window_join_right,
windowby,
)
from pathway.stdlib.viz import ( # type: ignore[misc]
_repr_mimebundle_,
plot,
show,
)
# Internal state, populated in __init__ (except `_source`, which the producing
# operator assigns later through the set-once descriptor).
_columns: dict[str, clmn.Column]  # column name -> column object
_schema: type[Schema]  # schema type describing the columns
_id_column: clmn.IdColumn  # pseudo-column backing `.id`
_rowwise_context: clmn.RowwiseContext  # context for row-wise expression evaluation
_source: SetOnceProperty[OutputHandle] = SetOnceProperty()
"""Lateinit by operator."""
def __init__(
    self,
    _columns: Mapping[str, clmn.Column],
    _context: clmn.Context,
    _schema: type[Schema] | None = None,
):
    """Build a table over ``_context``'s universe from named columns.

    If ``_schema`` is not given, it is derived from the columns.
    """
    if _schema is None:
        _schema = schema_from_columns(_columns)
    super().__init__(_context)
    self._columns = dict(_columns)  # defensive copy of the caller's mapping
    self._schema = _schema
    self._id_column = _context.id_column
    # `pw.this` resolves to this table inside expressions evaluated on it.
    self._substitution = {thisclass.this: self}
    self._rowwise_context = clmn.RowwiseContext(self._id_column)
# NOTE(review): accessed as ``t1.id`` (no call) in the doctest below —
# presumably decorated with @property upstream; the decorator appears to have
# been lost in this extract. Confirm against the original source.
def id(self) -> expr.ColumnReference:
    """Get reference to pseudocolumn containing id's of a table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.select(ids = t1.id)
    >>> t2.typehints()['ids']
    <class 'pathway.engine.Pointer'>
    >>> pw.debug.compute_and_print(t2.select(test=t2.id == t2.ids), include_id=False)
    test
    True
    True
    True
    True
    """
    return expr.ColumnReference(_table=self, _column=self._id_column, _name="id")
def column_names(self):
    """Alias of :meth:`keys`: the table's column names."""
    return self.keys()

def keys(self):
    """Return a view of this table's column names."""
    return self._columns.keys()

def _get_column(self, name: str) -> clmn.Column:
    # Raises KeyError for unknown names; callers translate the exception.
    return self._columns[name]

def _ipython_key_completions_(self):
    # IPython tab-completion hook for `table["..."]` subscripting.
    return list(self.column_names())

def __dir__(self):
    # Expose column names for attribute completion alongside regular members.
    return list(super().__dir__()) + list(self.column_names())

def _C(self) -> TSchema:
    # Typed mirror of `.C` — the cast exists for static checkers only.
    # NOTE(review): likely a @property upstream; decorator lost in extract.
    return self.C  # type: ignore
# NOTE(review): the doctest reads ``t1.schema`` without parentheses —
# presumably a @property upstream; decorator lost in this extract. Confirm.
def schema(self) -> type[Schema]:
    """Get schema of the table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t1.schema
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
    >>> t1.typehints()['age']
    <class 'int'>
    """
    return self._schema
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference:
    """Resolve ``name`` to a column reference, raising ``exception_type`` if absent."""
    # Translate deprecated column names before any lookup.
    name = self._column_deprecation_rename(name)
    if name == "id":
        # `id` is a pseudocolumn, not an entry in _columns.
        return self.id
    if name not in self.keys():
        raise exception_type(f"Table has no column with name {name}.")
    return expr.ColumnReference(
        _table=self, _column=self._get_column(name), _name=name
    )
# NOTE(review): the two bare signatures below look like @overload stubs whose
# decorators were dropped in this extract; only the final def is effective.
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...
def __getitem__(self, args: list[str | expr.ColumnReference]) -> Table: ...

def __getitem__(
    self, args: str | expr.ColumnReference | list[str | expr.ColumnReference]
) -> expr.ColumnReference | Table:
    """Get columns by name.

    Warning:
        - Does not allow repetitions of columns.
        - Fails if tries to access nonexistent column.

    Args:
        names: a singe column name or list of columns names to be extracted from `self`.

    Returns:
        Table with specified columns, or column expression (if single argument given).
        Instead of column names, column references are valid here.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1[["age", "pet"]]
    >>> t2 = t1[["age", t1.pet]]
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | pet
    7 | dog
    8 | cat
    9 | dog
    10 | dog
    """
    if isinstance(args, expr.ColumnReference):
        # A reference is accepted only if it points at this table or at pw.this.
        if (args.table is not self) and not isinstance(
            args.table, thisclass.ThisMetaclass
        ):
            raise ValueError(
                "Table.__getitem__ argument has to be a ColumnReference to the same table or pw.this, or a string "
                + "(or a list of those)."
            )
        return self._get_colref_by_name(args.name, KeyError)
    elif isinstance(args, str):
        return self._get_colref_by_name(args, KeyError)
    else:
        # A list selects several columns at once, producing a new table.
        return self.select(*[self[name] for name in args])
# NOTE(review): no ``self``/``cls`` parameter — presumably a @staticmethod
# upstream whose decorator was lost in this extract. Confirm before editing.
def from_columns(
    *args: expr.ColumnReference, **kwargs: expr.ColumnReference
) -> Table:
    """Build a table from columns.

    All columns must have the same ids. Columns' names must be pairwise distinct.

    Args:
        args: List of columns.
        kwargs: Columns with their new names.

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> t2 = pw.Table.empty(foo=float, bar=float).with_universe_of(t1)
    >>> t3 = pw.Table.from_columns(t1.pet, qux=t2.foo)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | qux
    """
    all_args = cast(
        dict[str, expr.ColumnReference], combine_args_kwargs(args, kwargs)
    )
    if not all_args:
        raise ValueError("Table.from_columns() cannot have empty arguments list")
    else:
        # Use the first column's table as the base; every other column must
        # live on a provably equal universe.
        arg = next(iter(all_args.values()))
        table: Table = arg.table
        for arg in all_args.values():
            if not G.universe_solver.query_are_equal(
                table._universe, arg.table._universe
            ):
                raise ValueError(
                    "Universes of all arguments of Table.from_columns() have to be equal.\n"
                    + "Consider using Table.promise_universes_are_equal() to assert it.\n"
                    + "(However, untrue assertion might result in runtime errors.)"
                )
        return table.select(*args, **kwargs)
def concat_reindex(self, *tables: Table) -> Table:
    """Concatenate contents of several tables.

    This is similar to PySpark union. All tables must have the same schema. Each row is reindexed.

    Args:
        tables: List of tables to concatenate. All tables must have the same schema.

    Returns:
        Table: The concatenated table. It will have new, synthetic ids.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    | pet
    ...  1 | Dog
    ...  7 | Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | pet
    ...  1 | Manul
    ...  8 | Octopus
    ... ''')
    >>> t3 = t1.concat_reindex(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet
    Cat
    Dog
    Manul
    Octopus
    """
    # Reindex each input with its position as a salt so ids from different
    # inputs cannot collide, then concat over the now-disjoint universes.
    reindexed = [
        table.with_id_from(table.id, i) for i, table in enumerate([self, *tables])
    ]
    universes.promise_are_pairwise_disjoint(*reindexed)
    return Table.concat(*reindexed)
# NOTE(review): no ``self``/``cls`` parameter — presumably a @staticmethod
# upstream whose decorator was lost in this extract. Confirm before editing.
def empty(**kwargs: dt.DType) -> Table:
    """Creates an empty table with a schema specified by kwargs.

    Args:
        kwargs: Dict whose keys are column names and values are column types.

    Returns:
        Table: Created empty table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> pw.debug.compute_and_print(t1, include_id=False)
    age | pet
    """
    from pathway.internals import table_io

    ret = table_io.empty_from_schema(schema_from_types(None, **kwargs))
    # Let the universe solver know this universe provably has no rows.
    G.universe_solver.register_as_empty(ret._universe)
    return ret
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Build a new table with columns specified by kwargs.

    Output columns' names are keys(kwargs). values(kwargs) can be raw values, boxed
    values, columns. Assigning to id reindexes the table.

    Args:
        args: Column references.
        kwargs: Column expressions with their new assigned names.

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... pet
    ... Dog
    ... Cat
    ... ''')
    >>> t2 = t1.select(animal=t1.pet, desc="fluffy")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    animal | desc
    Cat    | fluffy
    Dog    | fluffy
    """
    new_columns = []
    # Positional references keep their own names; kwargs rename.
    all_args = combine_args_kwargs(args, kwargs)
    for new_name, expression in all_args.items():
        self._validate_expression(expression)
        column = self._eval(expression)
        new_columns.append((new_name, column))
    # Same universe as self: select never changes the set of rows.
    return self._with_same_universe(*new_columns)
def __add__(self, other: Table) -> Table:
    """Build a union of `self` with `other`.

    Semantics: Returns a table C, such that
        - C.columns == self.columns + other.columns
        - C.id == self.id == other.id

    Args:
        other: The other table. `self.id` must be equal `other.id` and
            `self.columns` and `other.columns` must be disjoint (or overlapping names
            are THE SAME COLUMN)

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    pet
    ... 1  Dog
    ... 7  Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    age
    ... 1   10
    ... 7    3
    ... ''')
    >>> t3 = t1 + t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | age
    Cat | 3
    Dog | 10
    """
    if not G.universe_solver.query_are_equal(self._universe, other._universe):
        raise ValueError(
            "Universes of all arguments of Table.__add__() have to be equal.\n"
            + "Consider using Table.promise_universes_are_equal() to assert it.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    # Iterating a table yields its column references; this selects all of both.
    return self.select(*self, *other)
# NOTE(review): the doctest reads ``t1.slice.without(...)`` without calling
# slice — presumably a @property upstream; decorator lost in this extract.
def slice(self) -> TableSlice:
    """Creates a collection of references to self columns.

    Supports basic column manipulation methods.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t1.slice.without("age")
    TableSlice({'owner': <table1>.owner, 'pet': <table1>.pet})
    """
    return TableSlice(dict(**self), self)
def filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    """Filter a table according to `filter_expression` condition.

    Args:
        filter_expression: `ColumnExpression` that specifies the filtering condition.

    Returns:
        Table: Result has the same schema as `self` and its ids are subset of `self.id`.

    Example:

    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ...     1         3
    ...     7         0
    ... ''')
    >>> filtered = vertices.filter(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(filtered, include_id=False)
    label | outdegree
    7     | 0
    """
    filter_type = self.eval_type(filter_expression)
    if filter_type != dt.BOOL:
        raise TypeError(
            f"Filter argument of Table.filter() has to be bool, found {filter_type}."
        )
    ret = self._filter(filter_expression)
    # If the condition is `col is not None` on a column of this table, the
    # surviving rows cannot hold None there — narrow that column's type.
    if (
        filter_col := expr.get_column_filtered_by_is_none(filter_expression)
    ) is not None and filter_col.table == self:
        name = filter_col.name
        dtype = self._columns[name].dtype
        ret = ret.update_types(**{name: dt.unoptionalize(dtype)})
    return ret
def split(
    self, split_expression: expr.ColumnExpression
) -> tuple[Table[TSchema], Table[TSchema]]:
    """Split a table according to `split_expression` condition.

    Args:
        split_expression: `ColumnExpression` that specifies the split condition.

    Returns:
        positive_table, negative_table: tuple of tables,
        with the same schemas as `self` and with ids that are subsets of `self.id`,
        and provably disjoint.

    Example:

    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ...     1         3
    ...     7         0
    ... ''')
    >>> positive, negative = vertices.split(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(positive, include_id=False)
    label | outdegree
    7     | 0
    >>> pw.debug.compute_and_print(negative, include_id=False)
    label | outdegree
    1     | 3
    """
    # Two complementary filters; record for the solver that they partition self.
    positive = self.filter(split_expression)
    negative = self.filter(~split_expression)
    universes.promise_are_pairwise_disjoint(positive, negative)
    universes.promise_are_equal(
        self, Table.concat(positive, negative)
    )  # TODO: add API method for this
    return positive, negative
def _filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
self._validate_expression(filter_expression)
filtering_column = self._eval(filter_expression)
assert self._universe == filtering_column.universe
context = clmn.FilterContext(filtering_column, self._id_column)
return self._table_with_context(context)
def _gradual_broadcast(
self,
threshold_table,
lower_column,
value_column,
upper_column,
) -> Table:
return self + self.__gradual_broadcast(
threshold_table, lower_column, value_column, upper_column
)
def __gradual_broadcast(
self,
threshold_table,
lower_column,
value_column,
upper_column,
):
context = clmn.GradualBroadcastContext(
self._id_column,
threshold_table._eval(lower_column),
threshold_table._eval(value_column),
threshold_table._eval(upper_column),
)
return Table(_columns={"apx_value": context.apx_value_column}, _context=context)
def _forget(
self,
threshold_column: expr.ColumnExpression,
time_column: expr.ColumnExpression,
mark_forgetting_records: bool,
) -> Table:
context = clmn.ForgetContext(
self._id_column,
self._eval(threshold_column),
self._eval(time_column),
mark_forgetting_records,
)
return self._table_with_context(context)
def _forget_immediately(
self,
) -> Table:
context = clmn.ForgetImmediatelyContext(self._id_column)
return self._table_with_context(context)
def _filter_out_results_of_forgetting(
self,
) -> Table:
# The output universe is a superset of input universe because forgetting entries
# are filtered out. At each point in time, the set of keys with +1 diff can be
# bigger than a set of keys with +1 diff in an input table.
context = clmn.FilterOutForgettingContext(self._id_column)
return self._table_with_context(context)
def _freeze(
self,
threshold_column: expr.ColumnExpression,
time_column: expr.ColumnExpression,
) -> Table:
context = clmn.FreezeContext(
self._id_column,
self._eval(threshold_column),
self._eval(time_column),
)
return self._table_with_context(context)
def _buffer(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    # Holds records back until their time column passes the threshold.
    return self._table_with_context(
        clmn.BufferContext(
            self._id_column,
            self._eval(threshold_column),
            self._eval(time_column),
        )
    )
def difference(self, other: Table) -> Table[TSchema]:
    r"""Restrict self universe to keys not appearing in the other table.
    Args:
        other: table with ids to remove from self.
    Returns:
        Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.difference(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    10 | Alice | 1
    """
    # Key-set subtraction only: columns pass through unchanged.
    context = clmn.DifferenceContext(
        left=self._id_column,
        right=other._id_column,
    )
    return self._table_with_context(context)
def intersect(self, *tables: Table) -> Table[TSchema]:
    """Restrict self universe to keys appearing in all of the tables.
    Args:
        tables: tables keys of which are used to restrict universe.
    Returns:
        Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.intersect(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    """
    intersecting_universes = (
        self._universe,
        *tuple(table._universe for table in tables),
    )
    universe = G.universe_solver.get_intersection(*intersecting_universes)
    # If the solver proves the intersection equals one of the inputs, a cheap
    # restriction suffices; otherwise build a full intersection context.
    if universe in intersecting_universes:
        context: clmn.Context = clmn.RestrictContext(self._id_column, universe)
    else:
        intersecting_ids = (
            self._id_column,
            *tuple(table._id_column for table in tables),
        )
        context = clmn.IntersectContext(
            intersecting_ids=intersecting_ids,
        )
    return self._table_with_context(context)
def restrict(self, other: TableLike) -> Table[TSchema]:
    """Restrict self universe to keys appearing in other.
    Args:
        other: table which universe is used to restrict universe of self.
    Returns:
        Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | cost
    ... 2 | 100
    ... 3 | 200
    ... '''
    ... )
    >>> t2.promise_universe_is_subset_of(t1)
    <pathway.Table schema={'cost': <class 'int'>}>
    >>> t3 = t1.restrict(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    """
    # The subset relation must be provable; otherwise the caller has to
    # promise it explicitly.
    if not G.universe_solver.query_is_subset(other._universe, self._universe):
        raise ValueError(
            "Table.restrict(): other universe has to be a subset of self universe."
            + "Consider using Table.promise_universe_is_subset_of() to assert it."
        )
    context = clmn.RestrictContext(self._id_column, other._universe)
    columns = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
    }
    return Table(
        _columns=columns,
        _context=context,
    )
def copy(self) -> Table[TSchema]:
    """Returns a copy of a table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.copy()
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | owner | pet
    7 | Bob | dog
    8 | Alice | cat
    9 | Bob | dog
    10 | Alice | dog
    >>> t1 is t2
    False
    """
    # A copy is a fresh Table object over the same universe and columns.
    return self._copy_as(type(self))
def _copy_as(self, table_type: type[TTable], /, **kwargs) -> TTable:
    # Re-wrap each column in the rowwise context and construct a table of the
    # requested type over the same universe; extra kwargs go to the constructor.
    wrapped_columns = {}
    for name, column in self._columns.items():
        wrapped_columns[name] = self._wrap_column_in_context(
            self._rowwise_context, column, name
        )
    return table_type(
        _columns=wrapped_columns, _context=self._rowwise_context, **kwargs
    )
def groupby(
    self,
    *args: expr.ColumnReference,
    id: expr.ColumnReference | None = None,
    sort_by: expr.ColumnReference | None = None,
    _filter_out_results_of_forgetting: bool = False,
    instance: expr.ColumnReference | None = None,
) -> groupbys.GroupedTable:
    """Groups table by columns from args.
    Note:
    Usually followed by `.reduce()` that aggregates the result and returns a table.
    Args:
        args: columns to group by.
        id: if provided, is the column used to set id's of the rows of the result
        sort_by: if provided, column values are used as sorting keys for particular reducers
        instance: optional argument describing partitioning of the data into separate instances
    Returns:
        GroupedTable: Groupby object.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.groupby(t1.pet, t1.owner).reduce(t1.owner, t1.pet, ageagg=pw.reducers.sum(t1.age))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | pet | ageagg
    Alice | cat | 8
    Alice | dog | 10
    Bob | dog | 16
    """
    # `instance` behaves like an extra (trailing) grouping column.
    if instance is not None:
        args = (*args, instance)
    # `id` is only compatible with grouping by that single column (or no
    # explicit grouping columns at all).
    if id is not None:
        if len(args) == 0:
            args = (id,)
        elif len(args) > 1:
            raise ValueError(
                "Table.groupby() cannot have id argument when grouping by multiple columns."
            )
        elif args[0]._column != id._column:
            raise ValueError(
                "Table.groupby() received id argument and is grouped by a single column,"
                + " but the arguments are not equal.\n"
                + "Consider using <table>.groupby(id=...), skipping the positional argument."
            )
    # Reject plain strings early with a helpful hint.
    for arg in args:
        if not isinstance(arg, expr.ColumnReference):
            if isinstance(arg, str):
                raise ValueError(
                    f"Expected a ColumnReference, found a string. Did you mean <table>.{arg}"
                    + f" instead of {repr(arg)}?"
                )
            else:
                raise ValueError(
                    "All Table.groupby() arguments have to be a ColumnReference."
                )
    return groupbys.GroupedTable.create(
        table=self,
        grouping_columns=args,
        set_id=id is not None,
        sort_by=sort_by,
        _filter_out_results_of_forgetting=_filter_out_results_of_forgetting,
    )
def reduce(
    self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
) -> Table:
    """Reduce a table to a single row.
    Equivalent to `self.groupby().reduce(*args, **kwargs)`.
    Args:
        args: reducer to reduce the table with
        kwargs: reducer to reduce the table with. Its key is the new name of a column.
    Returns:
        Table: Reduced table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.reduce(ageagg=pw.reducers.argmin(t1.age))
    >>> pw.debug.compute_and_print(t2, include_id=False) # doctest: +ELLIPSIS
    ageagg
    ^...
    >>> t3 = t2.select(t1.ix(t2.ageagg).age, t1.ix(t2.ageagg).pet)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | pet
    7 | dog
    """
    # Grouping by nothing collapses the whole table into one group.
    return self.groupby().reduce(*args, **kwargs)
def deduplicate(
    self,
    *,
    value: expr.ColumnExpression,
    instance: expr.ColumnExpression | None = None,
    acceptor: Callable[[T, T], bool],
    persistent_id: str | None = None,
) -> Table:
    """Deduplicates rows in `self` on `value` column using acceptor function.
    It keeps rows which where accepted by the acceptor function.
    Acceptor operates on two arguments - current value and the previously accepted value.
    Args:
        value: column expression used for deduplication.
        instance: Grouping column. For rows with different
            values in this column, deduplication will be performed separately.
            Defaults to None.
        acceptor: callback telling whether two values are different.
        persistent_id: (unstable) An identifier, under which the state of the table
            will be persisted or ``None``, if there is no need to persist the state of this table.
            When a program restarts, it restores the state for all input tables according to what
            was saved for their ``persistent_id``. This way it's possible to configure the start of
            computations from the moment they were terminated last time.
    Returns:
        Table: the result of deduplication.
    Example:
    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown(
    ... '''
    ... val | __time__
    ... 1 | 2
    ... 2 | 4
    ... 3 | 6
    ... 4 | 8
    ... '''
    ... )
    >>>
    >>> def acceptor(new_value, old_value) -> bool:
    ... return new_value >= old_value + 2
    ...
    >>>
    >>> result = table.deduplicate(value=pw.this.val, acceptor=acceptor)
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    val | __time__ | __diff__
    1 | 2 | 1
    1 | 6 | -1
    3 | 6 | 1
    >>>
    >>> table = pw.debug.table_from_markdown(
    ... '''
    ... val | instance | __time__
    ... 1 | 1 | 2
    ... 2 | 1 | 4
    ... 3 | 2 | 6
    ... 4 | 1 | 8
    ... 4 | 2 | 8
    ... 5 | 1 | 10
    ... '''
    ... )
    >>>
    >>> def acceptor(new_value, old_value) -> bool:
    ... return new_value >= old_value + 2
    ...
    >>>
    >>> result = table.deduplicate(
    ... value=pw.this.val, instance=pw.this.instance, acceptor=acceptor
    ... )
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    val | instance | __time__ | __diff__
    1 | 1 | 2 | 1
    3 | 2 | 6 | 1
    1 | 1 | 8 | -1
    4 | 1 | 8 | 1
    """
    # A constant None instance puts every row into one deduplication group.
    if instance is None:
        instance = expr.ColumnConstExpression(None)
    self._validate_expression(value)
    self._validate_expression(instance)
    value_col = self._eval(value)
    instance_col = self._eval(instance)
    context = clmn.DeduplicateContext(
        value_col,
        (instance_col,),
        acceptor,
        self._id_column,
        persistent_id,
    )
    return self._table_with_context(context)
def ix(
    self, expression: expr.ColumnExpression, *, optional: bool = False, context=None
) -> Table:
    """Reindexes the table using expression values as keys. Uses keys from context, or tries to infer
    proper context from the expression.
    If optional is True, then None in expression values result in None values in the result columns.
    Missing values in table keys result in RuntimeError.
    Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
    (latter results in returning a delayed operation, and should be only used when using `ix` inside
    join().select() or groupby().reduce() sequence).
    Returns:
        Reindexed table with the same set of columns.
    Example:
    >>> import pathway as pw
    >>> t_animals = pw.debug.table_from_markdown('''
    ... | epithet | genus
    ... 1 | upupa | epops
    ... 2 | acherontia | atropos
    ... 3 | bubo | scandiacus
    ... 4 | dynastes | hercules
    ... ''')
    >>> t_birds = pw.debug.table_from_markdown('''
    ... | desc
    ... 2 | hoopoe
    ... 4 | owl
    ... ''')
    >>> ret = t_birds.select(t_birds.desc, latin=t_animals.ix(t_birds.id).genus)
    >>> pw.debug.compute_and_print(ret, include_id=False)
    desc | latin
    hoopoe | atropos
    owl | hercules
    """
    # Context inference: a constant expression falls back to `pathway.this`;
    # an expression referencing a single table uses that table; otherwise all
    # referenced tables must share (an equal) universe.
    if context is None:
        all_tables = collect_tables(expression)
        if len(all_tables) == 0:
            context = thisclass.this
        elif all(tab == all_tables[0] for tab in all_tables):
            context = all_tables[0]
    if context is None:
        for tab in all_tables:
            if not isinstance(tab, Table):
                raise ValueError("Table expected here.")
        if len(all_tables) == 0:
            raise ValueError("Const value provided.")
        context = all_tables[0]
        for tab in all_tables:
            assert context._universe.is_equal_to(tab._universe)
    if isinstance(context, groupbys.GroupedJoinable):
        context = thisclass.this
    # With a `pw.this`-style context the operation is delayed and replayed
    # once the real table is known.
    if isinstance(context, thisclass.ThisMetaclass):
        return context._delayed_op(
            lambda table, expression: self.ix(
                expression=expression, optional=optional, context=table
            ),
            expression=expression,
            qualname=f"{self}.ix(...)",
            name="ix",
        )
    restrict_universe = RestrictUniverseDesugaring(context)
    expression = restrict_universe.eval_expression(expression)
    key_col = context.select(tmp=expression).tmp
    key_dtype = self.eval_type(key_col)
    # Only pointer-typed keys (optionally nullable when optional=True) are
    # accepted for indexing.
    if (
        optional and not dt.dtype_issubclass(key_dtype, dt.Optional(dt.POINTER))
    ) or (not optional and not isinstance(key_dtype, dt.Pointer)):
        raise TypeError(
            f"Pathway supports indexing with Pointer type only. The type used was {key_dtype}."
        )
    # Nullable keys propagate None into every result column, so the result
    # schema becomes Optional as well.
    if optional and isinstance(key_dtype, dt.Optional):
        self_ = self.update_types(
            **{name: dt.Optional(self.typehints()[name]) for name in self.keys()}
        )
    else:
        self_ = self
    return self_._ix(key_col, optional)
def _ix(
    self,
    key_expression: expr.ColumnReference,
    optional: bool,
) -> Table:
    # Look up rows of self by the pointer values held in the key column.
    return self._table_with_context(
        clmn.IxContext(key_expression._column, self._id_column, optional)
    )
def __lshift__(self, other: Table) -> Table:
    """Alias to update_cells method.
    Updates cells of `self`, breaking ties in favor of the values in `other`.
    Semantics:
    - result.columns == self.columns
    - result.id == self.id
    - conflicts are resolved preferring other's values
    Requires:
    - other.columns ⊆ self.columns
    - other.id ⊆ self.id
    Args:
        other: the other table.
    Returns:
        Table: `self` updated with cells form `other`.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 30
    ... ''')
    >>> pw.universes.promise_is_subset_of(t2, t1)
    >>> t3 = t1 << t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    """
    # _stacklevel=2 makes potential warnings point at the `<<` call site.
    return self.update_cells(other, _stacklevel=2)
def concat(self, *others: Table[TSchema]) -> Table[TSchema]:
    """Concats `self` with every `other` ∊ `others`.
    Semantics:
    - result.columns == self.columns == other.columns
    - result.id == self.id ∪ other.id
    if self.id and other.id collide, throws an exception.
    Requires:
    - other.columns == self.columns
    - self.id disjoint with other.id
    Args:
        others: the tables to concatenate with self.
    Returns:
        Table: The concatenated table. Id's of rows from original tables are preserved.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 11 | 11 | Alice | 30
    ... 12 | 12 | Tom | 40
    ... ''')
    >>> pw.universes.promise_are_pairwise_disjoint(t1, t2)
    >>> t3 = t1.concat(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 1
    11 | Alice | 30
    12 | Tom | 40
    """
    for other in others:
        if other.keys() != self.keys():
            raise ValueError(
                "columns do not match in the argument of Table.concat()"
            )
    # Unify each column's dtype to the least common ancestor across all inputs
    # before the unsafe (disjointness-checked) concatenation.
    schema = {
        key: functools.reduce(
            dt.types_lca,
            [other.schema._dtypes()[key] for other in others],
            self.schema._dtypes()[key],
        )
        for key in self.keys()
    }
    return Table._concat(
        self.cast_to_types(**schema),
        *[other.cast_to_types(**schema) for other in others],
    )
def _concat(self, *others: Table[TSchema]) -> Table[TSchema]:
    # Unsafe concatenation: key sets must be provably pairwise disjoint.
    union_ids = (self._id_column,) + tuple(other._id_column for other in others)
    id_universes = tuple(column.universe for column in union_ids)
    if not G.universe_solver.query_are_disjoint(*id_universes):
        raise ValueError(
            "Universes of the arguments of Table.concat() have to be disjoint.\n"
            + "Consider using Table.promise_universes_are_disjoint() to assert it.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    updates = tuple(
        {col_name: other._columns[col_name] for col_name in self.keys()}
        for other in others
    )
    return self._table_with_context(
        clmn.ConcatUnsafeContext(union_ids=union_ids, updates=updates)
    )
def update_cells(self, other: Table, _stacklevel: int = 1) -> Table:
    """Updates cells of `self`, breaking ties in favor of the values in `other`.
    Semantics:
    - result.columns == self.columns
    - result.id == self.id
    - conflicts are resolved preferring other's values
    Requires:
    - other.columns ⊆ self.columns
    - other.id ⊆ self.id
    Args:
        other: the other table.
        _stacklevel: internal; offsets warning locations to the caller's frame.
    Returns:
        Table: `self` updated with cells form `other`.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 1 | 10 | Alice | 30
    ... ''')
    >>> pw.universes.promise_is_subset_of(t2, t1)
    >>> t3 = t1.update_cells(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    """
    if names := (set(other.keys()) - set(self.keys())):
        raise ValueError(
            f"Columns of the argument in Table.update_cells() not present in the updated table: {list(names)}."
        )
    # Equal universes mean every cell is overwritten, which is just
    # with_columns; warn so the caller can use the cheaper operation.
    if self._universe == other._universe:
        warnings.warn(
            "Key sets of self and other in update_cells are the same."
            + " Using with_columns instead of update_cells.",
            stacklevel=_stacklevel + 4,
        )
        return self.with_columns(*(other[name] for name in other))
    # Unify dtypes of the updated columns before merging.
    schema = {
        key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
        for key in other.keys()
    }
    return Table._update_cells(
        self.cast_to_types(**schema), other.cast_to_types(**schema)
    )
def _update_cells(self, other: Table) -> Table:
    # `other` may only touch rows that already exist in self.
    if not other._universe.is_subset_of(self._universe):
        raise ValueError(
            "Universe of the argument of Table.update_cells() needs to be "
            + "a subset of the universe of the updated table.\n"
            + "Consider using Table.promise_is_subset_of() to assert this.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    updated = {name: other._columns[name] for name in other.keys()}
    return self._table_with_context(
        clmn.UpdateCellsContext(
            left=self._id_column,
            right=other._id_column,
            updates=updated,
        )
    )
def update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
    """Updates rows of `self`, breaking ties in favor for the rows in `other`.
    Semantics:
    - result.columns == self.columns == other.columns
    - result.id == self.id ∪ other.id
    Requires:
    - other.columns == self.columns
    Args:
        other: the other table.
    Returns:
        Table: `self` updated with rows form `other`.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 30
    ... 12 | 12 | Tom | 40
    ... ''')
    >>> t3 = t1.update_rows(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    12 | Tom | 40
    """
    if other.keys() != self.keys():
        raise ValueError(
            "Columns do not match between argument of Table.update_rows() and the updated table."
        )
    # If every row of self would be overwritten, the result is just `other`.
    if self._universe.is_subset_of(other._universe):
        warnings.warn(
            "Universe of self is a subset of universe of other in update_rows. Returning other.",
            stacklevel=5,
        )
        return other
    # Unify column dtypes before merging.
    schema = {
        key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
        for key in self.keys()
    }
    union_universes = (self._universe, other._universe)
    universe = G.universe_solver.get_union(*union_universes)
    # When other's keys are all contained in self, a cell update suffices;
    # otherwise the universes are truly merged.
    if universe == self._universe:
        return Table._update_cells(
            self.cast_to_types(**schema), other.cast_to_types(**schema)
        )
    else:
        return Table._update_rows(
            self.cast_to_types(**schema), other.cast_to_types(**schema)
        )
def _update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
    # Merge key sets; on id collisions the row from `other` wins.
    updated = {col_name: other._columns[col_name] for col_name in self.keys()}
    context = clmn.UpdateRowsContext(
        updates=updated,
        union_ids=(self._id_column, other._id_column),
    )
    return self._table_with_context(context)
def with_columns(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Updates columns of `self`, according to args and kwargs.
    See `table.select` specification for evaluation of args and kwargs.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | owner | pet | size
    ... 1 | Tom | 1 | 10
    ... 2 | Bob | 1 | 9
    ... 3 | Tom | 2 | 8
    ... ''')
    >>> t3 = t1.with_columns(*t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet | size
    8 | Tom | 2 | 8
    9 | Bob | 1 | 9
    10 | Tom | 1 | 10
    """
    # New columns overwrite same-named existing ones; the rest pass through.
    other = self.select(*args, **kwargs)
    columns = dict(self)
    columns.update(other)
    return self.select(**columns)
def with_id(self, new_index: expr.ColumnReference) -> Table:
    """Set new ids based on another column containing id-typed values.
    To generate ids based on arbitrary valued columns, use `with_id_from`.
    Values assigned must be row-wise unique.
    Args:
        new_index: column to be used as the new index.
    Returns:
        Table with updated ids.
    Example:
    >>> import pytest; pytest.xfail("with_id is hard to test")
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | new_id
    ... 1 | 2
    ... 2 | 3
    ... 3 | 4
    ... ''')
    >>> t3 = t1.promise_universe_is_subset_of(t2).with_id(t2.new_id)
    >>> pw.debug.compute_and_print(t3)
    age owner pet
    ^2 10 Alice 1
    ^3 9 Bob 1
    ^4 8 Alice 2
    """
    return self._with_new_index(new_index)
def with_id_from(
    self,
    *args: expr.ColumnExpression | Value,
    instance: expr.ColumnReference | None = None,
) -> Table:
    """Compute new ids based on values in columns.
    Ids computed from `columns` must be row-wise unique.
    Args:
        args: columns to be used as primary keys.
        instance: optional partitioning column mixed into the pointer.
    Returns:
        Table: `self` updated with recomputed ids.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = t1 + t1.select(old_id=t1.id)
    >>> t3 = t2.with_id_from(t2.age)
    >>> pw.debug.compute_and_print(t3) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    | age | owner | pet | old_id
    ^... | 8 | Alice | 2 | ^...
    ^... | 9 | Bob | 1 | ^...
    ^... | 10 | Alice | 1 | ^...
    >>> t4 = t3.select(t3.age, t3.owner, t3.pet, same_as_old=(t3.id == t3.old_id),
    ... same_as_new=(t3.id == t3.pointer_from(t3.age)))
    >>> pw.debug.compute_and_print(t4) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    | age | owner | pet | same_as_old | same_as_new
    ^... | 8 | Alice | 2 | False | True
    ^... | 9 | Bob | 1 | False | True
    ^... | 10 | Alice | 1 | False | True
    """
    # new_index should be a column, so a little workaround
    new_index = self.select(
        ref_column=self.pointer_from(*args, instance=instance)
    ).ref_column
    return self._with_new_index(
        new_index=new_index,
    )
def _with_new_index(
    self,
    new_index: expr.ColumnExpression,
) -> Table:
    # Reindexing is only defined for pointer-typed key expressions.
    self._validate_expression(new_index)
    key_dtype = self.eval_type(new_index)
    if not isinstance(key_dtype, dt.Pointer):
        raise TypeError(
            f"Pathway supports reindexing Tables with Pointer type only. The type used was {key_dtype}."
        )
    key_column = self._eval(new_index)
    assert self._universe == key_column.universe
    return self._table_with_context(clmn.ReindexContext(key_column))
def rename_columns(self, **kwargs: str | expr.ColumnReference) -> Table:
    """Rename columns according to kwargs.
    Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.
    Args:
        kwargs: mapping from old column names to new names.
    Returns:
        Table: `self` with columns renamed.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.rename_columns(years_old=t1.age, animal=t1.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | years_old | animal
    Alice | 8 | 2
    Alice | 10 | 1
    Bob | 9 | 1
    """
    # Normalize kwargs into a new-name -> old-name mapping, validating that
    # every source column exists.
    mapping: dict[str, str] = {}
    for new_name, old_name_col in kwargs.items():
        if isinstance(old_name_col, expr.ColumnReference):
            old_name = old_name_col.name
        else:
            old_name = old_name_col
        if old_name not in self._columns:
            raise ValueError(f"Column {old_name} does not exist in a given table.")
        mapping[new_name] = old_name
    renamed_columns = self._columns.copy()
    # Remove all source columns first so swap-like renames (a<->b) work;
    # only the old names are needed here, not the new ones.
    for old_name in mapping.values():
        renamed_columns.pop(old_name)
    for new_name, old_name in mapping.items():
        renamed_columns[new_name] = self._columns[old_name]
    # Re-wrap columns under their (possibly old) lineage names.
    columns_wrapped = {
        name: self._wrap_column_in_context(
            self._rowwise_context,
            column,
            mapping[name] if name in mapping else name,
        )
        for name, column in renamed_columns.items()
    }
    return self._with_same_universe(*columns_wrapped.items())
def rename_by_dict(
    self, names_mapping: dict[str | expr.ColumnReference, str]
) -> Table:
    """Rename columns according to a dictionary.
    Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.
    Args:
        names_mapping: mapping from old column names to new names.
    Returns:
        Table: `self` with columns renamed.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.rename_by_dict({"age": "years_old", t1.pet: "animal"})
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | years_old | animal
    Alice | 8 | 2
    Alice | 10 | 1
    Bob | 9 | 1
    """
    # Invert the mapping into rename_columns' kwargs form (new=old reference).
    return self.rename_columns(
        **{new_name: self[old_name] for old_name, new_name in names_mapping.items()}
    )
def with_prefix(self, prefix: str) -> Table:
    """Rename columns by adding prefix to each name of column.
    Args:
        prefix: string prepended to every column name.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.with_prefix("u_")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    u_age | u_owner | u_pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 1
    """
    return self.rename_by_dict({name: prefix + name for name in self.keys()})
def with_suffix(self, suffix: str) -> Table:
    """Rename columns by adding suffix to each name of column.
    Args:
        suffix: string appended to every column name.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.with_suffix("_current")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age_current | owner_current | pet_current
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 1
    """
    return self.rename_by_dict({name: name + suffix for name in self.keys()})
def rename(
    self,
    names_mapping: dict[str | expr.ColumnReference, str] | None = None,
    **kwargs: expr.ColumnExpression,
) -> Table:
    """Rename columns according either a dictionary or kwargs.
    If a mapping is provided using a dictionary, ``rename_by_dict`` will be used.
    Otherwise, ``rename_columns`` will be used with kwargs.
    Columns not in keys(kwargs) are not changed. New name of a column must not be ``id``.
    Args:
        names_mapping: mapping from old column names to new names.
        kwargs: mapping from old column names to new names.
    Returns:
        Table: `self` with columns renamed.
    """
    # The dictionary form takes precedence; kwargs are ignored when it is given.
    if names_mapping is not None:
        return self.rename_by_dict(names_mapping=names_mapping)
    return self.rename_columns(**kwargs)
def without(self, *columns: str | expr.ColumnReference) -> Table:
    """Selects all columns without named column references.
    Args:
        columns: columns to be dropped provided by `table.column_name` notation.
    Returns:
        Table: `self` without specified columns.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.without(t1.age, pw.this.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner
    Alice
    Alice
    Bob
    """
    # Accept both column references and plain string names.
    new_columns = self._columns.copy()
    for col in columns:
        if isinstance(col, expr.ColumnReference):
            new_columns.pop(col.name)
        else:
            assert isinstance(col, str)
            new_columns.pop(col)
    columns_wrapped = {
        name: self._wrap_column_in_context(self._rowwise_context, column, name)
        for name, column in new_columns.items()
    }
    return self._with_same_universe(*columns_wrapped.items())
def having(self, *indexers: expr.ColumnReference) -> Table[TSchema]:
    """Removes rows so that indexed.ix(indexer) is possible when some rows are
    missing, for each indexer in indexers."""
    # One restricted table per indexer; the result must satisfy all of them.
    restricted = [self._having(indexer) for indexer in indexers]
    if not restricted:
        return self
    first, *rest = restricted
    if not rest:
        return first
    return first.intersect(*rest)
def update_types(self, **kwargs: Any) -> Table:
    """Updates types in schema. Has no effect on the runtime."""
    unknown = [name for name in kwargs if name not in self.keys()]
    if unknown:
        raise ValueError(
            "Table.update_types() argument name has to be an existing table column name."
        )
    from pathway.internals.common import declare_type

    redeclared = {
        name: declare_type(new_type, self[name])
        for name, new_type in kwargs.items()
    }
    return self.with_columns(**redeclared)
def cast_to_types(self, **kwargs: Any) -> Table:
    """Casts columns to types."""
    unknown = [name for name in kwargs if name not in self.keys()]
    if unknown:
        raise ValueError(
            "Table.cast_to_types() argument name has to be an existing table column name."
        )
    from pathway.internals.common import cast

    converted = {
        name: cast(target_type, self[name]) for name, target_type in kwargs.items()
    }
    return self.with_columns(**converted)
def _having(self, indexer: expr.ColumnReference) -> Table[TSchema]:
    # Restrict self to the ids that appear as values of the indexer column.
    return self._table_with_context(
        clmn.HavingContext(
            orig_id_column=self._id_column, key_column=indexer._column
        )
    )
def with_universe_of(self, other: TableLike) -> Table:
    """Returns a copy of self with exactly the same universe as others.
    Semantics: Required precondition self.universe == other.universe
    Used in situations where Pathway cannot deduce equality of universes, but
    those are equal as verified during runtime.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | pet
    ... 1 | Dog
    ... 7 | Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | age
    ... 1 | 10
    ... 7 | 3
    ... 8 | 100
    ... ''')
    >>> t3 = t2.filter(pw.this.age < 30).with_universe_of(t1)
    >>> t4 = t1 + t3
    >>> pw.debug.compute_and_print(t4, include_id=False)
    pet | age
    Cat | 3
    Dog | 10
    """
    # If equality is already provable, nothing needs promising.
    if self._universe == other._universe:
        return self.copy()
    # Otherwise record the promise (checked at runtime) and rebind.
    universes.promise_are_equal(self, other)
    return self._unsafe_promise_universe(other)
def flatten(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Performs a flatmap operation on a column or expression given as a first
    argument. Datatype of this column or expression has to be iterable or Json array.
    Other columns specified in the method arguments are duplicated
    as many times as the length of the iterable.
    It is possible to get ids of source rows by using `table.id` column, e.g.
    `table.flatten(table.column_to_be_flattened, original_id = table.id)`.
    Args:
        args: the first is the column to flatten; the rest are kept columns.
        kwargs: additional kept columns, selected under new names.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | pet | age
    ... 1 | Dog | 2
    ... 7 | Cat | 5
    ... ''')
    >>> t2 = t1.flatten(t1.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    pet
    C
    D
    a
    g
    o
    t
    >>> t3 = t1.flatten(t1.pet, t1.age)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | age
    C | 5
    D | 2
    a | 5
    g | 2
    o | 2
    t | 5
    """
    # Materialize all requested columns first; the first one is flattened.
    intermediate_table = self.select(*args, **kwargs)
    all_args = combine_args_kwargs(args, kwargs)
    if not all_args:
        raise ValueError("Table.flatten() cannot have empty arguments list.")
    # dicts preserve insertion order, so the first key is the flatten target.
    flatten_name = next(iter(all_args))
    return intermediate_table._flatten(flatten_name)
def _flatten(
    self,
    flatten_name: str,
) -> Table:
    # The flattened column must be expression-backed; it drives the context.
    source_column = self._columns[flatten_name]
    assert isinstance(source_column, clmn.ColumnWithExpression)
    context = clmn.FlattenContext(
        orig_universe=self._universe,
        flatten_column=source_column,
    )
    # Every other column is duplicated per produced row; the flattened column
    # is replaced by the context's result column and stays first.
    kept_columns = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
        if name != flatten_name
    }
    return Table(
        _columns={flatten_name: context.flatten_result_column, **kept_columns},
        _context=context,
    )
def sort(
    self,
    key: expr.ColumnExpression,
    instance: expr.ColumnExpression | None = None,
) -> Table:
    """Sorts a table by the specified key.

    Args:
        key (ColumnExpression[int | float | datetime | str | bytes]):
            An expression to sort by.
        instance: An optional expression with instance. Rows are sorted within
            an instance; the resulting ``prev`` and ``next`` columns will only
            point to rows that have the same instance.

    Returns:
        pw.Table: The sorted table, containing two pointer columns, ``prev``
        and ``next``, that link each row to its neighbours in sort order.
    """
    # Plain values (including a None instance) are wrapped into a proper
    # ColumnExpression before evaluation — presumably a constant expression;
    # see ColumnExpression._wrap.
    wrapped_instance = clmn.ColumnExpression._wrap(instance)
    sorting_context = clmn.SortingContext(
        self._eval(key),
        self._eval(wrapped_instance),
    )
    return Table(
        _columns={
            "prev": sorting_context.prev_column,
            "next": sorting_context.next_column,
        },
        _context=sorting_context,
    )
def _set_source(self, source: OutputHandle):
    """Record *source* as the lineage origin of the id column, all data
    columns, and the universe — only where lineage is not already set."""
    self._source = source
    # The id column is handled first, exactly as in the per-column case.
    for lineage_name, column in (("id", self._id_column), *self._columns.items()):
        if not hasattr(column, "lineage"):
            column.lineage = clmn.ColumnLineage(name=lineage_name, source=source)
    if not hasattr(self._universe, "lineage"):
        self._universe.lineage = clmn.Lineage(source=source)
def _unsafe_promise_universe(self, other: TableLike) -> Table:
    # Rewrap this table's columns in a context that *promises* both id
    # columns describe the same universe; no runtime verification happens.
    return self._table_with_context(
        clmn.PromiseSameUniverseContext(self._id_column, other._id_column)
    )
def _validate_expression(self, expression: expr.ColumnExpression):
    """Raise ValueError if the expression references a column from a
    different universe than this table's."""
    # Dependencies are collected above any reducer boundary.
    for dep in expression._dependencies_above_reducer():
        if self._universe != dep._column.universe:
            raise ValueError(
                f"You cannot use {dep.to_column_expression()} in this context."
                + " Its universe is different than the universe of the table the method"
                + " was called on. You can use <table1>.with_universe_of(<table2>)"
                + " to assign universe of <table2> to <table1> if you're sure their"
                + " sets of keys are equal."
            )
def _wrap_column_in_context(
    self,
    context: clmn.Context,
    column: clmn.Column,
    name: str,
    lineage: clmn.Lineage | None = None,
) -> clmn.Column:
    """Contextualize column by wrapping it in expression.

    The column is turned into a self-reference expression and re-evaluated
    in ``context``, producing a column over ``context.universe``.
    """
    expression = expr.ColumnReference(_table=self, _column=column, _name=name)
    return expression._column_with_expression_cls(
        context=context,
        universe=context.universe,
        expression=expression,
        lineage=lineage,
    )
def _table_with_context(self, context: clmn.Context) -> Table:
    """Return a new Table with every column re-expressed in ``context``."""
    wrapped: dict[str, clmn.Column] = {}
    for name, column in self._columns.items():
        wrapped[name] = self._wrap_column_in_context(context, column, name)
    return Table(
        _columns=wrapped,
        _context=context,
    )
def _table_restricted_context(self) -> clmn.TableRestrictedRowwiseContext:
    # Rowwise context restricted to this table's own columns.
    # NOTE(review): call sites (e.g. _table_join) access this as an
    # attribute, so it is presumably decorated (property/cached_property)
    # elsewhere — the decorator is not visible in this view; confirm.
    return clmn.TableRestrictedRowwiseContext(self._id_column, self)
def _eval(
    self, expression: expr.ColumnExpression, context: clmn.Context | None = None
) -> clmn.ColumnWithExpression:
    """Desugar expression and wrap it in given context.

    Defaults to this table's rowwise context when none is supplied.
    """
    if context is None:
        context = self._rowwise_context
    column = expression._column_with_expression_cls(
        context=context,
        universe=context.universe,
        expression=expression,
    )
    return column
def _from_schema(cls: type[TTable], schema: type[Schema]) -> TTable:
    """Alternate constructor: a materialized table with one column per
    schema field, over a fresh universe.

    NOTE(review): takes ``cls`` — presumably a @classmethod whose decorator
    is not visible in this view; confirm against the repository.
    """
    universe = Universe()
    context = clmn.MaterializedContext(universe, schema.universe_properties)
    columns = {
        name: clmn.MaterializedColumn(
            universe,
            schema.column_properties(name),
        )
        for name in schema.column_names()
    }
    return cls(_columns=columns, _schema=schema, _context=context)
def __repr__(self) -> str:
    """Debug representation exposing the table's column typehints."""
    hints = dict(self.typehints())
    return f"<pathway.Table schema={hints}>"
def _with_same_universe(
    self,
    *columns: tuple[str, clmn.Column],
    schema: type[Schema] | None = None,
) -> Table:
    """Build a new table from ``(name, column)`` pairs, reusing this
    table's rowwise context (and therefore its universe)."""
    return Table(
        _columns=dict(columns),
        _schema=schema,
        _context=self._rowwise_context,
    )
def _sort_columns_by_other(self, other: Table):
    """Reorder this table's column mapping to match ``other``'s order."""
    assert self.keys() == other.keys()
    reordered: dict[str, clmn.Column] = {}
    for name in other.keys():
        reordered[name] = self._columns[name]
    self._columns = reordered
def _operator_dependencies(self) -> StableSet[Table]:
    # A plain table depends only on itself.
    return StableSet([self])
def debug(self, name: str):
    """Attach a named DebugOperator to the parse graph for this table and
    return the table unchanged (chainable)."""
    G.add_operator(
        lambda id: DebugOperator(name, id),
        lambda operator: operator(self),
    )
    return self
def to(self, sink: DataSink) -> None:
    """Route this table's output to the given data sink."""
    # Imported locally, presumably to avoid a circular import.
    from pathway.internals import table_io

    table_io.table_to_datasink(self, sink)
def _materialize(self, universe: Universe):
    """Copy each column into a MaterializedColumn over ``universe``,
    preserving per-column properties and this table's schema."""
    materialized: dict[str, clmn.Column] = {}
    for name, column in self._columns.items():
        materialized[name] = clmn.MaterializedColumn(universe, column.properties)
    return Table(
        _columns=materialized,
        _schema=self.schema,
        _context=clmn.MaterializedContext(universe),
    )
def pointer_from(
    self, *args: Any, optional=False, instance: expr.ColumnReference | None = None
):
    """Pseudo-random hash of its argument. Produces pointer types. Applied column-wise.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age owner pet
    ... 1 10 Alice dog
    ... 2 9 Bob dog
    ... 3 8 Alice cat
    ... 4 7 Bob dog''')
    >>> g = t1.groupby(t1.owner).reduce(refcol = t1.pointer_from(t1.owner)) # g.id == g.refcol
    >>> pw.debug.compute_and_print(g.select(test = (g.id == g.refcol)), include_id=False)
    test
    True
    True
    """
    # The instance expression is folded in as an extra hashing argument.
    if instance is not None:
        args = (*args, instance)
    # XXX verify types for the table primary_keys
    return expr.PointerExpression(self, *args, optional=optional)
def ix_ref(
    self,
    *args: expr.ColumnExpression | Value,
    optional: bool = False,
    context=None,
    instance: expr.ColumnReference | None = None,
):
    """Reindexes the table using expressions as primary keys.

    Uses keys from context, or tries to infer proper context from the
    expression. If ``optional`` is True, then ``None`` in expression values
    results in ``None`` values in the result columns. Missing values in
    table keys result in RuntimeError.

    Context can be anything that allows for ``select`` or ``reduce``, or a
    ``pathway.this`` construct (the latter returns a delayed operation and
    should only be used inside join().select() or groupby().reduce()
    sequences).

    Args:
        args: Column references forming the primary key.
        optional: propagate ``None`` key components into ``None`` results.
        context: optional evaluation context for the lookup.
        instance: optional instance column mixed into the key hash.

    Returns:
        Row: indexed row.
    """
    # ix_ref is just pointer_from (key hashing) followed by ix (lookup).
    row_pointer = self.pointer_from(*args, optional=optional, instance=instance)
    return self.ix(row_pointer, optional=optional, context=context)
def _subtables(self) -> StableSet[Table]:
    # A plain table is its own only subtable.
    return StableSet([self])
def _substitutions(
    self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
    # A plain table needs no column-reference substitutions.
    return self, {}
def typehints(self) -> Mapping[str, Any]:
    """
    Return the types of the columns as a dictionary.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t1.typehints()
    mappingproxy({'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>})
    """
    # Delegates to the schema attached to this table.
    return self.schema.typehints()
def eval_type(self, expression: expr.ColumnExpression) -> dt.DType:
    """Type-check ``expression`` in this table's rowwise context and return
    the inferred dtype."""
    interpreter = self._rowwise_context._get_type_interpreter()
    typed = interpreter.eval_expression(expression, state=TypeInterpreterState())
    return typed._dtype
def _auto_live(self) -> Table:
    """Make self automatically live in interactive mode."""
    # Local import, presumably to avoid a circular dependency.
    from pathway.internals.interactive import is_interactive_mode_enabled

    return self.live() if is_interactive_mode_enabled() else self
def live(self) -> LiveTable[TSchema]:
    """Wrap this table in an experimental LiveTable (emits a warning)."""
    from pathway.internals.interactive import LiveTable

    warnings.warn("live tables are an experimental feature", stacklevel=2)
    return LiveTable._create(self)
def groupby(
grouped: Table,
*args: expr.ColumnReference,
id: expr.ColumnReference | None = None,
sort_by: expr.ColumnReference | None = None,
_filter_out_results_of_forgetting: bool = False,
instance: expr.ColumnReference | None = None,
) -> groupbys.GroupedTable: ... | null |
166,716 | from __future__ import annotations
import functools
import warnings
from collections.abc import Callable, Mapping
from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast, overload
import pathway.internals.column as clmn
import pathway.internals.expression as expr
from pathway.internals import api, dtype as dt, groupbys, thisclass, universes
from pathway.internals.api import Value
from pathway.internals.arg_handlers import (
arg_handler,
groupby_handler,
reduce_args_handler,
select_args_handler,
)
from pathway.internals.decorators import contextualized_operator
from pathway.internals.desugaring import (
RestrictUniverseDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.expression_visitor import collect_tables
from pathway.internals.helpers import SetOnceProperty, StableSet
from pathway.internals.joins import Joinable, JoinResult
from pathway.internals.operator import DebugOperator, OutputHandle
from pathway.internals.operator_input import OperatorInput
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.schema import Schema, schema_from_columns, schema_from_types
from pathway.internals.table_like import TableLike
from pathway.internals.table_slice import TableSlice
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import TypeInterpreterState
from pathway.internals.universe import Universe
class JoinResult(Joinable, OperatorInput):
"""Result of a join between tables.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age owner pet
... 1 10 Alice 1
... 2 9 Bob 1
... 3 8 Alice 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... age owner pet size
... 11 10 Alice 3 M
... 12 9 Bob 1 L
... 13 8 Tom 1 XL
... ''')
>>> joinresult= t1.join(t2, t1.pet == t2.pet, t1.owner == t2.owner) # noqa: E501
>>> isinstance(joinresult, pw.JoinResult)
True
>>> pw.debug.compute_and_print(joinresult.select(t1.age, t2.size), include_id=False)
age | size
9 | L
"""
_inner_table: Table
_columns_mapping: dict[expr.InternalColRef, expr.ColumnReference]
_left_table: Table
_right_table: Table
_original_left: Joinable
_original_right: Joinable
_substitution: dict[thisclass.ThisMetaclass, Joinable]
_chained_join_desugaring: SubstitutionDesugaring
_joined_on_names: StableSet[str]
_all_colnames: StableSet[str]
_join_mode: JoinMode
def __init__(
    self,
    _context: clmn.Context,
    _inner_table: Table,
    _columns_mapping: dict[expr.InternalColRef, expr.ColumnReference],
    _left_table: Table,
    _right_table: Table,
    _original_left: Joinable,
    _original_right: Joinable,
    _substitution: dict[thisclass.ThisMetaclass, Joinable],
    _joined_on_names: StableSet[str],
    _join_mode: JoinMode,
):
    """Wire up a JoinResult around the prepared inner table and the mapping
    from original column references to inner-table columns."""
    super().__init__(_context)
    self._inner_table = _inner_table
    self._columns_mapping = _columns_mapping
    self._left_table = _left_table
    self._right_table = _right_table
    # ``pw.this`` inside this join resolves to the join result itself.
    self._substitution = {**_substitution, thisclass.this: self}
    self._joined_on_names = _joined_on_names
    self._join_mode = _join_mode
    self._original_left = _original_left
    self._original_right = _original_right
    # Self-joins must go through .copy(); the sides may not share subtables.
    assert _original_left._subtables().isdisjoint(_original_right._subtables())
    self._all_colnames = StableSet.union(
        _original_left.keys(), _original_right.keys()
    )
    self._chained_join_desugaring = SubstitutionDesugaring(self._substitutions()[1])
def _compute_universe(
    left_table: Table,
    right_table: Table,
    id: clmn.Column | None,
    mode: JoinMode,
) -> Universe:
    """Pick the universe of the join result.

    When the id column of one side is reused, the result universe is that
    side's universe (outer-preserving mode) or a subset of it (inner join);
    otherwise a fresh universe is created.

    NOTE(review): no ``self`` parameter — presumably a @staticmethod whose
    decorator is not visible in this view.
    """
    if id is left_table._id_column:
        if mode == JoinMode.LEFT:
            return left_table._universe
        elif mode == JoinMode.INNER:
            return left_table._universe.subset()
        else:
            raise KeyError("Cannot assign id's for this join type.")
    elif id is right_table._id_column:
        if mode == JoinMode.RIGHT:
            return right_table._universe
        elif mode == JoinMode.INNER:
            return right_table._universe.subset()
        else:
            raise KeyError("Cannot assign id's for this join type.")
    else:
        assert id is None
        return Universe()
def _subtables(self) -> StableSet[Table]:
    # Union of the joinables reachable from either side of the join.
    left_subtables = self._original_left._subtables()
    right_subtables = self._original_right._subtables()
    return left_subtables | right_subtables
def keys(self):
    # Column names visible on the join result: all names from both sides,
    # minus names that clash between sides and were NOT part of the join
    # condition (those stay ambiguous and are excluded).
    common_colnames = self._original_left.keys() & self._original_right.keys()
    return self._all_colnames - (common_colnames - self._joined_on_names)
def _get_colref_by_name(
    self,
    name: str,
    exception_type,
) -> expr.ColumnReference:
    """Resolve a column name on the join result.

    Resolution order: ``id`` → inner table id; joined-on names → left side
    (inner join) or coalesced inner column (other modes); then whichever
    single side owns the name. Ambiguous or unknown names raise
    ``exception_type``.
    """
    name = self._column_deprecation_rename(name)
    if name == "id":
        return self._inner_table.id
    elif name in self._joined_on_names:
        # For an inner join both sides agree, so the left reference is used;
        # otherwise the coalesced column of the inner table is needed.
        if self._join_mode is JoinMode.INNER:
            return self._original_left[name]
        else:
            return self._inner_table[name]
    elif name in self._original_left.keys() and name in self._original_right.keys():
        raise exception_type(
            f"Column {name} appears on both left and right inputs of join."
        )
    elif name in self._original_left.keys():
        return self._original_left[name]
    elif name in self._original_right.keys():
        return self._original_right[name]
    else:
        raise exception_type(f"No column with name {name}.")
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference:
    """Look up a column of the join result by name or column reference."""
    if isinstance(args, expr.ColumnReference):
        # Only references to this join result (or pw.this) are accepted.
        assert args.table is self or args.table is thisclass.this
        name = args.name
    else:
        name = args
    return self._get_colref_by_name(name, KeyError)
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Computes result of a join.

    Args:
        args: Column references.
        kwargs: Column expressions with their new assigned names.

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age | owner | pet | size
    ... 10 | Alice | 3 | M
    ... 9 | Bob | 1 | L
    ... 8 | Tom | 1 | XL
    ... ''')
    >>> t3 = t1.join(t2, t1.pet == t2.pet, t1.owner == t2.owner).select(age=t1.age, owner_name=t2.owner, size=t2.size) # noqa: E501
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner_name | size
    9 | Bob | L
    """
    expressions: dict[str, expr.ColumnExpression] = {}
    all_args = combine_args_kwargs(args, kwargs)
    for new_name, expression in all_args.items():
        # References to either side are rewritten into inner-table columns.
        expressions[new_name] = self._chained_join_desugaring.eval_expression(
            expression
        )
    return self._inner_table.select(**expressions)
def _operator_dependencies(self) -> StableSet[Table]:
    # A join depends on every operator either of its sides depends on.
    deps = self._left_table._operator_dependencies()
    return deps | self._right_table._operator_dependencies()
def filter(self, filter_expression: expr.ColumnExpression) -> JoinResult:
    """Filters rows, keeping the ones satisfying the predicate.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age owner pet
    ... 1 10 Alice 1
    ... 2 9 Bob 1
    ... 3 8 Alice 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age owner pet size
    ... 11 10 Alice 3 M
    ... 12 9 Bob 1 L
    ... 13 8 Tom 1 XL
    ... ''')
    >>> result = t1.join(t2).filter(t1.owner == t2.owner).select(t1.age, t2.size) # noqa: E501
    >>> pw.debug.compute_and_print(result, include_id=False)
    age | size
    8 | M
    9 | L
    10 | M
    """
    # The predicate is rewritten onto inner-table columns, the inner table
    # is filtered, and the column mapping is re-pointed at the filtered table.
    desugared_filter_expression = self._chained_join_desugaring.eval_expression(
        filter_expression
    )
    inner_table = self._inner_table.filter(desugared_filter_expression)
    new_columns_mapping = {
        int_ref: inner_table[expression.name]
        for int_ref, expression in self._columns_mapping.items()
    }
    new_columns_mapping[inner_table.id._to_internal()] = inner_table.id
    context = clmn.JoinRowwiseContext.from_mapping(
        inner_table._id_column, new_columns_mapping
    )
    inner_table._rowwise_context = context
    return JoinResult(
        _context=context,
        _inner_table=inner_table,
        _columns_mapping=new_columns_mapping,
        _left_table=self._left_table,
        _right_table=self._right_table,
        _original_left=self._original_left,
        _original_right=self._original_right,
        _substitution=self._substitution,
        _joined_on_names=self._joined_on_names,
        _join_mode=self._join_mode,
    )
def groupby(
    self,
    *args: expr.ColumnReference,
    id: expr.ColumnReference | None = None,
) -> GroupedJoinResult:
    """Groups join result by columns from args.

    Note:
        Usually followed by `.reduce()` that aggregates the result and returns a table.

    Args:
        args: columns to group by.
        id: if provided, is the column used to set id's of the rows of the result

    Returns:
        GroupedJoinResult: Groupby object.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... cost owner pet
    ... 1 100 Alice 1
    ... 2 90 Bob 1
    ... 3 80 Alice 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... cost owner pet size
    ... 11 100 Alice 3 M
    ... 12 90 Bob 1 L
    ... 13 80 Tom 1 XL
    ... ''')
    >>> result = (t1.join(t2, t1.owner==t2.owner).groupby(pw.this.owner)
    ...     .reduce(pw.this.owner, pairs = pw.reducers.count()))
    >>> pw.debug.compute_and_print(result, include_id=False)
    owner | pairs
    Alice | 2
    Bob | 1
    """
    # Eagerly reject plain strings and other non-references with a
    # targeted hint before constructing the grouped result.
    for arg in args:
        if not isinstance(arg, expr.ColumnReference):
            if isinstance(arg, str):
                raise ValueError(
                    f"Expected a ColumnReference, found a string. Did you mean this.{arg} instead of {repr(arg)}?"
                )
            else:
                raise ValueError(
                    "In JoinResult.groupby() all arguments have to be a ColumnReference."
                )
    from pathway.internals.groupbys import GroupedJoinResult

    return GroupedJoinResult(
        _join_result=self,
        _args=args,
        _id=id,
    )
def reduce(
    self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
) -> Table:
    """Reduce a join result to a single row.

    Equivalent to `self.groupby().reduce(*args, **kwargs)`.

    Args:
        args: reducer to reduce the table with
        kwargs: reducer to reduce the table with. Its key is the new name of a column.

    Returns:
        Table: Reduced table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... cost owner pet
    ... 1 100 Alice 1
    ... 2 90 Bob 1
    ... 3 80 Alice 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... cost owner pet size
    ... 11 100 Alice 3 M
    ... 12 90 Bob 1 L
    ... 13 80 Tom 1 XL
    ... ''')
    >>> result = t1.join(t2, t1.owner==t2.owner).reduce(total_pairs = pw.reducers.count())
    >>> pw.debug.compute_and_print(result, include_id=False)
    total_pairs
    3
    """
    # Same argument validation as in groupby(): positional arguments must
    # be column references, with a helpful hint for plain strings.
    for arg in args:
        if not isinstance(arg, expr.ColumnReference):
            if isinstance(arg, str):
                raise ValueError(
                    f"Expected a ColumnReference, found a string. Did you mean this.{arg} instead of {repr(arg)}?"
                )
            else:
                raise ValueError(
                    "In JoinResult.reduce() all positional arguments have to be a ColumnReference."
                )
    return self.groupby().reduce(*args, **kwargs)
def _substitutions(
    self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
    """Return the backing inner table together with a (shallow) copy of the
    mapping from original column references to inner-table references."""
    # The original identity dict comprehension was an unidiomatic way to
    # copy the mapping; dict() produces the same shallow copy (ruff C416).
    return self._inner_table, dict(self._columns_mapping)
def _join(
    context: clmn.JoinContext, *args: expr.ColumnReference, **kwargs: Any
) -> Table:
    """Used internally to create an internal Table containing result of a join.

    NOTE(review): no ``self`` parameter — presumably a @staticmethod whose
    decorator is not visible in this view.
    """
    columns: dict[str, clmn.Column] = {}
    all_args = combine_args_kwargs(args, kwargs)
    for new_name, expression in all_args.items():
        columns[new_name] = expression._column_with_expression_cls(
            context=context,
            universe=context.universe,
            expression=expression,
        )
    # Local import, presumably to avoid a circular dependency.
    from pathway.internals.table import Table

    return Table(
        _columns=columns,
        _context=context,
    )
def _prepare_inner_table_with_mapping(
    context: clmn.JoinContext,
    original_left: Joinable,
    original_right: Joinable,
    common_column_names: StableSet[str],
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnReference]]:
    """Materialize the join's inner table and the mapping from each
    original column reference to its internally-named inner column.

    Columns from both sides are copied under synthetic ``_pw_<n>`` names;
    columns joined on by equal name are additionally exposed under their
    original name as ``coalesce(left, right)``.
    """
    left_table, left_substitutions = original_left._substitutions()
    right_table, right_substitutions = original_right._substitutions()
    # Synthetic-name counter shared by both sides.
    cnt = itertools.count(0)
    expressions: dict[str, expr.ColumnExpression] = {}
    colref_to_name_mapping: dict[expr.InternalColRef, str] = {}
    for table, subs in [
        (left_table, left_substitutions),
        (right_table, right_substitutions),
    ]:
        if len(subs) == 0:  # tables have empty subs, so set them here
            for ref in table:
                subs[ref._to_internal()] = ref
        subs_total = subs | {table.id._to_internal(): table.id}
        for int_ref, expression in subs_total.items():
            inner_name = f"_pw_{next(cnt)}"
            expressions[inner_name] = expression
            colref_to_name_mapping[int_ref] = inner_name
    from pathway.internals.common import coalesce

    for name in common_column_names:
        if name != "id":
            expressions[name] = coalesce(original_left[name], original_right[name])
    inner_table = JoinResult._join(context, **expressions)
    final_mapping = {
        colref: inner_table[name] for colref, name in colref_to_name_mapping.items()
    }
    # Coalesced joined-on columns map to themselves on the inner table.
    for name in common_column_names:
        if name != "id":
            colref = inner_table[name]
            final_mapping[colref._to_internal()] = colref
    final_mapping[inner_table.id._to_internal()] = inner_table.id
    rowwise_context = clmn.JoinRowwiseContext.from_mapping(
        inner_table._id_column, final_mapping
    )
    inner_table._rowwise_context = (
        rowwise_context  # FIXME don't set _context property of table
    )
    return (inner_table, final_mapping)
def _table_join(
    left: Joinable,
    right: Joinable,
    *on: expr.ColumnExpression,
    mode: JoinMode,
    id: expr.ColumnReference | None = None,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """Core join construction: validate the conditions, pick the result
    universe, build the JoinContext and wrap it in a JoinResult."""
    if left == right:
        raise ValueError(
            "Cannot join table with itself. Use <table>.copy() as one of the arguments of the join."
        )
    left_table, left_substitutions = left._substitutions()
    right_table, right_substitutions = right._substitutions()
    chained_join_desugaring = SubstitutionDesugaring(
        {**left_substitutions, **right_substitutions}
    )
    # An explicit id column must be desugared the same way as conditions.
    if id is not None:
        id = chained_join_desugaring.eval_expression(id)
        id_column = id._column
    else:
        id_column = None
    common_column_names: StableSet[str] = StableSet()
    # Instances become one more equality condition; either both or neither
    # side may provide one.
    if left_instance is not None and right_instance is not None:
        on = (*on, left_instance == right_instance)
    else:
        assert left_instance is None and right_instance is None
    on_ = tuple(validate_shape(cond) for cond in on)
    # Conditions comparing equally-named columns mark those names as
    # joined-on (they get coalesced in the inner table).
    for cond in on_:
        cond_left = cast(expr.ColumnReference, cond._left)
        cond_right = cast(expr.ColumnReference, cond._right)
        if cond_left.name == cond_right.name:
            common_column_names.add(cond_left.name)
    on_ = tuple(chained_join_desugaring.eval_expression(cond) for cond in on_)
    for cond in on_:
        validate_join_condition(cond, left_table, right_table)
    # NOTE(review): _table_restricted_context is accessed as an attribute —
    # presumably a property on Table; decorator not visible in this view.
    on_left = tuple(
        left_table._eval(cond._left, left_table._table_restricted_context)
        for cond in on_
    )
    on_right = tuple(
        right_table._eval(cond._right, right_table._table_restricted_context)
        for cond in on_
    )
    # If the requested id comes from the right table, sides are swapped in
    # the context (the engine keeps the id side first).
    swp = id_column is not None and id_column is right_table._id_column
    assert (
        id_column is None
        or (id_column is left_table._id_column)
        or (id_column is right_table._id_column)
    )
    left_context_table = clmn.ContextTable(universe=left._universe, columns=on_left)
    right_context_table = clmn.ContextTable(
        universe=right._universe, columns=on_right
    )
    substitution: dict[thisclass.ThisMetaclass, Joinable] = {
        thisclass.left: left,
        thisclass.right: right,
    }
    universe = JoinResult._compute_universe(
        left_table, right_table, id_column, mode
    )
    if swp:
        context = clmn.JoinContext(
            universe,
            right_table,
            left_table,
            right_context_table,
            left_context_table,
            id_column is not None,
            mode in [JoinMode.RIGHT, JoinMode.OUTER],
            mode in [JoinMode.LEFT, JoinMode.OUTER],
        )
    else:
        context = clmn.JoinContext(
            universe,
            left_table,
            right_table,
            left_context_table,
            right_context_table,
            id_column is not None,
            mode in [JoinMode.LEFT, JoinMode.OUTER],
            mode in [JoinMode.RIGHT, JoinMode.OUTER],
        )
    inner_table, columns_mapping = JoinResult._prepare_inner_table_with_mapping(
        context,
        left,
        right,
        common_column_names,
    )
    return JoinResult(
        context,
        inner_table,
        columns_mapping,
        left_table,
        right_table,
        left,
        right,
        substitution,
        common_column_names,
        mode,
    )
def groupby(
grouped: JoinResult,
*args: expr.ColumnReference,
id: expr.ColumnReference | None = None,
) -> groupbys.GroupedJoinResult: ... | null |
166,717 | from __future__ import annotations
import functools
import warnings
from collections.abc import Callable, Mapping
from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast, overload
import pathway.internals.column as clmn
import pathway.internals.expression as expr
from pathway.internals import api, dtype as dt, groupbys, thisclass, universes
from pathway.internals.api import Value
from pathway.internals.arg_handlers import (
arg_handler,
groupby_handler,
reduce_args_handler,
select_args_handler,
)
from pathway.internals.decorators import contextualized_operator
from pathway.internals.desugaring import (
RestrictUniverseDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.expression_visitor import collect_tables
from pathway.internals.helpers import SetOnceProperty, StableSet
from pathway.internals.joins import Joinable, JoinResult
from pathway.internals.operator import DebugOperator, OutputHandle
from pathway.internals.operator_input import OperatorInput
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.schema import Schema, schema_from_columns, schema_from_types
from pathway.internals.table_like import TableLike
from pathway.internals.table_slice import TableSlice
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import TypeInterpreterState
from pathway.internals.universe import Universe
class Table(
Joinable,
OperatorInput,
Generic[TSchema],
):
"""Collection of named columns over identical universes.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | dog
... 9 | Bob | dog
... 8 | Alice | cat
... 7 | Bob | dog
... ''')
>>> isinstance(t1, pw.Table)
True
"""
if TYPE_CHECKING:
from pathway.stdlib.ordered import diff # type: ignore[misc]
from pathway.stdlib.statistical import interpolate # type: ignore[misc]
from pathway.stdlib.temporal import ( # type: ignore[misc]
asof_join,
asof_join_left,
asof_join_outer,
asof_join_right,
asof_now_join,
asof_now_join_inner,
asof_now_join_left,
interval_join,
interval_join_inner,
interval_join_left,
interval_join_outer,
interval_join_right,
window_join,
window_join_inner,
window_join_left,
window_join_outer,
window_join_right,
windowby,
)
from pathway.stdlib.viz import ( # type: ignore[misc]
_repr_mimebundle_,
plot,
show,
)
_columns: dict[str, clmn.Column]
_schema: type[Schema]
_id_column: clmn.IdColumn
_rowwise_context: clmn.RowwiseContext
_source: SetOnceProperty[OutputHandle] = SetOnceProperty()
"""Lateinit by operator."""
def __init__(
    self,
    _columns: Mapping[str, clmn.Column],
    _context: clmn.Context,
    _schema: type[Schema] | None = None,
):
    """Construct a table from a column mapping and a context; the schema is
    derived from the columns when not given explicitly."""
    if _schema is None:
        _schema = schema_from_columns(_columns)
    super().__init__(_context)
    # Defensive copy so later reordering/mutation stays local to this table.
    self._columns = dict(_columns)
    self._schema = _schema
    self._id_column = _context.id_column
    # ``pw.this`` inside expressions on this table resolves to the table.
    self._substitution = {thisclass.this: self}
    self._rowwise_context = clmn.RowwiseContext(self._id_column)
def id(self) -> expr.ColumnReference:
    """Get reference to pseudocolumn containing id's of a table.

    NOTE(review): used elsewhere as an attribute (``t1.id``) — presumably
    decorated with @property; the decorator is not visible in this view.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.select(ids = t1.id)
    >>> t2.typehints()['ids']
    <class 'pathway.engine.Pointer'>
    >>> pw.debug.compute_and_print(t2.select(test=t2.id == t2.ids), include_id=False)
    test
    True
    True
    True
    True
    """
    return expr.ColumnReference(_table=self, _column=self._id_column, _name="id")
def column_names(self):
    # Alias for keys().
    return self.keys()
def keys(self):
    # View of this table's column names.
    return self._columns.keys()
def _get_column(self, name: str) -> clmn.Column:
    # Raw column lookup; raises KeyError for unknown names.
    return self._columns[name]
def _ipython_key_completions_(self):
    # IPython hook: offer column names when completing table["..."].
    return list(self.column_names())
def __dir__(self):
    # Expose column names for attribute completion alongside the regular
    # members reported by the superclass.
    return [*super().__dir__(), *self.column_names()]
def _C(self) -> TSchema:
    # Typed view of the column namespace; delegates to self.C.
    return self.C  # type: ignore
def schema(self) -> type[Schema]:
    """Get schema of the table.

    NOTE(review): used elsewhere as an attribute (``t1.schema``) —
    presumably decorated with @property; decorator not visible in this view.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t1.schema
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
    >>> t1.typehints()['age']
    <class 'int'>
    """
    return self._schema
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference:
    """Resolve a column name to a ColumnReference, treating ``id`` as the
    pseudocolumn; unknown names raise ``exception_type``."""
    name = self._column_deprecation_rename(name)
    if name == "id":
        return self.id
    if name not in self.keys():
        raise exception_type(f"Table has no column with name {name}.")
    return expr.ColumnReference(
        _table=self, _column=self._get_column(name), _name=name
    )
# NOTE(review): the two stub signatures below look like typing overloads —
# @overload decorators are presumably stripped in this view; confirm.
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...
def __getitem__(self, args: list[str | expr.ColumnReference]) -> Table: ...
def __getitem__(
    self, args: str | expr.ColumnReference | list[str | expr.ColumnReference]
) -> expr.ColumnReference | Table:
    """Get columns by name.

    Warning:
        - Does not allow repetitions of columns.
        - Fails if tries to access nonexistent column.

    Args:
        names: a singe column name or list of columns names to be extracted from `self`.

    Returns:
        Table with specified columns, or column expression (if single argument given).
        Instead of column names, column references are valid here.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1[["age", "pet"]]
    >>> t2 = t1[["age", t1.pet]]
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | pet
    7 | dog
    8 | cat
    9 | dog
    10 | dog
    """
    if isinstance(args, expr.ColumnReference):
        # References must point at this table or at pw.this.
        if (args.table is not self) and not isinstance(
            args.table, thisclass.ThisMetaclass
        ):
            raise ValueError(
                "Table.__getitem__ argument has to be a ColumnReference to the same table or pw.this, or a string "
                + "(or a list of those)."
            )
        return self._get_colref_by_name(args.name, KeyError)
    elif isinstance(args, str):
        return self._get_colref_by_name(args, KeyError)
    else:
        # A list of names/references becomes a projection via select().
        return self.select(*[self[name] for name in args])
def from_columns(
*args: expr.ColumnReference, **kwargs: expr.ColumnReference
) -> Table:
"""Build a table from columns.
All columns must have the same ids. Columns' names must be pairwise distinct.
Args:
args: List of columns.
kwargs: Columns with their new names.
Returns:
Table: Created table.
Example:
>>> import pathway as pw
>>> t1 = pw.Table.empty(age=float, pet=float)
>>> t2 = pw.Table.empty(foo=float, bar=float).with_universe_of(t1)
>>> t3 = pw.Table.from_columns(t1.pet, qux=t2.foo)
>>> pw.debug.compute_and_print(t3, include_id=False)
pet | qux
"""
all_args = cast(
dict[str, expr.ColumnReference], combine_args_kwargs(args, kwargs)
)
if not all_args:
raise ValueError("Table.from_columns() cannot have empty arguments list")
else:
arg = next(iter(all_args.values()))
table: Table = arg.table
for arg in all_args.values():
if not G.universe_solver.query_are_equal(
table._universe, arg.table._universe
):
raise ValueError(
"Universes of all arguments of Table.from_columns() have to be equal.\n"
+ "Consider using Table.promise_universes_are_equal() to assert it.\n"
+ "(However, untrue assertion might result in runtime errors.)"
)
return table.select(*args, **kwargs)
def concat_reindex(self, *tables: Table) -> Table:
"""Concatenate contents of several tables.
This is similar to PySpark union. All tables must have the same schema. Each row is reindexed.
Args:
tables: List of tables to concatenate. All tables must have the same schema.
Returns:
Table: The concatenated table. It will have new, synthetic ids.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | pet
... 1 | Dog
... 7 | Cat
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | pet
... 1 | Manul
... 8 | Octopus
... ''')
>>> t3 = t1.concat_reindex(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
pet
Cat
Dog
Manul
Octopus
"""
reindexed = [
table.with_id_from(table.id, i) for i, table in enumerate([self, *tables])
]
universes.promise_are_pairwise_disjoint(*reindexed)
return Table.concat(*reindexed)
def empty(**kwargs: dt.DType) -> Table:
"""Creates an empty table with a schema specified by kwargs.
Args:
kwargs: Dict whose keys are column names and values are column types.
Returns:
Table: Created empty table.
Example:
>>> import pathway as pw
>>> t1 = pw.Table.empty(age=float, pet=float)
>>> pw.debug.compute_and_print(t1, include_id=False)
age | pet
"""
from pathway.internals import table_io
ret = table_io.empty_from_schema(schema_from_types(None, **kwargs))
G.universe_solver.register_as_empty(ret._universe)
return ret
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
"""Build a new table with columns specified by kwargs.
Output columns' names are keys(kwargs). values(kwargs) can be raw values, boxed
values, columns. Assigning to id reindexes the table.
Args:
args: Column references.
kwargs: Column expressions with their new assigned names.
Returns:
Table: Created table.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... pet
... Dog
... Cat
... ''')
>>> t2 = t1.select(animal=t1.pet, desc="fluffy")
>>> pw.debug.compute_and_print(t2, include_id=False)
animal | desc
Cat | fluffy
Dog | fluffy
"""
new_columns = []
all_args = combine_args_kwargs(args, kwargs)
for new_name, expression in all_args.items():
self._validate_expression(expression)
column = self._eval(expression)
new_columns.append((new_name, column))
return self._with_same_universe(*new_columns)
def __add__(self, other: Table) -> Table:
"""Build a union of `self` with `other`.
Semantics: Returns a table C, such that
- C.columns == self.columns + other.columns
- C.id == self.id == other.id
Args:
other: The other table. `self.id` must be equal `other.id` and
`self.columns` and `other.columns` must be disjoint (or overlapping names
are THE SAME COLUMN)
Returns:
Table: Created table.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... pet
... 1 Dog
... 7 Cat
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... age
... 1 10
... 7 3
... ''')
>>> t3 = t1 + t2
>>> pw.debug.compute_and_print(t3, include_id=False)
pet | age
Cat | 3
Dog | 10
"""
if not G.universe_solver.query_are_equal(self._universe, other._universe):
raise ValueError(
"Universes of all arguments of Table.__add__() have to be equal.\n"
+ "Consider using Table.promise_universes_are_equal() to assert it.\n"
+ "(However, untrue assertion might result in runtime errors.)"
)
return self.select(*self, *other)
    # NOTE(review): defined without arguments beyond ``self`` and used as
    # ``t1.slice`` in the example — presumably decorated with @property above
    # this excerpt; confirm.
    def slice(self) -> TableSlice:
        """Creates a collection of references to self columns.
        Supports basic column manipulation methods.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10  | Alice | dog
        ... 9   | Bob   | dog
        ... 8   | Alice | cat
        ... 7   | Bob   | dog
        ... ''')
        >>> t1.slice.without("age")
        TableSlice({'owner': <table1>.owner, 'pet': <table1>.pet})
        """
        # dict(**self) snapshots the name -> reference mapping of all columns.
        return TableSlice(dict(**self), self)
def filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
"""Filter a table according to `filter_expression` condition.
Args:
filter_expression: `ColumnExpression` that specifies the filtering condition.
Returns:
Table: Result has the same schema as `self` and its ids are subset of `self.id`.
Example:
>>> import pathway as pw
>>> vertices = pw.debug.table_from_markdown('''
... label outdegree
... 1 3
... 7 0
... ''')
>>> filtered = vertices.filter(vertices.outdegree == 0)
>>> pw.debug.compute_and_print(filtered, include_id=False)
label | outdegree
7 | 0
"""
filter_type = self.eval_type(filter_expression)
if filter_type != dt.BOOL:
raise TypeError(
f"Filter argument of Table.filter() has to be bool, found {filter_type}."
)
ret = self._filter(filter_expression)
if (
filter_col := expr.get_column_filtered_by_is_none(filter_expression)
) is not None and filter_col.table == self:
name = filter_col.name
dtype = self._columns[name].dtype
ret = ret.update_types(**{name: dt.unoptionalize(dtype)})
return ret
def split(
self, split_expression: expr.ColumnExpression
) -> tuple[Table[TSchema], Table[TSchema]]:
"""Split a table according to `split_expression` condition.
Args:
split_expression: `ColumnExpression` that specifies the split condition.
Returns:
positive_table, negative_table: tuple of tables,
with the same schemas as `self` and with ids that are subsets of `self.id`,
and provably disjoint.
Example:
>>> import pathway as pw
>>> vertices = pw.debug.table_from_markdown('''
... label outdegree
... 1 3
... 7 0
... ''')
>>> positive, negative = vertices.split(vertices.outdegree == 0)
>>> pw.debug.compute_and_print(positive, include_id=False)
label | outdegree
7 | 0
>>> pw.debug.compute_and_print(negative, include_id=False)
label | outdegree
1 | 3
"""
positive = self.filter(split_expression)
negative = self.filter(~split_expression)
universes.promise_are_pairwise_disjoint(positive, negative)
universes.promise_are_equal(
self, Table.concat(positive, negative)
) # TODO: add API method for this
return positive, negative
def _filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
self._validate_expression(filter_expression)
filtering_column = self._eval(filter_expression)
assert self._universe == filtering_column.universe
context = clmn.FilterContext(filtering_column, self._id_column)
return self._table_with_context(context)
    def _gradual_broadcast(
        self,
        threshold_table,
        lower_column,
        value_column,
        upper_column,
    ) -> Table:
        # Append the broadcast "apx_value" column (computed by the
        # name-mangled helper below) to this table's existing columns.
        return self + self.__gradual_broadcast(
            threshold_table, lower_column, value_column, upper_column
        )
    def __gradual_broadcast(
        self,
        threshold_table,
        lower_column,
        value_column,
        upper_column,
    ):
        # Build a single-column table keyed like self; the lower/value/upper
        # expressions are evaluated against threshold_table, not self.
        context = clmn.GradualBroadcastContext(
            self._id_column,
            threshold_table._eval(lower_column),
            threshold_table._eval(value_column),
            threshold_table._eval(upper_column),
        )
        return Table(_columns={"apx_value": context.apx_value_column}, _context=context)
def _forget(
self,
threshold_column: expr.ColumnExpression,
time_column: expr.ColumnExpression,
mark_forgetting_records: bool,
) -> Table:
context = clmn.ForgetContext(
self._id_column,
self._eval(threshold_column),
self._eval(time_column),
mark_forgetting_records,
)
return self._table_with_context(context)
    def _forget_immediately(
        self,
    ) -> Table:
        # Wrap the table in a context that forgets every record right away.
        context = clmn.ForgetImmediatelyContext(self._id_column)
        return self._table_with_context(context)
    def _filter_out_results_of_forgetting(
        self,
    ) -> Table:
        """Drop the retraction entries produced by a forgetting operator upstream."""
        # The output universe is a superset of input universe because forgetting entries
        # are filtered out. At each point in time, the set of keys with +1 diff can be
        # bigger than a set of keys with +1 diff in an input table.
        context = clmn.FilterOutForgettingContext(self._id_column)
        return self._table_with_context(context)
def _freeze(
self,
threshold_column: expr.ColumnExpression,
time_column: expr.ColumnExpression,
) -> Table:
context = clmn.FreezeContext(
self._id_column,
self._eval(threshold_column),
self._eval(time_column),
)
return self._table_with_context(context)
def _buffer(
self,
threshold_column: expr.ColumnExpression,
time_column: expr.ColumnExpression,
) -> Table:
context = clmn.BufferContext(
self._id_column,
self._eval(threshold_column),
self._eval(time_column),
)
return self._table_with_context(context)
def difference(self, other: Table) -> Table[TSchema]:
r"""Restrict self universe to keys not appearing in the other table.
Args:
other: table with ids to remove from self.
Returns:
Table: table with restricted universe, with the same set of columns
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | cost
... 2 | 100
... 3 | 200
... 4 | 300
... ''')
>>> t3 = t1.difference(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet
10 | Alice | 1
"""
context = clmn.DifferenceContext(
left=self._id_column,
right=other._id_column,
)
return self._table_with_context(context)
def intersect(self, *tables: Table) -> Table[TSchema]:
"""Restrict self universe to keys appearing in all of the tables.
Args:
tables: tables keys of which are used to restrict universe.
Returns:
Table: table with restricted universe, with the same set of columns
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | cost
... 2 | 100
... 3 | 200
... 4 | 300
... ''')
>>> t3 = t1.intersect(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet
8 | Alice | 2
9 | Bob | 1
"""
intersecting_universes = (
self._universe,
*tuple(table._universe for table in tables),
)
universe = G.universe_solver.get_intersection(*intersecting_universes)
if universe in intersecting_universes:
context: clmn.Context = clmn.RestrictContext(self._id_column, universe)
else:
intersecting_ids = (
self._id_column,
*tuple(table._id_column for table in tables),
)
context = clmn.IntersectContext(
intersecting_ids=intersecting_ids,
)
return self._table_with_context(context)
def restrict(self, other: TableLike) -> Table[TSchema]:
"""Restrict self universe to keys appearing in other.
Args:
other: table which universe is used to restrict universe of self.
Returns:
Table: table with restricted universe, with the same set of columns
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown(
... '''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... '''
... )
>>> t2 = pw.debug.table_from_markdown(
... '''
... | cost
... 2 | 100
... 3 | 200
... '''
... )
>>> t2.promise_universe_is_subset_of(t1)
<pathway.Table schema={'cost': <class 'int'>}>
>>> t3 = t1.restrict(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet
8 | Alice | 2
9 | Bob | 1
"""
if not G.universe_solver.query_is_subset(other._universe, self._universe):
raise ValueError(
"Table.restrict(): other universe has to be a subset of self universe."
+ "Consider using Table.promise_universe_is_subset_of() to assert it."
)
context = clmn.RestrictContext(self._id_column, other._universe)
columns = {
name: self._wrap_column_in_context(context, column, name)
for name, column in self._columns.items()
}
return Table(
_columns=columns,
_context=context,
)
    def copy(self) -> Table[TSchema]:
        """Returns a copy of a table.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10  | Alice | dog
        ... 9   | Bob   | dog
        ... 8   | Alice | cat
        ... 7   | Bob   | dog
        ... ''')
        >>> t2 = t1.copy()
        >>> pw.debug.compute_and_print(t2, include_id=False)
        age | owner | pet
        7   | Bob   | dog
        8   | Alice | cat
        9   | Bob   | dog
        10  | Alice | dog
        >>> t1 is t2
        False
        """
        # Delegate to _copy_as, preserving the concrete (sub)class of self.
        return self._copy_as(type(self))
def _copy_as(self, table_type: type[TTable], /, **kwargs) -> TTable:
columns = {
name: self._wrap_column_in_context(self._rowwise_context, column, name)
for name, column in self._columns.items()
}
return table_type(_columns=columns, _context=self._rowwise_context, **kwargs)
    def groupby(
        self,
        *args: expr.ColumnReference,
        id: expr.ColumnReference | None = None,
        sort_by: expr.ColumnReference | None = None,
        _filter_out_results_of_forgetting: bool = False,
        instance: expr.ColumnReference | None = None,
    ) -> groupbys.GroupedTable:
        """Groups table by columns from args.
        Note:
        Usually followed by `.reduce()` that aggregates the result and returns a table.
        Args:
        args: columns to group by.
        id: if provided, is the column used to set id's of the rows of the result
        sort_by: if provided, column values are used as sorting keys for particular reducers
        instance: optional argument describing partitioning of the data into separate instances
        Returns:
        GroupedTable: Groupby object.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10  | Alice | dog
        ... 9   | Bob   | dog
        ... 8   | Alice | cat
        ... 7   | Bob   | dog
        ... ''')
        >>> t2 = t1.groupby(t1.pet, t1.owner).reduce(t1.owner, t1.pet, ageagg=pw.reducers.sum(t1.age))
        >>> pw.debug.compute_and_print(t2, include_id=False)
        owner | pet | ageagg
        Alice | cat | 8
        Alice | dog | 10
        Bob   | dog | 16
        """
        # `instance` is just an extra grouping column appended at the end.
        if instance is not None:
            args = (*args, instance)
        if id is not None:
            # With `id` given, grouping must be either by nothing (then `id`
            # becomes the sole grouping column) or by that same single column.
            if len(args) == 0:
                args = (id,)
            elif len(args) > 1:
                raise ValueError(
                    "Table.groupby() cannot have id argument when grouping by multiple columns."
                )
            elif args[0]._column != id._column:
                raise ValueError(
                    "Table.groupby() received id argument and is grouped by a single column,"
                    + " but the arguments are not equal.\n"
                    + "Consider using <table>.groupby(id=...), skipping the positional argument."
                )
        # Reject plain strings early with a targeted hint.
        for arg in args:
            if not isinstance(arg, expr.ColumnReference):
                if isinstance(arg, str):
                    raise ValueError(
                        f"Expected a ColumnReference, found a string. Did you mean <table>.{arg}"
                        + f" instead of {repr(arg)}?"
                    )
                else:
                    raise ValueError(
                        "All Table.groupby() arguments have to be a ColumnReference."
                    )
        return groupbys.GroupedTable.create(
            table=self,
            grouping_columns=args,
            set_id=id is not None,
            sort_by=sort_by,
            _filter_out_results_of_forgetting=_filter_out_results_of_forgetting,
        )
    def reduce(
        self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
    ) -> Table:
        """Reduce a table to a single row.
        Equivalent to `self.groupby().reduce(*args, **kwargs)`.
        Args:
        args: reducer to reduce the table with
        kwargs: reducer to reduce the table with. Its key is the new name of a column.
        Returns:
        Table: Reduced table.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10  | Alice | dog
        ... 9   | Bob   | dog
        ... 8   | Alice | cat
        ... 7   | Bob   | dog
        ... ''')
        >>> t2 = t1.reduce(ageagg=pw.reducers.argmin(t1.age))
        >>> pw.debug.compute_and_print(t2, include_id=False) # doctest: +ELLIPSIS
        ageagg
        ^...
        >>> t3 = t2.select(t1.ix(t2.ageagg).age, t1.ix(t2.ageagg).pet)
        >>> pw.debug.compute_and_print(t3, include_id=False)
        age | pet
        7   | dog
        """
        # Grouping by nothing puts all rows into one group.
        return self.groupby().reduce(*args, **kwargs)
def deduplicate(
self,
*,
value: expr.ColumnExpression,
instance: expr.ColumnExpression | None = None,
acceptor: Callable[[T, T], bool],
persistent_id: str | None = None,
) -> Table:
"""Deduplicates rows in `self` on `value` column using acceptor function.
It keeps rows which where accepted by the acceptor function.
Acceptor operates on two arguments - current value and the previously accepted value.
Args:
value: column expression used for deduplication.
instance: Grouping column. For rows with different
values in this column, deduplication will be performed separately.
Defaults to None.
acceptor: callback telling whether two values are different.
persistent_id: (unstable) An identifier, under which the state of the table
will be persisted or ``None``, if there is no need to persist the state of this table.
When a program restarts, it restores the state for all input tables according to what
was saved for their ``persistent_id``. This way it's possible to configure the start of
computations from the moment they were terminated last time.
Returns:
Table: the result of deduplication.
Example:
>>> import pathway as pw
>>> table = pw.debug.table_from_markdown(
... '''
... val | __time__
... 1 | 2
... 2 | 4
... 3 | 6
... 4 | 8
... '''
... )
>>>
>>> def acceptor(new_value, old_value) -> bool:
... return new_value >= old_value + 2
...
>>>
>>> result = table.deduplicate(value=pw.this.val, acceptor=acceptor)
>>> pw.debug.compute_and_print_update_stream(result, include_id=False)
val | __time__ | __diff__
1 | 2 | 1
1 | 6 | -1
3 | 6 | 1
>>>
>>> table = pw.debug.table_from_markdown(
... '''
... val | instance | __time__
... 1 | 1 | 2
... 2 | 1 | 4
... 3 | 2 | 6
... 4 | 1 | 8
... 4 | 2 | 8
... 5 | 1 | 10
... '''
... )
>>>
>>> def acceptor(new_value, old_value) -> bool:
... return new_value >= old_value + 2
...
>>>
>>> result = table.deduplicate(
... value=pw.this.val, instance=pw.this.instance, acceptor=acceptor
... )
>>> pw.debug.compute_and_print_update_stream(result, include_id=False)
val | instance | __time__ | __diff__
1 | 1 | 2 | 1
3 | 2 | 6 | 1
1 | 1 | 8 | -1
4 | 1 | 8 | 1
"""
if instance is None:
instance = expr.ColumnConstExpression(None)
self._validate_expression(value)
self._validate_expression(instance)
value_col = self._eval(value)
instance_col = self._eval(instance)
context = clmn.DeduplicateContext(
value_col,
(instance_col,),
acceptor,
self._id_column,
persistent_id,
)
return self._table_with_context(context)
    def ix(
        self, expression: expr.ColumnExpression, *, optional: bool = False, context=None
    ) -> Table:
        """Reindexes the table using expression values as keys. Uses keys from context, or tries to infer
        proper context from the expression.
        If optional is True, then None in expression values result in None values in the result columns.
        Missing values in table keys result in RuntimeError.
        Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
        (latter results in returning a delayed operation, and should be only used when using `ix` inside
        join().select() or groupby().reduce() sequence).
        Returns:
        Reindexed table with the same set of columns.
        Example:
        >>> import pathway as pw
        >>> t_animals = pw.debug.table_from_markdown('''
        ...   | epithet    | genus
        ... 1 | upupa      | epops
        ... 2 | acherontia | atropos
        ... 3 | bubo       | scandiacus
        ... 4 | dynastes   | hercules
        ... ''')
        >>> t_birds = pw.debug.table_from_markdown('''
        ...   | desc
        ... 2 | hoopoe
        ... 4 | owl
        ... ''')
        >>> ret = t_birds.select(t_birds.desc, latin=t_animals.ix(t_birds.id).genus)
        >>> pw.debug.compute_and_print(ret, include_id=False)
        desc   | latin
        hoopoe | atropos
        owl    | hercules
        """
        # Context inference: with no tables in the expression fall back to
        # pw.this; with a single (repeated) table use it directly; otherwise
        # require all mentioned tables to share one universe.
        if context is None:
            all_tables = collect_tables(expression)
            if len(all_tables) == 0:
                context = thisclass.this
            elif all(tab == all_tables[0] for tab in all_tables):
                context = all_tables[0]
        if context is None:
            for tab in all_tables:
                if not isinstance(tab, Table):
                    raise ValueError("Table expected here.")
            if len(all_tables) == 0:
                raise ValueError("Const value provided.")
            context = all_tables[0]
            for tab in all_tables:
                assert context._universe.is_equal_to(tab._universe)
        if isinstance(context, groupbys.GroupedJoinable):
            context = thisclass.this
        # A pw.this context defers the whole operation until the enclosing
        # select/reduce resolves it against a concrete table.
        if isinstance(context, thisclass.ThisMetaclass):
            return context._delayed_op(
                lambda table, expression: self.ix(
                    expression=expression, optional=optional, context=table
                ),
                expression=expression,
                qualname=f"{self}.ix(...)",
                name="ix",
            )
        restrict_universe = RestrictUniverseDesugaring(context)
        expression = restrict_universe.eval_expression(expression)
        key_col = context.select(tmp=expression).tmp
        key_dtype = self.eval_type(key_col)
        # Keys must be pointers; with optional=True, Optional[Pointer] is allowed.
        if (
            optional and not dt.dtype_issubclass(key_dtype, dt.Optional(dt.POINTER))
        ) or (not optional and not isinstance(key_dtype, dt.Pointer)):
            raise TypeError(
                f"Pathway supports indexing with Pointer type only. The type used was {key_dtype}."
            )
        # Optional keys can yield missing rows, so every output column becomes Optional.
        if optional and isinstance(key_dtype, dt.Optional):
            self_ = self.update_types(
                **{name: dt.Optional(self.typehints()[name]) for name in self.keys()}
            )
        else:
            self_ = self
        return self_._ix(key_col, optional)
def _ix(
self,
key_expression: expr.ColumnReference,
optional: bool,
) -> Table:
key_column = key_expression._column
context = clmn.IxContext(key_column, self._id_column, optional)
return self._table_with_context(context)
    def __lshift__(self, other: Table) -> Table:
        """Alias to update_cells method.
        Updates cells of `self`, breaking ties in favor of the values in `other`.
        Semantics:
        - result.columns == self.columns
        - result.id == self.id
        - conflicts are resolved preferring other's values
        Requires:
        - other.columns ⊆ self.columns
        - other.id ⊆ self.id
        Args:
        other: the other table.
        Returns:
        Table: `self` updated with cells form `other`.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...   | age | owner | pet
        ... 1 | 10  | Alice | 1
        ... 2 | 9   | Bob   | 1
        ... 3 | 8   | Alice | 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...   | age | owner | pet
        ... 1 | 10  | Alice | 30
        ... ''')
        >>> pw.universes.promise_is_subset_of(t2, t1)
        >>> t3 = t1 << t2
        >>> pw.debug.compute_and_print(t3, include_id=False)
        age | owner | pet
        8   | Alice | 2
        9   | Bob   | 1
        10  | Alice | 30
        """
        # _stacklevel=2 makes any warning point at the `<<` call site.
        return self.update_cells(other, _stacklevel=2)
def concat(self, *others: Table[TSchema]) -> Table[TSchema]:
"""Concats `self` with every `other` ∊ `others`.
Semantics:
- result.columns == self.columns == other.columns
- result.id == self.id ∪ other.id
if self.id and other.id collide, throws an exception.
Requires:
- other.columns == self.columns
- self.id disjoint with other.id
Args:
other: the other table.
Returns:
Table: The concatenated table. Id's of rows from original tables are preserved.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 11 | 11 | Alice | 30
... 12 | 12 | Tom | 40
... ''')
>>> pw.universes.promise_are_pairwise_disjoint(t1, t2)
>>> t3 = t1.concat(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet
8 | Alice | 2
9 | Bob | 1
10 | Alice | 1
11 | Alice | 30
12 | Tom | 40
"""
for other in others:
if other.keys() != self.keys():
raise ValueError(
"columns do not match in the argument of Table.concat()"
)
schema = {
key: functools.reduce(
dt.types_lca,
[other.schema._dtypes()[key] for other in others],
self.schema._dtypes()[key],
)
for key in self.keys()
}
return Table._concat(
self.cast_to_types(**schema),
*[other.cast_to_types(**schema) for other in others],
)
def _concat(self, *others: Table[TSchema]) -> Table[TSchema]:
union_ids = (self._id_column, *(other._id_column for other in others))
if not G.universe_solver.query_are_disjoint(*(c.universe for c in union_ids)):
raise ValueError(
"Universes of the arguments of Table.concat() have to be disjoint.\n"
+ "Consider using Table.promise_universes_are_disjoint() to assert it.\n"
+ "(However, untrue assertion might result in runtime errors.)"
)
context = clmn.ConcatUnsafeContext(
union_ids=union_ids,
updates=tuple(
{col_name: other._columns[col_name] for col_name in self.keys()}
for other in others
),
)
return self._table_with_context(context)
def update_cells(self, other: Table, _stacklevel: int = 1) -> Table:
"""Updates cells of `self`, breaking ties in favor of the values in `other`.
Semantics:
- result.columns == self.columns
- result.id == self.id
- conflicts are resolved preferring other's values
Requires:
- other.columns ⊆ self.columns
- other.id ⊆ self.id
Args:
other: the other table.
Returns:
Table: `self` updated with cells form `other`.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... age | owner | pet
... 1 | 10 | Alice | 30
... ''')
>>> pw.universes.promise_is_subset_of(t2, t1)
>>> t3 = t1.update_cells(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet
8 | Alice | 2
9 | Bob | 1
10 | Alice | 30
"""
if names := (set(other.keys()) - set(self.keys())):
raise ValueError(
f"Columns of the argument in Table.update_cells() not present in the updated table: {list(names)}."
)
if self._universe == other._universe:
warnings.warn(
"Key sets of self and other in update_cells are the same."
+ " Using with_columns instead of update_cells.",
stacklevel=_stacklevel + 4,
)
return self.with_columns(*(other[name] for name in other))
schema = {
key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
for key in other.keys()
}
return Table._update_cells(
self.cast_to_types(**schema), other.cast_to_types(**schema)
)
def _update_cells(self, other: Table) -> Table:
if not other._universe.is_subset_of(self._universe):
raise ValueError(
"Universe of the argument of Table.update_cells() needs to be "
+ "a subset of the universe of the updated table.\n"
+ "Consider using Table.promise_is_subset_of() to assert this.\n"
+ "(However, untrue assertion might result in runtime errors.)"
)
context = clmn.UpdateCellsContext(
left=self._id_column,
right=other._id_column,
updates={name: other._columns[name] for name in other.keys()},
)
return self._table_with_context(context)
def update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
"""Updates rows of `self`, breaking ties in favor for the rows in `other`.
Semantics:
- result.columns == self.columns == other.columns
- result.id == self.id ∪ other.id
Requires:
- other.columns == self.columns
Args:
other: the other table.
Returns:
Table: `self` updated with rows form `other`.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 30
... 12 | 12 | Tom | 40
... ''')
>>> t3 = t1.update_rows(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet
8 | Alice | 2
9 | Bob | 1
10 | Alice | 30
12 | Tom | 40
"""
if other.keys() != self.keys():
raise ValueError(
"Columns do not match between argument of Table.update_rows() and the updated table."
)
if self._universe.is_subset_of(other._universe):
warnings.warn(
"Universe of self is a subset of universe of other in update_rows. Returning other.",
stacklevel=5,
)
return other
schema = {
key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
for key in self.keys()
}
union_universes = (self._universe, other._universe)
universe = G.universe_solver.get_union(*union_universes)
if universe == self._universe:
return Table._update_cells(
self.cast_to_types(**schema), other.cast_to_types(**schema)
)
else:
return Table._update_rows(
self.cast_to_types(**schema), other.cast_to_types(**schema)
)
def _update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
union_ids = (self._id_column, other._id_column)
context = clmn.UpdateRowsContext(
updates={col_name: other._columns[col_name] for col_name in self.keys()},
union_ids=union_ids,
)
return self._table_with_context(context)
def with_columns(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
"""Updates columns of `self`, according to args and kwargs.
See `table.select` specification for evaluation of args and kwargs.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | owner | pet | size
... 1 | Tom | 1 | 10
... 2 | Bob | 1 | 9
... 3 | Tom | 2 | 8
... ''')
>>> t3 = t1.with_columns(*t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet | size
8 | Tom | 2 | 8
9 | Bob | 1 | 9
10 | Tom | 1 | 10
"""
other = self.select(*args, **kwargs)
columns = dict(self)
columns.update(other)
return self.select(**columns)
    def with_id(self, new_index: expr.ColumnReference) -> Table:
        """Set new ids based on another column containing id-typed values.
        To generate ids based on arbitrary valued columns, use `with_id_from`.
        Values assigned must be row-wise unique.
        Args:
        new_id: column to be used as the new index.
        Returns:
        Table with updated ids.
        Example:
        >>> import pytest; pytest.xfail("with_id is hard to test")
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...   | age | owner | pet
        ... 1 | 10  | Alice | 1
        ... 2 | 9   | Bob   | 1
        ... 3 | 8   | Alice | 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...   | new_id
        ... 1 | 2
        ... 2 | 3
        ... 3 | 4
        ... ''')
        >>> t3 = t1.promise_universe_is_subset_of(t2).with_id(t2.new_id)
        >>> pw.debug.compute_and_print(t3)
        age owner pet
        ^2 10 Alice 1
        ^3 9 Bob 1
        ^4 8 Alice 2
        """
        # The column already holds Pointer values; delegate directly.
        return self._with_new_index(new_index)
def with_id_from(
self,
*args: expr.ColumnExpression | Value,
instance: expr.ColumnReference | None = None,
) -> Table:
"""Compute new ids based on values in columns.
Ids computed from `columns` must be row-wise unique.
Args:
columns: columns to be used as primary keys.
Returns:
Table: `self` updated with recomputed ids.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = t1 + t1.select(old_id=t1.id)
>>> t3 = t2.with_id_from(t2.age)
>>> pw.debug.compute_and_print(t3) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
| age | owner | pet | old_id
^... | 8 | Alice | 2 | ^...
^... | 9 | Bob | 1 | ^...
^... | 10 | Alice | 1 | ^...
>>> t4 = t3.select(t3.age, t3.owner, t3.pet, same_as_old=(t3.id == t3.old_id),
... same_as_new=(t3.id == t3.pointer_from(t3.age)))
>>> pw.debug.compute_and_print(t4) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
| age | owner | pet | same_as_old | same_as_new
^... | 8 | Alice | 2 | False | True
^... | 9 | Bob | 1 | False | True
^... | 10 | Alice | 1 | False | True
"""
# new_index should be a column, so a little workaround
new_index = self.select(
ref_column=self.pointer_from(*args, instance=instance)
).ref_column
return self._with_new_index(
new_index=new_index,
)
def _with_new_index(
self,
new_index: expr.ColumnExpression,
) -> Table:
self._validate_expression(new_index)
index_type = self.eval_type(new_index)
if not isinstance(index_type, dt.Pointer):
raise TypeError(
f"Pathway supports reindexing Tables with Pointer type only. The type used was {index_type}."
)
reindex_column = self._eval(new_index)
assert self._universe == reindex_column.universe
context = clmn.ReindexContext(reindex_column)
return self._table_with_context(context)
def rename_columns(self, **kwargs: str | expr.ColumnReference) -> Table:
    """Rename columns according to kwargs (``new_name=old_name_or_reference``).

    Columns not mentioned in ``kwargs`` are left untouched. The new name of a
    column must not be ``id``.

    Args:
        kwargs: mapping from new column names to old names (or references).

    Returns:
        Table: ``self`` with the columns renamed.
    """
    new_to_old: dict[str, str] = {}
    for new_name, old in kwargs.items():
        old_name = old.name if isinstance(old, expr.ColumnReference) else old
        if old_name not in self._columns:
            raise ValueError(f"Column {old_name} does not exist in a given table.")
        new_to_old[new_name] = old_name
    renamed = self._columns.copy()
    # Drop every old name first so that swaps (a=this.b, b=this.a) work.
    for old_name in new_to_old.values():
        renamed.pop(old_name)
    for new_name, old_name in new_to_old.items():
        renamed[new_name] = self._columns[old_name]
    wrapped = {
        name: self._wrap_column_in_context(
            self._rowwise_context,
            column,
            # Lineage keeps the original (pre-rename) column name.
            new_to_old.get(name, name),
        )
        for name, column in renamed.items()
    }
    return self._with_same_universe(*wrapped.items())
def rename_by_dict(
    self, names_mapping: dict[str | expr.ColumnReference, str]
) -> Table:
    """Rename columns according to a dictionary ``{old_name_or_ref: new_name}``.

    Columns not mentioned in the mapping are left untouched. The new name of a
    column must not be ``id``.

    Args:
        names_mapping: mapping from old column names (or references) to new names.

    Returns:
        Table: ``self`` with the columns renamed.
    """
    as_kwargs = {new: self[old] for old, new in names_mapping.items()}
    return self.rename_columns(**as_kwargs)
def with_prefix(self, prefix: str) -> Table:
    """Return a copy of the table with ``prefix`` prepended to every column name."""
    renames = {column: prefix + column for column in self.keys()}
    return self.rename_by_dict(renames)
def with_suffix(self, suffix: str) -> Table:
    """Return a copy of the table with ``suffix`` appended to every column name."""
    renames = {column: column + suffix for column in self.keys()}
    return self.rename_by_dict(renames)
def rename(
    self,
    names_mapping: dict[str | expr.ColumnReference, str] | None = None,
    **kwargs: str | expr.ColumnReference,
) -> Table:
    """Rename columns according either to a dictionary or to kwargs.

    If a mapping is provided using a dictionary, ``rename_by_dict`` will be used.
    Otherwise, ``rename_columns`` will be used with kwargs.

    Columns not in keys(kwargs) are not changed. New name of a column must not
    be ``id``.

    Args:
        names_mapping: mapping from old column names to new names.
        kwargs: mapping from new column names to old names (or references),
            as accepted by ``rename_columns``.

    Returns:
        Table: `self` with columns renamed.
    """
    # Fix: kwargs was annotated `expr.ColumnExpression`, but the values are
    # forwarded unchanged to rename_columns(), whose signature accepts
    # `str | expr.ColumnReference`; the annotation now matches.
    if names_mapping is not None:
        return self.rename_by_dict(names_mapping=names_mapping)
    return self.rename_columns(**kwargs)
def without(self, *columns: str | expr.ColumnReference) -> Table:
    """Select all columns except the listed ones.

    Args:
        columns: columns to drop, given by name or by ``table.column`` reference.

    Returns:
        Table: ``self`` without the specified columns.
    """
    remaining = self._columns.copy()
    for column in columns:
        if isinstance(column, expr.ColumnReference):
            remaining.pop(column.name)
        else:
            assert isinstance(column, str)
            remaining.pop(column)
    wrapped = {
        name: self._wrap_column_in_context(self._rowwise_context, col, name)
        for name, col in remaining.items()
    }
    return self._with_same_universe(*wrapped.items())
def having(self, *indexers: expr.ColumnReference) -> Table[TSchema]:
    """Removes rows so that indexed.ix(indexer) is possible when some rows are
    missing, for each indexer in indexers."""
    restricted = [self._having(indexer) for indexer in indexers]
    if not restricted:
        return self
    if len(restricted) == 1:
        return restricted[0]
    first, *rest = restricted
    return first.intersect(*rest)
def update_types(self, **kwargs: Any) -> Table:
    """Updates types in schema. Has no effect on the runtime.

    Args:
        kwargs: mapping from existing column names to their new declared types.

    Raises:
        ValueError: if any given name is not an existing column.
    """
    for name in kwargs:
        if name not in self.keys():
            # Diagnostics fix: include the offending name in the message.
            raise ValueError(
                "Table.update_types() argument name has to be an existing table"
                f" column name, got {name!r}."
            )
    from pathway.internals.common import declare_type

    return self.with_columns(
        **{key: declare_type(val, self[key]) for key, val in kwargs.items()}
    )
def cast_to_types(self, **kwargs: Any) -> Table:
    """Casts columns to types.

    Args:
        kwargs: mapping from existing column names to target types.

    Raises:
        ValueError: if any given name is not an existing column.
    """
    for name in kwargs:
        if name not in self.keys():
            # Diagnostics fix: include the offending name in the message.
            raise ValueError(
                "Table.cast_to_types() argument name has to be an existing table"
                f" column name, got {name!r}."
            )
    from pathway.internals.common import cast

    return self.with_columns(
        **{key: cast(val, self[key]) for key, val in kwargs.items()}
    )
def _having(self, indexer: expr.ColumnReference) -> Table[TSchema]:
    """Restrict this table to the keys present in ``indexer``'s column."""
    ctx = clmn.HavingContext(
        orig_id_column=self._id_column, key_column=indexer._column
    )
    return self._table_with_context(ctx)
def with_universe_of(self, other: TableLike) -> Table:
    """Return a copy of ``self`` with exactly the same universe as ``other``.

    Semantics: requires the precondition ``self.universe == other.universe``.
    Used where Pathway cannot deduce equality of universes statically, although
    they are equal; equality is verified at runtime.
    """
    if self._universe == other._universe:
        # Universes already provably equal — a plain copy suffices.
        return self.copy()
    universes.promise_are_equal(self, other)
    return self._unsafe_promise_universe(other)
def flatten(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Perform a flatmap on the first argument, whose datatype has to be
    iterable or a Json array; the other given columns are duplicated once per
    produced element.

    Ids of source rows can be kept with ``original_id=table.id``.

    Args:
        args: the column to flatten, followed by columns to duplicate.
        kwargs: additional columns to duplicate, under new names.

    Returns:
        Table: the flattened table.
    """
    projected = self.select(*args, **kwargs)
    all_args = combine_args_kwargs(args, kwargs)
    if not all_args:
        raise ValueError("Table.flatten() cannot have empty arguments list.")
    # The first argument is the one being flattened.
    return projected._flatten(next(iter(all_args)))
def _flatten(
    self,
    flatten_name: str,
) -> Table:
    """Build a FlattenContext for ``flatten_name`` and rewrap the other columns."""
    column_to_flatten = self._columns[flatten_name]
    assert isinstance(column_to_flatten, clmn.ColumnWithExpression)
    context = clmn.FlattenContext(
        orig_universe=self._universe,
        flatten_column=column_to_flatten,
    )
    other_columns = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
        if name != flatten_name
    }
    return Table(
        _columns={
            flatten_name: context.flatten_result_column,
            **other_columns,
        },
        _context=context,
    )
def sort(
    self,
    key: expr.ColumnExpression,
    instance: expr.ColumnExpression | None = None,
) -> Table:
    """Sort the table by ``key``, optionally within groups given by ``instance``.

    Args:
        key (ColumnExpression[int | float | datetime | str | bytes]):
            an expression to sort by.
        instance: optional grouping expression; rows are sorted within an
            instance and ``prev``/``next`` only point to rows of the same
            instance.

    Returns:
        pw.Table: a table with two pointer columns, ``prev`` and ``next``,
        linking each row to its neighbours in the sorted order.
    """
    wrapped_instance = clmn.ColumnExpression._wrap(instance)
    sorting_context = clmn.SortingContext(
        self._eval(key),
        self._eval(wrapped_instance),
    )
    return Table(
        _columns={
            "prev": sorting_context.prev_column,
            "next": sorting_context.next_column,
        },
        _context=sorting_context,
    )
def _set_source(self, source: OutputHandle):
    """Attach ``source`` as lineage to this table, its columns and its universe,
    only where no lineage has been set yet."""
    self._source = source
    if not hasattr(self._id_column, "lineage"):
        self._id_column.lineage = clmn.ColumnLineage(name="id", source=source)
    for name, column in self._columns.items():
        if not hasattr(column, "lineage"):
            column.lineage = clmn.ColumnLineage(name=name, source=source)
    if not hasattr(self._universe, "lineage"):
        self._universe.lineage = clmn.Lineage(source=source)
def _unsafe_promise_universe(self, other: TableLike) -> Table:
    """Rewrap ``self`` in a context asserting the same universe as ``other``."""
    ctx = clmn.PromiseSameUniverseContext(self._id_column, other._id_column)
    return self._table_with_context(ctx)
def _validate_expression(self, expression: expr.ColumnExpression):
    """Check that every column ``expression`` depends on (above any reducer)
    lives in this table's universe; raise ``ValueError`` otherwise."""
    for dep in expression._dependencies_above_reducer():
        if self._universe != dep._column.universe:
            raise ValueError(
                f"You cannot use {dep.to_column_expression()} in this context."
                + " Its universe is different than the universe of the table the method"
                + " was called on. You can use <table1>.with_universe_of(<table2>)"
                + " to assign universe of <table2> to <table1> if you're sure their"
                + " sets of keys are equal."
            )
def _wrap_column_in_context(
    self,
    context: clmn.Context,
    column: clmn.Column,
    name: str,
    lineage: clmn.Lineage | None = None,
) -> clmn.Column:
    """Contextualize column by wrapping it in expression.

    Args:
        context: the context the resulting column should live in.
        column: the column to wrap.
        name: the name under which the column is referenced.
        lineage: optional lineage to attach to the new column.
    """
    expression = expr.ColumnReference(_table=self, _column=column, _name=name)
    return expression._column_with_expression_cls(
        context=context,
        universe=context.universe,
        expression=expression,
        lineage=lineage,
    )
def _table_with_context(self, context: clmn.Context) -> Table:
    """Rewrap every column of this table in ``context`` and build a new Table."""
    wrapped = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
    }
    return Table(_columns=wrapped, _context=context)
def _table_restricted_context(self) -> clmn.TableRestrictedRowwiseContext:
    # Rowwise context that only admits references to this table's own columns.
    # NOTE(review): callers in JoinResult._table_join access this attribute
    # without calling it — it may be a property whose decorator is not
    # visible in this file; confirm at the definition site.
    return clmn.TableRestrictedRowwiseContext(self._id_column, self)
def _eval(
    self, expression: expr.ColumnExpression, context: clmn.Context | None = None
) -> clmn.ColumnWithExpression:
    """Desugar expression and wrap it in given context (rowwise by default)."""
    ctx = self._rowwise_context if context is None else context
    return expression._column_with_expression_cls(
        context=ctx,
        universe=ctx.universe,
        expression=expression,
    )
def _from_schema(cls: type[TTable], schema: type[Schema]) -> TTable:
    """Create an empty materialized table whose columns follow ``schema``.

    NOTE(review): takes ``cls`` but no ``@classmethod`` decorator is visible
    in this file (decorators appear stripped throughout) — confirm at the
    definition site.
    """
    universe = Universe()
    context = clmn.MaterializedContext(universe, schema.universe_properties)
    columns = {
        name: clmn.MaterializedColumn(
            universe,
            schema.column_properties(name),
        )
        for name in schema.column_names()
    }
    return cls(_columns=columns, _schema=schema, _context=context)
def __repr__(self) -> str:
    # Shown in interactive sessions; includes only the schema, not the data.
    return f"<pathway.Table schema={dict(self.typehints())}>"
def _with_same_universe(
    self,
    *columns: tuple[str, clmn.Column],
    schema: type[Schema] | None = None,
) -> Table:
    """Build a table over this table's universe from ``(name, column)`` pairs."""
    return Table(
        _columns=dict(columns),
        _schema=schema,
        _context=self._rowwise_context,
    )
def _sort_columns_by_other(self, other: Table):
    # Reorder self._columns in place to match the column order of `other`.
    assert self.keys() == other.keys()
    self._columns = {name: self._columns[name] for name in other.keys()}
def _operator_dependencies(self) -> StableSet[Table]:
    # A plain table depends only on itself (cf. JoinResult, which returns
    # the dependencies of both joined sides).
    return StableSet([self])
def debug(self, name: str):
    """Add a debug operator for this table under ``name`` to the graph;
    returns ``self`` so it can be chained."""
    G.add_operator(
        lambda id: DebugOperator(name, id),
        lambda operator: operator(self),
    )
    return self
def to(self, sink: DataSink) -> None:
    """Write this table to ``sink`` (a terminal operation in the dataflow)."""
    from pathway.internals import table_io

    table_io.table_to_datasink(self, sink)
def _materialize(self, universe: Universe):
    """Create a materialized copy of this table's columns over ``universe``."""
    context = clmn.MaterializedContext(universe)
    materialized = {
        name: clmn.MaterializedColumn(universe, column.properties)
        for name, column in self._columns.items()
    }
    return Table(
        _columns=materialized,
        _schema=self.schema,
        _context=context,
    )
def pointer_from(
    self, *args: Any, optional=False, instance: expr.ColumnReference | None = None
):
    """Pseudo-random hash of its arguments. Produces pointer types. Applied
    column-wise.

    Args:
        args: values/columns to hash into a pointer.
        optional: whether ``None`` inputs are allowed.
        instance: optional partitioning column appended to the hashed tuple.
    """
    hash_args = args if instance is None else (*args, instance)
    # XXX verify types for the table primary_keys
    return expr.PointerExpression(self, *hash_args, optional=optional)
def ix_ref(
    self,
    *args: expr.ColumnExpression | Value,
    optional: bool = False,
    context=None,
    instance: expr.ColumnReference | None = None,
):
    """Reindexes the table using expressions as primary keys.

    Uses keys from ``context``, or tries to infer a proper context from the
    expression. If ``optional`` is True, ``None`` in expression values results
    in ``None`` values in the result columns; values missing from the table
    keys result in RuntimeError.

    ``context`` can be anything that allows for ``select`` or ``reduce``, or a
    ``pathway.this`` construct (the latter results in a delayed operation and
    should only be used inside ``join().select()`` or ``groupby().reduce()``
    sequences). Tables obtained by a groupby/reduce scheme always have primary
    keys; single-row tables can be accessed via ``ix_ref()`` with no args.

    Args:
        args: column references forming the key.

    Returns:
        Row: the indexed row.
    """
    key = self.pointer_from(*args, optional=optional, instance=instance)
    return self.ix(key, optional=optional, context=context)
def _subtables(self) -> StableSet[Table]:
    # A plain table is its own only subtable (cf. JoinResult, which returns
    # the union of both joined sides).
    return StableSet([self])
def _substitutions(
    self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
    # A plain table needs no column substitutions (cf. JoinResult, which maps
    # original references to its inner table's columns).
    return self, {}
def typehints(self) -> Mapping[str, Any]:
    """
    Return the types of the columns as a dictionary.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t1.typehints()
    mappingproxy({'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>})
    """
    return self.schema.typehints()
def eval_type(self, expression: expr.ColumnExpression) -> dt.DType:
    """Infer the dtype of ``expression`` in this table's rowwise context."""
    interpreter = self._rowwise_context._get_type_interpreter()
    typed = interpreter.eval_expression(expression, state=TypeInterpreterState())
    return typed._dtype
def _auto_live(self) -> Table:
    """Make self automatically live in interactive mode."""
    from pathway.internals.interactive import is_interactive_mode_enabled

    return self.live() if is_interactive_mode_enabled() else self
def live(self) -> LiveTable[TSchema]:
    """Return a live view of this table. Experimental (a warning is emitted)."""
    from pathway.internals.interactive import LiveTable

    warnings.warn("live tables are an experimental feature", stacklevel=2)
    return LiveTable._create(self)
class JoinResult(Joinable, OperatorInput):
    """Result of a join between tables.

    Supports ``select``, ``filter``, ``groupby`` and ``reduce``; column
    references from either side of the join can be used in all of them.
    """

    # Internal table holding one row per joined pair, with mangled column names.
    _inner_table: Table
    # Maps original column references to their columns in _inner_table.
    _columns_mapping: dict[expr.InternalColRef, expr.ColumnReference]
    _left_table: Table
    _right_table: Table
    _original_left: Joinable
    _original_right: Joinable
    _substitution: dict[thisclass.ThisMetaclass, Joinable]
    _chained_join_desugaring: SubstitutionDesugaring
    # Names of columns equated by name in the join condition (kept unmangled).
    _joined_on_names: StableSet[str]
    _all_colnames: StableSet[str]
    _join_mode: JoinMode

    def __init__(
        self,
        _context: clmn.Context,
        _inner_table: Table,
        _columns_mapping: dict[expr.InternalColRef, expr.ColumnReference],
        _left_table: Table,
        _right_table: Table,
        _original_left: Joinable,
        _original_right: Joinable,
        _substitution: dict[thisclass.ThisMetaclass, Joinable],
        _joined_on_names: StableSet[str],
        _join_mode: JoinMode,
    ):
        super().__init__(_context)
        self._inner_table = _inner_table
        self._columns_mapping = _columns_mapping
        self._left_table = _left_table
        self._right_table = _right_table
        # Inside expressions, `this` refers to the join result itself.
        self._substitution = {**_substitution, thisclass.this: self}
        self._joined_on_names = _joined_on_names
        self._join_mode = _join_mode
        self._original_left = _original_left
        self._original_right = _original_right
        assert _original_left._subtables().isdisjoint(_original_right._subtables())
        self._all_colnames = StableSet.union(
            _original_left.keys(), _original_right.keys()
        )
        self._chained_join_desugaring = SubstitutionDesugaring(self._substitutions()[1])

    def _compute_universe(
        left_table: Table,
        right_table: Table,
        id: clmn.Column | None,
        mode: JoinMode,
    ) -> Universe:
        # Decide which universe the join result lives in, based on which side
        # (if any) supplies the ids and on the join mode.
        # NOTE(review): no decorator is visible in this file; this is invoked
        # via the class with four positional arguments — presumably a
        # @staticmethod upstream.
        if id is left_table._id_column:
            if mode == JoinMode.LEFT:
                return left_table._universe
            if mode == JoinMode.INNER:
                return left_table._universe.subset()
            raise KeyError("Cannot assign id's for this join type.")
        if id is right_table._id_column:
            if mode == JoinMode.RIGHT:
                return right_table._universe
            if mode == JoinMode.INNER:
                return right_table._universe.subset()
            raise KeyError("Cannot assign id's for this join type.")
        assert id is None
        return Universe()

    def _subtables(self) -> StableSet[Table]:
        return self._original_left._subtables() | self._original_right._subtables()

    def keys(self):
        # Every column name, except those that clash between the two sides
        # and were not explicitly joined on.
        clashing = self._original_left.keys() & self._original_right.keys()
        return self._all_colnames - (clashing - self._joined_on_names)

    def _get_colref_by_name(
        self,
        name: str,
        exception_type,
    ) -> expr.ColumnReference:
        """Resolve ``name`` to a column of either join side, or raise."""
        name = self._column_deprecation_rename(name)
        if name == "id":
            return self._inner_table.id
        if name in self._joined_on_names:
            # For inner joins both sides agree on the join key, so the left
            # original is used; otherwise the coalesced inner column is needed.
            if self._join_mode is JoinMode.INNER:
                return self._original_left[name]
            return self._inner_table[name]
        if name in self._original_left.keys() and name in self._original_right.keys():
            raise exception_type(
                f"Column {name} appears on both left and right inputs of join."
            )
        if name in self._original_left.keys():
            return self._original_left[name]
        if name in self._original_right.keys():
            return self._original_right[name]
        raise exception_type(f"No column with name {name}.")

    def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference:
        if isinstance(args, expr.ColumnReference):
            assert args.table is self or args.table is thisclass.this
            return self._get_colref_by_name(args.name, KeyError)
        return self._get_colref_by_name(args, KeyError)

    def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
        """Computes result of a join.

        Args:
            args: column references kept under their own names.
            kwargs: column expressions under new assigned names.

        Returns:
            Table: the created table.
        """
        desugared: dict[str, expr.ColumnExpression] = {}
        for new_name, expression in combine_args_kwargs(args, kwargs).items():
            desugared[new_name] = self._chained_join_desugaring.eval_expression(
                expression
            )
        return self._inner_table.select(**desugared)

    def _operator_dependencies(self) -> StableSet[Table]:
        return (
            self._left_table._operator_dependencies()
            | self._right_table._operator_dependencies()
        )

    def filter(self, filter_expression: expr.ColumnExpression) -> JoinResult:
        """Filters rows of the join result, keeping the ones satisfying the
        predicate; returns a new ``JoinResult`` that can be chained further."""
        desugared = self._chained_join_desugaring.eval_expression(filter_expression)
        filtered_inner = self._inner_table.filter(desugared)
        # Remap every original reference onto the filtered inner table.
        remapped = {
            int_ref: filtered_inner[expression.name]
            for int_ref, expression in self._columns_mapping.items()
        }
        remapped[filtered_inner.id._to_internal()] = filtered_inner.id
        context = clmn.JoinRowwiseContext.from_mapping(
            filtered_inner._id_column, remapped
        )
        filtered_inner._rowwise_context = context
        return JoinResult(
            _context=context,
            _inner_table=filtered_inner,
            _columns_mapping=remapped,
            _left_table=self._left_table,
            _right_table=self._right_table,
            _original_left=self._original_left,
            _original_right=self._original_right,
            _substitution=self._substitution,
            _joined_on_names=self._joined_on_names,
            _join_mode=self._join_mode,
        )

    def groupby(
        self,
        *args: expr.ColumnReference,
        id: expr.ColumnReference | None = None,
    ) -> GroupedJoinResult:
        """Groups the join result by columns from args.

        Note:
            Usually followed by ``.reduce()`` that aggregates the result and
            returns a table.

        Args:
            args: columns to group by.
            id: if provided, the column used to set ids of the result rows.

        Returns:
            GroupedJoinResult: Groupby object.
        """
        for arg in args:
            if not isinstance(arg, expr.ColumnReference):
                if isinstance(arg, str):
                    raise ValueError(
                        f"Expected a ColumnReference, found a string. Did you mean this.{arg} instead of {repr(arg)}?"
                    )
                else:
                    raise ValueError(
                        "In JoinResult.groupby() all arguments have to be a ColumnReference."
                    )
        from pathway.internals.groupbys import GroupedJoinResult

        return GroupedJoinResult(
            _join_result=self,
            _args=args,
            _id=id,
        )

    def reduce(
        self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
    ) -> Table:
        """Reduces the join result to a single row.

        Equivalent to ``self.groupby().reduce(*args, **kwargs)``.

        Args:
            args: reducers to reduce the table with.
            kwargs: reducers under new column names.

        Returns:
            Table: the reduced table.
        """
        for arg in args:
            if not isinstance(arg, expr.ColumnReference):
                if isinstance(arg, str):
                    raise ValueError(
                        f"Expected a ColumnReference, found a string. Did you mean this.{arg} instead of {repr(arg)}?"
                    )
                else:
                    raise ValueError(
                        "In JoinResult.reduce() all positional arguments have to be a ColumnReference."
                    )
        return self.groupby().reduce(*args, **kwargs)

    def _substitutions(
        self,
    ) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
        # Expressions over this join are rewritten onto the inner table.
        return self._inner_table, dict(self._columns_mapping)

    def _join(
        context: clmn.JoinContext, *args: expr.ColumnReference, **kwargs: Any
    ) -> Table:
        """Used internally to create an internal Table containing result of a join."""
        # NOTE(review): no decorator visible here; invoked as
        # JoinResult._join(context, ...) — presumably a @staticmethod upstream.
        columns: dict[str, clmn.Column] = {}
        for new_name, expression in combine_args_kwargs(args, kwargs).items():
            columns[new_name] = expression._column_with_expression_cls(
                context=context,
                universe=context.universe,
                expression=expression,
            )
        from pathway.internals.table import Table

        return Table(
            _columns=columns,
            _context=context,
        )

    def _prepare_inner_table_with_mapping(
        context: clmn.JoinContext,
        original_left: Joinable,
        original_right: Joinable,
        common_column_names: StableSet[str],
    ) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnReference]]:
        """Build the inner join table (with mangled column names) together with
        the mapping from original column references to inner-table columns."""
        left_table, left_substitutions = original_left._substitutions()
        right_table, right_substitutions = original_right._substitutions()
        counter = itertools.count(0)
        expressions: dict[str, expr.ColumnExpression] = {}
        colref_to_name_mapping: dict[expr.InternalColRef, str] = {}
        for table, subs in [
            (left_table, left_substitutions),
            (right_table, right_substitutions),
        ]:
            if len(subs) == 0:  # tables have empty subs, so set them here
                for ref in table:
                    subs[ref._to_internal()] = ref
            subs_total = subs | {table.id._to_internal(): table.id}
            for int_ref, expression in subs_total.items():
                # Mangle every column into a unique internal name.
                inner_name = f"_pw_{next(counter)}"
                expressions[inner_name] = expression
                colref_to_name_mapping[int_ref] = inner_name
        from pathway.internals.common import coalesce

        # Columns joined on by name are merged into one coalesced column.
        for name in common_column_names:
            if name != "id":
                expressions[name] = coalesce(original_left[name], original_right[name])
        inner_table = JoinResult._join(context, **expressions)
        final_mapping = {
            colref: inner_table[name] for colref, name in colref_to_name_mapping.items()
        }
        for name in common_column_names:
            if name != "id":
                colref = inner_table[name]
                final_mapping[colref._to_internal()] = colref
        final_mapping[inner_table.id._to_internal()] = inner_table.id
        rowwise_context = clmn.JoinRowwiseContext.from_mapping(
            inner_table._id_column, final_mapping
        )
        inner_table._rowwise_context = (
            rowwise_context  # FIXME don't set _context property of table
        )
        return (inner_table, final_mapping)

    def _table_join(
        left: Joinable,
        right: Joinable,
        *on: expr.ColumnExpression,
        mode: JoinMode,
        id: expr.ColumnReference | None = None,
        left_instance: expr.ColumnReference | None = None,
        right_instance: expr.ColumnReference | None = None,
    ) -> JoinResult:
        """Implementation behind ``Table.join`` and its variants."""
        if left == right:
            raise ValueError(
                "Cannot join table with itself. Use <table>.copy() as one of the arguments of the join."
            )
        left_table, left_substitutions = left._substitutions()
        right_table, right_substitutions = right._substitutions()
        chained_join_desugaring = SubstitutionDesugaring(
            {**left_substitutions, **right_substitutions}
        )
        if id is not None:
            id = chained_join_desugaring.eval_expression(id)
            id_column = id._column
        else:
            id_column = None
        common_column_names: StableSet[str] = StableSet()
        if left_instance is not None and right_instance is not None:
            on = (*on, left_instance == right_instance)
        else:
            assert left_instance is None and right_instance is None
        on_ = tuple(validate_shape(cond) for cond in on)
        # A condition equating same-named columns makes that name shared.
        for cond in on_:
            cond_left = cast(expr.ColumnReference, cond._left)
            cond_right = cast(expr.ColumnReference, cond._right)
            if cond_left.name == cond_right.name:
                common_column_names.add(cond_left.name)
        on_ = tuple(chained_join_desugaring.eval_expression(cond) for cond in on_)
        for cond in on_:
            validate_join_condition(cond, left_table, right_table)
        # NOTE(review): `_table_restricted_context` is accessed without a
        # call — this only works if it is a property upstream (decorators are
        # not visible in this file); otherwise `()` would be missing. Confirm.
        on_left = tuple(
            left_table._eval(cond._left, left_table._table_restricted_context)
            for cond in on_
        )
        on_right = tuple(
            right_table._eval(cond._right, right_table._table_restricted_context)
            for cond in on_
        )
        swp = id_column is not None and id_column is right_table._id_column
        assert (
            id_column is None
            or (id_column is left_table._id_column)
            or (id_column is right_table._id_column)
        )
        left_context_table = clmn.ContextTable(universe=left._universe, columns=on_left)
        right_context_table = clmn.ContextTable(
            universe=right._universe, columns=on_right
        )
        substitution: dict[thisclass.ThisMetaclass, Joinable] = {
            thisclass.left: left,
            thisclass.right: right,
        }
        universe = JoinResult._compute_universe(
            left_table, right_table, id_column, mode
        )
        if swp:
            # ids come from the right side: swap the sides in the context.
            context = clmn.JoinContext(
                universe,
                right_table,
                left_table,
                right_context_table,
                left_context_table,
                id_column is not None,
                mode in [JoinMode.RIGHT, JoinMode.OUTER],
                mode in [JoinMode.LEFT, JoinMode.OUTER],
            )
        else:
            context = clmn.JoinContext(
                universe,
                left_table,
                right_table,
                left_context_table,
                right_context_table,
                id_column is not None,
                mode in [JoinMode.LEFT, JoinMode.OUTER],
                mode in [JoinMode.RIGHT, JoinMode.OUTER],
            )
        inner_table, columns_mapping = JoinResult._prepare_inner_table_with_mapping(
            context,
            left,
            right,
            common_column_names,
        )
        return JoinResult(
            context,
            inner_table,
            columns_mapping,
            left_table,
            right_table,
            left,
            right,
            substitution,
            common_column_names,
            mode,
        )
The provided code snippet includes necessary dependencies for implementing the `groupby` function. Write a Python function `def groupby( grouped: Table | JoinResult, *args, id: expr.ColumnReference | None = None, **kwargs )` to solve the following problem:
Groups join result by columns from args. Note: Usually followed by `.reduce()` that aggregates the result and returns a table. Args: grouped: ``JoinResult`` to group by. args: columns to group by. id: if provided, is the column used to set id's of the rows of the result **kwargs: extra arguments, see respective documentation for ``Table.groupby`` and ``JoinResult.groupby`` Returns: Groupby object of ``GroupedJoinResult`` or ``GroupedTable`` type. Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... age | owner | pet ... 10 | Alice | dog ... 9 | Bob | dog ... 8 | Alice | cat ... 7 | Bob | dog ... ''') >>> t2 = pw.groupby(t1, t1.pet, t1.owner).reduce( ... t1.owner, t1.pet, ageagg=pw.reducers.sum(t1.age) ... ) >>> pw.debug.compute_and_print(t2, include_id=False) owner | pet | ageagg Alice | cat | 8 Alice | dog | 10 Bob | dog | 16 >>> t3 = pw.debug.table_from_markdown(''' ... cost owner pet ... 1 100 Alice 1 ... 2 90 Bob 1 ... 3 80 Alice 2 ... ''') >>> t4 = pw.debug.table_from_markdown(''' ... cost owner pet size ... 11 100 Alice 3 M ... 12 90 Bob 1 L ... 13 80 Tom 1 XL ... ''') >>> join_result = t3.join(t4, t3.owner==t4.owner) >>> result = pw.groupby(join_result, pw.this.owner).reduce( ... pw.this.owner, pairs=pw.reducers.count() ... ) >>> pw.debug.compute_and_print(result, include_id=False) owner | pairs Alice | 2 Bob | 1
Here is the function:
def groupby(
    grouped: Table | JoinResult, *args, id: expr.ColumnReference | None = None, **kwargs
):
    """Groups join result by columns from args.
    Note:
    Usually followed by `.reduce()` that aggregates the result and returns a table.
    Args:
    grouped: ``JoinResult`` to group by.
    args: columns to group by.
    id: if provided, is the column used to set id's of the rows of the result
    **kwargs: extra arguments, see respective documentation for ``Table.groupby`` and ``JoinResult.groupby``
    Returns:
    Groupby object of ``GroupedJoinResult`` or ``GroupedTable`` type.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = pw.groupby(t1, t1.pet, t1.owner).reduce(
    ... t1.owner, t1.pet, ageagg=pw.reducers.sum(t1.age)
    ... )
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | pet | ageagg
    Alice | cat | 8
    Alice | dog | 10
    Bob | dog | 16
    >>> t3 = pw.debug.table_from_markdown('''
    ... cost owner pet
    ... 1 100 Alice 1
    ... 2 90 Bob 1
    ... 3 80 Alice 2
    ... ''')
    >>> t4 = pw.debug.table_from_markdown('''
    ... cost owner pet size
    ... 11 100 Alice 3 M
    ... 12 90 Bob 1 L
    ... 13 80 Tom 1 XL
    ... ''')
    >>> join_result = t3.join(t4, t3.owner==t4.owner)
    >>> result = pw.groupby(join_result, pw.this.owner).reduce(
    ... pw.this.owner, pairs=pw.reducers.count()
    ... )
    >>> pw.debug.compute_and_print(result, include_id=False)
    owner | pairs
    Alice | 2
    Bob | 1
    """
    # Plain delegation: both Table and JoinResult expose a compatible
    # ``groupby`` method, so this free function just forwards to it.
    return grouped.groupby(*args, id=id, **kwargs)
166,718 | from __future__ import annotations
import itertools
from collections import defaultdict
from collections.abc import Iterable
from typing import TYPE_CHECKING
from pathway.internals import dtype as dt, expression as expr
from pathway.internals.expression_visitor import ExpressionVisitor
class ExpressionFormatter(ExpressionVisitor):
    """Pretty-printer turning column expressions into readable pseudo-code.

    Tables referenced by the expression are numbered on first use and
    rendered as ``<tableN>`` placeholders; :meth:`table_infos` reports where
    each referenced table was created, for use in diagnostics.
    """

    table_numbers: dict[Table, int]

    def __init__(self):
        self.table_counter = itertools.count(start=1)
        # First lookup of a table assigns it the next consecutive number.
        self.table_numbers = defaultdict(lambda: next(self.table_counter))

    def table_infos(self):
        """Yield ``(placeholder, creation_frame)`` for every referenced table."""
        for tab, cnt in self.table_numbers.items():
            trace: Trace = tab._source.operator.trace
            yield f"<table{cnt}>", trace.user_frame

    def print_table_infos(self):
        """Render one ``<tableN> created in file:line`` line per table."""
        return "\n".join(
            f"{name} created in {frame.filename}:{frame.line_number}"
            for name, frame in self.table_infos()
        )

    def eval_column_val(self, expression: expr.ColumnReference):
        from pathway.internals.thisclass import ThisMetaclass

        # ``pw.this``-style references already print nicely; concrete tables
        # are replaced by their numbered placeholder.
        if isinstance(expression._table, ThisMetaclass):
            return f"{expression._table}.{expression._name}"
        else:
            return f"<table{self.table_numbers[expression._table]}>.{expression._name}"

    def eval_unary_op(self, expression: expr.ColumnUnaryOpExpression):
        symbol = getattr(expression._operator, "_symbol", expression._operator.__name__)
        uexpr = self.eval_expression(expression._expr)
        return f"({symbol}{uexpr})"

    def eval_binary_op(self, expression: expr.ColumnBinaryOpExpression):
        symbol = getattr(expression._operator, "_symbol", expression._operator.__name__)
        lexpr = self.eval_expression(expression._left)
        rexpr = self.eval_expression(expression._right)
        return f"({lexpr} {symbol} {rexpr})"

    def eval_const(self, expression: expr.ColumnConstExpression):
        return repr(expression._val)

    def eval_reducer(self, expression: expr.ReducerExpression):
        args = self._eval_args_kwargs(expression._args, expression._kwargs)
        name = expression._reducer.name
        return f"pathway.reducers.{name}({args})"

    def eval_count(self, expression: expr.CountExpression):
        return "pathway.reducers.count()"

    def eval_apply(self, expression: expr.ApplyExpression):
        args = self._eval_args_kwargs(expression._args, expression._kwargs)
        return f"pathway.apply({expression._fun.__name__}, {args})"

    def eval_async_apply(self, expression: expr.ApplyExpression):
        args = self._eval_args_kwargs(expression._args, expression._kwargs)
        return f"pathway.apply_async({expression._fun.__name__}, {args})"

    def eval_numbaapply(self, expression: expr.NumbaApplyExpression):
        args = self._eval_args_kwargs(expression._args, expression._kwargs)
        return f"pathway.numba_apply({expression._fun.__name__}, {args})"

    def eval_pointer(self, expression: expr.PointerExpression):
        kwargs: dict[str, expr.ColumnExpression] = {}
        if expression._optional:
            # Surface the optional flag as an explicit keyword argument.
            # (The module-level ``expr`` alias is used; the former redundant
            # local import was removed.)
            kwargs["optional"] = expr.ColumnConstExpression(True)
        args = self._eval_args_kwargs(expression._args, kwargs)
        return f"<table{self.table_numbers[expression._table]}>.pointer_from({args})"

    def eval_call(self, expression: expr.ColumnCallExpression):
        args = self._eval_args_kwargs(expression._args)
        return self.eval_expression(expression._col_expr) + f"({args})"

    def eval_cast(self, expression: expr.CastExpression):
        uexpr = self.eval_expression(expression._expr)
        return f"pathway.cast({_type_name(expression._return_type)}, {uexpr})"

    def eval_convert(self, expression: expr.ConvertExpression):
        uexpr = self.eval_expression(expression._expr)
        dtype = dt.unoptionalize(expression._return_type)
        return f"pathway.as_{_type_name(dtype).lower()}({uexpr})"

    def eval_declare(self, expression: expr.DeclareTypeExpression):
        uexpr = self.eval_expression(expression._expr)
        return f"pathway.declare_type({_type_name(expression._return_type)}, {uexpr})"

    def eval_coalesce(self, expression: expr.CoalesceExpression):
        args = self._eval_args_kwargs(expression._args)
        return f"pathway.coalesce({args})"

    def eval_require(self, expression: expr.RequireExpression):
        args = self._eval_args_kwargs((expression._val, *expression._args))
        return f"pathway.require({args})"

    def eval_ifelse(self, expression: expr.IfElseExpression):
        args = self._eval_args_kwargs(
            (expression._if, expression._then, expression._else)
        )
        return f"pathway.if_else({args})"

    def eval_not_none(self, expression: expr.IsNotNoneExpression):
        args = self._eval_args_kwargs((expression._expr,))
        # Fixed: output previously had unbalanced parentheses -- a stray
        # trailing ")" and no opening one around the receiver.
        return f"({args}).is_not_none()"

    def eval_none(self, expression: expr.IsNoneExpression):
        args = self._eval_args_kwargs((expression._expr,))
        # Fixed: same unbalanced-parentheses defect as ``eval_not_none``.
        return f"({args}).is_none()"

    def eval_method_call(self, expression: expr.MethodCallExpression):
        object_ = self.eval_expression(expression._args_used_for_repr[0])
        args = self._eval_args_kwargs(expression._args_used_for_repr[1:])
        return f"({object_}).{expression._name}({args})"

    def _eval_args_kwargs(
        self,
        args: Iterable[expr.ColumnExpression] = (),
        kwargs: dict[str, expr.ColumnExpression] | None = None,
    ):
        """Format positional and keyword arguments as a comma-separated list."""
        # ``None`` sentinel instead of a mutable ``{}`` default avoids the
        # shared-mutable-default pitfall (behavior is unchanged: the dict was
        # only ever read).
        if kwargs is None:
            kwargs = {}
        return ", ".join(
            itertools.chain(
                (self.eval_expression(arg) for arg in args),
                (
                    key + "=" + self.eval_expression(value)
                    for key, value in kwargs.items()
                ),
            )
        )

    def eval_make_tuple(self, expression: expr.MakeTupleExpression):
        args = self._eval_args_kwargs(expression._args)
        return f"pathway.make_tuple({args})"

    def eval_get(self, expression: expr.GetExpression):
        # Renamed local (was ``object``, shadowing the builtin) for
        # consistency with ``eval_method_call``.
        object_ = self.eval_expression(expression._object)
        args = [expression._index]
        if expression._check_if_exists:
            args += [expression._default]
        args_formatted = self._eval_args_kwargs(args)
        # ``.get(index, default)`` when the lookup is safe, ``[...]`` otherwise.
        if expression._check_if_exists:
            return f"({object_}).get({args_formatted})"
        else:
            return f"({object_})[{args_formatted}]"

    def eval_unwrap(self, expression: expr.UnwrapExpression):
        uexpr = self.eval_expression(expression._expr)
        return f"pathway.unwrap({uexpr})"
def get_expression_info(expression: expr.ColumnExpression) -> str:
    """Build a human-readable diagnostic description of *expression*.

    The result contains the rendered expression, the user call site (when a
    trace frame is available), and the creation sites of referenced tables.
    """
    formatter = ExpressionFormatter()
    rendered = f"\t{formatter.eval_expression(expression)}\n"

    location = ""
    frame = expression._trace.user_frame
    if frame is not None:
        location = f"called in {frame.filename}:{frame.line_number}\n"

    tables = formatter.print_table_infos()
    if tables:
        tables = "with tables:\n\t" + tables + "\n"

    return rendered + location + tables
166,719 | from __future__ import annotations
import itertools
from collections import defaultdict
from collections.abc import Iterable
from typing import TYPE_CHECKING
from pathway.internals import dtype as dt, expression as expr
from pathway.internals.expression_visitor import ExpressionVisitor
def _type_name(return_type):
    """Return a printable name for a return-type specification.

    Strings and Pathway DTypes are shown via ``repr``; anything else is
    assumed to be a class and shown by its ``__name__``.
    """
    from pathway.internals import dtype as dt

    if isinstance(return_type, (str, dt.DType)):
        return repr(return_type)
    return return_type.__name__
166,720 | from __future__ import annotations
from collections.abc import Callable
from functools import wraps
from pathway.internals import dtype as dt, operator as op, row_transformer as rt
from pathway.internals.helpers import function_spec, with_optional_kwargs
from pathway.internals.parse_graph import G
def _operator_wrapper(func: Callable, operator_cls: type[op.OperatorFromDef]):
    """Wrap *func* so each call registers an *operator_cls* node in the graph.

    The function spec is captured once, at decoration time; every invocation
    then adds a fresh operator to the global parse graph ``G`` and applies it
    to the call's arguments.
    """
    spec = function_spec(func)

    def wrapper(*args, **kwargs):
        return G.add_operator(
            lambda id: operator_cls(spec, id),
            lambda operator: operator(*args, **kwargs),
        )

    return wrapper
def contextualized_operator(func):
    # Register *func* as a graph operator backed by a
    # ContextualizedIntermediateOperator node.
    return _operator_wrapper(func, op.ContextualizedIntermediateOperator)
166,721 | from __future__ import annotations
from collections.abc import Callable
from functools import wraps
from pathway.internals import dtype as dt, operator as op, row_transformer as rt
from pathway.internals.helpers import function_spec, with_optional_kwargs
from pathway.internals.parse_graph import G
The provided code snippet includes necessary dependencies for implementing the `input_attribute` function. Write a Python function `def input_attribute(type=float)` to solve the following problem:
Returns new input_attribute. To be used inside class transformers. Example: >>> import pathway as pw >>> @pw.transformer ... class simple_transformer: ... class table(pw.ClassArg): ... arg = pw.input_attribute() ... ... @pw.output_attribute ... def ret(self) -> float: ... return self.arg + 1 ... >>> t1 = pw.debug.table_from_markdown(''' ... age ... 10 ... 9 ... 8 ... 7''') >>> t2 = simple_transformer(table=t1.select(arg=t1.age)).table >>> pw.debug.compute_and_print(t1 + t2, include_id=False) age | ret 7 | 8 8 | 9 9 | 10 10 | 11
Here is the function:
def input_attribute(type=float):
    """Returns new input_attribute. To be used inside class transformers.
    Example:
    >>> import pathway as pw
    >>> @pw.transformer
    ... class simple_transformer:
    ...     class table(pw.ClassArg):
    ...         arg = pw.input_attribute()
    ...
    ...     @pw.output_attribute
    ...     def ret(self) -> float:
    ...         return self.arg + 1
    ...
    >>> t1 = pw.debug.table_from_markdown('''
    ... age
    ... 10
    ... 9
    ... 8
    ... 7''')
    >>> t2 = simple_transformer(table=t1.select(arg=t1.age)).table
    >>> pw.debug.compute_and_print(t1 + t2, include_id=False)
    age | ret
    7 | 8
    8 | 9
    9 | 10
    10 | 11
    """
    # NOTE: the parameter is deliberately named ``type`` (shadowing the
    # builtin) -- it is part of the public API and cannot be renamed.
    # Wrap the Python type in a Pathway DType before handing it over.
    return rt.InputAttribute(dtype=dt.wrap(type))
166,722 | from __future__ import annotations
from collections.abc import Callable
from functools import wraps
from pathway.internals import dtype as dt, operator as op, row_transformer as rt
from pathway.internals.helpers import function_spec, with_optional_kwargs
from pathway.internals.parse_graph import G
The provided code snippet includes necessary dependencies for implementing the `input_method` function. Write a Python function `def input_method(type=float)` to solve the following problem:
Decorator for defining input methods in class transformers. Example: >>> import pathway as pw >>> @pw.transformer ... class first_transformer: ... class table(pw.ClassArg): ... a: float = pw.input_attribute() ... ... @pw.method ... def fun(self, arg) -> int: ... return self.a * arg ... >>> @pw.transformer ... class second_transformer: ... class table(pw.ClassArg): ... m = pw.input_method(int) ... ... @pw.output_attribute ... def val(self): ... return self.m(2) ... >>> t1 = pw.debug.table_from_markdown(''' ... age ... 10 ... 9 ... 8 ... 7''') >>> t2 = first_transformer(table=t1.select(a=t1.age)).table >>> t2.schema <pathway.Schema types={'fun': typing.Callable[..., int]}> >>> t3 = second_transformer(table=t2.select(m=t2.fun)).table >>> pw.debug.compute_and_print(t1 + t3, include_id=False) age | val 7 | 14 8 | 16 9 | 18 10 | 20
Here is the function:
def input_method(type=float):
    """Decorator for defining input methods in class transformers.
    Example:
    >>> import pathway as pw
    >>> @pw.transformer
    ... class first_transformer:
    ...     class table(pw.ClassArg):
    ...         a: float = pw.input_attribute()
    ...
    ...     @pw.method
    ...     def fun(self, arg) -> int:
    ...         return self.a * arg
    ...
    >>> @pw.transformer
    ... class second_transformer:
    ...     class table(pw.ClassArg):
    ...         m = pw.input_method(int)
    ...
    ...     @pw.output_attribute
    ...     def val(self):
    ...         return self.m(2)
    ...
    >>> t1 = pw.debug.table_from_markdown('''
    ... age
    ... 10
    ... 9
    ... 8
    ... 7''')
    >>> t2 = first_transformer(table=t1.select(a=t1.age)).table
    >>> t2.schema
    <pathway.Schema types={'fun': typing.Callable[..., int]}>
    >>> t3 = second_transformer(table=t2.select(m=t2.fun)).table
    >>> pw.debug.compute_and_print(t1 + t3, include_id=False)
    age | val
    7 | 14
    8 | 16
    9 | 18
    10 | 20
    """
    # ``type`` here is the method's return type; the parameter name shadows
    # the builtin on purpose (public API).
    return rt.InputMethod(dtype=dt.wrap(type))
166,723 | from __future__ import annotations
from collections.abc import Callable
from functools import wraps
from pathway.internals import dtype as dt, operator as op, row_transformer as rt
from pathway.internals.helpers import function_spec, with_optional_kwargs
from pathway.internals.parse_graph import G
The provided code snippet includes necessary dependencies for implementing the `attribute` function. Write a Python function `def attribute(func, **kwargs)` to solve the following problem:
Decorator for creation of attributes. Example: >>> import pathway as pw >>> @pw.transformer ... class simple_transformer: ... class table(pw.ClassArg): ... arg = pw.input_attribute() ... ... @pw.attribute ... def attr(self) -> float: ... return self.arg*2 ... ... @pw.output_attribute ... def ret(self) -> float: ... return self.attr + 1 ... >>> t1 = pw.debug.table_from_markdown(''' ... age ... 10 ... 9 ... 8 ... 7''') >>> t2 = simple_transformer(table=t1.select(arg=t1.age)).table >>> pw.debug.compute_and_print(t1 + t2, include_id=False) age | ret 7 | 15 8 | 17 9 | 19 10 | 21
Here is the function:
def attribute(func, **kwargs):
    """Decorator for creation of attributes.
    Example:
    >>> import pathway as pw
    >>> @pw.transformer
    ... class simple_transformer:
    ...     class table(pw.ClassArg):
    ...         arg = pw.input_attribute()
    ...
    ...     @pw.attribute
    ...     def attr(self) -> float:
    ...         return self.arg*2
    ...
    ...     @pw.output_attribute
    ...     def ret(self) -> float:
    ...         return self.attr + 1
    ...
    >>> t1 = pw.debug.table_from_markdown('''
    ... age
    ... 10
    ... 9
    ... 8
    ... 7''')
    >>> t2 = simple_transformer(table=t1.select(arg=t1.age)).table
    >>> pw.debug.compute_and_print(t1 + t2, include_id=False)
    age | ret
    7 | 15
    8 | 17
    9 | 19
    10 | 21
    """
    # Thin wrapper: the row-transformer Attribute descriptor does the work.
    return rt.Attribute(func, **kwargs)
166,724 | from __future__ import annotations
from collections.abc import Callable
from functools import wraps
from pathway.internals import dtype as dt, operator as op, row_transformer as rt
from pathway.internals.helpers import function_spec, with_optional_kwargs
from pathway.internals.parse_graph import G
The provided code snippet includes necessary dependencies for implementing the `output_attribute` function. Write a Python function `def output_attribute(func, **kwargs)` to solve the following problem:
Decorator for creation of output_attributes. Example: >>> import pathway as pw >>> @pw.transformer ... class simple_transformer: ... class table(pw.ClassArg): ... arg = pw.input_attribute() ... ... @pw.output_attribute ... def ret(self) -> float: ... return self.arg + 1 ... >>> t1 = pw.debug.table_from_markdown(''' ... age ... 10 ... 9 ... 8 ... 7''') >>> t2 = simple_transformer(table=t1.select(arg=t1.age)).table >>> pw.debug.compute_and_print(t1 + t2, include_id=False) age | ret 7 | 8 8 | 9 9 | 10 10 | 11
Here is the function:
def output_attribute(func, **kwargs):
    """Decorator for creation of output_attributes.
    Example:
    >>> import pathway as pw
    >>> @pw.transformer
    ... class simple_transformer:
    ...     class table(pw.ClassArg):
    ...         arg = pw.input_attribute()
    ...
    ...     @pw.output_attribute
    ...     def ret(self) -> float:
    ...         return self.arg + 1
    ...
    >>> t1 = pw.debug.table_from_markdown('''
    ... age
    ... 10
    ... 9
    ... 8
    ... 7''')
    >>> t2 = simple_transformer(table=t1.select(arg=t1.age)).table
    >>> pw.debug.compute_and_print(t1 + t2, include_id=False)
    age | ret
    7 | 8
    8 | 9
    9 | 10
    10 | 11
    """
    # Thin wrapper: the row-transformer OutputAttribute descriptor does the work.
    return rt.OutputAttribute(func, **kwargs)
166,725 | from __future__ import annotations
from collections.abc import Callable
from functools import wraps
from pathway.internals import dtype as dt, operator as op, row_transformer as rt
from pathway.internals.helpers import function_spec, with_optional_kwargs
from pathway.internals.parse_graph import G
The provided code snippet includes necessary dependencies for implementing the `method` function. Write a Python function `def method(func, **kwargs)` to solve the following problem:
Decorator for creation methods in class transformers. Example: >>> import pathway as pw >>> @pw.transformer ... class simple_transformer: ... class table(pw.ClassArg): ... a: float = pw.input_attribute() ... ... @pw.output_attribute ... def b(self) -> float: ... return self.fun(self.a) ... ... @method ... def fun(self, arg) -> float: ... return self.a * arg ... >>> t1 = pw.debug.table_from_markdown(''' ... age ... 10 ... 9 ... 8 ... 7''') >>> t2 = simple_transformer(table=t1.select(a=t1.age)).table >>> t2.schema <pathway.Schema types={'b': <class 'float'>, 'fun': typing.Callable[..., float]}> >>> pw.debug.compute_and_print(t1 + t2.select(t2.b), include_id=False) age | b 7 | 49 8 | 64 9 | 81 10 | 100 >>> pw.debug.compute_and_print(t1 + t2.select(out = t2.fun(t2.b)), include_id=False) age | out 7 | 343 8 | 512 9 | 729 10 | 1000
Here is the function:
def method(func, **kwargs):
    """Decorator for creation methods in class transformers.
    Example:
    >>> import pathway as pw
    >>> @pw.transformer
    ... class simple_transformer:
    ...     class table(pw.ClassArg):
    ...         a: float = pw.input_attribute()
    ...
    ...     @pw.output_attribute
    ...     def b(self) -> float:
    ...         return self.fun(self.a)
    ...
    ...     @method
    ...     def fun(self, arg) -> float:
    ...         return self.a * arg
    ...
    >>> t1 = pw.debug.table_from_markdown('''
    ... age
    ... 10
    ... 9
    ... 8
    ... 7''')
    >>> t2 = simple_transformer(table=t1.select(a=t1.age)).table
    >>> t2.schema
    <pathway.Schema types={'b': <class 'float'>, 'fun': typing.Callable[..., float]}>
    >>> pw.debug.compute_and_print(t1 + t2.select(t2.b), include_id=False)
    age | b
    7 | 49
    8 | 64
    9 | 81
    10 | 100
    >>> pw.debug.compute_and_print(t1 + t2.select(out = t2.fun(t2.b)), include_id=False)
    age | out
    7 | 343
    8 | 512
    9 | 729
    10 | 1000
    """
    # Thin wrapper: the row-transformer Method descriptor does the work.
    return rt.Method(func, **kwargs)
166,726 | from __future__ import annotations
from collections.abc import Callable
from functools import wraps
from pathway.internals import dtype as dt, operator as op, row_transformer as rt
from pathway.internals.helpers import function_spec, with_optional_kwargs
from pathway.internals.parse_graph import G
The provided code snippet includes necessary dependencies for implementing the `transformer` function. Write a Python function `def transformer(cls)` to solve the following problem:
Decorator that wraps the outer class when defining class transformers. Example: >>> import pathway as pw >>> @pw.transformer ... class simple_transformer: ... class table(pw.ClassArg): ... arg = pw.input_attribute() ... ... @pw.output_attribute ... def ret(self) -> float: ... return self.arg + 1 ... >>> t1 = pw.debug.table_from_markdown(''' ... age ... 10 ... 9 ... 8 ... 7''') >>> t2 = simple_transformer(table=t1.select(arg=t1.age)).table >>> pw.debug.compute_and_print(t1 + t2, include_id=False) age | ret 7 | 8 8 | 9 9 | 10 10 | 11
Here is the function:
def transformer(cls):
    """Decorator that wraps the outer class when defining class transformers.
    Example:
    >>> import pathway as pw
    >>> @pw.transformer
    ... class simple_transformer:
    ...     class table(pw.ClassArg):
    ...         arg = pw.input_attribute()
    ...
    ...     @pw.output_attribute
    ...     def ret(self) -> float:
    ...         return self.arg + 1
    ...
    >>> t1 = pw.debug.table_from_markdown('''
    ... age
    ... 10
    ... 9
    ... 8
    ... 7''')
    >>> t2 = simple_transformer(table=t1.select(arg=t1.age)).table
    >>> pw.debug.compute_and_print(t1 + t2, include_id=False)
    age | ret
    7 | 8
    8 | 9
    9 | 10
    10 | 11
    """
    # Convert the decorated class definition into a RowTransformer instance.
    return rt.RowTransformer.from_class(cls)
166,727 | from __future__ import annotations
import contextlib
import functools
import sys
import traceback
from collections.abc import Callable
from dataclasses import dataclass
from typing import TYPE_CHECKING, ParamSpec, TypeVar
class Trace:
    # Full call-stack snapshot plus the innermost frame attributed to user code.
    # NOTE(review): the constructor calls below (Trace(frames=..., user_frame=...))
    # rely on generated keyword arguments -- presumably a @dataclass decorator
    # was lost in extraction; confirm against the original module.
    frames: list[Frame]
    user_frame: Frame | None

    def from_traceback():
        # NOTE(review): takes no self/cls -- presumably decorated with
        # @staticmethod in the original source; confirm.
        # Snapshot every frame of the current stack except this call itself.
        frames = [
            Frame(
                filename=e.filename,
                line_number=e.lineno,
                line=e.line,
                function=e.name,
            )
            for e in traceback.extract_stack()[:-1]
        ]
        # The user frame is the last external frame seen before the first
        # marker frame (frames after the marker belong to Pathway internals).
        user_frame: Frame | None = None
        for frame in frames:
            if frame.is_marker():
                break
            elif frame.is_external():
                user_frame = frame
        return Trace(frames=frames, user_frame=user_frame)

    def to_engine(self) -> api.Trace | None:
        # Convert to the engine-side trace representation; returns None when
        # the user frame is missing or lacks location details.
        user_frame = self.user_frame
        if (
            user_frame is None
            or user_frame.line_number is None
            or user_frame.line is None
        ):
            return None
        else:
            # Imported lazily to avoid a module-level dependency cycle.
            from pathway.internals import api

            return api.Trace(
                file_name=user_frame.filename,
                line_number=user_frame.line_number,
                line=user_frame.line,
                function=user_frame.function,
            )
def _reraise_with_user_frame(e: Exception, trace: Trace | None = None) -> None:
traceback = e.__traceback__
if traceback is not None:
traceback = traceback.tb_next
e = e.with_traceback(traceback)
if hasattr(e, "_pathway_trace_note"):
raise e
if trace is None:
trace = Trace.from_traceback()
user_frame = trace.user_frame
if user_frame is not None:
add_pathway_trace_note(e, user_frame)
raise e
def custom_trace(trace: Trace):
    # Run the enclosed code, re-raising any exception annotated with the
    # user frame taken from *trace* instead of a freshly captured stack.
    # NOTE(review): this is a generator used as a context manager elsewhere;
    # a @contextlib.contextmanager decorator appears to have been lost in
    # extraction -- confirm against the original module.
    try:
        yield
    except Exception as e:
        _reraise_with_user_frame(e, trace)
166,728 | import os
from dataclasses import dataclass, field
from pathway import persistence
from pathway.internals import api
def _env_field(name: str, default: str | None = None):
def factory():
return os.environ.get(name, default)
return field(default_factory=factory) | null |
166,729 | import os
from dataclasses import dataclass, field
from pathway import persistence
from pathway.internals import api
def _env_bool_field(name: str):
def factory():
value = os.environ.get(name, "false").lower()
if value in ("1", "true", "yes"):
return True
elif value in ("0", "false", "no"):
return False
else:
raise ValueError(
f"Unexpected value for {name!r} environment variable: {value!r}"
)
return field(default_factory=factory) | null |
166,730 | import os
from dataclasses import dataclass, field
from pathway import persistence
from pathway.internals import api
def _snapshot_access() -> api.SnapshotAccess | None:
    """Map the PATHWAY_SNAPSHOT_ACCESS env var to a snapshot-access mode.

    Returns None (no snapshotting) for anything other than "record"/"replay".
    """
    value = os.environ.get("PATHWAY_SNAPSHOT_ACCESS", "").lower()
    if value == "record":
        return api.SnapshotAccess.RECORD
    if value == "replay":
        return api.SnapshotAccess.REPLAY
    return None
166,731 | import os
from dataclasses import dataclass, field
from pathway import persistence
from pathway.internals import api
def _persistence_mode() -> api.PersistenceMode:
    """Map PATHWAY_PERSISTENCE_MODE (or legacy PATHWAY_REPLAY_MODE) to a mode.

    Only "speedrun" selects SPEEDRUN_REPLAY; every other value -- including an
    explicit "batch" and the unset case -- falls back to BATCH.
    """
    value = os.environ.get(
        "PATHWAY_PERSISTENCE_MODE", os.environ.get("PATHWAY_REPLAY_MODE", "")
    ).lower()
    if value == "speedrun":
        return api.PersistenceMode.SPEEDRUN_REPLAY
    return api.PersistenceMode.BATCH
166,732 | import os
from dataclasses import dataclass, field
from pathway import persistence
from pathway.internals import api
# Process-wide configuration singleton; mutated in place by the setters below.
pathway_config = PathwayConfig()


def set_license_key(key: str) -> None:
    # Override the license key used by this process.
    pathway_config.license_key = key
166,733 | import os
from dataclasses import dataclass, field
from pathway import persistence
from pathway.internals import api
# Process-wide configuration singleton; mutated in place by the setters below.
pathway_config = PathwayConfig()


def set_telemetry_server(endpoint: str) -> None:
    # Override the telemetry endpoint used by this process.
    pathway_config.telemetry_server = endpoint
166,734 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
# Dispatch table: sqlglot node type -> translation handler (populated by @register).
_expression_handlers: dict[type[sql_expr.Expression], Callable] = {}
def register(nodetype):
    """Decorator factory registering a handler for one sqlglot node type.

    The decorated function is wrapped with a type assertion and stored in the
    module-level ``_expression_handlers`` dispatch table.
    """

    def decorate(handler):
        def dispatch(node, context):
            # Guard against the dispatch table being wired to the wrong type.
            assert isinstance(node, nodetype), nodetype
            return handler(node, context)

        _expression_handlers[nodetype] = dispatch
        return dispatch

    return decorate
166,735 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
    """Dispatch *node* to the handler registered for its sqlglot type.

    Raises NotImplementedError for SQL constructs without a handler.
    """
    node_handler = _expression_handlers.get(type(node))
    if node_handler is not None:
        return node_handler(node, context)
    raise NotImplementedError(f"{node.sql()} not supported.")
def _if(
    node: sql_expr.If, context: ContextType
) -> tuple[expr.ColumnExpression, expr.ColumnExpression]:
    """Translate one WHEN/THEN arm into a ``(condition, value)`` pair."""
    condition = _run(node.this, context)
    value = _run(node.args.pop("true"), context)
    return condition, value
def _case(node: sql_expr.Case, context: ContextType) -> expr.IfElseExpression:
    # Flatten every WHEN/THEN arm into [cond1, val1, cond2, val2, ...].
    args = []
    for arg in node.args.pop("ifs"):
        args.extend(_run(arg, context))
    # Append the ELSE value (or None when the CASE has no default branch).
    if (default_field := node.args.pop("default", None)) is not None:
        args.append(_run(default_field, context))
    else:
        args.append(None)
    assert len(args) >= 3, "Wrong number of arguments."
    # Fold right-to-left: the last (cond, val) pair plus the current default
    # collapse into one if_else, which becomes the default for the pair
    # before it, until a single expression remains.
    while len(args) >= 3:
        _else = args.pop()
        _then = args.pop()
        _if = args.pop()
        args.append(if_else(_if, _then, _else))
    [ret] = args
    return ret
166,736 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
    # Dispatch on the concrete sqlglot node type; an unregistered node means
    # the SQL construct is not supported by the translator.
    handler = _expression_handlers.get(type(node))
    if handler is None:
        raise NotImplementedError(f"{node.sql()} not supported.")
    return handler(node, context)
def _between(node: sql_expr.Between, context: ContextType) -> expr.ReducerExpression:
middle = _run(node.this, context)
low = _run(node.args.pop("low"), context)
high = _run(node.args.pop("high"), context)
return (middle >= low) & (middle <= high) | null |
166,737 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
def _max(node: sql_expr.Max, context: ContextType) -> expr.ReducerExpression:
return reducers.max(_run(node.this, context)) | null |
166,738 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _min(node: sql_expr.Min, context: ContextType) -> expr.ReducerExpression:
return reducers.min(_run(node.this, context)) | null |
166,739 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
def _sum(node: sql_expr.Sum, context: ContextType) -> expr.ReducerExpression:
return reducers.sum(_run(node.this, context)) | null |
166,740 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _avg(node: sql_expr.Avg, context: ContextType) -> expr.ColumnExpression:
return reducers.avg(_run(node.this, context)) | null |
166,741 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _count(node: sql_expr.Count, context: ContextType) -> expr.ReducerExpression:
return reducers.count() | null |
166,742 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _star(node: sql_expr.Star, context: ContextType):
[ret] = thisclass.this.__iter__()
return ret | null |
166,743 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _null(node: sql_expr.Null, context: ContextType):
return None | null |
166,744 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _boolean(node: sql_expr.Boolean, context: ContextType) -> bool:
return node.this | null |
166,745 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _identifier(node: sql_expr.Identifier, context: ContextType):
def _column(node: sql_expr.Column, context: ContextType) -> expr.ColumnReference:
tab = node.table
colname = _identifier(node.this, context)
if tab == "":
return thisclass.this[colname]
else:
return context[tab][colname] | null |
166,746 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _identifier(node: sql_expr.Identifier, context: ContextType):
return node.this
def _tablealias(node: sql_expr.TableAlias, context: ContextType):
return _identifier(node.this, context) | null |
166,747 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _alias(
node: sql_expr.Alias, context: ContextType
) -> dict[str, expr.ColumnExpression]:
return {node.alias: _run(node.this, context)} | null |
166,748 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _literal(node: sql_expr.Literal, context: ContextType) -> str | int | float:
if node.is_string:
return node.this
else:
try:
return int(node.this)
except ValueError:
return float(node.this) | null |
166,749 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _paren(node: sql_expr.Paren, context: ContextType) -> expr.ColumnExpression:
return _run(node.this, context) | null |
166,750 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _add(node: sql_expr.Add, context: ContextType) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) + _run(node.expression, context) | null |
166,751 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _div(node: sql_expr.Div, context: ContextType) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) / _run(node.expression, context) | null |
166,752 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _intDiv(
node: sql_expr.IntDiv, context: ContextType
) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) // _run(node.expression, context) | null |
166,753 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _mul(node: sql_expr.Mul, context: ContextType) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) * _run(node.expression, context) | null |
166,754 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _sub(node: sql_expr.Sub, context: ContextType) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) - _run(node.expression, context) | null |
166,755 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _mod(node: sql_expr.Mod, context: ContextType) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) % _run(node.expression, context) | null |
166,756 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _neg(node: sql_expr.Neg, context: ContextType) -> expr.ColumnUnaryOpExpression:
return -_run(node.this, context) | null |
166,757 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
def _and(node: sql_expr.And, context: ContextType) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) & _run(node.expression, context) | null |
166,758 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _or(node: sql_expr.Or, context: ContextType) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) | _run(node.expression, context) | null |
166,759 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _not(node: sql_expr.Not, context: ContextType) -> expr.ColumnUnaryOpExpression:
return ~_run(node.this, context) | null |
166,760 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _lt(node: sql_expr.LT, context: ContextType) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) < _run(node.expression, context) | null |
166,761 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _gt(node: sql_expr.GT, context: ContextType) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) > _run(node.expression, context) | null |
166,762 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _lte(node: sql_expr.LTE, context: ContextType) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) <= _run(node.expression, context) | null |
166,763 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _gte(node: sql_expr.GTE, context: ContextType) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) >= _run(node.expression, context) | null |
166,764 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _eq(node: sql_expr.EQ, context: ContextType) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) == _run(node.expression, context) | null |
166,765 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _is(node: sql_expr.Is, context: ContextType) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) == _run(node.expression, context) | null |
166,766 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _neq(node: sql_expr.NEQ, context: ContextType) -> expr.ColumnBinaryOpExpression:
return _run(node.this, context) != _run(node.expression, context) | null |
166,767 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _identifier(node: sql_expr.Identifier, context: ContextType):
return node.this
def _joins_block(
node: sql_expr.Expression, tab: table.Joinable, context: ContextType
) -> tuple[table.Joinable, ContextType]:
if (joins_field := node.args.pop("joins", None)) is not None:
for arg in joins_field:
fun = _join(arg, context)
tab, context = fun(tab, context)
return tab, context
def _alias_block(
node: sql_expr.Expression, tab: table.Table, context: ContextType
) -> tuple[table.Table, ContextType]:
if (alias_field := node.args.pop("alias", None)) is not None:
alias = _run(alias_field, context)
assert isinstance(alias, str)
context = context.copy()
tab = tab.copy()
context[alias] = tab
return tab, context
def _check_work_done(node: sql_expr.Expression) -> None:
for key, obj in node.args.items():
if obj is None:
continue
try:
repr = obj.sql()
except AttributeError:
repr = str(obj)
raise NotImplementedError(f"{key}: {repr} not supported.")
def _table(
node: sql_expr.Table, context: ContextType
) -> tuple[table.Joinable, ContextType]:
name = _identifier(node.args.pop("this"), context)
tab = context[name]
tab, context = _alias_block(node, tab, context)
joined_tab, context = _joins_block(node, tab, context)
if node.args.pop("pivots", []) != []:
raise NotImplementedError("PIVOTS not supported")
_check_work_done(node)
return joined_tab, context | null |
166,768 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
handler = _expression_handlers.get(type(node))
if handler is None:
raise NotImplementedError(f"{node.sql()} not supported.")
return handler(node, context)
def _with_block(node: sql_expr.Expression, context: ContextType) -> ContextType:
if (with_field := node.args.pop("with", None)) is not None:
new_context = context.copy()
for context_update in _with(with_field, context):
new_context.update(context_update)
return new_context
else:
return context
def _check_work_done(node: sql_expr.Expression) -> None:
for key, obj in node.args.items():
if obj is None:
continue
try:
repr = obj.sql()
except AttributeError:
repr = str(obj)
raise NotImplementedError(f"{key}: {repr} not supported.")
def _union(
node: sql_expr.Union, context: ContextType
) -> tuple[table.Table, ContextType]:
orig_context = context
context = _with_block(node, context)
left, _ = _run(node.args.pop("this"), context)
right, _ = _run(node.args.pop("expression"), context)
ret = left.concat_reindex(right)
if node.args.pop("distinct"):
ret = ret.groupby(*thisclass.this).reduce(*thisclass.this)
assert node.args.pop("expressions", []) == []
_check_work_done(node)
return ret, orig_context | null |
166,769 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
    # Dispatch-table lookup: translate `node` using the handler registered
    # for its concrete sqlglot expression type.
    handler = _expression_handlers.get(type(node))
    if handler is None:
        raise NotImplementedError(f"{node.sql()} not supported.")
    return handler(node, context)
def _with_block(node: sql_expr.Expression, context: ContextType) -> ContextType:
    # Translate an optional WITH clause: each CTE extends a *copy* of the
    # context, so the caller's mapping stays untouched.
    if (with_field := node.args.pop("with", None)) is not None:
        new_context = context.copy()
        for context_update in _with(with_field, context):
            new_context.update(context_update)
        return new_context
    else:
        return context
def _check_work_done(node: sql_expr.Expression) -> None:
    """Raise NotImplementedError for the first argument of *node* that was
    not consumed (popped) by a translation handler."""
    for key, obj in node.args.items():
        if obj is None:
            continue
        # NOTE(review): `repr` shadows the builtin here; it holds the SQL
        # rendering of the leftover fragment, or str() for plain values.
        try:
            repr = obj.sql()
        except AttributeError:
            repr = str(obj)
        raise NotImplementedError(f"{key}: {repr} not supported.")
def _intersect(
    node: sql_expr.Intersect, context: ContextType
) -> tuple[table.Table, ContextType]:
    """Translate a SQL INTERSECT node into a Pathway table.

    Both inputs are deduplicated first, matching SQL set semantics; the
    original (pre-WITH) context is returned alongside the result.
    """
    orig_context = context
    context = _with_block(node, context)
    left, _ = _run(node.args.pop("this"), context)
    right, _ = _run(node.args.pop("expression"), context)
    # Deduplicate each side by grouping on all columns before intersecting.
    left = left.groupby(*thisclass.this).reduce(*thisclass.this)
    right = right.groupby(*thisclass.this).reduce(*thisclass.this)
    ret = left.intersect(right)
    # INTERSECT ALL is not handled: the node must be marked distinct.
    assert node.args.pop("distinct")
    assert node.args.pop("expressions", []) == []
    _check_work_done(node)
    return ret, orig_context
166,770 | from __future__ import annotations
import itertools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import sqlglot
import sqlglot.expressions as sql_expr
from sqlglot.errors import OptimizeError
from sqlglot.optimizer import qualify_columns
from pathway.internals import expression as expr, if_else, reducers, table, thisclass
from pathway.internals.desugaring import TableSubstitutionDesugaring
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.shadows import operator
class ReducerDetector(IdentityTransform):
    """Expression visitor that records whether an expression tree contains
    any reducer (aggregation) node, including ``count()``."""

    # Flipped to True as soon as a reducer or count() node is visited.
    contains_reducers: bool

    def __init__(self):
        self.contains_reducers = False

    def eval_reducer(
        self, expression: expr.ReducerExpression, **kwargs
    ) -> expr.ReducerExpression:
        self.contains_reducers = True
        return super().eval_reducer(expression, **kwargs)

    def eval_count(
        self, expression: expr.CountExpression, **kwargs
    ) -> expr.CountExpression:
        self.contains_reducers = True
        return super().eval_count(expression, **kwargs)
def _run(node: sql_expr.Expression, context: ContextType) -> Any:
    # Dispatch to the handler registered for this node's concrete type;
    # unknown node types are reported as unsupported SQL.
    handler = _expression_handlers.get(type(node))
    if handler is None:
        raise NotImplementedError(f"{node.sql()} not supported.")
    return handler(node, context)
def _group(node: sql_expr.Group, context: ContextType) -> list[expr.ColumnExpression]:
    """Translate every grouping key of a GROUP BY clause into a column expression."""
    return [_run(grouping, context) for grouping in node.expressions]
def _where(node: sql_expr.Where, context: ContextType):
    """Translate the predicate of a WHERE clause."""
    predicate = node.this
    return _run(predicate, context)
def _having(node: sql_expr.Having, context: ContextType):
    """Translate the predicate of a HAVING clause."""
    predicate = node.this
    return _run(predicate, context)
def _from(node: sql_expr.From, context: ContextType) -> tuple[table.Table, ContextType]:
    """Translate a FROM clause; comma-separated sources become a join chain."""
    translated = []
    for source in node.expressions:
        source_tab, context = _run(source, context)
        translated.append(source_tab)
    result, *remaining = translated
    for other in remaining:
        result = result.join(other)
    return result, context
class _ReducersGatherer(IdentityTransform):
    """Extracts reducer sub-expressions out of a HAVING predicate.

    Each extracted expression is replaced by a reference to a fresh
    ``_pw_having_<n>`` column and collected in ``gathered_reducers`` so it
    can be computed in the reduce step and filtered on afterwards.
    """

    # Mapping from generated column name to the extracted expression.
    gathered_reducers: dict[str, expr.ColumnExpression]

    def __init__(self) -> None:
        self.count = itertools.count(0)
        self.gathered_reducers = {}

    def add_expression(self, expression: expr.ColumnExpression) -> expr.ColumnReference:
        # Register the expression under a fresh name and reference it via `this`.
        name = f"_pw_having_{next(self.count)}"
        self.gathered_reducers[name] = expression
        return thisclass.this[name]

    def eval_column_val(self, expression: expr.ColumnReference, **kwargs):
        if isinstance(expression.table, thisclass.ThisMetaclass):
            return super().eval_column_val(expression, **kwargs)
        else:
            # References to concrete tables must also be materialized
            # as reduce outputs.
            return self.add_expression(expression)

    def eval_count(self, expression: expr.CountExpression, **kwargs):
        return self.add_expression(expression)

    def eval_reducer(self, expression: expr.ReducerExpression, **kwargs):
        return self.add_expression(expression)
class _HavingHelper(IdentityTransform):
    """Rebinds `this`-references in a HAVING expression to columns of *tab*."""

    # Reduced table whose columns the HAVING predicate should refer to.
    tab: table.Table

    def __init__(self, tab):
        self.tab = tab

    def eval_column_val(
        self, expression: expr.ColumnReference, **kwargs
    ) -> expr.ColumnReference:
        if isinstance(expression.table, thisclass.ThisMetaclass):
            try:
                return self.tab[expression.name]
            except KeyError:
                # Column absent from the reduced table: fall through and
                # let the parent transform handle the reference.
                pass
        return super().eval_column_val(expression, **kwargs)
def _process_field_for_subqueries(field, tab, context, orig_context, agg_fun):
    """Replace scalar subqueries inside *field* with joined auxiliary columns.

    Each non-nested subquery found in *field* is evaluated to a one-column
    table, registered in a copy of *context* under a fresh temporary name,
    and the subquery node is rewritten in place to
    ``agg_fun(tmp_table.column)``.

    Args:
        field: sqlglot node; mutated in place.
        tab: table the surrounding clause refers to.
        context: current name -> table mapping (copied, not mutated).
        orig_context: context without local WITH names, used to evaluate
            the subqueries themselves.
        agg_fun: SQL aggregation wrapped around the subquery column
            (e.g. "" or "MIN").

    Returns:
        The table joined with all subquery results, and the extended context.

    Raises:
        SyntaxError: when the subquery references outer-query names
            (correlated subqueries are unsupported).
    """
    context_subqueries = {**context}
    tab_joined = tab
    for subquery in _all_nonnested_subqueries(field):
        try:
            subquery_tab, _ = _subquery(subquery, orig_context)
        except KeyError:
            # A failed name lookup means the subquery referenced columns
            # of the outer query.
            raise SyntaxError("Correlated subqueries not supported.")
        tabname = f"__pathway__tmp__table__name__{next(_tmp_table_cnt)}"
        context_subqueries[tabname] = subquery_tab
        # The subquery result must be a single-column table.
        [colexpr] = subquery_tab
        subquery.replace(sqlglot.parse_one(f"{agg_fun}({tabname}.{colexpr.name})"))
        tab_joined = tab_joined.join(subquery_tab, id=tab_joined.id)
    return tab_joined, context_subqueries
def _with_block(node: sql_expr.Expression, context: ContextType) -> ContextType:
    # Consume an optional WITH clause; CTE definitions extend a copy of the
    # context so the caller's mapping is never mutated.
    if (with_field := node.args.pop("with", None)) is not None:
        new_context = context.copy()
        for context_update in _with(with_field, context):
            new_context.update(context_update)
        return new_context
    else:
        return context
def _joins_block(
    node: sql_expr.Expression, tab: table.Joinable, context: ContextType
) -> tuple[table.Joinable, ContextType]:
    """Apply every JOIN clause of *node* (if any) to *tab*, left to right."""
    if (joins_field := node.args.pop("joins", None)) is not None:
        for arg in joins_field:
            # _join returns a callable that performs the actual join.
            fun = _join(arg, context)
            tab, context = fun(tab, context)
    return tab, context
def _check_work_done(node: sql_expr.Expression) -> None:
    """Raise NotImplementedError for the first argument of *node* that no
    handler consumed, i.e. an unsupported SQL feature."""
    for key, obj in node.args.items():
        if obj is None:
            continue
        # NOTE(review): `repr` shadows the builtin; it holds the SQL text of
        # the leftover fragment, falling back to str() for plain values.
        try:
            repr = obj.sql()
        except AttributeError:
            repr = str(obj)
        raise NotImplementedError(f"{key}: {repr} not supported.")
class TableSubstitutionDesugaring(DesugaringTransform):
    """Rewrites column references so they point at substituted tables.

    Any reference to a table appearing as a key of ``table_substitution``
    is redirected to the same-named column of the replacement table; all
    other references are delegated to the parent transform.
    """

    def __init__(
        self,
        table_substitution: dict[table.TableLike, table.Table],
    ):
        self._table_substitution = table_substitution

    def eval_column_val(
        self, expression: expr.ColumnReference, **kwargs
    ) -> expr.ColumnReference:
        replacement = self._table_substitution.get(expression.table)
        if replacement is not None:
            return replacement[expression.name]
        return super().eval_column_val(expression, **kwargs)
def _select(
    node: sql_expr.Select, context: ContextType
) -> tuple[table.Table, ContextType]:
    """Translate a SQL SELECT node into a Pathway table.

    Handles WITH, FROM, JOIN, WHERE, GROUP BY (explicit and implicit via
    reducers in the projection) and HAVING, in that order. The original
    (pre-WITH) context is returned so CTE names do not leak to the caller.
    """
    orig_context = context
    # WITH block
    context = _with_block(node, context)
    # FROM block
    tab, context = _from(node.args.pop("from"), context)
    tab, context = _joins_block(node, tab, context)
    # GROUP block
    if (group_field := node.args.pop("group", None)) is not None:
        groupby = _group(group_field, context)
    else:
        groupby = None
    # args building: positional projections vs aliased (named) projections.
    expr_args = []
    expr_kwargs = {}
    for e in node.args.pop("expressions"):
        ret = _run(e, context)
        if isinstance(ret, dict):
            expr_kwargs.update(ret)
        else:
            expr_args.append(ret)
    # WHERE block
    if (where_field := node.args.pop("where", None)) is not None:
        # mutates `where_field` (subqueries are rewritten in place)
        tab_joined_where, context_subqueries_where = _process_field_for_subqueries(
            where_field, tab, context, orig_context, ""
        )
        tab_filter_where = tab_joined_where.select(
            filter_col=_where(where_field, context_subqueries_where)
        ).with_universe_of(tab)
        tab_filtered = tab.filter(tab_filter_where.filter_col)
        # All expressions built so far still reference the unfiltered table;
        # rebind them to the filtered one.
        table_replacer = TableSubstitutionDesugaring({tab: tab_filtered})
        expr_args = [table_replacer.eval_expression(e) for e in expr_args]
        if groupby is not None:
            groupby = [table_replacer.eval_expression(e) for e in groupby]
        expr_kwargs = {
            name: table_replacer.eval_expression(e) for name, e in expr_kwargs.items()
        }
        tab = tab_filtered
    # HAVING block
    if (having_field := node.args.pop("having", None)) is not None:
        if groupby is None:
            # HAVING without GROUP BY implies grouping over the whole table.
            groupby = []
    _check_work_done(node)
    # maybe we have implicit GROUP BY
    if groupby is None:
        detector = ReducerDetector()
        for arg in expr_args:
            detector.eval_expression(arg)
        for arg in expr_kwargs.values():
            detector.eval_expression(arg)
        if detector.contains_reducers:
            groupby = []
    if groupby is None:
        # Plain projection: no aggregation anywhere.
        result = tab.select(*expr_args, **expr_kwargs)
        return result, orig_context
    if having_field is not None:
        # mutates `having_field` (subqueries are rewritten in place)
        tab, context_subqueries_having = _process_field_for_subqueries(
            having_field, tab, context, orig_context, "MIN"
        )
        having_expr = _having(having_field, context_subqueries_having)
        # Reducers inside HAVING must be computed during reduce(); extract
        # them into temporary output columns.
        gatherer = _ReducersGatherer()
        having_expr = gatherer.eval_expression(having_expr)
        expr_kwargs = {**expr_kwargs, **gatherer.gathered_reducers}
    grouped = tab.groupby(*groupby)
    result = grouped.reduce(*expr_args, **expr_kwargs)
    if having_field is None:
        return result, orig_context
    # Filter on the HAVING predicate, then drop the temporary columns.
    having_col = _HavingHelper(result).eval_expression(having_expr)
    result = result.filter(having_col).without(
        *[thisclass.this[name] for name in gatherer.gathered_reducers.keys()]
    )
    return result, orig_context
166,771 | from __future__ import annotations
import builtins
from abc import ABC, abstractmethod
from warnings import warn
import numpy as np
from pathway.internals import api, dtype as dt, expression as expr
from pathway.internals.column import ColumnExpression, GroupedContext
_min = TypePreservingUnaryReducer(name="min", engine_reducer=api.Reducer.MIN)
def _apply_unary_reducer(
    reducer: UnaryReducer, arg: expr.ColumnExpression, **kwargs
) -> expr.ReducerExpression:
    # Wrap a unary reducer and its column argument into an expression node.
    return expr.ReducerExpression(reducer, arg, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `min` function. Write a Python function `def min(arg: expr.ColumnExpression) -> expr.ReducerExpression` to solve the following problem:
Returns the minimum of the aggregated values. Example: >>> import pathway as pw >>> t = pw.debug.table_from_markdown(''' ... colA | colB ... valA | -1 ... valA | 1 ... valA | 2 ... valB | 4 ... valB | 4 ... valB | 7 ... ''') >>> result = t.groupby(t.colA).reduce(min=pw.reducers.min(t.colB)) >>> pw.debug.compute_and_print(result, include_id=False) min -1 4
Here is the function:
def min(arg: expr.ColumnExpression) -> expr.ReducerExpression:
    """
    Returns the minimum of the aggregated values.
    Example:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown('''
    ... colA | colB
    ... valA | -1
    ... valA | 1
    ... valA | 2
    ... valB | 4
    ... valB | 4
    ... valB | 7
    ... ''')
    >>> result = t.groupby(t.colA).reduce(min=pw.reducers.min(t.colB))
    >>> pw.debug.compute_and_print(result, include_id=False)
    min
    -1
    4
    """
    # Wraps the module-level `_min` reducer (api.Reducer.MIN) into an expression.
    return _apply_unary_reducer(_min, arg)
166,772 | from __future__ import annotations
import builtins
from abc import ABC, abstractmethod
from warnings import warn
import numpy as np
from pathway.internals import api, dtype as dt, expression as expr
from pathway.internals.column import ColumnExpression, GroupedContext
_max = TypePreservingUnaryReducer(name="max", engine_reducer=api.Reducer.MAX)
def _apply_unary_reducer(
    reducer: UnaryReducer, arg: expr.ColumnExpression, **kwargs
) -> expr.ReducerExpression:
    # Wrap a unary reducer and its column argument into an expression node.
    return expr.ReducerExpression(reducer, arg, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `max` function. Write a Python function `def max(arg: expr.ColumnExpression) -> expr.ReducerExpression` to solve the following problem:
Returns the maximum of the aggregated values. Example: >>> import pathway as pw >>> t = pw.debug.table_from_markdown(''' ... colA | colB ... valA | -1 ... valA | 1 ... valA | 2 ... valB | 4 ... valB | 4 ... valB | 7 ... ''') >>> result = t.groupby(t.colA).reduce(max=pw.reducers.max(t.colB)) >>> pw.debug.compute_and_print(result, include_id=False) max 2 7
Here is the function:
def max(arg: expr.ColumnExpression) -> expr.ReducerExpression:
    """
    Returns the maximum of the aggregated values.
    Example:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown('''
    ... colA | colB
    ... valA | -1
    ... valA | 1
    ... valA | 2
    ... valB | 4
    ... valB | 4
    ... valB | 7
    ... ''')
    >>> result = t.groupby(t.colA).reduce(max=pw.reducers.max(t.colB))
    >>> pw.debug.compute_and_print(result, include_id=False)
    max
    2
    7
    """
    # Wraps the module-level `_max` reducer (api.Reducer.MAX) into an expression.
    return _apply_unary_reducer(_max, arg)
166,773 | from __future__ import annotations
import builtins
from abc import ABC, abstractmethod
from warnings import warn
import numpy as np
from pathway.internals import api, dtype as dt, expression as expr
from pathway.internals.column import ColumnExpression, GroupedContext
_argmin = FixedOutputUnaryReducer(
output_type=dt.POINTER,
name="argmin",
engine_reducer=api.Reducer.ARG_MIN,
)
def _apply_unary_reducer(
    reducer: UnaryReducer, arg: expr.ColumnExpression, **kwargs
) -> expr.ReducerExpression:
    # Wrap a unary reducer and its column argument into an expression node.
    return expr.ReducerExpression(reducer, arg, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `argmin` function. Write a Python function `def argmin(arg: expr.ColumnExpression) -> expr.ReducerExpression` to solve the following problem:
Returns the index of the minimum aggregated value. Example: >>> import pathway as pw >>> t = pw.debug.table_from_markdown(''' ... colA | colB ... valA | -1 ... valA | 1 ... valA | 2 ... valB | 4 ... valB | 4 ... valB | 7 ... ''') >>> pw.debug.compute_and_print(t) | colA | colB ^X1MXHYY... | valA | -1 ^YYY4HAB... | valA | 1 ^Z3QWT29... | valA | 2 ^3CZ78B4... | valB | 4 ^3HN31E1... | valB | 4 ^3S2X6B2... | valB | 7 >>> result = t.groupby(t.colA).reduce(argmin=pw.reducers.argmin(t.colB), min=pw.reducers.min(t.colB)) >>> pw.debug.compute_and_print(result, include_id=False) argmin | min ^X1MXHYY... | -1 ^3CZ78B4... | 4
Here is the function:
def argmin(arg: expr.ColumnExpression) -> expr.ReducerExpression:
    """
    Returns the index of the minimum aggregated value.
    Example:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown('''
    ... colA | colB
    ... valA | -1
    ... valA | 1
    ... valA | 2
    ... valB | 4
    ... valB | 4
    ... valB | 7
    ... ''')
    >>> pw.debug.compute_and_print(t)
                | colA | colB
    ^X1MXHYY... | valA | -1
    ^YYY4HAB... | valA | 1
    ^Z3QWT29... | valA | 2
    ^3CZ78B4... | valB | 4
    ^3HN31E1... | valB | 4
    ^3S2X6B2... | valB | 7
    >>> result = t.groupby(t.colA).reduce(argmin=pw.reducers.argmin(t.colB), min=pw.reducers.min(t.colB))
    >>> pw.debug.compute_and_print(result, include_id=False)
    argmin      | min
    ^X1MXHYY... | -1
    ^3CZ78B4... | 4
    """
    # `_argmin` (api.Reducer.ARG_MIN) yields a Pointer: the id of the row
    # holding the minimal value (output type dt.POINTER).
    return _apply_unary_reducer(_argmin, arg)
166,774 | from __future__ import annotations
import builtins
from abc import ABC, abstractmethod
from warnings import warn
import numpy as np
from pathway.internals import api, dtype as dt, expression as expr
from pathway.internals.column import ColumnExpression, GroupedContext
_argmax = FixedOutputUnaryReducer(
output_type=dt.POINTER,
name="argmax",
engine_reducer=api.Reducer.ARG_MAX,
)
def _apply_unary_reducer(
    reducer: UnaryReducer, arg: expr.ColumnExpression, **kwargs
) -> expr.ReducerExpression:
    # Wrap a unary reducer and its column argument into an expression node.
    return expr.ReducerExpression(reducer, arg, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `argmax` function. Write a Python function `def argmax(arg: expr.ColumnExpression) -> expr.ReducerExpression` to solve the following problem:
Returns the index of the maximum aggregated value. Example: >>> import pathway as pw >>> t = pw.debug.table_from_markdown(''' ... colA | colB ... valA | -1 ... valA | 1 ... valA | 2 ... valB | 4 ... valB | 4 ... valB | 7 ... ''') >>> pw.debug.compute_and_print(t) | colA | colB ^X1MXHYY... | valA | -1 ^YYY4HAB... | valA | 1 ^Z3QWT29... | valA | 2 ^3CZ78B4... | valB | 4 ^3HN31E1... | valB | 4 ^3S2X6B2... | valB | 7 >>> result = t.groupby(t.colA).reduce(argmax=pw.reducers.argmax(t.colB), max=pw.reducers.max(t.colB)) >>> pw.debug.compute_and_print(result, include_id=False) argmax | max ^Z3QWT29... | 2 ^3S2X6B2... | 7
Here is the function:
def argmax(arg: expr.ColumnExpression) -> expr.ReducerExpression:
    """
    Returns the index of the maximum aggregated value.
    Example:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown('''
    ... colA | colB
    ... valA | -1
    ... valA | 1
    ... valA | 2
    ... valB | 4
    ... valB | 4
    ... valB | 7
    ... ''')
    >>> pw.debug.compute_and_print(t)
                | colA | colB
    ^X1MXHYY... | valA | -1
    ^YYY4HAB... | valA | 1
    ^Z3QWT29... | valA | 2
    ^3CZ78B4... | valB | 4
    ^3HN31E1... | valB | 4
    ^3S2X6B2... | valB | 7
    >>> result = t.groupby(t.colA).reduce(argmax=pw.reducers.argmax(t.colB), max=pw.reducers.max(t.colB))
    >>> pw.debug.compute_and_print(result, include_id=False)
    argmax      | max
    ^Z3QWT29... | 2
    ^3S2X6B2... | 7
    """
    # `_argmax` (api.Reducer.ARG_MAX) yields a Pointer: the id of the row
    # holding the maximal value (output type dt.POINTER).
    return _apply_unary_reducer(_argmax, arg)
166,775 | from __future__ import annotations
import builtins
from abc import ABC, abstractmethod
from warnings import warn
import numpy as np
from pathway.internals import api, dtype as dt, expression as expr
from pathway.internals.column import ColumnExpression, GroupedContext
_unique = TypePreservingUnaryReducer(name="unique", engine_reducer=api.Reducer.UNIQUE)
def _apply_unary_reducer(
    reducer: UnaryReducer, arg: expr.ColumnExpression, **kwargs
) -> expr.ReducerExpression:
    # Wrap a unary reducer and its column argument into an expression node.
    return expr.ReducerExpression(reducer, arg, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `unique` function. Write a Python function `def unique(arg: expr.ColumnExpression) -> expr.ReducerExpression` to solve the following problem:
Returns aggregated value, if all values are identical. If values are not identical, exception is raised. Example: >>> import pathway as pw >>> t = pw.debug.table_from_markdown(''' ... | colA | colB | colD ... 1 | valA | 1 | 3 ... 2 | valA | 1 | 3 ... 3 | valA | 1 | 3 ... 4 | valB | 2 | 4 ... 5 | valB | 2 | 5 ... 6 | valB | 2 | 6 ... ''') >>> result = t.groupby(t.colA).reduce(unique_B=pw.reducers.unique(t.colB)) >>> pw.debug.compute_and_print(result, include_id=False) unique_B 1 2 >>> result = t.groupby(t.colA).reduce(unique_D=pw.reducers.unique(t.colD)) >>> try: ... pw.debug.compute_and_print(result, include_id=False) ... except Exception as e: ... print(type(e)) <class 'pathway.engine.EngineError'>
Here is the function:
def unique(arg: expr.ColumnExpression) -> expr.ReducerExpression:
    """
    Returns aggregated value, if all values are identical. If values are not identical, exception is raised.
    Example:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown('''
    ...    | colA | colB | colD
    ... 1  | valA | 1    | 3
    ... 2  | valA | 1    | 3
    ... 3  | valA | 1    | 3
    ... 4  | valB | 2    | 4
    ... 5  | valB | 2    | 5
    ... 6  | valB | 2    | 6
    ... ''')
    >>> result = t.groupby(t.colA).reduce(unique_B=pw.reducers.unique(t.colB))
    >>> pw.debug.compute_and_print(result, include_id=False)
    unique_B
    1
    2
    >>> result = t.groupby(t.colA).reduce(unique_D=pw.reducers.unique(t.colD))
    >>> try:
    ...   pw.debug.compute_and_print(result, include_id=False)
    ... except Exception as e:
    ...   print(type(e))
    <class 'pathway.engine.EngineError'>
    """
    # Wraps the module-level `_unique` reducer (api.Reducer.UNIQUE); the
    # engine raises at runtime when the group is not single-valued.
    return _apply_unary_reducer(_unique, arg)
166,776 | from __future__ import annotations
import builtins
from abc import ABC, abstractmethod
from warnings import warn
import numpy as np
from pathway.internals import api, dtype as dt, expression as expr
from pathway.internals.column import ColumnExpression, GroupedContext
_any = TypePreservingUnaryReducer(name="any", engine_reducer=api.Reducer.ANY)
def _apply_unary_reducer(
    reducer: UnaryReducer, arg: expr.ColumnExpression, **kwargs
) -> expr.ReducerExpression:
    # Wrap a unary reducer and its column argument into an expression node.
    return expr.ReducerExpression(reducer, arg, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `any` function. Write a Python function `def any(arg: expr.ColumnExpression) -> expr.ReducerExpression` to solve the following problem:
Returns any of the aggregated values. Values are consistent across application to many columns. Example: >>> import pathway as pw >>> t = pw.debug.table_from_markdown(''' ... | colA | colB | colD ... 1 | valA | -1 | 4 ... 2 | valA | 1 | 7 ... 3 | valA | 2 | -3 ... 4 | valB | 4 | 2 ... 5 | valB | 5 | 6 ... 6 | valB | 7 | 1 ... ''') >>> result = t.groupby(t.colA).reduce( ... any_B=pw.reducers.any(t.colB), ... any_D=pw.reducers.any(t.colD), ... ) >>> pw.debug.compute_and_print(result, include_id=False) any_B | any_D 2 | -3 7 | 1
Here is the function:
def any(arg: expr.ColumnExpression) -> expr.ReducerExpression:
    """
    Returns any of the aggregated values. Values are consistent across application to many columns.
    Example:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown('''
    ...    | colA | colB | colD
    ... 1  | valA | -1   | 4
    ... 2  | valA | 1    | 7
    ... 3  | valA | 2    | -3
    ... 4  | valB | 4    | 2
    ... 5  | valB | 5    | 6
    ... 6  | valB | 7    | 1
    ... ''')
    >>> result = t.groupby(t.colA).reduce(
    ...     any_B=pw.reducers.any(t.colB),
    ...     any_D=pw.reducers.any(t.colD),
    ... )
    >>> pw.debug.compute_and_print(result, include_id=False)
    any_B | any_D
    2     | -3
    7     | 1
    """
    # Wraps the module-level `_any` reducer (api.Reducer.ANY) into an expression.
    return _apply_unary_reducer(_any, arg)
166,777 | from __future__ import annotations
import builtins
from abc import ABC, abstractmethod
from warnings import warn
import numpy as np
from pathway.internals import api, dtype as dt, expression as expr
from pathway.internals.column import ColumnExpression, GroupedContext
def sum(arg: expr.ColumnExpression) -> expr.ReducerExpression:
    """
    Returns the sum of the aggregated values. Can handle int, float, and array values.
    Example:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown('''
    ... colA | colB
    ... valA | -1
    ... valA | 1
    ... valA | 2
    ... valB | 4
    ... valB | 4
    ... valB | 7
    ... ''')
    >>> result = t.groupby(t.colA).reduce(sum=pw.reducers.sum(t.colB))
    >>> pw.debug.compute_and_print(result, include_id=False)
    sum
    2
    15
    >>> import pandas as pd
    >>> np_table = pw.debug.table_from_pandas(
    ...     pd.DataFrame(
    ...         {
    ...             "data": [
    ...                 np.array([1, 2, 3]),
    ...                 np.array([4, 5, 6]),
    ...                 np.array([7, 8, 9]),
    ...             ]
    ...         }
    ...     )
    ... )
    >>> result = np_table.reduce(data_sum=pw.reducers.sum(np_table.data))
    >>> pw.debug.compute_and_print(result, include_id=False)
    data_sum
    [12 15 18]
    """
    # Wraps the module-level `_sum` reducer into a ReducerExpression.
    return _apply_unary_reducer(_sum, arg)
def npsum(arg):
    """Deprecated alias of :func:`sum`; use ``pathway.reducers.sum`` instead."""
    # Emit a proper DeprecationWarning (explicit category + stacklevel
    # pointing at the caller), consistent with the deprecation in
    # `apply_with_type` in this module.
    warn(
        "Using pathway.reducers.npsum() is deprecated, use pathway.reducers.sum()",
        DeprecationWarning,
        stacklevel=2,
    )
    return sum(arg)
166,778 | from __future__ import annotations
import builtins
from abc import ABC, abstractmethod
from warnings import warn
import numpy as np
from pathway.internals import api, dtype as dt, expression as expr
from pathway.internals.column import ColumnExpression, GroupedContext
def sum(arg: expr.ColumnExpression) -> expr.ReducerExpression:
    """
    Returns the sum of the aggregated values. Can handle int, float, and array values.
    Example:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown('''
    ... colA | colB
    ... valA | -1
    ... valA | 1
    ... valA | 2
    ... valB | 4
    ... valB | 4
    ... valB | 7
    ... ''')
    >>> result = t.groupby(t.colA).reduce(sum=pw.reducers.sum(t.colB))
    >>> pw.debug.compute_and_print(result, include_id=False)
    sum
    2
    15
    >>> import pandas as pd
    >>> np_table = pw.debug.table_from_pandas(
    ...     pd.DataFrame(
    ...         {
    ...             "data": [
    ...                 np.array([1, 2, 3]),
    ...                 np.array([4, 5, 6]),
    ...                 np.array([7, 8, 9]),
    ...             ]
    ...         }
    ...     )
    ... )
    >>> result = np_table.reduce(data_sum=pw.reducers.sum(np_table.data))
    >>> pw.debug.compute_and_print(result, include_id=False)
    data_sum
    [12 15 18]
    """
    # Wraps the module-level `_sum` reducer into a ReducerExpression.
    return _apply_unary_reducer(_sum, arg)
def count(*args):
    """
    Returns the number of aggregated elements.
    Example:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown('''
    ... colA | colB
    ... valA | -1
    ... valA | 1
    ... valA | 2
    ... valB | 4
    ... valB | 4
    ... valB | 7
    ... ''')
    >>> result = t.groupby(t.colA).reduce(count=pw.reducers.count())
    >>> pw.debug.compute_and_print(result, include_id=False)
    count
    3
    3
    """
    if args:
        # count() always counts rows; a column argument is legacy noise.
        # DeprecationWarning + stacklevel=2 points the warning at the
        # caller, matching the deprecation in `apply_with_type`.
        warn(
            "Passing argument to pathway.reducers.count() is deprecated, use pathway.reducers.count() "
            + "without any arguments.",
            DeprecationWarning,
            stacklevel=2,
        )
    return expr.CountExpression()
The provided code snippet includes necessary dependencies for implementing the `avg` function. Write a Python function `def avg(expression: expr.ColumnExpression) -> expr.ColumnExpression` to solve the following problem:
Returns the average of the aggregated values. Example: >>> import pathway as pw >>> t = pw.debug.table_from_markdown(''' ... colA | colB ... valA | -1 ... valA | 1 ... valA | 2 ... valB | 4 ... valB | 4 ... valB | 7 ... ''') >>> result = t.groupby(t.colA).reduce(avg=pw.reducers.avg(t.colB)) >>> pw.debug.compute_and_print(result, include_id=False) avg 0.6666666666666666 5.0
Here is the function:
def avg(expression: expr.ColumnExpression) -> expr.ColumnExpression:
    """
    Returns the average of the aggregated values.
    Example:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown('''
    ... colA | colB
    ... valA | -1
    ... valA | 1
    ... valA | 2
    ... valB | 4
    ... valB | 4
    ... valB | 7
    ... ''')
    >>> result = t.groupby(t.colA).reduce(avg=pw.reducers.avg(t.colB))
    >>> pw.debug.compute_and_print(result, include_id=False)
    avg
    0.6666666666666666
    5.0
    """
    # Composed from the sum and count reducers, matching the doctest output.
    return sum(expression) / count()
166,779 | from __future__ import annotations
import builtins
from abc import ABC, abstractmethod
from warnings import warn
import numpy as np
from pathway.internals import api, dtype as dt, expression as expr
from pathway.internals.column import ColumnExpression, GroupedContext
def sum(arg: expr.ColumnExpression) -> expr.ReducerExpression:
    """
    Returns the sum of the aggregated values. Can handle int, float, and array values.
    Example:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown('''
    ... colA | colB
    ... valA | -1
    ... valA | 1
    ... valA | 2
    ... valB | 4
    ... valB | 4
    ... valB | 7
    ... ''')
    >>> result = t.groupby(t.colA).reduce(sum=pw.reducers.sum(t.colB))
    >>> pw.debug.compute_and_print(result, include_id=False)
    sum
    2
    15
    >>> import pandas as pd
    >>> np_table = pw.debug.table_from_pandas(
    ...     pd.DataFrame(
    ...         {
    ...             "data": [
    ...                 np.array([1, 2, 3]),
    ...                 np.array([4, 5, 6]),
    ...                 np.array([7, 8, 9]),
    ...             ]
    ...         }
    ...     )
    ... )
    >>> result = np_table.reduce(data_sum=pw.reducers.sum(np_table.data))
    >>> pw.debug.compute_and_print(result, include_id=False)
    data_sum
    [12 15 18]
    """
    # Wraps the module-level `_sum` reducer into a ReducerExpression.
    return _apply_unary_reducer(_sum, arg)
def int_sum(expression: expr.ColumnExpression):
    """Deprecated alias of :func:`sum`; use ``pathway.reducers.sum`` instead."""
    # Emit a proper DeprecationWarning (explicit category + stacklevel
    # pointing at the caller), consistent with `apply_with_type`.
    warn(
        "Reducer pathway.reducers.int_sum is deprecated, use pathway.reducers.sum instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return sum(expression)
166,780 | from __future__ import annotations
import builtins
from abc import ABC, abstractmethod
from warnings import warn
import numpy as np
from pathway.internals import api, dtype as dt, expression as expr
from pathway.internals.column import ColumnExpression, GroupedContext
def tuple(arg: expr.ColumnExpression, *, skip_nones: bool = False):
    """
    Return a tuple containing all the aggregated values. Order of values inside a tuple
    is consistent across application to many columns. If optional argument skip_nones is
    set to True, any Nones in aggregated values are omitted from the result.
    Example:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown('''
    ...    | colA | colB | colC | colD
    ... 1  | valA | -1   | 5    | 4
    ... 2  | valA | 1    | 5    | 7
    ... 3  | valA | 2    | 5    | -3
    ... 4  | valB | 4    | 10   | 2
    ... 5  | valB | 4    | 10   | 6
    ... 6  | valB | 7    | 10   | 1
    ... ''')
    >>> result = t.groupby(t.colA).reduce(
    ...     tuple_B=pw.reducers.tuple(t.colB),
    ...     tuple_C=pw.reducers.tuple(t.colC),
    ...     tuple_D=pw.reducers.tuple(t.colD),
    ... )
    >>> pw.debug.compute_and_print(result, include_id=False)
    tuple_B    | tuple_C      | tuple_D
    (-1, 1, 2) | (5, 5, 5)    | (4, 7, -3)
    (4, 4, 7)  | (10, 10, 10) | (2, 6, 1)
    """
    # skip_nones both selects the reducer variant (`_tuple(skip_nones)`) and
    # is forwarded as a keyword to the resulting expression.
    return _apply_unary_reducer(_tuple(skip_nones), arg, skip_nones=skip_nones)
def apply_with_type(
    fun: Callable,
    ret_type: type | dt.DType,
    *args: expr.ColumnExpression | Value,
    **kwargs: expr.ColumnExpression | Value,
) -> expr.ColumnExpression:
    """Applies function to column expressions, column-wise.
    Output column type is provided explicitly.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age  owner  pet
    ... 1   10  Alice  dog
    ... 2    9    Bob  dog
    ... 3    8  Alice  cat
    ... 4    7    Bob  dog''')
    >>> t2 = t1.select(col = pw.apply_with_type(lambda left, right: left+right, str, t1.owner, t1.pet))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    col
    Alicecat
    Alicedog
    Bobdog
    Bobdog
    """
    if kwargs:
        # Keyword forwarding to `fun` is deprecated; positional args only.
        warn(
            "Passing keyword arguments to the function in pw.apply_with_type is deprecated."
            + " Use positional arguments instead.",
            DeprecationWarning,
            stacklevel=2,
        )
    # Implemented on top of the udf mechanism (defined elsewhere in this
    # module) with an explicitly declared return type.
    return udf(fun, return_type=ret_type)(*args, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `ndarray` function. Write a Python function `def ndarray(expression: expr.ColumnExpression, *, skip_nones: bool = False)` to solve the following problem:
Returns an array containing all the aggregated values. Order of values inside an array is consistent across application to many columns. If optional argument skip_nones is set to True, any Nones in aggregated values are omitted from the result. Example: >>> import pathway as pw >>> t = pw.debug.table_from_markdown(''' ... | colA | colB | colD ... 1 | valA | -1 | 4 ... 2 | valA | 1 | 7 ... 3 | valA | 2 | -3 ... 4 | valB | 4 | ... 5 | valB | 4 | 6 ... 6 | valB | 7 | 1 ... ''') >>> result = t.groupby(t.colA).reduce( ... array_B=pw.reducers.ndarray(t.colB), ... array_D=pw.reducers.ndarray(t.colD, skip_nones=True), ... ) >>> pw.debug.compute_and_print(result, include_id=False) array_B | array_D [4 4 7] | [6 1] [-1 1 2] | [ 4 7 -3]
Here is the function:
def ndarray(expression: expr.ColumnExpression, *, skip_nones: bool = False):
    """Return a reducer collecting all aggregated values into a NumPy array.

    The order of values inside the array is consistent across application to
    many columns of the same group. When ``skip_nones`` is set to True, any
    ``None`` entries are dropped before the array is assembled.

    Example:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown('''
    ... | colA | colB | colD
    ... 1 | valA | -1 | 4
    ... 2 | valA | 1 | 7
    ... 3 | valA | 2 | -3
    ... 4 | valB | 4 |
    ... 5 | valB | 4 | 6
    ... 6 | valB | 7 | 1
    ... ''')
    >>> result = t.groupby(t.colA).reduce(
    ...     array_B=pw.reducers.ndarray(t.colB),
    ...     array_D=pw.reducers.ndarray(t.colD, skip_nones=True),
    ... )
    >>> pw.debug.compute_and_print(result, include_id=False)
    array_B | array_D
    [4 4 7] | [6 1]
    [-1 1 2] | [ 4 7 -3]
    """
    from pathway.internals.common import apply_with_type

    # NOTE(review): ``tuple`` here appears to be this module's tuple reducer,
    # which shadows the builtin (the builtin would reject keyword arguments);
    # it gathers the group's values, which ``np.array`` then materializes.
    gathered = tuple(expression, skip_nones=skip_nones)
    return apply_with_type(np.array, np.ndarray, gathered)
166,781 | from __future__ import annotations
import csv
import dataclasses
import itertools
from collections import ChainMap
from collections.abc import Callable, Iterable, KeysView, Mapping
from dataclasses import dataclass
from os import PathLike
from pydoc import locate
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, NoReturn, get_type_hints
from warnings import warn
import numpy as np
import pandas as pd
from pathway.internals import dtype as dt, trace
from pathway.internals.column_properties import ColumnProperties
from pathway.internals.helpers import StableSet
from pathway.internals.runtime_type_check import check_arg_types
class Schema(metaclass=SchemaMetaclass):
def __init_subclass__(cls, /, append_only: bool | None = None, **kwargs) -> None:
class ColumnDefinition:
def __post_init__(self):
def from_properties(cls, properties: ColumnProperties) -> ColumnDefinition:
def schema_builder(
columns: dict[str, ColumnDefinition],
*,
name: str | None = None,
properties: SchemaProperties = SchemaProperties(),
) -> type[Schema]:
def schema_from_columns(
    columns: Mapping[str, clmn.Column],
    _name: str | None = None,
) -> type[Schema]:
    """Build a Schema describing the given mapping of internal column objects.

    Args:
        columns: mapping from column name to the internal column object.
        _name: optional name for the generated schema; autogenerated from the
            column names when omitted.
    """
    if _name is None:
        _name = f"schema_from_columns({list(columns.keys())})"
    definitions = {
        field: ColumnDefinition.from_properties(column.properties)
        for field, column in columns.items()
    }
    return schema_builder(columns=definitions, name=_name)
166,782 | from __future__ import annotations
import csv
import dataclasses
import itertools
from collections import ChainMap
from collections.abc import Callable, Iterable, KeysView, Mapping
from dataclasses import dataclass
from os import PathLike
from pydoc import locate
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, NoReturn, get_type_hints
from warnings import warn
import numpy as np
import pandas as pd
from pathway.internals import dtype as dt, trace
from pathway.internals.column_properties import ColumnProperties
from pathway.internals.helpers import StableSet
from pathway.internals.runtime_type_check import check_arg_types
class SchemaMetaclass(type):
__columns__: dict[str, ColumnSchema]
__dtypes__: dict[str, dt.DType]
__types__: dict[str, Any]
__universe_properties__: ColumnProperties
def __init__(self, *args, append_only: bool | None = None, **kwargs) -> None:
super().__init__(*args, **kwargs)
schema_properties = SchemaProperties(append_only=append_only)
self.__columns__ = _create_column_definitions(self, schema_properties)
self.__universe_properties__ = _universe_properties(
list(self.__columns__.values()), schema_properties
)
self.__dtypes__ = {
name: column.dtype for name, column in self.__columns__.items()
}
self.__types__ = {k: v.typehint for k, v in self.__dtypes__.items()}
def __call__(self) -> NoReturn:
raise TypeError(
"Schemas should not be called. Use `table.schema` not `table.schema()."
)
def __or__(self, other: type[Schema]) -> type[Schema]: # type: ignore
return schema_add(self, other) # type: ignore
def columns(self) -> Mapping[str, ColumnSchema]:
return MappingProxyType(self.__columns__)
def column_names(self) -> list[str]:
return list(self.keys())
def universe_properties(self) -> ColumnProperties:
return self.__universe_properties__
def column_properties(self, name: str) -> ColumnProperties:
column = self.__columns__[name]
return ColumnProperties(dtype=column.dtype, append_only=column.append_only)
def primary_key_columns(self) -> list[str] | None:
# There is a distinction between an empty set of columns denoting
        # the primary key and None. If any (including empty) set of keys is provided,
# then it will be used to compute the primary key.
#
# For the autogeneration one needs to specify None
pkey_fields = [
name for name, column in self.__columns__.items() if column.primary_key
]
return pkey_fields if pkey_fields else None
def default_values(self) -> dict[str, Any]:
return {
name: column.default_value
for name, column in self.__columns__.items()
if column.has_default_value()
}
def keys(self) -> KeysView[str]:
return self.__columns__.keys()
def update_types(self, **kwargs) -> type[Schema]:
columns: dict[str, ColumnDefinition] = {
col.name: col.to_definition() for col in self.__columns__.values()
}
for name, dtype in kwargs.items():
if name not in columns:
raise ValueError(
"Schema.update_types() argument name has to be an existing column name."
)
columns[name] = dataclasses.replace(columns[name], dtype=dt.wrap(dtype))
return schema_builder(columns=columns)
def update_properties(self, **kwargs) -> type[Schema]:
columns: dict[str, ColumnDefinition] = {
col.name: dataclasses.replace(col.to_definition(), **kwargs)
for col in self.__columns__.values()
}
properties = SchemaProperties(
**{
name: value
for name, value in kwargs.items()
if name in SchemaProperties.__annotations__
}
)
return schema_builder(
columns=columns, name=self.__name__, properties=properties
)
def __getitem__(self, name) -> ColumnSchema:
return self.__columns__[name]
def _dtypes(self) -> Mapping[str, dt.DType]:
return MappingProxyType(self.__dtypes__)
def typehints(self) -> Mapping[str, Any]:
return MappingProxyType(self.__types__)
def __repr__(self):
return f"<pathway.Schema types={self.__types__}>"
def __str__(self):
col_names = [k for k in self.keys()]
max_lens = [
max(len(column_name), len(str(self.__dtypes__[column_name])))
for column_name in col_names
]
res = " | ".join(
[
column_name.ljust(max_len)
for (column_name, max_len) in zip(col_names, max_lens)
]
)
res = res.rstrip() + "\n"
res = res + " | ".join(
[
(str(self.__dtypes__[column_name])).ljust(max_len)
for (column_name, max_len) in zip(col_names, max_lens)
]
)
return res
def _as_tuple(self):
return tuple(self.__columns__.items())
def __eq__(self, other: object) -> bool:
if isinstance(other, SchemaMetaclass):
return self._as_tuple() == other._as_tuple()
return NotImplemented
def __hash__(self) -> int:
return hash(self._as_tuple())
def generate_class(
self, class_name: str | None = None, generate_imports: bool = False
) -> str:
"""Generates class with the definition of given schema and returns it as a string.
Args:
class_name: name of the class with the schema. If not provided, name created
during schema generation will be used.
generate_imports: whether the string should start with necessary imports to
run code (default is False)
"""
def get_type_definition_and_modules(type: object) -> tuple[str, list[str]]:
if type.__module__ != "builtins":
modules = [type.__module__]
type_definition = (
type.__module__
+ "."
+ type.__qualname__ # type:ignore[attr-defined]
)
else:
modules = []
type_definition = type.__qualname__ # type:ignore[attr-defined]
if not hasattr(type, "__origin__"):
return (type_definition, modules)
else:
args_definitions = []
for arg in type.__args__: # type:ignore[attr-defined]
definition, arg_modules = get_type_definition_and_modules(arg)
args_definitions.append(definition)
modules += arg_modules
return (f"{type_definition}[{', '.join(args_definitions)}]", modules)
required_modules: StableSet[str] = StableSet()
def render_column_definition(name: str, definition: ColumnSchema):
properties = [
f"{field.name}={repr(getattr(definition, field.name))}"
for field in dataclasses.fields(definition)
if field.name not in ("name", "dtype")
and getattr(definition, field.name) != field.default
]
type_definition, modules = get_type_definition_and_modules(
self.__types__[name]
)
required_modules.update(modules)
column_definition = f" {name}: {type_definition}"
if properties:
column_definition += f" = pw.column_definition({','.join(properties)})"
return column_definition
if class_name is None:
class_name = self.__name__
if not class_name.isidentifier():
warn(
f'Name {class_name} is not a valid name for a class. Using "CustomSchema" instead'
)
class_name = "CustomSchema"
class_definition = f"class {class_name}(pw.Schema):\n"
class_definition += "\n".join(
[
render_column_definition(name, definition)
for name, definition in self.__columns__.items()
]
)
if generate_imports:
class_definition = (
"\n".join([f"import {module}" for module in required_modules])
+ "\nimport pathway as pw\n\n"
+ class_definition
)
return class_definition
def generate_class_to_file(
self,
path: str | PathLike,
class_name: str | None = None,
generate_imports: bool = False,
) -> None:
"""Generates class with the definition of given schema and saves it to a file.
Used for persisting definition for schemas, which were automatically generated.
Args:
path: path of the file, in which the schema class definition will be saved.
class_name: name of the class with the schema. If not provided, name created
during schema generation will be used.
generate_imports: whether the string should start with necessary imports to
run code (default is False)
"""
class_definition = self.generate_class(
class_name, generate_imports=generate_imports
)
with open(path, mode="w") as f:
f.write(class_definition)
def assert_equal_to(
self,
other: type[Schema],
*,
allow_superset: bool = True,
ignore_primary_keys: bool = True,
) -> None:
self_dict = self.typehints()
other_dict = other.typehints()
# Check if self has all columns of other
if self_dict.keys() < other_dict.keys():
missing_columns = other_dict.keys() - self_dict.keys()
raise AssertionError(f"schema does not have columns {missing_columns}")
# Check if types of columns are the same
for col in other_dict:
assert other_dict[col] == self_dict[col], (
f"type of column {col} does not match - its type is {self_dict[col]} in {self.__name__}",
f" and {other_dict[col]} in {other.__name__}",
)
# When allow_superset=False, check that self does not have extra columns
if not allow_superset and self_dict.keys() > other_dict.keys():
extra_columns = self_dict.keys() - other_dict.keys()
raise AssertionError(
f"there are extra columns: {extra_columns} which are not present in the provided schema"
)
# Check whether primary keys are the same
if not ignore_primary_keys:
assert self.primary_key_columns() == other.primary_key_columns(), (
f"primary keys in the schemas do not match - they are {self.primary_key_columns()} in {self.__name__}",
f" and {other.primary_key_columns()} in {other.__name__}",
)
class Schema(metaclass=SchemaMetaclass):
    """Base class to inherit from when creating schemas.
    All schemas should be subclasses of this one.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age owner pet
    ... 1 10 Alice dog
    ... 2 9 Bob dog
    ... 3 8 Alice cat
    ... 4 7 Bob dog''')
    >>> t1.schema
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
    >>> issubclass(t1.schema, pw.Schema)
    True
    >>> class NewSchema(pw.Schema):
    ... foo: int
    >>> SchemaSum = NewSchema | t1.schema
    >>> SchemaSum
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>, 'foo': <class 'int'>}>
    """
    # ``append_only`` is a class keyword consumed by SchemaMetaclass.__init__;
    # it is accepted (and stripped) here only so that the remaining **kwargs
    # passed on to ``object.__init_subclass__`` do not include it.
    def __init_subclass__(cls, /, append_only: bool | None = None, **kwargs) -> None:
        super().__init_subclass__(**kwargs)
def _schema_builder(
    _name: str,
    _dict: dict[str, Any],
    *,
    properties: SchemaProperties = SchemaProperties(),
) -> type[Schema]:
    """Create a new ``Schema`` subclass named ``_name`` from namespace ``_dict``."""
    built = SchemaMetaclass(
        _name,
        (Schema,),
        _dict,
        append_only=properties.append_only,
    )
    # The metaclass call must have produced a Schema subclass.
    assert issubclass(built, Schema)
    return built
The provided code snippet includes necessary dependencies for implementing the `schema_from_types` function. Write a Python function `def schema_from_types( _name: str | None = None, **kwargs, ) -> type[Schema]` to solve the following problem:
Constructs schema from kwargs: field=type. Example: >>> import pathway as pw >>> s = pw.schema_from_types(foo=int, bar=str) >>> s <pathway.Schema types={'foo': <class 'int'>, 'bar': <class 'str'>}> >>> issubclass(s, pw.Schema) True
Here is the function:
def schema_from_types(
    _name: str | None = None,
    **kwargs,
) -> type[Schema]:
    """Construct a schema from keyword arguments of the form ``field=type``.

    Example:
    >>> import pathway as pw
    >>> s = pw.schema_from_types(foo=int, bar=str)
    >>> s
    <pathway.Schema types={'foo': <class 'int'>, 'bar': <class 'str'>}>
    >>> issubclass(s, pw.Schema)
    True
    """
    name = _name if _name is not None else f"schema({kwargs})"
    # The field types become the annotations of the generated class.
    namespace = {
        "__metaclass__": SchemaMetaclass,
        "__annotations__": kwargs,
    }
    return _schema_builder(name, namespace)
166,783 | from __future__ import annotations
import csv
import dataclasses
import itertools
from collections import ChainMap
from collections.abc import Callable, Iterable, KeysView, Mapping
from dataclasses import dataclass
from os import PathLike
from pydoc import locate
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, NoReturn, get_type_hints
from warnings import warn
import numpy as np
import pandas as pd
from pathway.internals import dtype as dt, trace
from pathway.internals.column_properties import ColumnProperties
from pathway.internals.helpers import StableSet
from pathway.internals.runtime_type_check import check_arg_types
def _cls_fields(cls):
return {k: v for k, v in cls.__dict__.items() if not k.startswith("__")}
class SchemaMetaclass(type):
__columns__: dict[str, ColumnSchema]
__dtypes__: dict[str, dt.DType]
__types__: dict[str, Any]
__universe_properties__: ColumnProperties
def __init__(self, *args, append_only: bool | None = None, **kwargs) -> None:
super().__init__(*args, **kwargs)
schema_properties = SchemaProperties(append_only=append_only)
self.__columns__ = _create_column_definitions(self, schema_properties)
self.__universe_properties__ = _universe_properties(
list(self.__columns__.values()), schema_properties
)
self.__dtypes__ = {
name: column.dtype for name, column in self.__columns__.items()
}
self.__types__ = {k: v.typehint for k, v in self.__dtypes__.items()}
def __call__(self) -> NoReturn:
raise TypeError(
"Schemas should not be called. Use `table.schema` not `table.schema()."
)
def __or__(self, other: type[Schema]) -> type[Schema]: # type: ignore
return schema_add(self, other) # type: ignore
def columns(self) -> Mapping[str, ColumnSchema]:
return MappingProxyType(self.__columns__)
def column_names(self) -> list[str]:
return list(self.keys())
def universe_properties(self) -> ColumnProperties:
return self.__universe_properties__
def column_properties(self, name: str) -> ColumnProperties:
column = self.__columns__[name]
return ColumnProperties(dtype=column.dtype, append_only=column.append_only)
def primary_key_columns(self) -> list[str] | None:
# There is a distinction between an empty set of columns denoting
        # the primary key and None. If any (including empty) set of keys is provided,
# then it will be used to compute the primary key.
#
# For the autogeneration one needs to specify None
pkey_fields = [
name for name, column in self.__columns__.items() if column.primary_key
]
return pkey_fields if pkey_fields else None
def default_values(self) -> dict[str, Any]:
return {
name: column.default_value
for name, column in self.__columns__.items()
if column.has_default_value()
}
def keys(self) -> KeysView[str]:
return self.__columns__.keys()
def update_types(self, **kwargs) -> type[Schema]:
columns: dict[str, ColumnDefinition] = {
col.name: col.to_definition() for col in self.__columns__.values()
}
for name, dtype in kwargs.items():
if name not in columns:
raise ValueError(
"Schema.update_types() argument name has to be an existing column name."
)
columns[name] = dataclasses.replace(columns[name], dtype=dt.wrap(dtype))
return schema_builder(columns=columns)
def update_properties(self, **kwargs) -> type[Schema]:
columns: dict[str, ColumnDefinition] = {
col.name: dataclasses.replace(col.to_definition(), **kwargs)
for col in self.__columns__.values()
}
properties = SchemaProperties(
**{
name: value
for name, value in kwargs.items()
if name in SchemaProperties.__annotations__
}
)
return schema_builder(
columns=columns, name=self.__name__, properties=properties
)
def __getitem__(self, name) -> ColumnSchema:
return self.__columns__[name]
def _dtypes(self) -> Mapping[str, dt.DType]:
return MappingProxyType(self.__dtypes__)
def typehints(self) -> Mapping[str, Any]:
return MappingProxyType(self.__types__)
def __repr__(self):
return f"<pathway.Schema types={self.__types__}>"
def __str__(self):
col_names = [k for k in self.keys()]
max_lens = [
max(len(column_name), len(str(self.__dtypes__[column_name])))
for column_name in col_names
]
res = " | ".join(
[
column_name.ljust(max_len)
for (column_name, max_len) in zip(col_names, max_lens)
]
)
res = res.rstrip() + "\n"
res = res + " | ".join(
[
(str(self.__dtypes__[column_name])).ljust(max_len)
for (column_name, max_len) in zip(col_names, max_lens)
]
)
return res
def _as_tuple(self):
return tuple(self.__columns__.items())
def __eq__(self, other: object) -> bool:
if isinstance(other, SchemaMetaclass):
return self._as_tuple() == other._as_tuple()
return NotImplemented
def __hash__(self) -> int:
return hash(self._as_tuple())
def generate_class(
self, class_name: str | None = None, generate_imports: bool = False
) -> str:
"""Generates class with the definition of given schema and returns it as a string.
Args:
class_name: name of the class with the schema. If not provided, name created
during schema generation will be used.
generate_imports: whether the string should start with necessary imports to
run code (default is False)
"""
def get_type_definition_and_modules(type: object) -> tuple[str, list[str]]:
if type.__module__ != "builtins":
modules = [type.__module__]
type_definition = (
type.__module__
+ "."
+ type.__qualname__ # type:ignore[attr-defined]
)
else:
modules = []
type_definition = type.__qualname__ # type:ignore[attr-defined]
if not hasattr(type, "__origin__"):
return (type_definition, modules)
else:
args_definitions = []
for arg in type.__args__: # type:ignore[attr-defined]
definition, arg_modules = get_type_definition_and_modules(arg)
args_definitions.append(definition)
modules += arg_modules
return (f"{type_definition}[{', '.join(args_definitions)}]", modules)
required_modules: StableSet[str] = StableSet()
def render_column_definition(name: str, definition: ColumnSchema):
properties = [
f"{field.name}={repr(getattr(definition, field.name))}"
for field in dataclasses.fields(definition)
if field.name not in ("name", "dtype")
and getattr(definition, field.name) != field.default
]
type_definition, modules = get_type_definition_and_modules(
self.__types__[name]
)
required_modules.update(modules)
column_definition = f" {name}: {type_definition}"
if properties:
column_definition += f" = pw.column_definition({','.join(properties)})"
return column_definition
if class_name is None:
class_name = self.__name__
if not class_name.isidentifier():
warn(
f'Name {class_name} is not a valid name for a class. Using "CustomSchema" instead'
)
class_name = "CustomSchema"
class_definition = f"class {class_name}(pw.Schema):\n"
class_definition += "\n".join(
[
render_column_definition(name, definition)
for name, definition in self.__columns__.items()
]
)
if generate_imports:
class_definition = (
"\n".join([f"import {module}" for module in required_modules])
+ "\nimport pathway as pw\n\n"
+ class_definition
)
return class_definition
def generate_class_to_file(
self,
path: str | PathLike,
class_name: str | None = None,
generate_imports: bool = False,
) -> None:
"""Generates class with the definition of given schema and saves it to a file.
Used for persisting definition for schemas, which were automatically generated.
Args:
path: path of the file, in which the schema class definition will be saved.
class_name: name of the class with the schema. If not provided, name created
during schema generation will be used.
generate_imports: whether the string should start with necessary imports to
run code (default is False)
"""
class_definition = self.generate_class(
class_name, generate_imports=generate_imports
)
with open(path, mode="w") as f:
f.write(class_definition)
def assert_equal_to(
self,
other: type[Schema],
*,
allow_superset: bool = True,
ignore_primary_keys: bool = True,
) -> None:
self_dict = self.typehints()
other_dict = other.typehints()
# Check if self has all columns of other
if self_dict.keys() < other_dict.keys():
missing_columns = other_dict.keys() - self_dict.keys()
raise AssertionError(f"schema does not have columns {missing_columns}")
# Check if types of columns are the same
for col in other_dict:
assert other_dict[col] == self_dict[col], (
f"type of column {col} does not match - its type is {self_dict[col]} in {self.__name__}",
f" and {other_dict[col]} in {other.__name__}",
)
# When allow_superset=False, check that self does not have extra columns
if not allow_superset and self_dict.keys() > other_dict.keys():
extra_columns = self_dict.keys() - other_dict.keys()
raise AssertionError(
f"there are extra columns: {extra_columns} which are not present in the provided schema"
)
# Check whether primary keys are the same
if not ignore_primary_keys:
assert self.primary_key_columns() == other.primary_key_columns(), (
f"primary keys in the schemas do not match - they are {self.primary_key_columns()} in {self.__name__}",
f" and {other.primary_key_columns()} in {other.__name__}",
)
class Schema(metaclass=SchemaMetaclass):
    """Base class to inherit from when creating schemas.
    All schemas should be subclasses of this one.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age owner pet
    ... 1 10 Alice dog
    ... 2 9 Bob dog
    ... 3 8 Alice cat
    ... 4 7 Bob dog''')
    >>> t1.schema
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
    >>> issubclass(t1.schema, pw.Schema)
    True
    >>> class NewSchema(pw.Schema):
    ... foo: int
    >>> SchemaSum = NewSchema | t1.schema
    >>> SchemaSum
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>, 'foo': <class 'int'>}>
    """
    # ``append_only`` is a class keyword handled by the metaclass; accepting it
    # here strips it from **kwargs before delegating to object.__init_subclass__.
    def __init_subclass__(cls, /, append_only: bool | None = None, **kwargs) -> None:
        super().__init_subclass__(**kwargs)
def _schema_builder(
    _name: str,
    _dict: dict[str, Any],
    *,
    properties: SchemaProperties = SchemaProperties(),
) -> type[Schema]:
    """Instantiate a ``Schema`` subclass called ``_name`` with namespace ``_dict``."""
    result = SchemaMetaclass(
        _name,
        (Schema,),
        _dict,
        append_only=properties.append_only,
    )
    # Sanity check: direct metaclass invocation still yields a Schema subclass.
    assert issubclass(result, Schema)
    return result
def schema_add(*schemas: type[Schema]) -> type[Schema]:
    """Merge several schemas into a single one.

    Column names and class-level fields must be pairwise disjoint: the asserts
    verify that merging dropped nothing, i.e. the merged size equals the sum of
    the parts.
    """
    per_schema_hints = [get_type_hints(schema) for schema in schemas]
    merged_hints = dict(ChainMap(*per_schema_hints))
    assert len(merged_hints) == sum(len(hints) for hints in per_schema_hints)
    per_schema_fields = [_cls_fields(schema) for schema in schemas]
    merged_fields = dict(ChainMap(*per_schema_fields))
    assert len(merged_fields) == sum(len(fields) for fields in per_schema_fields)
    combined_name = "_".join(schema.__name__ for schema in schemas)
    # ``__orig__`` records the source schemas so the sum can be traced back.
    namespace = {
        "__metaclass__": SchemaMetaclass,
        "__annotations__": merged_hints,
        "__orig__": {f"__arg{i}__": arg for i, arg in enumerate(schemas)},
        **merged_fields,
    }
    return _schema_builder(combined_name, namespace)
166,784 | from __future__ import annotations
import csv
import dataclasses
import itertools
from collections import ChainMap
from collections.abc import Callable, Iterable, KeysView, Mapping
from dataclasses import dataclass
from os import PathLike
from pydoc import locate
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, NoReturn, get_type_hints
from warnings import warn
import numpy as np
import pandas as pd
from pathway.internals import dtype as dt, trace
from pathway.internals.column_properties import ColumnProperties
from pathway.internals.helpers import StableSet
from pathway.internals.runtime_type_check import check_arg_types
def _cls_fields(cls):
return {k: v for k, v in cls.__dict__.items() if not k.startswith("__")}
class SchemaProperties:
    # Schema-wide append-only flag; None appears to mean "unspecified".
    # NOTE(review): presumably a dataclass in the original source — callers
    # construct it as SchemaProperties(append_only=...), which a bare class
    # body like this would not accept; the decorator is not visible in this
    # excerpt. Confirm against upstream.
    append_only: bool | None = None
class SchemaMetaclass(type):
__columns__: dict[str, ColumnSchema]
__dtypes__: dict[str, dt.DType]
__types__: dict[str, Any]
__universe_properties__: ColumnProperties
def __init__(self, *args, append_only: bool | None = None, **kwargs) -> None:
super().__init__(*args, **kwargs)
schema_properties = SchemaProperties(append_only=append_only)
self.__columns__ = _create_column_definitions(self, schema_properties)
self.__universe_properties__ = _universe_properties(
list(self.__columns__.values()), schema_properties
)
self.__dtypes__ = {
name: column.dtype for name, column in self.__columns__.items()
}
self.__types__ = {k: v.typehint for k, v in self.__dtypes__.items()}
def __call__(self) -> NoReturn:
raise TypeError(
"Schemas should not be called. Use `table.schema` not `table.schema()."
)
def __or__(self, other: type[Schema]) -> type[Schema]: # type: ignore
return schema_add(self, other) # type: ignore
def columns(self) -> Mapping[str, ColumnSchema]:
return MappingProxyType(self.__columns__)
def column_names(self) -> list[str]:
return list(self.keys())
def universe_properties(self) -> ColumnProperties:
return self.__universe_properties__
def column_properties(self, name: str) -> ColumnProperties:
column = self.__columns__[name]
return ColumnProperties(dtype=column.dtype, append_only=column.append_only)
def primary_key_columns(self) -> list[str] | None:
# There is a distinction between an empty set of columns denoting
# the primary key and None. If any (including empty) set of keys if provided,
# then it will be used to compute the primary key.
#
# For the autogeneration one needs to specify None
pkey_fields = [
name for name, column in self.__columns__.items() if column.primary_key
]
return pkey_fields if pkey_fields else None
def default_values(self) -> dict[str, Any]:
return {
name: column.default_value
for name, column in self.__columns__.items()
if column.has_default_value()
}
def keys(self) -> KeysView[str]:
return self.__columns__.keys()
def update_types(self, **kwargs) -> type[Schema]:
columns: dict[str, ColumnDefinition] = {
col.name: col.to_definition() for col in self.__columns__.values()
}
for name, dtype in kwargs.items():
if name not in columns:
raise ValueError(
"Schema.update_types() argument name has to be an existing column name."
)
columns[name] = dataclasses.replace(columns[name], dtype=dt.wrap(dtype))
return schema_builder(columns=columns)
def update_properties(self, **kwargs) -> type[Schema]:
columns: dict[str, ColumnDefinition] = {
col.name: dataclasses.replace(col.to_definition(), **kwargs)
for col in self.__columns__.values()
}
properties = SchemaProperties(
**{
name: value
for name, value in kwargs.items()
if name in SchemaProperties.__annotations__
}
)
return schema_builder(
columns=columns, name=self.__name__, properties=properties
)
def __getitem__(self, name) -> ColumnSchema:
return self.__columns__[name]
def _dtypes(self) -> Mapping[str, dt.DType]:
return MappingProxyType(self.__dtypes__)
def typehints(self) -> Mapping[str, Any]:
return MappingProxyType(self.__types__)
def __repr__(self):
return f"<pathway.Schema types={self.__types__}>"
def __str__(self):
col_names = [k for k in self.keys()]
max_lens = [
max(len(column_name), len(str(self.__dtypes__[column_name])))
for column_name in col_names
]
res = " | ".join(
[
column_name.ljust(max_len)
for (column_name, max_len) in zip(col_names, max_lens)
]
)
res = res.rstrip() + "\n"
res = res + " | ".join(
[
(str(self.__dtypes__[column_name])).ljust(max_len)
for (column_name, max_len) in zip(col_names, max_lens)
]
)
return res
def _as_tuple(self):
return tuple(self.__columns__.items())
def __eq__(self, other: object) -> bool:
if isinstance(other, SchemaMetaclass):
return self._as_tuple() == other._as_tuple()
return NotImplemented
def __hash__(self) -> int:
return hash(self._as_tuple())
def generate_class(
self, class_name: str | None = None, generate_imports: bool = False
) -> str:
"""Generates class with the definition of given schema and returns it as a string.
Args:
class_name: name of the class with the schema. If not provided, name created
during schema generation will be used.
generate_imports: whether the string should start with necessary imports to
run code (default is False)
"""
def get_type_definition_and_modules(type: object) -> tuple[str, list[str]]:
if type.__module__ != "builtins":
modules = [type.__module__]
type_definition = (
type.__module__
+ "."
+ type.__qualname__ # type:ignore[attr-defined]
)
else:
modules = []
type_definition = type.__qualname__ # type:ignore[attr-defined]
if not hasattr(type, "__origin__"):
return (type_definition, modules)
else:
args_definitions = []
for arg in type.__args__: # type:ignore[attr-defined]
definition, arg_modules = get_type_definition_and_modules(arg)
args_definitions.append(definition)
modules += arg_modules
return (f"{type_definition}[{', '.join(args_definitions)}]", modules)
required_modules: StableSet[str] = StableSet()
def render_column_definition(name: str, definition: ColumnSchema):
properties = [
f"{field.name}={repr(getattr(definition, field.name))}"
for field in dataclasses.fields(definition)
if field.name not in ("name", "dtype")
and getattr(definition, field.name) != field.default
]
type_definition, modules = get_type_definition_and_modules(
self.__types__[name]
)
required_modules.update(modules)
column_definition = f" {name}: {type_definition}"
if properties:
column_definition += f" = pw.column_definition({','.join(properties)})"
return column_definition
if class_name is None:
class_name = self.__name__
if not class_name.isidentifier():
warn(
f'Name {class_name} is not a valid name for a class. Using "CustomSchema" instead'
)
class_name = "CustomSchema"
class_definition = f"class {class_name}(pw.Schema):\n"
class_definition += "\n".join(
[
render_column_definition(name, definition)
for name, definition in self.__columns__.items()
]
)
if generate_imports:
class_definition = (
"\n".join([f"import {module}" for module in required_modules])
+ "\nimport pathway as pw\n\n"
+ class_definition
)
return class_definition
def generate_class_to_file(
self,
path: str | PathLike,
class_name: str | None = None,
generate_imports: bool = False,
) -> None:
"""Generates class with the definition of given schema and saves it to a file.
Used for persisting definition for schemas, which were automatically generated.
Args:
path: path of the file, in which the schema class definition will be saved.
class_name: name of the class with the schema. If not provided, name created
during schema generation will be used.
generate_imports: whether the string should start with necessary imports to
run code (default is False)
"""
class_definition = self.generate_class(
class_name, generate_imports=generate_imports
)
with open(path, mode="w") as f:
f.write(class_definition)
def assert_equal_to(
self,
other: type[Schema],
*,
allow_superset: bool = True,
ignore_primary_keys: bool = True,
) -> None:
self_dict = self.typehints()
other_dict = other.typehints()
# Check if self has all columns of other
if self_dict.keys() < other_dict.keys():
missing_columns = other_dict.keys() - self_dict.keys()
raise AssertionError(f"schema does not have columns {missing_columns}")
# Check if types of columns are the same
for col in other_dict:
assert other_dict[col] == self_dict[col], (
f"type of column {col} does not match - its type is {self_dict[col]} in {self.__name__}",
f" and {other_dict[col]} in {other.__name__}",
)
# When allow_superset=False, check that self does not have extra columns
if not allow_superset and self_dict.keys() > other_dict.keys():
extra_columns = self_dict.keys() - other_dict.keys()
raise AssertionError(
f"there are extra columns: {extra_columns} which are not present in the provided schema"
)
# Check whether primary keys are the same
if not ignore_primary_keys:
assert self.primary_key_columns() == other.primary_key_columns(), (
f"primary keys in the schemas do not match - they are {self.primary_key_columns()} in {self.__name__}",
f" and {other.primary_key_columns()} in {other.__name__}",
)
class ColumnSchema:
dtype: dt.DType
name: str
primary_key: bool = False
default_value: Any = _no_default_value_marker
append_only: bool = False
description: str | None = None # used in OpenAPI schema autogeneration
example: Any = None # used in OpenAPI schema autogeneration
def has_default_value(self) -> bool:
return self.default_value != _no_default_value_marker
def to_definition(self) -> ColumnDefinition:
return ColumnDefinition(
primary_key=self.primary_key,
default_value=self.default_value,
dtype=self.dtype,
name=self.name,
append_only=self.append_only,
description=self.description,
example=self.example,
)
def typehint(self):
return self.dtype.typehint
class ColumnDefinition:
primary_key: bool = False
default_value: Any | None = _no_default_value_marker
dtype: dt.DType | None = dt.ANY
name: str | None = None
append_only: bool | None = None
description: str | None = None # used in OpenAPI schema autogeneration
example: Any = None # used in OpenAPI schema autogeneration
def __post_init__(self):
assert self.dtype is None or isinstance(self.dtype, dt.DType)
def from_properties(cls, properties: ColumnProperties) -> ColumnDefinition:
return cls(dtype=properties.dtype, append_only=properties.append_only)
def column_definition(
*,
primary_key: bool = False,
default_value: Any | None = _no_default_value_marker,
dtype: Any | None = None,
name: str | None = None,
append_only: bool | None = None,
description: str | None = None,
example: Any = None,
) -> Any: # Return any so that mypy does not complain
"""Creates column definition
Args:
primary_key: should column be a part of a primary key.
default_value: default value replacing blank entries. The default value of the
column must be specified explicitly,
otherwise there will be no default value.
dtype: data type. When used in schema class,
will be deduced from the type annotation.
name: name of a column. When used in schema class,
will be deduced from the attribute name.
append_only: whether column is append-only. if unspecified, defaults to False
or to value specified at the schema definition level
Returns:
Column definition.
Example:
>>> import pathway as pw
>>> class NewSchema(pw.Schema):
... key: int = pw.column_definition(primary_key=True)
... timestamp: str = pw.column_definition(name="@timestamp")
... data: str
>>> NewSchema
<pathway.Schema types={'key': <class 'int'>, '@timestamp': <class 'str'>, 'data': <class 'str'>}>
"""
return ColumnDefinition(
dtype=dt.wrap(dtype) if dtype is not None else None,
primary_key=primary_key,
default_value=default_value,
name=name,
append_only=append_only,
description=description,
example=example,
)
def _create_column_definitions(
schema: SchemaMetaclass, schema_properties: SchemaProperties
) -> dict[str, ColumnSchema]:
localns = locals()
# Update locals to handle recursive Schema definitions
localns[schema.__name__] = schema
annotations = get_type_hints(schema, localns=localns)
fields = _cls_fields(schema)
columns = {}
for column_name, annotation in annotations.items():
col_dtype = dt.wrap(annotation)
column = fields.pop(column_name, column_definition(dtype=col_dtype))
if not isinstance(column, ColumnDefinition):
raise ValueError(
f"`{column_name}` should be a column definition, found {type(column)}"
)
dtype = column.dtype
if dtype is None:
dtype = col_dtype
if col_dtype != dtype:
raise TypeError(
f"type annotation of column `{column_name}` does not match column definition"
)
column_name = column.name or column_name
def _get_column_property(property_name: str, default: Any) -> Any:
match (
getattr(column, property_name),
getattr(schema_properties, property_name),
):
case (None, None):
return default
case (None, schema_property):
return schema_property
case (column_property, None):
return column_property
case (column_property, schema_property):
if column_property != schema_property:
raise ValueError(
f"ambiguous property; schema property `{property_name}` has"
+ f" value {schema_property!r} but column"
+ f" `{column_name}` got {column_property!r}"
)
return column_property
columns[column_name] = ColumnSchema(
primary_key=column.primary_key,
default_value=column.default_value,
dtype=dt.wrap(dtype),
name=column_name,
append_only=_get_column_property("append_only", False),
description=column.description,
example=column.example,
)
if fields:
names = ", ".join(fields.keys())
raise ValueError(f"definitions of columns {names} lack type annotation")
return columns | null |
166,785 | from __future__ import annotations
import csv
import dataclasses
import itertools
from collections import ChainMap
from collections.abc import Callable, Iterable, KeysView, Mapping
from dataclasses import dataclass
from os import PathLike
from pydoc import locate
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, NoReturn, get_type_hints
from warnings import warn
import numpy as np
import pandas as pd
from pathway.internals import dtype as dt, trace
from pathway.internals.column_properties import ColumnProperties
from pathway.internals.helpers import StableSet
from pathway.internals.runtime_type_check import check_arg_types
class SchemaProperties:
append_only: bool | None = None
class ColumnSchema:
dtype: dt.DType
name: str
primary_key: bool = False
default_value: Any = _no_default_value_marker
append_only: bool = False
description: str | None = None # used in OpenAPI schema autogeneration
example: Any = None # used in OpenAPI schema autogeneration
def has_default_value(self) -> bool:
return self.default_value != _no_default_value_marker
def to_definition(self) -> ColumnDefinition:
return ColumnDefinition(
primary_key=self.primary_key,
default_value=self.default_value,
dtype=self.dtype,
name=self.name,
append_only=self.append_only,
description=self.description,
example=self.example,
)
def typehint(self):
return self.dtype.typehint
class ColumnProperties:
dtype: dt.DType
append_only: bool = False
def _universe_properties(
columns: list[ColumnSchema], schema_properties: SchemaProperties
) -> ColumnProperties:
append_only: bool = False
if len(columns) > 0:
append_only = any(c.append_only for c in columns)
elif schema_properties.append_only is not None:
append_only = schema_properties.append_only
return ColumnProperties(dtype=dt.POINTER, append_only=append_only) | null |
166,786 | from __future__ import annotations
import csv
import dataclasses
import itertools
from collections import ChainMap
from collections.abc import Callable, Iterable, KeysView, Mapping
from dataclasses import dataclass
from os import PathLike
from pydoc import locate
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, NoReturn, get_type_hints
from warnings import warn
import numpy as np
import pandas as pd
from pathway.internals import dtype as dt, trace
from pathway.internals.column_properties import ColumnProperties
from pathway.internals.helpers import StableSet
from pathway.internals.runtime_type_check import check_arg_types
class Schema(metaclass=SchemaMetaclass):
"""Base class to inherit from when creating schemas.
All schemas should be subclasses of this one.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age owner pet
... 1 10 Alice dog
... 2 9 Bob dog
... 3 8 Alice cat
... 4 7 Bob dog''')
>>> t1.schema
<pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
>>> issubclass(t1.schema, pw.Schema)
True
>>> class NewSchema(pw.Schema):
... foo: int
>>> SchemaSum = NewSchema | t1.schema
>>> SchemaSum
<pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>, 'foo': <class 'int'>}>
"""
def __init_subclass__(cls, /, append_only: bool | None = None, **kwargs) -> None:
super().__init_subclass__(**kwargs)
def is_subschema(left: type[Schema], right: type[Schema]):
if left.keys() != right.keys():
return False
for k in left.keys():
if not dt.dtype_issubclass(left.__dtypes__[k], right.__dtypes__[k]):
return False
return True | null |
166,787 | from __future__ import annotations
import csv
import dataclasses
import itertools
from collections import ChainMap
from collections.abc import Callable, Iterable, KeysView, Mapping
from dataclasses import dataclass
from os import PathLike
from pydoc import locate
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, NoReturn, get_type_hints
from warnings import warn
import numpy as np
import pandas as pd
from pathway.internals import dtype as dt, trace
from pathway.internals.column_properties import ColumnProperties
from pathway.internals.helpers import StableSet
from pathway.internals.runtime_type_check import check_arg_types
class SchemaProperties:
append_only: bool | None = None
class Schema(metaclass=SchemaMetaclass):
"""Base class to inherit from when creating schemas.
All schemas should be subclasses of this one.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age owner pet
... 1 10 Alice dog
... 2 9 Bob dog
... 3 8 Alice cat
... 4 7 Bob dog''')
>>> t1.schema
<pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
>>> issubclass(t1.schema, pw.Schema)
True
>>> class NewSchema(pw.Schema):
... foo: int
>>> SchemaSum = NewSchema | t1.schema
>>> SchemaSum
<pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>, 'foo': <class 'int'>}>
"""
def __init_subclass__(cls, /, append_only: bool | None = None, **kwargs) -> None:
super().__init_subclass__(**kwargs)
def column_definition(
*,
primary_key: bool = False,
default_value: Any | None = _no_default_value_marker,
dtype: Any | None = None,
name: str | None = None,
append_only: bool | None = None,
description: str | None = None,
example: Any = None,
) -> Any: # Return any so that mypy does not complain
"""Creates column definition
Args:
primary_key: should column be a part of a primary key.
default_value: default value replacing blank entries. The default value of the
column must be specified explicitly,
otherwise there will be no default value.
dtype: data type. When used in schema class,
will be deduced from the type annotation.
name: name of a column. When used in schema class,
will be deduced from the attribute name.
append_only: whether column is append-only. if unspecified, defaults to False
or to value specified at the schema definition level
Returns:
Column definition.
Example:
>>> import pathway as pw
>>> class NewSchema(pw.Schema):
... key: int = pw.column_definition(primary_key=True)
... timestamp: str = pw.column_definition(name="@timestamp")
... data: str
>>> NewSchema
<pathway.Schema types={'key': <class 'int'>, '@timestamp': <class 'str'>, 'data': <class 'str'>}>
"""
return ColumnDefinition(
dtype=dt.wrap(dtype) if dtype is not None else None,
primary_key=primary_key,
default_value=default_value,
name=name,
append_only=append_only,
description=description,
example=example,
)
def schema_builder(
columns: dict[str, ColumnDefinition],
*,
name: str | None = None,
properties: SchemaProperties = SchemaProperties(),
) -> type[Schema]:
"""Allows to build schema inline, from a dictionary of column definitions.
Args:
columns: dictionary of column definitions.
name: schema name.
properties: schema properties.
Returns:
Schema
Example:
>>> import pathway as pw
>>> pw.schema_builder(columns={
... 'key': pw.column_definition(dtype=int, primary_key=True),
... 'data': pw.column_definition(dtype=int, default_value=0)
... }, name="my_schema")
<pathway.Schema types={'key': <class 'int'>, 'data': <class 'int'>}>
"""
if name is None:
name = "custom_schema(" + str(list(columns.keys())) + ")"
__annotations = {name: c.dtype or Any for name, c in columns.items()}
__dict: dict[str, Any] = {
"__metaclass__": SchemaMetaclass,
"__annotations__": __annotations,
**columns,
}
return _schema_builder(name, __dict, properties=properties)
The provided code snippet includes necessary dependencies for implementing the `schema_from_dict` function. Write a Python function `def schema_from_dict( columns: dict, *, name: str | None = None, properties: dict | SchemaProperties = SchemaProperties(), ) -> type[Schema]` to solve the following problem:
Allows to build schema inline, from a dictionary of column definitions. Compared to pw.schema_builder, this one uses simpler structure of the dictionary, which allows it to be loaded from JSON file. Args: columns: dictionary of column definitions. The keys in this dictionary are names of the columns, and the values are either: - type of the column - dictionary with keys: "dtype", "primary_key", "default_value" and values, respectively, type of the column, whether it is a primary key, and column's default value. The type can be given both by python class, or string with class name - that is both int and "int" are accepted. name: schema name. properties: schema properties, given either as instance of SchemaProperties class or a dict specifying arguments of SchemaProperties class. Returns: Schema Example: >>> import pathway as pw >>> pw.schema_from_dict(columns={ ... 'key': {"dtype": "int", "primary_key": True}, ... 'data': {"dtype": "int", "default_value": 0} ... }, name="my_schema") <pathway.Schema types={'key': <class 'int'>, 'data': <class 'int'>}>
Here is the function:
def schema_from_dict(
columns: dict,
*,
name: str | None = None,
properties: dict | SchemaProperties = SchemaProperties(),
) -> type[Schema]:
"""Allows to build schema inline, from a dictionary of column definitions.
Compared to pw.schema_builder, this one uses simpler structure of the dictionary,
which allows it to be loaded from JSON file.
Args:
columns: dictionary of column definitions. The keys in this dictionary are names
of the columns, and the values are either:
- type of the column
- dictionary with keys: "dtype", "primary_key", "default_value" and values,
respectively, type of the column, whether it is a primary key, and column's
default value.
The type can be given both by python class, or string with class name - that
is both int and "int" are accepted.
name: schema name.
properties: schema properties, given either as instance of SchemaProperties class
or a dict specifying arguments of SchemaProperties class.
Returns:
Schema
Example:
>>> import pathway as pw
>>> pw.schema_from_dict(columns={
... 'key': {"dtype": "int", "primary_key": True},
... 'data': {"dtype": "int", "default_value": 0}
... }, name="my_schema")
<pathway.Schema types={'key': <class 'int'>, 'data': <class 'int'>}>
"""
def get_dtype(dtype) -> dt.DType:
if isinstance(dtype, str):
dtype = locate(dtype)
return dt.wrap(dtype)
def create_column_definition(entry):
if not isinstance(entry, dict):
entry = {"dtype": entry}
entry["dtype"] = get_dtype(entry.get("dtype", Any))
return column_definition(**entry)
column_definitions = {
column_name: create_column_definition(value)
for (column_name, value) in columns.items()
}
if isinstance(properties, dict):
properties = SchemaProperties(**properties)
return schema_builder(column_definitions, name=name, properties=properties) | Allows to build schema inline, from a dictionary of column definitions. Compared to pw.schema_builder, this one uses simpler structure of the dictionary, which allows it to be loaded from JSON file. Args: columns: dictionary of column definitions. The keys in this dictionary are names of the columns, and the values are either: - type of the column - dictionary with keys: "dtype", "primary_key", "default_value" and values, respectively, type of the column, whether it is a primary key, and column's default value. The type can be given both by python class, or string with class name - that is both int and "int" are accepted. name: schema name. properties: schema properties, given either as instance of SchemaProperties class or a dict specifying arguments of SchemaProperties class. Returns: Schema Example: >>> import pathway as pw >>> pw.schema_from_dict(columns={ ... 'key': {"dtype": "int", "primary_key": True}, ... 'data': {"dtype": "int", "default_value": 0} ... }, name="my_schema") <pathway.Schema types={'key': <class 'int'>, 'data': <class 'int'>}> |
166,788 | from __future__ import annotations
import csv
import dataclasses
import itertools
from collections import ChainMap
from collections.abc import Callable, Iterable, KeysView, Mapping
from dataclasses import dataclass
from os import PathLike
from pydoc import locate
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, NoReturn, get_type_hints
from warnings import warn
import numpy as np
import pandas as pd
from pathway.internals import dtype as dt, trace
from pathway.internals.column_properties import ColumnProperties
from pathway.internals.helpers import StableSet
from pathway.internals.runtime_type_check import check_arg_types
class SchemaProperties:
append_only: bool | None = None
def column_definition(
*,
primary_key: bool = False,
default_value: Any | None = _no_default_value_marker,
dtype: Any | None = None,
name: str | None = None,
append_only: bool | None = None,
description: str | None = None,
example: Any = None,
) -> Any: # Return any so that mypy does not complain
"""Creates column definition
Args:
primary_key: should column be a part of a primary key.
default_value: default value replacing blank entries. The default value of the
column must be specified explicitly,
otherwise there will be no default value.
dtype: data type. When used in schema class,
will be deduced from the type annotation.
name: name of a column. When used in schema class,
will be deduced from the attribute name.
append_only: whether column is append-only. if unspecified, defaults to False
or to value specified at the schema definition level
Returns:
Column definition.
Example:
>>> import pathway as pw
>>> class NewSchema(pw.Schema):
... key: int = pw.column_definition(primary_key=True)
... timestamp: str = pw.column_definition(name="@timestamp")
... data: str
>>> NewSchema
<pathway.Schema types={'key': <class 'int'>, '@timestamp': <class 'str'>, 'data': <class 'str'>}>
"""
return ColumnDefinition(
dtype=dt.wrap(dtype) if dtype is not None else None,
primary_key=primary_key,
default_value=default_value,
name=name,
append_only=append_only,
description=description,
example=example,
)
def schema_builder(
columns: dict[str, ColumnDefinition],
*,
name: str | None = None,
properties: SchemaProperties = SchemaProperties(),
) -> type[Schema]:
"""Allows to build schema inline, from a dictionary of column definitions.
Args:
columns: dictionary of column definitions.
name: schema name.
properties: schema properties.
Returns:
Schema
Example:
>>> import pathway as pw
>>> pw.schema_builder(columns={
... 'key': pw.column_definition(dtype=int, primary_key=True),
... 'data': pw.column_definition(dtype=int, default_value=0)
... }, name="my_schema")
<pathway.Schema types={'key': <class 'int'>, 'data': <class 'int'>}>
"""
if name is None:
name = "custom_schema(" + str(list(columns.keys())) + ")"
__annotations = {name: c.dtype or Any for name, c in columns.items()}
__dict: dict[str, Any] = {
"__metaclass__": SchemaMetaclass,
"__annotations__": __annotations,
**columns,
}
return _schema_builder(name, __dict, properties=properties)
def _is_parsable_to(s: str, parse_fun: Callable):
try:
parse_fun(s)
return True
except ValueError:
return False
The provided code snippet includes necessary dependencies for implementing the `schema_from_csv` function. Write a Python function `def schema_from_csv( path: str, *, name: str | None = None, properties: SchemaProperties = SchemaProperties(), delimiter: str = ",", quote: str = '"', comment_character: str | None = None, escape: str | None = None, double_quote_escapes: bool = True, num_parsed_rows: int | None = None, )` to solve the following problem:
Allows to generate schema based on a CSV file. The names of the columns are taken from the header of the CSV file. Types of columns are inferred from the values, by checking if they can be parsed. Currently supported types are str, int and float. Args: path: path to the CSV file. name: schema name. properties: schema properties. delimiter: delimiter used in CSV file. Defaults to ",". quote: quote character used in CSV file. Defaults to '"'. comment_character: character used in CSV file to denote comments. Defaults to None escape: escape character used in CSV file. Defaults to None. double_quote_escapes: enable escapes of double quotes. Defaults to True. num_parsed_rows: number of rows, which will be parsed when inferring types. When set to None, all rows will be parsed. When set to 0, types of all columns will be set to str. Defaults to None. Returns: Schema
Here is the function:
def schema_from_csv(
path: str,
*,
name: str | None = None,
properties: SchemaProperties = SchemaProperties(),
delimiter: str = ",",
quote: str = '"',
comment_character: str | None = None,
escape: str | None = None,
double_quote_escapes: bool = True,
num_parsed_rows: int | None = None,
):
"""Allows to generate schema based on a CSV file.
The names of the columns are taken from the header of the CSV file.
Types of columns are inferred from the values, by checking if they can be parsed.
Currently supported types are str, int and float.
Args:
path: path to the CSV file.
name: schema name.
properties: schema properties.
delimiter: delimiter used in CSV file. Defaults to ",".
quote: quote character used in CSV file. Defaults to '"'.
comment_character: character used in CSV file to denote comments.
Defaults to None
escape: escape character used in CSV file. Defaults to None.
double_quote_escapes: enable escapes of double quotes. Defaults to True.
num_parsed_rows: number of rows, which will be parsed when inferring types. When
set to None, all rows will be parsed. When set to 0, types of all columns
will be set to str. Defaults to None.
Returns:
Schema
"""
def remove_comments_from_file(f: Iterable[str], comment_char: str | None):
for line in f:
if line.lstrip()[0] != comment_char:
yield line
with open(path) as f:
csv_reader = csv.DictReader(
remove_comments_from_file(f, comment_character),
delimiter=delimiter,
escapechar=escape,
quoting=csv.QUOTE_ALL,
quotechar=quote,
doublequote=double_quote_escapes,
)
if csv_reader.fieldnames is None:
raise ValueError("can't generate Schema based on an empty CSV file")
column_names = csv_reader.fieldnames
if num_parsed_rows is None:
csv_data = list(csv_reader)
else:
csv_data = list(itertools.islice(csv_reader, num_parsed_rows))
def choose_type(entries: list[str]):
if len(entries) == 0:
return Any
if all(_is_parsable_to(s, int) for s in entries):
return int
if all(_is_parsable_to(s, float) for s in entries):
return float
return str
column_types = {
column_name: choose_type([row[column_name] for row in csv_data])
for column_name in column_names
}
columns = {
column_name: column_definition(dtype=column_types[column_name])
for column_name in column_names
}
return schema_builder(columns, name=name, properties=properties) | Allows to generate schema based on a CSV file. The names of the columns are taken from the header of the CSV file. Types of columns are inferred from the values, by checking if they can be parsed. Currently supported types are str, int and float. Args: path: path to the CSV file. name: schema name. properties: schema properties. delimiter: delimiter used in CSV file. Defaults to ",". quote: quote character used in CSV file. Defaults to '"'. comment_character: character used in CSV file to denote comments. Defaults to None escape: escape character used in CSV file. Defaults to None. double_quote_escapes: enable escapes of double quotes. Defaults to True. num_parsed_rows: number of rows, which will be parsed when inferring types. When set to None, all rows will be parsed. When set to 0, types of all columns will be set to str. Defaults to None. Returns: Schema |
166,789 | from __future__ import annotations
from collections import namedtuple
from collections.abc import Iterable, Iterator, MutableSet
from functools import partial, wraps
from typing import Generic, TypeVar
from pathway.internals import arg_tuple
from pathway.internals.shadows import inspect
def with_optional_kwargs(decorator):
@wraps(
decorator
) # necessary for doctests to work, see https://www.rosipov.com/blog/python-doctests-and-decorators-bug/
def wrapper(func=None, **kwargs):
if func is None:
return partial(wrapper, **kwargs)
return decorator(func, **kwargs)
return wrapper | null |
166,790 | from __future__ import annotations
from collections import namedtuple
from collections.abc import Iterable, Iterator, MutableSet
from functools import partial, wraps
from typing import Generic, TypeVar
from pathway.internals import arg_tuple
from pathway.internals.shadows import inspect
FunctionSpec = namedtuple("FunctionSpec", ["func", "arg_names", "defaults"])
The provided code snippet includes necessary dependencies for implementing the `fn_arg_tuple` function. Write a Python function `def fn_arg_tuple(fn_spec: FunctionSpec, args, kwargs)` to solve the following problem:
Returns arguments passed to function wrapped in ArgTuple
Here is the function:
def fn_arg_tuple(fn_spec: FunctionSpec, args, kwargs):
"""Returns arguments passed to function wrapped in ArgTuple"""
args_dict = dict(zip(fn_spec.arg_names, args))
for index in range(len(args_dict), len(args)):
args_dict[str(index)] = args[index]
result = {**fn_spec.defaults, **args_dict, **kwargs}
return arg_tuple.ArgTuple(result) | Returns arguments passed to function wrapped in ArgTuple |
166,791 | from collections.abc import Callable, Mapping
from typing import Any
from pathway.internals import api, dtype as dt
from pathway.internals.shadows import operator
_unary_operators_mapping: UnaryOperatorMapping = {
(operator.inv, dt.BOOL): dt.BOOL,
(operator.neg, dt.INT): dt.INT,
(operator.neg, dt.FLOAT): dt.FLOAT,
(operator.neg, dt.DURATION): dt.DURATION,
}
def get_unary_operators_mapping(op, operand_dtype, default=None):
return _unary_operators_mapping.get((op, operand_dtype), default) | null |
166,792 | from collections.abc import Callable, Mapping
from typing import Any
from pathway.internals import api, dtype as dt
from pathway.internals.shadows import operator
_unary_operators_to_engine: Mapping[UnaryOperator, api.UnaryOperator] = {
operator.inv: api.UnaryOperator.INV,
operator.neg: api.UnaryOperator.NEG,
}
def get_unary_expression(expr, op, expr_dtype: dt.DType, default=None):
op_engine = _unary_operators_to_engine.get(op)
expr_dtype_engine = expr_dtype.to_engine()
if op_engine is None or expr_dtype_engine is None:
return default
expression = api.Expression.unary_expression(expr, op_engine, expr_dtype_engine)
return expression if expression is not None else default | null |
166,793 | from collections.abc import Callable, Mapping
from typing import Any
from pathway.internals import api, dtype as dt
from pathway.internals.shadows import operator
_binary_operators_mapping: BinaryOperatorMapping = {
(operator.and_, dt.BOOL, dt.BOOL): dt.BOOL,
(operator.or_, dt.BOOL, dt.BOOL): dt.BOOL,
(operator.xor, dt.BOOL, dt.BOOL): dt.BOOL,
(operator.eq, dt.INT, dt.INT): dt.BOOL,
(operator.ne, dt.INT, dt.INT): dt.BOOL,
(operator.lt, dt.INT, dt.INT): dt.BOOL,
(operator.le, dt.INT, dt.INT): dt.BOOL,
(operator.gt, dt.INT, dt.INT): dt.BOOL,
(operator.ge, dt.INT, dt.INT): dt.BOOL,
(operator.eq, dt.BOOL, dt.BOOL): dt.BOOL,
(operator.ne, dt.BOOL, dt.BOOL): dt.BOOL,
(operator.lt, dt.BOOL, dt.BOOL): dt.BOOL,
(operator.le, dt.BOOL, dt.BOOL): dt.BOOL,
(operator.gt, dt.BOOL, dt.BOOL): dt.BOOL,
(operator.ge, dt.BOOL, dt.BOOL): dt.BOOL,
(operator.add, dt.INT, dt.INT): dt.INT,
(operator.sub, dt.INT, dt.INT): dt.INT,
(operator.mul, dt.INT, dt.INT): dt.INT,
(operator.floordiv, dt.INT, dt.INT): dt.INT,
(operator.truediv, dt.INT, dt.INT): dt.FLOAT,
(operator.mod, dt.INT, dt.INT): dt.INT,
(operator.pow, dt.INT, dt.INT): dt.INT,
(operator.lshift, dt.INT, dt.INT): dt.INT,
(operator.rshift, dt.INT, dt.INT): dt.INT,
(operator.and_, dt.INT, dt.INT): dt.INT,
(operator.or_, dt.INT, dt.INT): dt.INT,
(operator.xor, dt.INT, dt.INT): dt.INT,
(operator.eq, dt.FLOAT, dt.FLOAT): dt.BOOL,
(operator.ne, dt.FLOAT, dt.FLOAT): dt.BOOL,
(operator.lt, dt.FLOAT, dt.FLOAT): dt.BOOL,
(operator.le, dt.FLOAT, dt.FLOAT): dt.BOOL,
(operator.gt, dt.FLOAT, dt.FLOAT): dt.BOOL,
(operator.ge, dt.FLOAT, dt.FLOAT): dt.BOOL,
(operator.add, dt.FLOAT, dt.FLOAT): dt.FLOAT,
(operator.sub, dt.FLOAT, dt.FLOAT): dt.FLOAT,
(operator.mul, dt.FLOAT, dt.FLOAT): dt.FLOAT,
(operator.floordiv, dt.FLOAT, dt.FLOAT): dt.FLOAT,
(operator.truediv, dt.FLOAT, dt.FLOAT): dt.FLOAT,
(operator.mod, dt.FLOAT, dt.FLOAT): dt.FLOAT,
(operator.pow, dt.FLOAT, dt.FLOAT): dt.FLOAT,
(operator.eq, dt.STR, dt.STR): dt.BOOL,
(operator.ne, dt.STR, dt.STR): dt.BOOL,
(operator.lt, dt.STR, dt.STR): dt.BOOL,
(operator.le, dt.STR, dt.STR): dt.BOOL,
(operator.gt, dt.STR, dt.STR): dt.BOOL,
(operator.ge, dt.STR, dt.STR): dt.BOOL,
(operator.add, dt.STR, dt.STR): dt.STR,
(operator.mul, dt.STR, dt.INT): dt.STR,
(operator.mul, dt.INT, dt.STR): dt.STR,
(operator.eq, dt.POINTER, dt.POINTER): dt.BOOL,
(operator.ne, dt.POINTER, dt.POINTER): dt.BOOL,
(operator.lt, dt.POINTER, dt.POINTER): dt.BOOL,
(operator.le, dt.POINTER, dt.POINTER): dt.BOOL,
(operator.gt, dt.POINTER, dt.POINTER): dt.BOOL,
(operator.ge, dt.POINTER, dt.POINTER): dt.BOOL,
(operator.eq, dt.DATE_TIME_NAIVE, dt.DATE_TIME_NAIVE): dt.BOOL,
(operator.ne, dt.DATE_TIME_NAIVE, dt.DATE_TIME_NAIVE): dt.BOOL,
(operator.lt, dt.DATE_TIME_NAIVE, dt.DATE_TIME_NAIVE): dt.BOOL,
(operator.le, dt.DATE_TIME_NAIVE, dt.DATE_TIME_NAIVE): dt.BOOL,
(operator.gt, dt.DATE_TIME_NAIVE, dt.DATE_TIME_NAIVE): dt.BOOL,
(operator.ge, dt.DATE_TIME_NAIVE, dt.DATE_TIME_NAIVE): dt.BOOL,
(operator.sub, dt.DATE_TIME_NAIVE, dt.DATE_TIME_NAIVE): dt.DURATION,
(operator.add, dt.DATE_TIME_NAIVE, dt.DURATION): dt.DATE_TIME_NAIVE,
(operator.sub, dt.DATE_TIME_NAIVE, dt.DURATION): dt.DATE_TIME_NAIVE,
(operator.eq, dt.DATE_TIME_UTC, dt.DATE_TIME_UTC): dt.BOOL,
(operator.ne, dt.DATE_TIME_UTC, dt.DATE_TIME_UTC): dt.BOOL,
(operator.lt, dt.DATE_TIME_UTC, dt.DATE_TIME_UTC): dt.BOOL,
(operator.le, dt.DATE_TIME_UTC, dt.DATE_TIME_UTC): dt.BOOL,
(operator.gt, dt.DATE_TIME_UTC, dt.DATE_TIME_UTC): dt.BOOL,
(operator.ge, dt.DATE_TIME_UTC, dt.DATE_TIME_UTC): dt.BOOL,
(operator.sub, dt.DATE_TIME_UTC, dt.DATE_TIME_UTC): dt.DURATION,
(operator.add, dt.DATE_TIME_UTC, dt.DURATION): dt.DATE_TIME_UTC,
(operator.sub, dt.DATE_TIME_UTC, dt.DURATION): dt.DATE_TIME_UTC,
(operator.eq, dt.DURATION, dt.DURATION): dt.BOOL,
(operator.ne, dt.DURATION, dt.DURATION): dt.BOOL,
(operator.lt, dt.DURATION, dt.DURATION): dt.BOOL,
(operator.le, dt.DURATION, dt.DURATION): dt.BOOL,
(operator.gt, dt.DURATION, dt.DURATION): dt.BOOL,
(operator.ge, dt.DURATION, dt.DURATION): dt.BOOL,
(operator.add, dt.DURATION, dt.DURATION): dt.DURATION,
(operator.sub, dt.DURATION, dt.DURATION): dt.DURATION,
(operator.floordiv, dt.DURATION, dt.DURATION): dt.INT,
(operator.truediv, dt.DURATION, dt.DURATION): dt.FLOAT,
(operator.mod, dt.DURATION, dt.DURATION): dt.DURATION,
(operator.add, dt.DURATION, dt.DATE_TIME_NAIVE): dt.DATE_TIME_NAIVE,
(operator.add, dt.DURATION, dt.DATE_TIME_UTC): dt.DATE_TIME_UTC,
(operator.mul, dt.DURATION, dt.INT): dt.DURATION,
(operator.mul, dt.INT, dt.DURATION): dt.DURATION,
(operator.floordiv, dt.DURATION, dt.INT): dt.DURATION,
(operator.truediv, dt.DURATION, dt.INT): dt.DURATION,
(operator.mul, dt.DURATION, dt.FLOAT): dt.DURATION,
(operator.mul, dt.FLOAT, dt.DURATION): dt.DURATION,
(operator.truediv, dt.DURATION, dt.FLOAT): dt.DURATION,
(operator.matmul, dt.ANY_ARRAY_2D, dt.ANY_ARRAY_2D): dt.ANY_ARRAY_2D,
(operator.matmul, dt.INT_ARRAY_2D, dt.INT_ARRAY_2D): dt.INT_ARRAY_2D,
(operator.matmul, dt.FLOAT_ARRAY_2D, dt.FLOAT_ARRAY_2D): dt.FLOAT_ARRAY_2D,
(operator.matmul, dt.ANY_ARRAY_2D, dt.ANY_ARRAY_1D): dt.ANY_ARRAY_1D,
(operator.matmul, dt.INT_ARRAY_2D, dt.INT_ARRAY_1D): dt.INT_ARRAY_1D,
(operator.matmul, dt.FLOAT_ARRAY_2D, dt.FLOAT_ARRAY_1D): dt.FLOAT_ARRAY_1D,
(operator.matmul, dt.ANY_ARRAY_1D, dt.ANY_ARRAY_2D): dt.ANY_ARRAY_1D,
(operator.matmul, dt.INT_ARRAY_1D, dt.INT_ARRAY_2D): dt.INT_ARRAY_1D,
(operator.matmul, dt.FLOAT_ARRAY_1D, dt.FLOAT_ARRAY_2D): dt.FLOAT_ARRAY_1D,
(operator.matmul, dt.ANY_ARRAY_1D, dt.ANY_ARRAY_1D): dt.ANY,
(operator.matmul, dt.INT_ARRAY_1D, dt.INT_ARRAY_1D): dt.INT,
(operator.matmul, dt.FLOAT_ARRAY_1D, dt.FLOAT_ARRAY_1D): dt.FLOAT,
(operator.matmul, dt.ANY_ARRAY, dt.ANY_ARRAY): dt.ANY_ARRAY,
(operator.matmul, dt.INT_ARRAY, dt.INT_ARRAY): dt.INT_ARRAY,
(operator.matmul, dt.FLOAT_ARRAY, dt.FLOAT_ARRAY): dt.FLOAT_ARRAY,
}
def get_binary_operators_mapping(op, left, right, default=None):
if isinstance(left, dt.Array) and isinstance(right, dt.Array):
left, right = dt.coerce_arrays_pair(left, right)
return _binary_operators_mapping.get(
(op, dt.normalize_dtype(left), dt.normalize_dtype(right)), default
) | null |
166,794 | from collections.abc import Callable, Mapping
from typing import Any
from pathway.internals import api, dtype as dt
from pathway.internals.shadows import operator
_binary_operators_to_engine: Mapping[BinaryOperator, api.BinaryOperator] = {
operator.and_: api.BinaryOperator.AND,
operator.or_: api.BinaryOperator.OR,
operator.xor: api.BinaryOperator.XOR,
operator.eq: api.BinaryOperator.EQ,
operator.ne: api.BinaryOperator.NE,
operator.lt: api.BinaryOperator.LT,
operator.le: api.BinaryOperator.LE,
operator.gt: api.BinaryOperator.GT,
operator.ge: api.BinaryOperator.GE,
operator.add: api.BinaryOperator.ADD,
operator.sub: api.BinaryOperator.SUB,
operator.mul: api.BinaryOperator.MUL,
operator.floordiv: api.BinaryOperator.FLOOR_DIV,
operator.truediv: api.BinaryOperator.TRUE_DIV,
operator.mod: api.BinaryOperator.MOD,
operator.pow: api.BinaryOperator.POW,
operator.lshift: api.BinaryOperator.LSHIFT,
operator.rshift: api.BinaryOperator.RSHIFT,
operator.matmul: api.BinaryOperator.MATMUL,
}
def get_binary_expression(
left, right, op, left_dtype: dt.DType, right_dtype: dt.DType, default=None
):
op_engine = _binary_operators_to_engine.get(op)
left_dtype_engine = left_dtype.to_engine()
right_dtype_engine = right_dtype.to_engine()
if op_engine is None or left_dtype_engine is None or right_dtype_engine is None:
return default
expression = api.Expression.binary_expression(
left, right, op_engine, left_dtype_engine, right_dtype_engine
)
return expression if expression is not None else default | null |
166,795 | from collections.abc import Callable, Mapping
from typing import Any
from pathway.internals import api, dtype as dt
from pathway.internals.shadows import operator
_binary_operators_mapping_optionals: OptionalMapping = {
operator.eq: (dt.BOOL, api.Expression.eq),
operator.ne: (dt.BOOL, api.Expression.ne),
}
def get_binary_operators_mapping_optionals(op, left, right, default=None):
if left == right or left == dt.NONE or right == dt.NONE:
return _binary_operators_mapping_optionals.get(op, default)
else:
return default | null |
166,796 | from collections.abc import Callable, Mapping
from typing import Any
from pathway.internals import api, dtype as dt
from pathway.internals.shadows import operator
def get_cast_operators_mapping(
expr: api.Expression, source_type: dt.DType, target_type: dt.DType, default=None
) -> api.Expression | None:
source_type_engine = dt.unoptionalize(source_type).to_engine()
target_type_engine = dt.unoptionalize(target_type).to_engine()
if source_type_engine is None or target_type_engine is None:
return default
if isinstance(source_type, dt.Optional) and isinstance(target_type, dt.Optional):
fun = api.Expression.cast_optional
else:
fun = api.Expression.cast
expression = fun(
expr,
source_type_engine,
target_type_engine,
)
return expression if expression is not None else default | null |
166,797 | from collections.abc import Callable, Mapping
from typing import Any
from pathway.internals import api, dtype as dt
from pathway.internals.shadows import operator
def get_convert_operators_mapping(
expr: api.Expression, source_type: dt.DType, target_type: dt.DType
) -> api.Expression | None:
source_type_engine = dt.unoptionalize(source_type).to_engine()
target_type_engine = dt.unoptionalize(target_type).to_engine()
assert (
source_type_engine is not None and target_type_engine is not None
), "invalid pathway type"
expression = api.Expression.convert_optional(
expr,
source_type_engine,
target_type_engine,
)
return expression | null |
166,798 | from collections.abc import Callable, Mapping
from typing import Any
from pathway.internals import api, dtype as dt
from pathway.internals.shadows import operator
def common_dtype_in_binary_operator(
left_dtype: dt.DType, right_dtype: dt.DType
) -> dt.DType | None:
if (
left_dtype in [dt.INT, dt.Optional(dt.INT)]
and right_dtype in [dt.FLOAT, dt.Optional(dt.FLOAT)]
) or (
left_dtype in [dt.FLOAT, dt.Optional(dt.FLOAT)]
and right_dtype in [dt.INT, dt.Optional(dt.INT)]
):
return dt.types_lca(left_dtype, right_dtype)
return None | null |
166,799 | from __future__ import annotations
from abc import abstractmethod
from collections.abc import Iterable, Mapping
from functools import wraps
from typing import TYPE_CHECKING, Any, TypeVar
from pathway.internals import expression as expr
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.helpers import function_spec, with_optional_kwargs
from pathway.internals.row_transformer import RowTransformer
def combine_args_kwargs(
args: Iterable[expr.ColumnReference],
kwargs: Mapping[str, Any],
exclude_columns: set[str] | None = None,
) -> dict[str, expr.ColumnExpression]:
all_args = {}
def add(name, expression):
if exclude_columns is not None and name in exclude_columns:
return
if name in all_args:
raise ValueError(f"Duplicate expression value given for {name}")
if name == "id":
raise ValueError("Can't use 'id' as a column name")
if not isinstance(expression, expr.ColumnExpression):
expression = expr.ColumnConstExpression(expression)
all_args[name] = expression
for expression in args:
add(expr.smart_name(expression), expression)
for name, expression in kwargs.items():
add(name, expression)
return all_args | null |
166,800 | from __future__ import annotations
from abc import abstractmethod
from collections.abc import Iterable, Mapping
from functools import wraps
from typing import TYPE_CHECKING, Any, TypeVar
from pathway.internals import expression as expr
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.helpers import function_spec, with_optional_kwargs
from pathway.internals.row_transformer import RowTransformer
def _desugar_this_args(
substitution: dict[thisclass.ThisMetaclass, table.Joinable],
args: Iterable[ColExprT],
) -> tuple[ColExprT, ...]:
ret: list[ColExprT] = []
from pathway.internals import thisclass
for arg in args:
if isinstance(arg, thisclass.ThisMetaclass):
assert issubclass(arg, thisclass.iter_guard)
evaled_table = arg._eval_substitution(substitution)
ret.extend(evaled_table)
else:
ret.append(_desugar_this_arg(substitution, arg))
return tuple(ret)
def _desugar_this_kwargs(
substitution: dict[thisclass.ThisMetaclass, table.Joinable],
kwargs: Mapping[str, ColExprT],
) -> dict[str, ColExprT]:
from pathway.internals import thisclass
new_kwargs = {
name: arg
for name, arg in kwargs.items()
if not name.startswith(thisclass.KEY_GUARD)
}
for name, arg in kwargs.items():
if name.startswith(thisclass.KEY_GUARD):
assert isinstance(arg, thisclass.ThisMetaclass)
evaled_table = arg._eval_substitution(substitution)
new_kwargs.update(evaled_table)
return {
name: _desugar_this_arg(substitution, arg) for name, arg in new_kwargs.items()
}
class DesugaringContext:
_substitution: dict[thisclass.ThisMetaclass, table.Joinable] = {}
def _desugaring(self) -> DesugaringTransform:
pass
) # necessary for doctests to work, see https://www.rosipov.com/blog/python-doctests-and-decorators-bug/
def function_spec(fn):
fn = inspect.unwrap(fn)
fullspec = inspect.getfullargspec(fn)
defaults = {}
if fullspec.defaults is not None:
for index, default in enumerate(reversed(fullspec.defaults)):
defaults[fullspec.args[-index - 1]] = default
arg_names = fn.__code__.co_varnames[: fn.__code__.co_argcount]
return FunctionSpec(fn, arg_names, defaults)
def desugar(func, **kwargs):
fn_spec = function_spec(func)
substitution_param = kwargs.get("substitution", {})
@wraps(func)
def wrapper(*args, **kwargs):
named_args = {**dict(zip(fn_spec.arg_names, args)), **kwargs}
assert len(named_args) > 0
first_arg = next(iter(named_args.values()))
desugaring_context = (
first_arg if isinstance(first_arg, DesugaringContext) else None
)
this_substitution = {}
if desugaring_context is not None:
this_substitution.update(desugaring_context._substitution)
for key, value in substitution_param.items():
assert isinstance(value, str)
this_substitution[key] = named_args[value]
args = _desugar_this_args(this_substitution, args)
kwargs = _desugar_this_kwargs(this_substitution, kwargs)
if desugaring_context is not None:
args = tuple(
desugaring_context._desugaring.eval_expression(arg) for arg in args
)
kwargs = {
key: desugaring_context._desugaring.eval_expression(value)
for key, value in kwargs.items()
}
return func(*args, **kwargs)
return wrapper | null |
166,801 | from warnings import warn
import pandas as pd
import pathway.internals.expression as expr
from pathway.internals import api, dtype as dt
def _str_as_duration(freq: str) -> pd.Timedelta:
duration = pd.tseries.frequencies.to_offset(freq)
if duration is None:
raise ValueError(f"string {freq} cannot be parsed as a duration")
return pd.Timedelta(duration.nanos) | null |
166,802 | from __future__ import annotations
import dataclasses
from abc import ABC, abstractmethod
from collections.abc import Callable, Iterable
from functools import lru_cache
from typing import TYPE_CHECKING, Any, Mapping, cast
from pathway.internals import api, dtype as dt, helpers
from pathway.internals.api import Value
from pathway.internals.operator_input import OperatorInput
from pathway.internals.shadows import operator
from pathway.internals.trace import Trace
class ColumnExpression(OperatorInput, ABC):
_dtype: dt.DType
_trace: Trace
def __init__(self):
self._trace = Trace.from_traceback()
def __bool__(self):
raise RuntimeError("Cannot use expression as boolean.")
def _deps(self) -> tuple[ColumnExpression, ...]: ...
def _to_internal(self) -> InternalColExpr: ...
def __repr__(self):
from pathway.internals.expression_printer import ExpressionFormatter
return ExpressionFormatter().eval_expression(self)
def _wrap(arg: ColumnExpression | Value) -> ColumnExpression:
if not isinstance(arg, ColumnExpression):
return ColumnConstExpression(arg)
return arg
def _dependencies(self) -> helpers.StableSet[InternalColRef]:
return helpers.StableSet.union(*[dep._dependencies() for dep in self._deps])
def _dependencies_above_reducer(self) -> helpers.StableSet[InternalColRef]:
return helpers.StableSet.union(
*[dep._dependencies_above_reducer() for dep in self._deps]
)
def _dependencies_below_reducer(self) -> helpers.StableSet[InternalColRef]:
return helpers.StableSet.union(
*[dep._dependencies_below_reducer() for dep in self._deps]
)
def _operator_dependencies(self) -> helpers.StableSet[Table]:
return helpers.StableSet(
expression.to_column_expression()._table
for expression in self._dependencies()
)
def _column_dependencies(self) -> helpers.StableSet[Column]:
expression_dependencies = (
dep.to_column_expression()._column for dep in self._dependencies()
)
return helpers.StableSet(expression_dependencies)
def _column_with_expression_cls(self) -> type[ColumnWithExpression]:
from pathway.internals.column import ColumnWithExpression
return ColumnWithExpression
def __add__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.add)
def __radd__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(other, self, operator.add)
def __sub__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.sub)
def __rsub__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(other, self, operator.sub)
def __mul__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.mul)
def __rmul__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(other, self, operator.mul)
def __truediv__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.truediv)
def __rtruediv__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(other, self, operator.truediv)
def __floordiv__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.floordiv)
def __rfloordiv__(
self, other: ColumnExpression | Value
) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(other, self, operator.floordiv)
def __mod__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.mod)
def __rmod__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(other, self, operator.mod)
def __pow__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.pow)
def __rpow__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(other, self, operator.pow)
def __lshift__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.lshift)
def __rlshift__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(other, self, operator.lshift)
def __rshift__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.rshift)
def __rrshift__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(other, self, operator.rshift)
def __eq__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression: # type: ignore[override]
return ColumnBinaryOpExpression(self, other, operator.eq)
def __ne__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression: # type: ignore[override]
return ColumnBinaryOpExpression(self, other, operator.ne)
def __le__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.le)
def __ge__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.ge)
def __lt__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.lt)
def __gt__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.gt)
def __and__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.and_)
def __rand__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(other, self, operator.and_)
def __or__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.or_)
def __ror__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(other, self, operator.or_)
def __xor__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.xor)
def __rxor__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(other, self, operator.xor)
def __matmul__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(self, other, operator.matmul)
def __rmatmul__(self, other: ColumnExpression | Value) -> ColumnBinaryOpExpression:
return ColumnBinaryOpExpression(other, self, operator.matmul)
def __neg__(self) -> ColumnUnaryOpExpression:
return ColumnUnaryOpExpression(self, operator.neg)
def __invert__(self) -> ColumnExpression:
match self:
case ColumnUnaryOpExpression(_operator=operator.inv, _expr=e):
return e
case IsNoneExpression(_expr=e):
return IsNotNoneExpression(e)
case IsNotNoneExpression(_expr=e):
return IsNoneExpression(e)
case _:
return ColumnUnaryOpExpression(self, operator.inv)
def __abs__(self) -> ColumnExpression:
return self.num.abs()
def __hash__(self):
return object.__hash__(self)
def is_none(self) -> IsNoneExpression:
"""Returns true if the value is None.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | owner | pet
... 1 | Alice | dog
... 2 | Bob |
... 3 | Carol | cat
... ''')
>>> t2 = t1.with_columns(has_no_pet=pw.this.pet.is_none())
>>> pw.debug.compute_and_print(t2, include_id=False)
owner | pet | has_no_pet
Alice | dog | False
Bob | | True
Carol | cat | False
"""
return IsNoneExpression(self)
def is_not_none(self) -> IsNotNoneExpression:
"""Returns true if the value is not None.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | owner | pet
... 1 | Alice | dog
... 2 | Bob |
... 3 | Carol | cat
... ''')
>>> t2 = t1.with_columns(has_pet=pw.this.pet.is_not_none())
>>> pw.debug.compute_and_print(t2, include_id=False)
owner | pet | has_pet
Alice | dog | True
Bob | | False
Carol | cat | True
"""
return IsNotNoneExpression(self)
# Missing `__iter__` would make Python fall back to `__getitem__, which
# will not do the right thing.
__iter__ = None
def __getitem__(self, index: ColumnExpression | int | str) -> ColumnExpression:
"""Extracts element at `index` from an object. The object has to be a Tuple or Json.
Index can be effectively `int` for Tuple and `int` or `str` for Json.
For Tuples, using negative index can be used to access elements at the end, moving backwards.
if no element is present at `index`:
- returns `json(null)` for Json
- raises error for Tuple
Args:
index: Position to extract element at.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown(
... '''
... | a | b | c
... 1 | 3 | 2 | 1
... 2 | 4 | 1 | 0
... 3 | 7 | 3 | 1
... '''
... )
>>> t2 = t1.with_columns(tup=pw.make_tuple(pw.this.a, pw.this.b))
>>> t3 = t2.select(x=pw.this.tup[0], y=pw.this.tup[-1], z=pw.this.tup[pw.this.c])
>>> pw.debug.compute_and_print(t3, include_id=False)
x | y | z
3 | 2 | 2
4 | 1 | 4
7 | 3 | 3
"""
return GetExpression(self, index, check_if_exists=False)
def get(
self,
index: ColumnExpression | int | str,
default: ColumnExpression | Value = None,
) -> ColumnExpression:
"""Extracts element at `index` from an object. The object has to be a Tuple or Json.
If no element is present at `index`, it returns value specified by a `default` parameter.
Index can be effectively `int` for Tuple and `int` or `str` for Json.
For Tuples, using negative index can be used to access elements at the end, moving backwards.
Args:
index: Position to extract element at.
default: Value returned when no element is at position `index`. Defaults to None.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown(
... '''
... | a | b | c
... 1 | 3 | 2 | 2
... 2 | 4 | 1 | 0
... 3 | 7 | 3 | 1
... '''
... )
>>> t2 = t1.with_columns(tup=pw.make_tuple(pw.this.a, pw.this.b))
>>> t3 = t2.select(
... x=pw.this.tup.get(1),
... y=pw.this.tup.get(3),
... z=pw.this.tup.get(pw.this.c),
... t=pw.this.tup.get(pw.this.c, default=100),
... )
>>> pw.debug.compute_and_print(t3, include_id=False)
x | y | z | t
1 | | 4 | 4
2 | | | 100
3 | | 3 | 3
"""
return GetExpression(self, index, default, check_if_exists=True)
def dt(self) -> DateTimeNamespace:
from pathway.internals.expressions import DateTimeNamespace
return DateTimeNamespace(self)
def num(self) -> NumericalNamespace:
from pathway.internals.expressions import NumericalNamespace
return NumericalNamespace(self)
def str(self) -> StringNamespace:
from pathway.internals.expressions import StringNamespace
return StringNamespace(self)
def to_string(self) -> MethodCallExpression:
"""Changes the values to strings.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... val
... 1
... 2
... 3
... 4''')
>>> t1.schema
<pathway.Schema types={'val': <class 'int'>}>
>>> pw.debug.compute_and_print(t1, include_id=False)
val
1
2
3
4
>>> t2 = t1.select(val = pw.this.val.to_string())
>>> t2.schema
<pathway.Schema types={'val': <class 'str'>}>
>>> pw.debug.compute_and_print(t2.select(val=pw.this.val + "a"), include_id=False)
val
1a
2a
3a
4a
"""
return MethodCallExpression(
(
(
dt.ANY,
dt.STR,
api.Expression.to_string,
),
),
"to_string",
self,
)
def as_int(self) -> ConvertExpression:
"""Converts value to an int or None if not possible.
Currently works for Json columns only.
Example:
>>> import pathway as pw
>>> import pandas as pd
>>> class InputSchema(pw.Schema):
... data: dict
>>> dt = pd.DataFrame(data={"data": [{"value": 1}, {"value": 2}]})
>>> table = pw.debug.table_from_pandas(dt, schema=InputSchema)
>>> result = table.select(result=pw.this.data.get("value").as_int())
>>> pw.debug.compute_and_print(result, include_id=False)
result
1
2
"""
return ConvertExpression(dt.INT, self)
def as_float(self) -> ConvertExpression:
"""Converts value to a float or None if not possible.
Currently works for Json columns only.
Example:
>>> import pathway as pw
>>> import pandas as pd
>>> class InputSchema(pw.Schema):
... data: dict
>>> dt = pd.DataFrame(data={"data": [{"value": 1.5}, {"value": 3.14}]})
>>> table = pw.debug.table_from_pandas(dt, schema=InputSchema)
>>> result = table.select(result=pw.this.data.get("value").as_float())
>>> pw.debug.compute_and_print(result, include_id=False)
result
1.5
3.14
"""
return ConvertExpression(dt.FLOAT, self)
def as_str(self) -> ConvertExpression:
"""Converts value to a string or None if not possible.
Currently works for Json columns only.
Example:
>>> import pathway as pw
>>> import pandas as pd
>>> class InputSchema(pw.Schema):
... data: dict
>>> dt = pd.DataFrame(data={"data": [{"value": "dog"}, {"value": "cat"}]})
>>> table = pw.debug.table_from_pandas(dt, schema=InputSchema)
>>> result = table.select(result=pw.this.data.get("value").as_str())
>>> pw.debug.compute_and_print(result, include_id=False)
result
cat
dog
"""
return ConvertExpression(dt.STR, self)
def as_bool(self) -> ConvertExpression:
"""Converts value to a bool or None if not possible.
Currently works for Json columns only.
Example:
>>> import pathway as pw
>>> import pandas as pd
>>> class InputSchema(pw.Schema):
... data: dict
>>> dt = pd.DataFrame(data={"data": [{"value": True}, {"value": False}]})
>>> table = pw.debug.table_from_pandas(dt, schema=InputSchema)
>>> result = table.select(result=pw.this.data.get("value").as_bool())
>>> pw.debug.compute_and_print(result, include_id=False)
result
False
True
"""
return ConvertExpression(dt.BOOL, self)
class ColumnReference(ColumnExpression):
"""Reference to the column.
Inherits from ColumnExpression.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age owner pet
... 1 10 Alice dog
... 2 9 Bob dog
... 3 8 Alice cat
... 4 7 Bob dog''')
>>> isinstance(t1.age, pw.ColumnReference)
True
>>> isinstance(t1["owner"], pw.ColumnReference)
True
"""
_column: Column
_table: Table
_name: str
def __init__(self, _column: Column, _table: Table, _name: str):
super().__init__()
self._column = _column
self._table = _table
self._name = _name
def _deps(self) -> tuple[ColumnExpression, ...]:
return ()
def _to_internal(self) -> InternalColRef:
return InternalColRef.build(type(self), self._column, self._table, self._name)
def _to_original(self) -> ColumnReference:
return self._column.lineage.table[self._column.lineage.name]
def table(self):
"""Table where the referred column belongs to.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age owner pet
... 1 10 Alice dog
... 2 9 Bob dog
... 3 8 Alice cat
... 4 7 Bob dog''')
>>> t1.age.table is t1
True
"""
return self._table
def name(self):
"""Name of the referred column.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age owner pet
... 1 10 Alice dog
... 2 9 Bob dog
... 3 8 Alice cat
... 4 7 Bob dog''')
>>> t1.age.name
'age'
"""
return self._name
def _dependencies(self) -> helpers.StableSet[InternalColRef]:
return helpers.StableSet([self._to_internal()])
def _dependencies_above_reducer(self) -> helpers.StableSet[InternalColRef]:
return helpers.StableSet([self._to_internal()])
def _dependencies_below_reducer(self) -> helpers.StableSet[InternalColRef]:
return helpers.StableSet()
def __call__(self, *args) -> ColumnExpression:
return ColumnCallExpression(self, args)
def _column_with_expression_cls(self) -> type[ColumnWithExpression]:
from pathway.internals.column import ColumnWithReference
return ColumnWithReference
class IsNotNoneExpression(ColumnExpression):
_expr: ColumnExpression
def __init__(self, _expr: ColumnExpression | Value):
super().__init__()
self._expr = ColumnExpression._wrap(_expr)
def _to_internal(self) -> InternalColExpr:
return InternalColExpr.build(type(self), self._expr)
def _deps(self) -> tuple[ColumnExpression, ...]:
return (self._expr,)
def get_column_filtered_by_is_none(arg: ColumnExpression) -> ColumnReference | None:
if isinstance(arg, IsNotNoneExpression) and isinstance(
filter_col := arg._expr, ColumnReference
):
return filter_col
return None | null |
166,803 | from __future__ import annotations
import inspect
from collections import defaultdict
from collections.abc import Callable, Mapping
from functools import wraps
from typing import (
Any,
ParamSpec,
TypeVar,
get_args,
get_origin,
get_type_hints,
overload,
)
from warnings import warn
from pathway.internals import (
dtype as dt,
expression as expr,
operator as op,
schema,
table,
)
from pathway.internals.api import Value
from pathway.internals.helpers import function_spec
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import async_executor, udf
) # necessary for doctests to work, see https://www.rosipov.com/blog/python-doctests-and-decorators-bug/
def function_spec(fn):
fn = inspect.unwrap(fn)
fullspec = inspect.getfullargspec(fn)
defaults = {}
if fullspec.defaults is not None:
for index, default in enumerate(reversed(fullspec.defaults)):
defaults[fullspec.args[-index - 1]] = default
arg_names = fn.__code__.co_varnames[: fn.__code__.co_argcount]
return FunctionSpec(fn, arg_names, defaults)
G = ParseGraph()
The provided code snippet includes necessary dependencies for implementing the `iterate` function. Write a Python function `def iterate( func, iteration_limit: int | None = None, **kwargs: table.Table | op.iterate_universe, )` to solve the following problem:
Iterate function until fixed point. Function has to take only Table arguments. Function has to return a single Table, a tuple of Tables, or a dict of Tables. Iterate returns the same shape of arguments as the ``func`` function: either a single Table, a tuple of Tables, or a dict of Tables, respectively. Initial arguments to function are passed through kwargs. Example: >>> import pathway as pw >>> def collatz_transformer(iterated): ... @pw.udf(deterministic=True) ... def collatz_step(x: int) -> int: ... if x == 1: ... return 1 ... elif x % 2 == 0: ... return x // 2 ... else: ... return 3 * x + 1 ... return iterated.select(val=collatz_step(iterated.val)) >>> tab = pw.debug.table_from_markdown(''' ... val ... 1 ... 2 ... 3 ... 4 ... 5 ... 6 ... 7 ... 8''') >>> ret = pw.iterate(collatz_transformer, iterated=tab) >>> pw.debug.compute_and_print(ret, include_id=False) val 1 1 1 1 1 1 1 1
Here is the function:
def iterate(
func,
iteration_limit: int | None = None,
**kwargs: table.Table | op.iterate_universe,
):
"""Iterate function until fixed point.
Function has to take only Table arguments.
Function has to return a single Table, a tuple of Tables, or a dict of Tables.
Iterate returns the same shape of arguments as the ``func`` function:
either a single Table, a tuple of Tables, or a dict of Tables, respectively.
Initial arguments to function are passed through kwargs.
Example:
>>> import pathway as pw
>>> def collatz_transformer(iterated):
... @pw.udf(deterministic=True)
... def collatz_step(x: int) -> int:
... if x == 1:
... return 1
... elif x % 2 == 0:
... return x // 2
... else:
... return 3 * x + 1
... return iterated.select(val=collatz_step(iterated.val))
>>> tab = pw.debug.table_from_markdown('''
... val
... 1
... 2
... 3
... 4
... 5
... 6
... 7
... 8''')
>>> ret = pw.iterate(collatz_transformer, iterated=tab)
>>> pw.debug.compute_and_print(ret, include_id=False)
val
1
1
1
1
1
1
1
1
"""
if iteration_limit is not None and iteration_limit < 1:
raise ValueError("wrong iteration limit")
fn_spec = function_spec(func)
return G.add_iterate(
fn_spec, lambda node: node(**kwargs), iteration_limit=iteration_limit
) | Iterate function until fixed point. Function has to take only Table arguments. Function has to return a single Table, a tuple of Tables, or a dict of Tables. Iterate returns the same shape of arguments as the ``func`` function: either a single Table, a tuple of Tables, or a dict of Tables, respectively. Initial arguments to function are passed through kwargs. Example: >>> import pathway as pw >>> def collatz_transformer(iterated): ... @pw.udf(deterministic=True) ... def collatz_step(x: int) -> int: ... if x == 1: ... return 1 ... elif x % 2 == 0: ... return x // 2 ... else: ... return 3 * x + 1 ... return iterated.select(val=collatz_step(iterated.val)) >>> tab = pw.debug.table_from_markdown(''' ... val ... 1 ... 2 ... 3 ... 4 ... 5 ... 6 ... 7 ... 8''') >>> ret = pw.iterate(collatz_transformer, iterated=tab) >>> pw.debug.compute_and_print(ret, include_id=False) val 1 1 1 1 1 1 1 1 |
166,804 | from __future__ import annotations
import inspect
from collections import defaultdict
from collections.abc import Callable, Mapping
from functools import wraps
from typing import (
Any,
ParamSpec,
TypeVar,
get_args,
get_origin,
get_type_hints,
overload,
)
from warnings import warn
from pathway.internals import (
dtype as dt,
expression as expr,
operator as op,
schema,
table,
)
from pathway.internals.api import Value
from pathway.internals.helpers import function_spec
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import async_executor, udf
Value: TypeAlias = Union[
None,
int,
float,
str,
bytes,
bool,
Pointer,
datetime.datetime,
datetime.timedelta,
np.ndarray,
json.Json,
dict[str, _Value],
tuple[_Value, ...],
]
def udf(
*,
return_type: Any = None,
deterministic: bool = False,
propagate_none: bool = False,
executor: Executor = AutoExecutor(),
cache_strategy: CacheStrategy | None = None,
) -> Callable[[Callable], UDF]: ...
def udf(
fun: Callable,
/,
*,
return_type: Any = None,
deterministic: bool = False,
propagate_none: bool = False,
executor: Executor = AutoExecutor(),
cache_strategy: CacheStrategy | None = None,
) -> UDF: ...
def udf(
    fun: Callable,
    /,
    *,
    return_type: Any = ...,
    deterministic: bool = False,
    propagate_none: bool = False,
    executor: Executor = AutoExecutor(),
    cache_strategy: CacheStrategy | None = None,
):
    """Turn a callable into a Pathway UDF (user-defined function).

    The output column type is deduced from the callable's type annotations.
    Both regular and asynchronous functions are accepted.

    Args:
        return_type: The return type of the function. Can be passed here or as a
            return type annotation. Defaults to ``...``, meaning that the return
            type will be inferred from the type annotation.
        deterministic: Whether the provided function is deterministic, i.e.
            always returns the same value for the same arguments. If it is not
            deterministic, Pathway will memoize the results until the row
            deletion. If your function is deterministic, you're **strongly
            encouraged** to set it to True as it will improve the performance.
            Defaults to False.
        executor: Defines the executor of the UDF — synchronous or asynchronous
            execution. Defaults to AutoExecutor(): coroutine functions run
            asynchronously, everything else synchronously.
        cache_strategy: Defines the caching mechanism. Defaults to None.
    Example:

    >>> import pathway as pw
    >>> import asyncio
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ... age | owner | pet
    ...  10 | Alice | dog
    ...   9 |   Bob | dog
    ...     | Alice | cat
    ...   7 |   Bob | dog
    ... '''
    ... )
    >>>
    >>> @pw.udf
    ... def concat(left: str, right: str) -> str:
    ...     return left + "-" + right
    ...
    >>> @pw.udf(propagate_none=True)
    ... def increment(age: int) -> int:
    ...     assert age is not None
    ...     return age + 1
    ...
    >>> res1 = table.select(
    ...     owner_with_pet=concat(table.owner, table.pet), new_age=increment(table.age)
    ... )
    >>> pw.debug.compute_and_print(res1, include_id=False)
    owner_with_pet | new_age
    Alice-cat      |
    Alice-dog      | 11
    Bob-dog        | 8
    Bob-dog        | 10
    >>>
    >>> @pw.udf
    ... async def sleeping_concat(left: str, right: str) -> str:
    ...     await asyncio.sleep(0.1)
    ...     return left + "-" + right
    ...
    >>> res2 = table.select(col=sleeping_concat(table.owner, table.pet))
    >>> pw.debug.compute_and_print(res2, include_id=False)
    col
    Alice-cat
    Alice-dog
    Bob-dog
    Bob-dog
    """
    # UDFFunction performs the actual wrapping; every option is forwarded
    # untouched so the overload signatures above stay authoritative.
    options = {
        "return_type": return_type,
        "deterministic": deterministic,
        "propagate_none": propagate_none,
        "executor": executor,
        "cache_strategy": cache_strategy,
    }
    return UDFFunction(fun, **options)
The provided code snippet includes necessary dependencies for implementing the `apply` function. Write a Python function `def apply( fun: Callable, *args: expr.ColumnExpression | Value, **kwargs: expr.ColumnExpression | Value, ) -> expr.ColumnExpression` to solve the following problem:
Applies function to column expressions, column-wise. Output column type deduced from type-annotations of a function. Example: >>> import pathway as pw >>> def concat(left: str, right: str) -> str: ... return left+right >>> t1 = pw.debug.table_from_markdown(''' ... age owner pet ... 10 Alice dog ... 9 Bob dog ... 8 Alice cat ... 7 Bob dog''') >>> t2 = t1.select(col = pw.apply(concat, t1.owner, t1.pet)) >>> pw.debug.compute_and_print(t2, include_id=False) col Alicecat Alicedog Bobdog Bobdog
Here is the function:
def apply(
    fun: Callable,
    *args: expr.ColumnExpression | Value,
    **kwargs: expr.ColumnExpression | Value,
) -> expr.ColumnExpression:
    """Apply ``fun`` to column expressions, column-wise.

    The output column type is deduced from the function's type annotations.

    Example:

    >>> import pathway as pw
    >>> def concat(left: str, right: str) -> str:
    ...     return left+right
    >>> t1 = pw.debug.table_from_markdown('''
    ... age  owner  pet
    ...  10  Alice  dog
    ...   9    Bob  dog
    ...   8  Alice  cat
    ...   7    Bob  dog''')
    >>> t2 = t1.select(col = pw.apply(concat, t1.owner, t1.pet))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    col
    Alicecat
    Alicedog
    Bobdog
    Bobdog
    """
    if kwargs:
        # Keyword forwarding still works for now, but only positional
        # arguments are the supported spelling.
        warn(
            "Passing keyword arguments to the function in pw.apply is deprecated. Use positional arguments instead.",
            DeprecationWarning,
            stacklevel=5,
        )
    wrapped = udf(fun)
    return wrapped(*args, **kwargs)
166,805 | from __future__ import annotations
import inspect
from collections import defaultdict
from collections.abc import Callable, Mapping
from functools import wraps
from typing import (
Any,
ParamSpec,
TypeVar,
get_args,
get_origin,
get_type_hints,
overload,
)
from warnings import warn
from pathway.internals import (
dtype as dt,
expression as expr,
operator as op,
schema,
table,
)
from pathway.internals.api import Value
from pathway.internals.helpers import function_spec
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import async_executor, udf
# Type alias enumerating the value types supported by the Pathway engine.
# NOTE(review): TypeAlias, Union, Pointer, datetime, np, json and _Value are
# not imported/defined in this excerpt — presumably bound earlier in the
# original module; confirm before treating this stitched chunk as runnable.
Value: TypeAlias = Union[
    None,
    int,
    float,
    str,
    bytes,
    bool,
    Pointer,
    datetime.datetime,
    datetime.timedelta,
    np.ndarray,
    json.Json,
    dict[str, _Value],
    tuple[_Value, ...],
]
The provided code snippet includes necessary dependencies for implementing the `numba_apply` function. Write a Python function `def numba_apply( fun: Callable, numba_signature: str, *args: expr.ColumnExpression | Value, **kwargs: expr.ColumnExpression | Value, ) -> expr.ColumnExpression` to solve the following problem:
Applies function to column expressions, column-wise. Function has to be numba compilable. Currently only a few signatures are supported: - function has to be unary or binary - arguments and return type has to be either int64 or float64 Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... val ... 1 1 ... 2 3 ... 3 5 ... 4 7''') >>> t2 = t1.select(col = pw.numba_apply(lambda x: x*x-2*x+1, "int64(int64,)", t1.val)) >>> pw.debug.compute_and_print(t2, include_id=False) col 0 4 16 36
Here is the function:
def numba_apply(
    fun: Callable,
    numba_signature: str,
    *args: expr.ColumnExpression | Value,
    **kwargs: expr.ColumnExpression | Value,
) -> expr.ColumnExpression:
    """Apply ``fun`` to column expressions, column-wise, compiling it with numba.

    The function has to be numba compilable. Currently only a few signatures
    are supported:
    - the function has to be unary or binary
    - arguments and return type have to be either int64 or float64

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... val
    ...   1
    ...   3
    ...   5
    ...   7''')
    >>> t2 = t1.select(col = pw.numba_apply(lambda x: x*x-2*x+1, "int64(int64,)", t1.val))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    col
    0
    4
    16
    36
    """
    # The return-type token is whatever precedes the first "(" in the numba
    # signature; an unsupported token raises KeyError here.
    _numba_to_python = {
        "int64": int,
        "int32": int,
        "int128": int,
        "float128": float,
        "float64": float,
        "float32": float,
        "bool": bool,
    }
    return_type = _numba_to_python[numba_signature.partition("(")[0]]
    try:
        import numba
    except ImportError:
        # numba is unavailable: degrade gracefully to a plain, uncompiled
        # apply expression with the same declared return type.
        return expr.ApplyExpression(
            fun,
            return_type,
            propagate_none=False,
            deterministic=False,
            args=args,
            kwargs=kwargs,
        )
    try:
        # Disabling nopython should result in compiling more functions, but with a speed penalty
        compiled = numba.cfunc(numba_signature, nopython=True)(fun)
        return expr.NumbaApplyExpression(
            compiled,
            return_type,
            propagate_none=False,
            deterministic=False,
            args=args,
            kwargs=kwargs,
        )
    except Exception as e:
        raise ValueError("Numba compilation failed!") from e
166,806 | from __future__ import annotations
import inspect
from collections import defaultdict
from collections.abc import Callable, Mapping
from functools import wraps
from typing import (
Any,
ParamSpec,
TypeVar,
get_args,
get_origin,
get_type_hints,
overload,
)
from warnings import warn
from pathway.internals import (
dtype as dt,
expression as expr,
operator as op,
schema,
table,
)
from pathway.internals.api import Value
from pathway.internals.helpers import function_spec
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import async_executor, udf
# Type alias enumerating the value types supported by the Pathway engine.
# NOTE(review): TypeAlias, Union, Pointer, datetime, np, json and _Value are
# not imported/defined in this excerpt — presumably bound earlier in the
# original module; confirm before treating this stitched chunk as runnable.
Value: TypeAlias = Union[
    None,
    int,
    float,
    str,
    bytes,
    bool,
    Pointer,
    datetime.datetime,
    datetime.timedelta,
    np.ndarray,
    json.Json,
    dict[str, _Value],
    tuple[_Value, ...],
]
# Decorator-factory overload: ``@pw.udf(...)`` with keyword options only;
# returns a decorator that wraps the target callable into a UDF.
# NOTE(review): presumably marked with @typing.overload in the original
# module — the decorator is not visible in this excerpt.
def udf(
    *,
    return_type: Any = None,
    deterministic: bool = False,
    propagate_none: bool = False,
    executor: Executor = AutoExecutor(),
    cache_strategy: CacheStrategy | None = None,
) -> Callable[[Callable], UDF]: ...
# Direct-decoration overload: ``@pw.udf`` applied straight to a callable;
# returns the wrapped UDF object.
# NOTE(review): presumably marked with @typing.overload in the original
# module — the decorator is not visible in this excerpt.
def udf(
    fun: Callable,
    /,
    *,
    return_type: Any = None,
    deterministic: bool = False,
    propagate_none: bool = False,
    executor: Executor = AutoExecutor(),
    cache_strategy: CacheStrategy | None = None,
) -> UDF: ...
def udf(
    fun: Callable,
    /,
    *,
    return_type: Any = ...,
    deterministic: bool = False,
    propagate_none: bool = False,
    executor: Executor = AutoExecutor(),
    cache_strategy: CacheStrategy | None = None,
):
    """Turn a callable into a Pathway UDF (user-defined function).

    The output column type is deduced from the callable's type annotations.
    Both regular and asynchronous functions are accepted.

    Args:
        return_type: The return type of the function. Can be passed here or as a
            return type annotation. Defaults to ``...``, meaning that the return
            type will be inferred from the type annotation.
        deterministic: Whether the provided function is deterministic, i.e.
            always returns the same value for the same arguments. If it is not
            deterministic, Pathway will memoize the results until the row
            deletion. If your function is deterministic, you're **strongly
            encouraged** to set it to True as it will improve the performance.
            Defaults to False.
        executor: Defines the executor of the UDF — synchronous or asynchronous
            execution. Defaults to AutoExecutor(): coroutine functions run
            asynchronously, everything else synchronously.
        cache_strategy: Defines the caching mechanism. Defaults to None.
    Example:

    >>> import pathway as pw
    >>> import asyncio
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ... age | owner | pet
    ...  10 | Alice | dog
    ...   9 |   Bob | dog
    ...     | Alice | cat
    ...   7 |   Bob | dog
    ... '''
    ... )
    >>>
    >>> @pw.udf
    ... def concat(left: str, right: str) -> str:
    ...     return left + "-" + right
    ...
    >>> @pw.udf(propagate_none=True)
    ... def increment(age: int) -> int:
    ...     assert age is not None
    ...     return age + 1
    ...
    >>> res1 = table.select(
    ...     owner_with_pet=concat(table.owner, table.pet), new_age=increment(table.age)
    ... )
    >>> pw.debug.compute_and_print(res1, include_id=False)
    owner_with_pet | new_age
    Alice-cat      |
    Alice-dog      | 11
    Bob-dog        | 8
    Bob-dog        | 10
    >>>
    >>> @pw.udf
    ... async def sleeping_concat(left: str, right: str) -> str:
    ...     await asyncio.sleep(0.1)
    ...     return left + "-" + right
    ...
    >>> res2 = table.select(col=sleeping_concat(table.owner, table.pet))
    >>> pw.debug.compute_and_print(res2, include_id=False)
    col
    Alice-cat
    Alice-dog
    Bob-dog
    Bob-dog
    """
    # UDFFunction performs the actual wrapping; every option is forwarded
    # untouched so the overload signatures above stay authoritative.
    options = {
        "return_type": return_type,
        "deterministic": deterministic,
        "propagate_none": propagate_none,
        "executor": executor,
        "cache_strategy": cache_strategy,
    }
    return UDFFunction(fun, **options)
The provided code snippet includes necessary dependencies for implementing the `apply_async` function. Write a Python function `def apply_async( fun: Callable, *args: expr.ColumnExpression | Value, **kwargs: expr.ColumnExpression | Value, ) -> expr.ColumnExpression` to solve the following problem:
r"""Applies function asynchronously to column expressions, column-wise. Output column type deduced from type-annotations of a function. Either a regular or async function can be passed. Example: >>> import pathway as pw >>> import asyncio >>> async def concat(left: str, right: str) -> str: ... await asyncio.sleep(0.1) ... return left+right >>> t1 = pw.debug.table_from_markdown(''' ... age owner pet ... 10 Alice dog ... 9 Bob dog ... 8 Alice cat ... 7 Bob dog''') >>> t2 = t1.select(col = pw.apply_async(concat, t1.owner, t1.pet)) >>> pw.debug.compute_and_print(t2, include_id=False) col Alicecat Alicedog Bobdog Bobdog
Here is the function:
def apply_async(
    fun: Callable,
    *args: expr.ColumnExpression | Value,
    **kwargs: expr.ColumnExpression | Value,
) -> expr.ColumnExpression:
    r"""Apply ``fun`` asynchronously to column expressions, column-wise.

    The output column type is deduced from the function's type annotations.
    Either a regular or an async function can be passed.

    Example:

    >>> import pathway as pw
    >>> import asyncio
    >>> async def concat(left: str, right: str) -> str:
    ...     await asyncio.sleep(0.1)
    ...     return left+right
    >>> t1 = pw.debug.table_from_markdown('''
    ... age  owner  pet
    ...  10  Alice  dog
    ...   9    Bob  dog
    ...   8  Alice  cat
    ...   7    Bob  dog''')
    >>> t2 = t1.select(col = pw.apply_async(concat, t1.owner, t1.pet))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    col
    Alicecat
    Alicedog
    Bobdog
    Bobdog
    """
    if kwargs:
        # Keyword forwarding still works for now, but only positional
        # arguments are the supported spelling.
        warn(
            "Passing keyword arguments to the function in pw.apply_async is deprecated. Use positional arguments instead.",
            DeprecationWarning,
            stacklevel=5,
        )
    wrapped = udf(fun, executor=async_executor())
    return wrapped(*args, **kwargs)
166,807 | from __future__ import annotations
import inspect
from collections import defaultdict
from collections.abc import Callable, Mapping
from functools import wraps
from typing import (
Any,
ParamSpec,
TypeVar,
get_args,
get_origin,
get_type_hints,
overload,
)
from warnings import warn
from pathway.internals import (
dtype as dt,
expression as expr,
operator as op,
schema,
table,
)
from pathway.internals.api import Value
from pathway.internals.helpers import function_spec
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import async_executor, udf
# Type alias enumerating the value types supported by the Pathway engine.
# NOTE(review): TypeAlias, Union, Pointer, datetime, np, json and _Value are
# not imported/defined in this excerpt — presumably bound earlier in the
# original module; confirm before treating this stitched chunk as runnable.
Value: TypeAlias = Union[
    None,
    int,
    float,
    str,
    bytes,
    bool,
    Pointer,
    datetime.datetime,
    datetime.timedelta,
    np.ndarray,
    json.Json,
    dict[str, _Value],
    tuple[_Value, ...],
]
The provided code snippet includes necessary dependencies for implementing the `declare_type` function. Write a Python function `def declare_type( target_type, col: expr.ColumnExpression | Value ) -> expr.DeclareTypeExpression` to solve the following problem:
Used to change the type of a column to a particular type. Disclaimer: it only changes type in a schema, it does not affect values stored. Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... val ... 1 10 ... 2 9.5 ... 3 8 ... 4 7''') >>> t1.schema <pathway.Schema types={'val': <class 'float'>}> >>> t2 = t1.filter(t1.val == pw.cast(int, t1.val)) >>> t2.schema <pathway.Schema types={'val': <class 'float'>}> >>> t3 = t2.select(val = pw.declare_type(int, t2.val)) >>> t3.schema <pathway.Schema types={'val': <class 'int'>}>
Here is the function:
def declare_type(
    target_type, col: expr.ColumnExpression | Value
) -> expr.DeclareTypeExpression:
    """Override the declared type of a column with ``target_type``.

    Disclaimer: it only changes the type in a schema, it does not affect the
    values stored.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... val
    ...  10
    ... 9.5
    ...   8
    ...   7''')
    >>> t1.schema
    <pathway.Schema types={'val': <class 'float'>}>
    >>> t2 = t1.filter(t1.val == pw.cast(int, t1.val))
    >>> t2.schema
    <pathway.Schema types={'val': <class 'float'>}>
    >>> t3 = t2.select(val = pw.declare_type(int, t2.val))
    >>> t3.schema
    <pathway.Schema types={'val': <class 'int'>}>
    """
    retyped = expr.DeclareTypeExpression(target_type, col)
    return retyped
166,808 | from __future__ import annotations
import inspect
from collections import defaultdict
from collections.abc import Callable, Mapping
from functools import wraps
from typing import (
Any,
ParamSpec,
TypeVar,
get_args,
get_origin,
get_type_hints,
overload,
)
from warnings import warn
from pathway.internals import (
dtype as dt,
expression as expr,
operator as op,
schema,
table,
)
from pathway.internals.api import Value
from pathway.internals.helpers import function_spec
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import async_executor, udf
# Type alias enumerating the value types supported by the Pathway engine.
# NOTE(review): TypeAlias, Union, Pointer, datetime, np, json and _Value are
# not imported/defined in this excerpt — presumably bound earlier in the
# original module; confirm before treating this stitched chunk as runnable.
Value: TypeAlias = Union[
    None,
    int,
    float,
    str,
    bytes,
    bool,
    Pointer,
    datetime.datetime,
    datetime.timedelta,
    np.ndarray,
    json.Json,
    dict[str, _Value],
    tuple[_Value, ...],
]
The provided code snippet includes necessary dependencies for implementing the `coalesce` function. Write a Python function `def coalesce(*args: expr.ColumnExpression | Value) -> expr.ColumnExpression` to solve the following problem:
For arguments list arg_1, arg_2, ..., arg_n returns first not-None value. Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... colA colB ... | 10 ... 2 | ... | ... 4 | 7''') >>> t2 = t1.select(t1.colA, t1.colB, col=pw.coalesce(t1.colA, t1.colB)) >>> pw.debug.compute_and_print(t2, include_id=False) colA | colB | col | | | 10 | 10 2 | | 2 4 | 7 | 4
Here is the function:
def coalesce(*args: expr.ColumnExpression | Value) -> expr.ColumnExpression:
    """Return the first not-None value among ``arg_1, arg_2, ..., arg_n``.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... colA   colB
    ...      |
    ...      |   10
    ...    2 |
    ...    4 |    7''')
    >>> t2 = t1.select(t1.colA, t1.colB, col=pw.coalesce(t1.colA, t1.colB))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    colA | colB | col
         |      |
         | 10   | 10
    2    |      | 2
    4    | 7    | 4
    """
    first_non_none = expr.CoalesceExpression(*args)
    return first_non_none
166,809 | from __future__ import annotations
import inspect
from collections import defaultdict
from collections.abc import Callable, Mapping
from functools import wraps
from typing import (
Any,
ParamSpec,
TypeVar,
get_args,
get_origin,
get_type_hints,
overload,
)
from warnings import warn
from pathway.internals import (
dtype as dt,
expression as expr,
operator as op,
schema,
table,
)
from pathway.internals.api import Value
from pathway.internals.helpers import function_spec
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import async_executor, udf
# Type alias enumerating the value types supported by the Pathway engine.
# NOTE(review): TypeAlias, Union, Pointer, datetime, np, json and _Value are
# not imported/defined in this excerpt — presumably bound earlier in the
# original module; confirm before treating this stitched chunk as runnable.
Value: TypeAlias = Union[
    None,
    int,
    float,
    str,
    bytes,
    bool,
    Pointer,
    datetime.datetime,
    datetime.timedelta,
    np.ndarray,
    json.Json,
    dict[str, _Value],
    tuple[_Value, ...],
]
The provided code snippet includes necessary dependencies for implementing the `require` function. Write a Python function `def require(val, *deps: expr.ColumnExpression | Value) -> expr.ColumnExpression` to solve the following problem:
Returns val iff every dep in deps is not-None. Returns None otherwise. Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... colA colB ... | 10 ... 2 | ... | ... 4 | 7''') >>> t2 = t1.select(t1.colA, t1.colB, col=pw.require(t1.colA + t1.colB, t1.colA, t1.colB)) >>> pw.debug.compute_and_print(t2, include_id=False) colA | colB | col | | | 10 | 2 | | 4 | 7 | 11
Here is the function:
def require(val, *deps: expr.ColumnExpression | Value) -> expr.ColumnExpression:
    """Return ``val`` iff every dep in ``deps`` is not-None, None otherwise.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... colA   colB
    ...      |
    ...      |   10
    ...    2 |
    ...    4 |    7''')
    >>> t2 = t1.select(t1.colA, t1.colB, col=pw.require(t1.colA + t1.colB, t1.colA, t1.colB))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    colA | colB | col
         |      |
         | 10   |
    2    |      |
    4    | 7    | 11
    """
    guarded = expr.RequireExpression(val, *deps)
    return guarded
166,810 | from __future__ import annotations
import inspect
from collections import defaultdict
from collections.abc import Callable, Mapping
from functools import wraps
from typing import (
Any,
ParamSpec,
TypeVar,
get_args,
get_origin,
get_type_hints,
overload,
)
from warnings import warn
from pathway.internals import (
dtype as dt,
expression as expr,
operator as op,
schema,
table,
)
from pathway.internals.api import Value
from pathway.internals.helpers import function_spec
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import async_executor, udf
# Type alias enumerating the value types supported by the Pathway engine.
# NOTE(review): TypeAlias, Union, Pointer, datetime, np, json and _Value are
# not imported/defined in this excerpt — presumably bound earlier in the
# original module; confirm before treating this stitched chunk as runnable.
Value: TypeAlias = Union[
    None,
    int,
    float,
    str,
    bytes,
    bool,
    Pointer,
    datetime.datetime,
    datetime.timedelta,
    np.ndarray,
    json.Json,
    dict[str, _Value],
    tuple[_Value, ...],
]
The provided code snippet includes necessary dependencies for implementing the `if_else` function. Write a Python function `def if_else( if_clause: expr.ColumnExpression | Value, then_clause: expr.ColumnExpression | Value, else_clause: expr.ColumnExpression | Value, ) -> expr.ColumnExpression` to solve the following problem:
Equivalent to:: if (if_clause): return (then_clause) else: return (else_clause) Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... colA colB ... 1 | 0 ... 2 | 2 ... 6 | 3''') >>> t2 = t1.select(res = pw.if_else(t1.colB != 0, t1.colA // t1.colB, 0)) >>> pw.debug.compute_and_print(t2, include_id=False) res 0 1 2
Here is the function:
def if_else(
    if_clause: expr.ColumnExpression | Value,
    then_clause: expr.ColumnExpression | Value,
    else_clause: expr.ColumnExpression | Value,
) -> expr.ColumnExpression:
    """Column-wise conditional, equivalent to::

        if (if_clause):
            return (then_clause)
        else:
            return (else_clause)

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... colA   colB
    ...    1 |    0
    ...    2 |    2
    ...    6 |    3''')
    >>> t2 = t1.select(res = pw.if_else(t1.colB != 0, t1.colA // t1.colB, 0))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    res
    0
    1
    2
    """
    branched = expr.IfElseExpression(if_clause, then_clause, else_clause)
    return branched
166,811 | from __future__ import annotations
import inspect
from collections import defaultdict
from collections.abc import Callable, Mapping
from functools import wraps
from typing import (
Any,
ParamSpec,
TypeVar,
get_args,
get_origin,
get_type_hints,
overload,
)
from warnings import warn
from pathway.internals import (
dtype as dt,
expression as expr,
operator as op,
schema,
table,
)
from pathway.internals.api import Value
from pathway.internals.helpers import function_spec
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import async_executor, udf
# Type alias enumerating the value types supported by the Pathway engine.
# NOTE(review): TypeAlias, Union, Pointer, datetime, np, json and _Value are
# not imported/defined in this excerpt — presumably bound earlier in the
# original module; confirm before treating this stitched chunk as runnable.
Value: TypeAlias = Union[
    None,
    int,
    float,
    str,
    bytes,
    bool,
    Pointer,
    datetime.datetime,
    datetime.timedelta,
    np.ndarray,
    json.Json,
    dict[str, _Value],
    tuple[_Value, ...],
]
The provided code snippet includes necessary dependencies for implementing the `make_tuple` function. Write a Python function `def make_tuple(*args: expr.ColumnExpression | Value) -> expr.ColumnExpression` to solve the following problem:
Creates a tuple from the provided expressions. Args: args: a list of expressions to be put in a tuple Returns: tuple Note: - Each cell in the output column will be a tuple containing the corresponding values from the input \ columns. - The order of values in each tuple will match the order of the input columns. - If any of the input columns have missing values, the resulting tuples will contain None for those \ positions. Example: >>> import pathway as pw >>> table = pw.debug.table_from_markdown( ... ''' ... a | b | c ... 1 | 10 | a ... 2 | 20 | ... 3 | 30 | c ... ''' ... ) >>> table_with_tuple = table.select(res=pw.make_tuple(pw.this.a, pw.this.b, pw.this.c)) >>> pw.debug.compute_and_print(table_with_tuple, include_id=False) res (1, 10, 'a') (2, 20, None) (3, 30, 'c')
Here is the function:
def make_tuple(*args: expr.ColumnExpression | Value) -> expr.ColumnExpression:
    """Build a tuple out of the provided expressions.

    Args:
        args: expressions whose values become the tuple's entries.

    Returns:
        tuple

    Note:
        - Each output cell is a tuple containing the corresponding values \
from the input columns.
        - Tuple entries keep the order of the input columns.
        - Missing input values appear as None in the resulting tuples.

    Example:

    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ... a | b  | c
    ... 1 | 10 | a
    ... 2 | 20 |
    ... 3 | 30 | c
    ... '''
    ... )
    >>> table_with_tuple = table.select(res=pw.make_tuple(pw.this.a, pw.this.b, pw.this.c))
    >>> pw.debug.compute_and_print(table_with_tuple, include_id=False)
    res
    (1, 10, 'a')
    (2, 20, None)
    (3, 30, 'c')
    """
    packed = expr.MakeTupleExpression(*args)
    return packed
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.