id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
166,612 | from __future__ import annotations
import math
from collections.abc import Callable
from enum import IntEnum, auto
from typing import Any
import pathway.internals as pw
from pathway.internals.helpers import StableSet
class JoinResult(pw.Schema):
    """Schema of one fuzzy-join output row: a matched pair of nodes and its weight."""

    # NOTE(review): `Node` is declared elsewhere in the module (not visible in this chunk).
    left: pw.Pointer[Node]
    right: pw.Pointer[Node]
    weight: float
class FuzzyJoinFeatureGeneration(IntEnum):
    """Strategies for turning raw values into fuzzy-join features."""

    AUTO = auto()
    TOKENIZE = auto()
    LETTERS = auto()

    def generate(self) -> Callable[[Any], Any]:
        """Return the feature-extraction callable for this strategy."""
        cls = type(self)
        # AUTO currently behaves like TOKENIZE (TODO in original: add some magic autoguessing).
        dispatch = {
            cls.AUTO: _tokenize,
            cls.TOKENIZE: _tokenize,
            cls.LETTERS: _letters,
        }
        if self not in dispatch:
            raise AssertionError
        return dispatch[self]
class FuzzyJoinNormalization(IntEnum):
    """How a feature's weight is normalized by its occurrence count."""

    WEIGHT = auto()
    LOGWEIGHT = auto()
    NONE = auto()

    def normalize(self) -> Callable[[Any], Any]:
        """Return the normalization callable for this strategy."""
        cls = type(self)
        dispatch = {
            cls.WEIGHT: _discrete_weight,
            cls.LOGWEIGHT: _discrete_logweight,
            cls.NONE: _none,
        }
        if self not in dispatch:
            raise AssertionError
        return dispatch[self]
def _fuzzy_match_tables(
    left_table: pw.Table,
    right_table: pw.Table,
    *,
    by_hand_match: pw.Table[JoinResult] | None = None,
    normalization=FuzzyJoinNormalization.LOGWEIGHT,
    feature_generation=FuzzyJoinFeatureGeneration.AUTO,
) -> pw.Table[JoinResult]:
    """Fuzzy-match two tables after concatenating all columns of each row.

    Args:
        left_table, right_table: tables whose rows are to be matched.
        by_hand_match: optional hand-made matches that override the result.
        normalization: feature-weight normalization strategy.
        feature_generation: feature-extraction strategy.

    Returns:
        A table of (left, right, weight) matches.
    """
    # NOTE(review): `_concatenate_columns` and `smart_fuzzy_match` are defined
    # elsewhere in the module (not visible in this chunk).
    left = _concatenate_columns(left_table)
    right = _concatenate_columns(right_table)
    return smart_fuzzy_match(
        left.desc,
        right.desc,
        by_hand_match=by_hand_match,
        normalization=normalization,
        feature_generation=feature_generation,
    )
) # necessary for doctests to work, see https://www.rosipov.com/blog/python-doctests-and-decorators-bug/
class StableSet(MutableSet[T]):
    """A mutable set with deterministic (insertion) iteration order.

    Backed by a dict, whose keys preserve insertion order.
    """

    _inner: dict[T, None]

    def __init__(self, /, iterable: Iterable[T] = ()):
        # dict.fromkeys keeps first-insertion order and drops duplicates.
        self._inner = dict.fromkeys(iterable)

    def __contains__(self, element: object) -> bool:
        return element in self._inner

    def __iter__(self) -> Iterator[T]:
        yield from self._inner

    def __len__(self) -> int:
        return len(self._inner)

    def __repr__(self) -> str:
        body = repr(list(self._inner)) if self._inner else ""
        return f"{type(self).__name__}({body})"

    def add(self, element: T) -> None:
        # setdefault keeps the original position of an already-present element.
        self._inner.setdefault(element, None)

    def discard(self, element: T) -> None:
        if element in self._inner:
            del self._inner[element]

    def copy(self) -> StableSet[T]:
        return StableSet(self)

    def __or__(self, other: Iterable[T2]) -> StableSet[T | T2]:
        # MutableSet's mixin builds the right concrete type via _from_iterable.
        return super().__or__(other)  # type: ignore

    def __ior__(self, other: Iterable[T2]) -> StableSet[T | T2]:
        return super().__ior__(other)  # type: ignore

    def update(self, *sets: Iterable[T]) -> None:
        """In-place union with each of the given iterables, in order."""
        for iterable in sets:
            self |= iterable

    def union(*sets: Iterable[T]) -> StableSet[T]:
        """Return a new StableSet that is the union of all given iterables.

        Callable both as a method and as an unbound function (no explicit self).
        """
        merged: StableSet[T] = StableSet()
        merged.update(*sets)
        return merged
def fuzzy_match_tables(
    left_table: pw.Table,
    right_table: pw.Table,
    *,
    by_hand_match: pw.Table[JoinResult] | None = None,
    normalization=FuzzyJoinNormalization.LOGWEIGHT,
    feature_generation=FuzzyJoinFeatureGeneration.AUTO,
    left_projection: dict[str, str] | None = None,
    right_projection: dict[str, str] | None = None,
) -> pw.Table[JoinResult]:
    """Fuzzy-match rows of two tables, optionally per projection "bucket".

    Args:
        left_table, right_table: tables to match.
        by_hand_match: optional hand-made matches overriding the result.
        normalization: feature-weight normalization strategy.
        feature_generation: feature-extraction strategy.
        left_projection, right_projection: optional column-name -> bucket maps;
            when both are non-empty, matching is done bucket by bucket and the
            per-bucket weights are summed per (left, right) pair.

    Returns:
        A table of (left, right, weight) matches.
    """
    # Fix: the projections used to default to shared mutable `{}` dicts.
    left_projection = left_projection or {}
    right_projection = right_projection or {}
    # If one projection is empty, we don't do any projection fuzzy_match_tables
    if not left_projection or not right_projection:
        return _fuzzy_match_tables(
            left_table=left_table,
            right_table=right_table,
            by_hand_match=by_hand_match,
            normalization=normalization,
            feature_generation=feature_generation,
        )
    # We compute the projections spaces and for each bucket b we keep track of the
    # corresponding columns which are projected into b.
    set_buckets: StableSet[str] = StableSet()
    buckets_left: dict[str, list] = {}
    buckets_right: dict[str, list] = {}
    for col_name in left_table._columns.keys():
        if col_name in left_projection:
            bucket_id = left_projection[col_name]
            set_buckets.add(bucket_id)
            buckets_left.setdefault(bucket_id, []).append(col_name)
    for col_name in right_table._columns.keys():
        if col_name in right_projection:
            bucket_id = right_projection[col_name]
            set_buckets.add(bucket_id)
            buckets_right.setdefault(bucket_id, []).append(col_name)
    # For each bucket, we compute the fuzzy_match_table on the table only with
    # the columns associated to the bucket.
    # The corresponding matches are then added in a common 'matchings' columns
    fuzzy_match_bucket_list = []
    for bucket_id in set_buckets:
        # Robustness fix: a bucket referenced by only one projection used to
        # raise KeyError; such buckets cannot produce matches, so skip them.
        if bucket_id not in buckets_left or bucket_id not in buckets_right:
            continue
        left_table_bucket = left_table[buckets_left[bucket_id]]
        right_table_bucket = right_table[buckets_right[bucket_id]]
        fuzzy_match_bucket = _fuzzy_match_tables(
            left_table=left_table_bucket,
            right_table=right_table_bucket,
            by_hand_match=by_hand_match,
            normalization=normalization,
            feature_generation=feature_generation,
        )
        fuzzy_match_bucket_list.append(fuzzy_match_bucket)
    matchings = pw.Table.concat_reindex(*fuzzy_match_bucket_list)
    # Matchings are grouped by left/right pairs and the weights are summed.
    matchings = matchings.groupby(matchings.left, matchings.right).reduce(
        matchings.left,
        matchings.right,
        weight=pw.reducers.sum(matchings.weight),
    )
    return matchings
166,613 | from __future__ import annotations
import math
from collections.abc import Callable
from enum import IntEnum, auto
from typing import Any
import pathway.internals as pw
from pathway.internals.helpers import StableSet
class Feature(pw.Schema):
    """A fuzzy-join feature: its base weight and how its count is normalized."""

    weight: float
    # One of the FuzzyJoinNormalization enum values, stored as a plain int.
    normalization_type: int
class Edge(pw.Schema):
    """Connects a node to one of its features, with an edge weight."""

    # NOTE(review): `Node` is declared elsewhere in the module (not visible in this chunk).
    node: pw.Pointer[Node]
    feature: pw.Pointer[Feature]
    weight: float
class JoinResult(pw.Schema):
    """Schema of one fuzzy-join output row: a matched pair of nodes and its weight."""

    # NOTE(review): `Node` is declared elsewhere in the module (not visible in this chunk).
    left: pw.Pointer[Node]
    right: pw.Pointer[Node]
    weight: float
def _fuzzy_match(
    edges_left: pw.Table[Edge],
    edges_right: pw.Table[Edge],
    features: pw.Table[Feature],
    symmetric: bool,
    HEAVY_LIGHT_THRESHOLD,
    by_hand_match: pw.Table[JoinResult] | None = None,
) -> pw.Table[JoinResult]:
    """Core fuzzy-match: pair left/right nodes via shared weighted features.

    Features occurring at least HEAVY_LIGHT_THRESHOLD times are treated as
    "heavy" and only scored for pairs already suggested by "light" features,
    which keeps the all-pairs join cheap.

    Args:
        edges_left, edges_right: node-to-feature edges of each side (the same
            table when `symmetric` is True).
        features: feature weights and normalization types.
        symmetric: whether left and right are the same edge set.
        HEAVY_LIGHT_THRESHOLD: feature count separating heavy from light.
        by_hand_match: optional hand-made matches that override the result.

    Returns:
        A table of (left, right, weight) matches.
    """
    if symmetric:
        # In the symmetric case both sides must literally be the same table.
        assert edges_left is edges_right
    # TODO do a more integrated approach for accommodating by_hand_match.
    if by_hand_match is not None:
        # NOTE(review): `_filter_out_matched_by_hand` is defined elsewhere in
        # the module (not visible in this chunk).
        edges_left, edges_right = _filter_out_matched_by_hand(
            edges_left, edges_right, symmetric, by_hand_match
        )
    if symmetric:
        edges = edges_left
        edges_right = edges_right.copy()
    else:
        edges = pw.Table.concat_reindex(edges_left, edges_right)
    # Per-feature occurrence counts, defaulting to 0 for unused features.
    features_cnt = features.select(cnt=0).update_rows(
        edges.groupby(id=edges.feature).reduce(cnt=pw.reducers.count())
    )
    del edges
    # Split edges by feature popularity into heavy and light subsets.
    edges_left_heavy = edges_left.filter(
        features_cnt.ix(edges_left.feature).cnt >= HEAVY_LIGHT_THRESHOLD
    )
    edges_left_light = edges_left.filter(
        features_cnt.ix(edges_left.feature).cnt < HEAVY_LIGHT_THRESHOLD
    )
    if symmetric:
        edges_right_heavy = edges_left_heavy.copy()
        edges_right_light = edges_left_light.copy()
    else:
        edges_right_heavy = edges_right.filter(
            features_cnt.ix(edges_right.feature).cnt >= HEAVY_LIGHT_THRESHOLD
        )
        edges_right_light = edges_right.filter(
            features_cnt.ix(edges_right.feature).cnt < HEAVY_LIGHT_THRESHOLD
        )

    def _normalize_weight(cnt: float, normalization_type: int) -> float:
        # Penalize very common features according to the configured strategy.
        return FuzzyJoinNormalization(normalization_type).normalize(cnt)

    features_normalized = features.select(
        weight=features.weight
        * pw.apply(
            _normalize_weight,
            features_cnt.restrict(features).cnt,
            features.normalization_type,
        )
    )
    # Light features: all edge pairs sharing a feature (cheap, counts are small).
    node_node_light: pw.Table[JoinResult] = edges_left_light.join(
        edges_right_light, edges_left_light.feature == edges_right_light.feature
    ).select(
        weight=edges_left_light.weight
        * edges_right_light.weight
        * features_normalized.ix(pw.this.feature).weight,
        left=edges_left_light.node,
        right=edges_right_light.node,
    )
    if symmetric:
        # A node must not match itself.
        node_node_light = node_node_light.filter(
            node_node_light.left != node_node_light.right
        )
    node_node_light = node_node_light.groupby(
        node_node_light.left, node_node_light.right
    ).reduce(
        node_node_light.left,
        node_node_light.right,
        weight=pw.reducers.sum(node_node_light.weight),
    )
    # Heavy features: scored only for pairs already proposed by light features.
    node_node_heavy = (
        node_node_light.join(edges_left_heavy, pw.left.left == pw.right.node)
        .join(
            edges_right_heavy,
            pw.left.right == pw.right.node,
            pw.left.feature == pw.right.feature,
        )
        .select(
            pw.this.left,
            pw.this.right,
            weight=edges_left_heavy.weight
            * edges_right_heavy.weight
            * features_normalized.ix(pw.this.feature).weight,
        )
    )

    def weight_to_pseudoweight(weight, left_id, right_id):
        # Tuple ordering breaks weight ties deterministically by id pair.
        return pw.if_else(
            left_id < right_id,
            pw.make_tuple(weight, left_id, right_id),
            pw.make_tuple(weight, right_id, left_id),
        )

    # Combine light+heavy scores, then keep for each node only its best
    # partner: first the best right per left, then the best left per right.
    node_node = (
        pw.Table.concat_reindex(node_node_light, node_node_heavy)
        .groupby(pw.this.left, pw.this.right)
        .reduce(pw.this.left, pw.this.right, weight=pw.reducers.sum(pw.this.weight))
        .with_columns(
            weight=weight_to_pseudoweight(
                pw.this.weight,
                pw.this.left,
                pw.this.right,
            ),
        )
        .groupby(pw.this.left)
        .reduce(
            pw.this.left,
            pw.this.ix(pw.reducers.argmax(pw.this.weight)).right,
            weight=pw.reducers.max(pw.this.weight),
        )
        .groupby(pw.this.right)
        .reduce(
            pw.this.right,
            pw.this.ix(pw.reducers.argmax(pw.this.weight)).left,
            weight=pw.reducers.max(pw.this.weight),
        )
    )
    if symmetric:
        # Keep a single orientation of each symmetric pair.
        node_node = node_node.filter(node_node.left < node_node.right)
    # Unpack the pseudoweight tuple back into a plain numeric weight.
    node_node = node_node.with_columns(
        weight=pw.this.weight[0],
    )
    if by_hand_match is not None:
        # Hand-made matches override the computed ones.
        node_node = node_node.update_rows(by_hand_match)
    return node_node
def fuzzy_match_with_hint(
    edges_left: pw.Table[Edge],
    edges_right: pw.Table[Edge],
    features: pw.Table[Feature],
    by_hand_match: pw.Table[JoinResult],
    HEAVY_LIGHT_THRESHOLD=100,
) -> pw.Table[JoinResult]:
    """Asymmetric fuzzy match that honours a table of hand-made matches.

    Thin wrapper over `_fuzzy_match` with `symmetric=False`.
    """
    return _fuzzy_match(
        edges_left=edges_left,
        edges_right=edges_right,
        features=features,
        symmetric=False,
        HEAVY_LIGHT_THRESHOLD=HEAVY_LIGHT_THRESHOLD,
        by_hand_match=by_hand_match,
    )
166,614 | from __future__ import annotations
import numpy as np
import pathway as pw
from ._lsh import lsh
def compute_cosine_dist(datapoint: np.ndarray, querypoint: np.ndarray) -> float:
    """Return the cosine distance (1 - cosine similarity) between two vectors."""
    norm_product = np.linalg.norm(datapoint) * np.linalg.norm(querypoint)
    similarity = np.dot(datapoint, querypoint) / norm_product
    return float(1 - similarity)
166,615 | from __future__ import annotations
import numpy as np
import pathway as pw
from ._lsh import lsh
class DataPoint(pw.Schema):
    """Schema of an indexable data point: a single embedding vector."""

    data: np.ndarray
class Query(pw.Schema):
    """Schema of a k-NN query: an embedding and the neighbor count wanted."""

    data: np.ndarray
    # Number of nearest neighbors to return for this query.
    k: int
def compute_euclidean_dist2(datapoint: np.ndarray, querypoint: np.ndarray) -> float:
    """Return the squared Euclidean distance between two points."""
    delta = datapoint - querypoint
    return float(np.sum(np.square(delta)))
def k_approximate_nearest_neighbors_flat(
    data: pw.Table[DataPoint],
    queries: pw.Table[Query],
    bucketer,
    dist_fun=compute_euclidean_dist2,
) -> pw.Table:
    """Finds k-approximate nearest neighbors of points in queries table against points in data table.

    Requires supplying LSH bucketer, see e.g. generate_euclidean_lsh_bucketer.
    dist_fun is a distance function which should be chosen according to the bucketer.

    Returns a table of (query_id, data_id) pairs, one row per retained neighbor.
    """
    flat_data = lsh(data, bucketer, origin_id="data_id", include_data=False)
    flat_queries = lsh(queries, bucketer, origin_id="query_id", include_data=False)
    # Candidate pairs: query/data points sharing an LSH bucket in any band,
    # deduplicated via groupby/reduce.
    unique = (
        flat_queries.join(
            flat_data,
            flat_queries.bucketing == flat_data.bucketing,
            flat_queries.band == flat_data.band,
        )
        .select(
            flat_queries.query_id,
            flat_data.data_id,
        )
        .groupby(pw.this.query_id, pw.this.data_id)
        .reduce(pw.this.query_id, pw.this.data_id)
    )
    # Exact distance for each surviving candidate pair.
    distances = unique + unique.select(
        dist=pw.apply(
            dist_fun, data.ix(unique.data_id).data, queries.ix(unique.query_id).data
        ),
    )
    # Queries with no candidates at all are dropped here.
    # TODO this assert could be deduced
    nonempty_queries = distances.groupby(id=distances.query_id).reduce()
    pw.universes.promise_is_subset_of(nonempty_queries, queries)
    queries_restricted = queries.restrict(nonempty_queries)
    ks = nonempty_queries.select(queries_restricted.k, instance=queries_restricted.id)
    # Keep the k smallest distances per query.
    topk = pw.indexing.filter_smallest_k(distances.dist, distances.query_id, ks)
    return topk.select(topk.query_id, topk.data_id)
The provided code snippet includes necessary dependencies for implementing the `knn_classifier_flat` function. Write a Python function `def knn_classifier_flat( data: pw.Table[DataPoint], labels: pw.Table, queries: pw.Table[Query], bucketer, dist_fun=compute_euclidean_dist2, ) -> pw.Table` to solve the following problem:
Classifies queries against labeled data using approximate k-NN.
Here is the function:
def knn_classifier_flat(
    data: pw.Table[DataPoint],
    labels: pw.Table,
    queries: pw.Table[Query],
    bucketer,
    dist_fun=compute_euclidean_dist2,
) -> pw.Table:
    """Classifies queries against labeled data using approximate k-NN.

    Each query gets the majority label among its approximate nearest neighbors.
    """
    neighbors = k_approximate_nearest_neighbors_flat(data, queries, bucketer, dist_fun)
    with_labels = neighbors.select(
        neighbors.query_id,
        label=labels.ix(neighbors.data_id).label,
    )
    majority = pw.utils.col.groupby_reduce_majority(
        with_labels.query_id, with_labels.label
    )
    return majority.select(predicted_label=majority.majority).with_id(
        majority.query_id
    )
166,616 | from __future__ import annotations
import numpy as np
import pathway as pw
from ._lsh import lsh
class DataPoint(pw.Schema):
    """Schema of an indexable data point: a single embedding vector."""

    data: np.ndarray
class Label:
    # NOTE(review): unlike DataPoint above, this class does not inherit from
    # pw.Schema, so `label` is only a bare annotation; it is used as
    # pw.Table[DataPoint | Label], which suggests it should be a schema —
    # confirm upstream.
    label: int
def np_divide(data: np.ndarray, other: float) -> np.ndarray:
    """Elementwise division of an array by a scalar (helper usable with pw.apply)."""
    return np.divide(data, other)
def lsh(data: pw.Table, bucketer, origin_id="origin_id", include_data=True) -> pw.Table:
    """Apply LSH bucketer for each row and flatten the table.

    Produces one row per (original row, band) with columns: `origin_id`
    (pointer to the original row), `bucketing` (band index), `band` (bucket
    id in that band), and optionally the original `data`.

    NOTE(review): this definition shadows the `lsh` imported from ._lsh in
    the header, and `unpack_col` is not imported in the visible header —
    confirm the module's imports.
    """
    # One (band_index, bucket_id) pair per band, attached to every row.
    flat_data = data.select(
        buckets=pw.apply(lambda x: list(enumerate(bucketer(x))), data.data)
    )
    flat_data = flat_data.flatten(pw.this.buckets, **{origin_id: pw.this.id})
    flat_data = flat_data.select(flat_data[origin_id]) + unpack_col(
        flat_data.buckets,
        pw.this.bucketing,
        pw.this.band,
    )
    if include_data:
        flat_data += flat_data.select(
            data.ix(flat_data[origin_id]).data,
        )
    return flat_data
def clustering_via_lsh(
    data: pw.Table[DataPoint], bucketer, k: int
) -> pw.Table[DataPoint | Label]:  # type: ignore
    """Cluster data points into k groups via LSH-bucket representatives.

    Each LSH bucket is reduced to one weighted representative (its mean
    point); the representatives are clustered with KMeans and every original
    point receives the majority label among the buckets it fell into.

    Args:
        data: table of data points.
        bucketer: LSH bucketing function (see generate_*_lsh_bucketer).
        k: number of clusters.

    Returns:
        A table keyed by data-point id with the assigned `label` column.
    """
    flat_data = lsh(data, bucketer, origin_id="data_id", include_data=True)
    # One weighted representative (mean point, bucket size) per LSH bucket.
    representatives = (
        flat_data.groupby(flat_data.bucketing, flat_data.band)
        .reduce(
            flat_data.bucketing,
            flat_data.band,
            sum=pw.reducers.sum(flat_data.data),
            count=pw.reducers.count(),
        )
        .select(
            pw.this.bucketing,
            pw.this.band,
            data=pw.apply(
                np_divide, pw.this.sum, pw.this.count
            ),  # TODO: operators for np.ndarray
            weight=pw.this.count,
        )
    )

    def clustering(data: list[np.ndarray], weights: list[float]) -> list[float]:
        # Executed once over all representatives (apply_all_rows gathers them).
        from sklearn.cluster import KMeans

        # BUG FIX: n_clusters was hard-coded to 3, silently ignoring `k`.
        kmeans = KMeans(n_clusters=k, init="k-means++", random_state=0, n_init=10)
        kmeans.fit(data, sample_weight=weights)
        return kmeans.labels_

    labels = pw.utils.col.apply_all_rows(
        representatives.data,
        representatives.weight,
        fun=clustering,
        result_col_name="label",
    )
    representatives += labels
    # Each original point votes with the labels of the buckets it fell into.
    votes = flat_data.join(
        representatives,
        flat_data.bucketing == representatives.bucketing,
        flat_data.band == representatives.band,
    ).select(
        flat_data.data_id,
        representatives.label,
    )
    result = pw.utils.col.groupby_reduce_majority(votes.data_id, votes.label)
    return result.select(label=result.majority).with_id(result.data_id)
166,617 | from __future__ import annotations
import fnmatch
import logging
from statistics import mode
from typing import Literal
import jmespath
import jmespath.functions
import numpy as np
import pathway.internals as pw
from pathway.internals.helpers import StableSet
from pathway.stdlib.utils.col import groupby_reduce_majority, unpack_col
from ._lsh import generate_cosine_lsh_bucketer, generate_euclidean_lsh_bucketer
DistanceTypes = Literal["euclidean", "cosine"]
class DataPoint(pw.Schema):
    """Schema of an indexable data point: a single embedding vector."""

    data: np.ndarray
def _euclidean_distance(data_table: np.ndarray, query_table: np.ndarray) -> np.ndarray:
return np.sum((data_table - query_table) ** 2, axis=1)
def compute_cosine_dist(data_table: np.ndarray, query_point: np.ndarray) -> np.ndarray:
    """Row-wise cosine distances (1 - cosine similarity) against one query point."""
    dots = data_table @ query_point
    scale = np.linalg.norm(data_table, axis=1) * np.linalg.norm(query_point)
    return 1 - dots / scale
def knn_lsh_generic_classifier_train(
    data: pw.Table, lsh_projection, distance_function, L: int
):
    """
    Build the LSH index over data using a generic lsh_projection and its associated distance.
    L the number of repetitions of the LSH scheme.
    Returns a LSH projector of type (queries: Table, k:Any) -> Table
    """

    def make_band_col_name(i):
        # Column name holding the bucket id of band i.
        return f"band_{i}"

    band_col_names = [make_band_col_name(i) for i in range(L)]
    # Step 1: bucket every data point and unpack one column per band.
    data += data.select(buckets=pw.apply(lsh_projection, data.data))
    # Fix "UserWarning: Object in (<table1>.buckets)[8] is of type numpy.ndarray
    # but its number of dimensions is not known." when calling unpack_col with ndarray.
    # pw.cast not working and unpack_col doesnt take pw.apply so I used pw.apply separately.
    buckets_list = data.select(buckets=pw.apply(list, data.buckets))
    data += unpack_col(buckets_list.buckets, *band_col_names)
    if "metadata" not in data._columns:
        data += data.select(metadata=None)

    def lsh_perform_query(
        queries: pw.Table, k: int | None = None, with_distances=False
    ) -> pw.Table:
        # Bucket the queries the same way as the data.
        queries += queries.select(buckets=pw.apply(lsh_projection, queries.data))
        if k is not None:
            queries += queries.select(k=k)
        # Same Fix "UserWarning: ... above"
        buckets_list = queries.select(buckets=pw.apply(list, queries.buckets))
        queries += unpack_col(buckets_list.buckets, *band_col_names)
        # step 2: for each query, take union of matching databuckets
        result = queries
        for band_i in range(L):
            # All data ids falling in each bucket of this band.
            band = data.groupby(data[make_band_col_name(band_i)]).reduce(
                data[make_band_col_name(band_i)],
                items=pw.reducers.sorted_tuple(data.id),
            )
            # Default to an empty tuple for queries whose bucket has no data.
            result += queries.select(**{f"items_{band_i}": ()}).update_rows(
                queries.join(
                    band,
                    queries[make_band_col_name(band_i)]
                    == band[make_band_col_name(band_i)],
                    id=queries.id,
                ).select(
                    **{f"items_{band_i}": band.items},
                )
            )

        def merge_buckets(*tuples: list[tuple]) -> tuple:
            # Deduplicate candidate ids while preserving first-seen order.
            return tuple(StableSet(sum(tuples, ())))

        if "metadata_filter" not in result._columns:
            result += result.select(metadata_filter=None)
        # Drop queries with no candidates at all.
        flattened = result.select(
            result.data,
            query_id=result.id,
            ids=pw.apply(merge_buckets, *[result[f"items_{i}"] for i in range(L)]),
            k=result.k,
            metadata_filter=result.metadata_filter,
        ).filter(pw.this.ids != ())

        # step 3: find knns in unioned buckets
        class compute_knns_transformer:
            class training_data(pw.ClassArg):
                data = pw.input_attribute()
                metadata = pw.input_attribute()

            class flattened(pw.ClassArg):
                data = pw.input_attribute()
                query_id = pw.input_attribute()
                ids = pw.input_attribute()
                k = pw.input_attribute()
                metadata_filter = pw.input_attribute()

            def knns(self) -> list[tuple[pw.Pointer, float]]:
                querypoint = self.data
                for id_candidate in self.ids:
                    try:
                        self.transformer.training_data[id_candidate].data
                        self.transformer.training_data[id_candidate].metadata
                    except Exception:
                        raise
                    except BaseException:
                        # Used to prefetch values
                        # NOTE(review): presumably swallows a non-Exception
                        # control-flow signal raised by pathway during
                        # prefetching — confirm; it also silences
                        # KeyboardInterrupt/SystemExit here.
                        pass
                try:
                    # Keep candidates passing the (optional) JMESPath metadata filter.
                    # NOTE(review): `_glob_options` is defined elsewhere in the
                    # module (not visible in this chunk).
                    candidates = [
                        (
                            id_candidate,
                            self.transformer.training_data[id_candidate].data,
                        )
                        for id_candidate in self.ids
                        if self.metadata_filter is None
                        or jmespath.search(
                            self.metadata_filter,
                            self.transformer.training_data[
                                id_candidate
                            ].metadata.value,
                            options=_glob_options,
                        )
                        is True
                    ]
                except jmespath.exceptions.JMESPathError:
                    logging.exception(
                        "Incorrect JMESPath expression for metadata filter"
                    )
                    candidates = []
                if len(candidates) == 0:
                    return []
                ids_filtered, data_candidates_filtered = zip(*candidates)
                data_candidates = np.array(data_candidates_filtered)
                neighs = min(self.k, len(data_candidates))
                distances = distance_function(data_candidates, querypoint)
                knn_ids = np.argpartition(
                    distances,
                    neighs - 1,
                )[
                    :neighs
                ]  # neighs - 1 in argpartition, because of 0-based indexing
                k_distances = distances[knn_ids]
                final_ids = np.array(ids_filtered)[knn_ids]
                if len(final_ids) == 0:
                    return []
                # argpartition gives no ordering; sort the k winners by distance.
                sorted_dists, sorted_ids_by_dist = zip(
                    *sorted(zip(k_distances, final_ids))
                )
                return list(zip(sorted_ids_by_dist, sorted_dists))

        knn_result: pw.Table = compute_knns_transformer(  # type: ignore
            training_data=data, flattened=flattened
        ).flattened.select(flattened.query_id, knns_ids_with_dists=pw.this.knns)
        # Re-attach queries that had no candidates, with an empty result.
        knn_result_with_empty_results = queries.join_left(
            knn_result, queries.id == knn_result.query_id
        ).select(knn_result.knns_ids_with_dists, query_id=queries.id)
        result = knn_result_with_empty_results.with_columns(
            knns_ids_with_dists=pw.coalesce(pw.this.knns_ids_with_dists, ()),
        )
        if not with_distances:
            # Strip distances, keeping only the neighbor id tuple.
            result = result.select(
                pw.this.query_id,
                knns_ids=pw.apply(
                    lambda x: tuple(zip(*x))[0] if len(x) > 0 else (),
                    pw.this.knns_ids_with_dists,
                ),
            )
        return result

    return lsh_perform_query
def generate_euclidean_lsh_bucketer(d: int, M: int, L: int, A=1.0, seed=0):
    """Locality-sensitive hashing in the Euclidean space.
    See e.g. http://madscience.ucsd.edu/notes/lec9.pdf
    d - number of dimensions in the data
    M - number of ANDS
    L - number of ORS
    A - bucket length (after projecting on a line)

    Returns a function mapping a d-dimensional vector to L bucket ids.
    """
    gen = np.random.default_rng(seed=seed)
    total_lines = M * L
    # generate random unit vectors
    random_lines = gen.standard_normal((d, total_lines))
    random_lines = random_lines / np.linalg.norm(random_lines, axis=0)
    # Random per-line offsets in [0, A) so bucket boundaries are not aligned.
    shift = gen.random(size=total_lines) * A

    def bucketify(x: np.ndarray) -> np.ndarray:
        buckets = np.floor_divide(x @ random_lines + shift, A).astype(
            int
        )  # project on lines
        # Collapse each group of M AND-ed bucket ids into one fingerprint.
        # NOTE(review): `fingerprint` is not defined/imported in this chunk —
        # presumably a pathway helper; confirm.
        split = np.split(buckets, L)
        return np.hstack([fingerprint(X, format="i32") for X in split])

    return bucketify
def generate_cosine_lsh_bucketer(d: int, M: int, L: int, seed=0):
    """Locality-sensitive hashing for the cosine similarity.
    See e.g. http://madscience.ucsd.edu/notes/lec9.pdf
    M - number of ANDS
    L - number of ORS

    Returns a function mapping a d-dimensional vector to L integer bucket ids.
    """
    rng = np.random.default_rng(seed=seed)
    hyperplane_count = M * L
    # Random hyperplane normals; the sign of each projection is one hash bit.
    random_hyperplanes = rng.standard_normal((d, hyperplane_count))

    def bucketify(x: np.ndarray) -> np.ndarray:
        bits = (x @ random_hyperplanes >= 0).astype(int)
        # Pack each group of M AND-ed bits into a single integer bucket id.
        powers = 2 ** np.arange(M).reshape(-1, 1)  # powers of two
        groups = np.split(bits, L)
        return np.hstack([group @ powers for group in groups])

    return bucketify
The provided code snippet includes necessary dependencies for implementing the `knn_lsh_classifier_train` function. Write a Python function `def knn_lsh_classifier_train( data: pw.Table[DataPoint], L: int, type: DistanceTypes = "euclidean", **kwargs, )` to solve the following problem:
Build the LSH index over data. L the number of repetitions of the LSH scheme. Returns a LSH projector of type (queries: Table, k:Any) -> Table
Here is the function:
def knn_lsh_classifier_train(
    data: pw.Table[DataPoint],
    L: int,
    type: DistanceTypes = "euclidean",
    **kwargs,
):
    """
    Build the LSH index over data.
    L the number of repetitions of the LSH scheme.
    Returns a LSH projector of type (queries: Table, k:Any) -> Table

    kwargs must contain `d` and `M` (and `A` for the euclidean variant).
    """
    if type == "euclidean":
        projector = generate_euclidean_lsh_bucketer(
            kwargs["d"], kwargs["M"], L, kwargs["A"]
        )
        distance = _euclidean_distance
    elif type == "cosine":
        projector = generate_cosine_lsh_bucketer(kwargs["d"], kwargs["M"], L)
        distance = compute_cosine_dist
    else:
        raise ValueError(
            f"Not supported `type` {type} in knn_lsh_classifier_train. "
            "The allowed values are 'euclidean' and 'cosine'."
        )
    return knn_lsh_generic_classifier_train(data, projector, distance, L)
166,618 | from __future__ import annotations
import fnmatch
import logging
from statistics import mode
from typing import Literal
import jmespath
import jmespath.functions
import numpy as np
import pathway.internals as pw
from pathway.internals.helpers import StableSet
from pathway.stdlib.utils.col import groupby_reduce_majority, unpack_col
from ._lsh import generate_cosine_lsh_bucketer, generate_euclidean_lsh_bucketer
def _globmatch_impl(pat_i, pat_n, pattern, p_i, p_n, path):
"""Match pattern to path, recursively expanding **."""
if pat_i == pat_n:
return p_i == p_n
if p_i == p_n:
return False
if pattern[pat_i] == "**":
return _globmatch_impl(
pat_i, pat_n, pattern, p_i + 1, p_n, path
) or _globmatch_impl(pat_i + 1, pat_n, pattern, p_i, p_n, path)
if fnmatch.fnmatch(path[p_i], pattern[pat_i]):
return _globmatch_impl(pat_i + 1, pat_n, pattern, p_i + 1, p_n, path)
return False
The provided code snippet includes necessary dependencies for implementing the `_globmatch` function. Write a Python function `def _globmatch(pattern, path)` to solve the following problem:
globmatch path to pattern, using fnmatch at every level.
Here is the function:
def _globmatch(pattern, path):
    """globmatch path to pattern, using fnmatch at every level.

    Both `pattern` and `path` are '/'-separated; a '**' segment in the
    pattern may span any number of path levels.
    """
    # Typo fix in docstring ("patter") and removal of trailing extraction residue.
    pattern_parts = pattern.split("/")
    path_parts = path.split("/")
    return _globmatch_impl(
        0, len(pattern_parts), pattern_parts, 0, len(path_parts), path_parts
    )
166,619 | from __future__ import annotations
import fnmatch
import logging
from statistics import mode
from typing import Literal
import jmespath
import jmespath.functions
import numpy as np
import pathway.internals as pw
from pathway.internals.helpers import StableSet
from pathway.stdlib.utils.col import groupby_reduce_majority, unpack_col
from ._lsh import generate_cosine_lsh_bucketer, generate_euclidean_lsh_bucketer
def _euclidean_distance(data_table: np.ndarray, query_table: np.ndarray) -> np.ndarray:
return np.sum((data_table - query_table) ** 2, axis=1)
def knn_lsh_generic_classifier_train(
    data: pw.Table, lsh_projection, distance_function, L: int
):
    """
    Build the LSH index over data using a generic lsh_projection and its associated distance.
    L the number of repetitions of the LSH scheme.
    Returns a LSH projector of type (queries: Table, k:Any) -> Table
    """

    def make_band_col_name(i):
        # Column name holding the bucket id of band i.
        return f"band_{i}"

    band_col_names = [make_band_col_name(i) for i in range(L)]
    # Step 1: bucket every data point and unpack one column per band.
    data += data.select(buckets=pw.apply(lsh_projection, data.data))
    # Fix "UserWarning: Object in (<table1>.buckets)[8] is of type numpy.ndarray
    # but its number of dimensions is not known." when calling unpack_col with ndarray.
    # pw.cast not working and unpack_col doesnt take pw.apply so I used pw.apply separately.
    buckets_list = data.select(buckets=pw.apply(list, data.buckets))
    data += unpack_col(buckets_list.buckets, *band_col_names)
    if "metadata" not in data._columns:
        data += data.select(metadata=None)

    def lsh_perform_query(
        queries: pw.Table, k: int | None = None, with_distances=False
    ) -> pw.Table:
        # Bucket the queries the same way as the data.
        queries += queries.select(buckets=pw.apply(lsh_projection, queries.data))
        if k is not None:
            queries += queries.select(k=k)
        # Same Fix "UserWarning: ... above"
        buckets_list = queries.select(buckets=pw.apply(list, queries.buckets))
        queries += unpack_col(buckets_list.buckets, *band_col_names)
        # step 2: for each query, take union of matching databuckets
        result = queries
        for band_i in range(L):
            # All data ids falling in each bucket of this band.
            band = data.groupby(data[make_band_col_name(band_i)]).reduce(
                data[make_band_col_name(band_i)],
                items=pw.reducers.sorted_tuple(data.id),
            )
            # Default to an empty tuple for queries whose bucket has no data.
            result += queries.select(**{f"items_{band_i}": ()}).update_rows(
                queries.join(
                    band,
                    queries[make_band_col_name(band_i)]
                    == band[make_band_col_name(band_i)],
                    id=queries.id,
                ).select(
                    **{f"items_{band_i}": band.items},
                )
            )

        def merge_buckets(*tuples: list[tuple]) -> tuple:
            # Deduplicate candidate ids while preserving first-seen order.
            return tuple(StableSet(sum(tuples, ())))

        if "metadata_filter" not in result._columns:
            result += result.select(metadata_filter=None)
        # Drop queries with no candidates at all.
        flattened = result.select(
            result.data,
            query_id=result.id,
            ids=pw.apply(merge_buckets, *[result[f"items_{i}"] for i in range(L)]),
            k=result.k,
            metadata_filter=result.metadata_filter,
        ).filter(pw.this.ids != ())

        # step 3: find knns in unioned buckets
        class compute_knns_transformer:
            class training_data(pw.ClassArg):
                data = pw.input_attribute()
                metadata = pw.input_attribute()

            class flattened(pw.ClassArg):
                data = pw.input_attribute()
                query_id = pw.input_attribute()
                ids = pw.input_attribute()
                k = pw.input_attribute()
                metadata_filter = pw.input_attribute()

            def knns(self) -> list[tuple[pw.Pointer, float]]:
                querypoint = self.data
                for id_candidate in self.ids:
                    try:
                        self.transformer.training_data[id_candidate].data
                        self.transformer.training_data[id_candidate].metadata
                    except Exception:
                        raise
                    except BaseException:
                        # Used to prefetch values
                        # NOTE(review): presumably swallows a non-Exception
                        # control-flow signal raised by pathway during
                        # prefetching — confirm; it also silences
                        # KeyboardInterrupt/SystemExit here.
                        pass
                try:
                    # Keep candidates passing the (optional) JMESPath metadata filter.
                    # NOTE(review): `_glob_options` is defined elsewhere in the
                    # module (not visible in this chunk).
                    candidates = [
                        (
                            id_candidate,
                            self.transformer.training_data[id_candidate].data,
                        )
                        for id_candidate in self.ids
                        if self.metadata_filter is None
                        or jmespath.search(
                            self.metadata_filter,
                            self.transformer.training_data[
                                id_candidate
                            ].metadata.value,
                            options=_glob_options,
                        )
                        is True
                    ]
                except jmespath.exceptions.JMESPathError:
                    logging.exception(
                        "Incorrect JMESPath expression for metadata filter"
                    )
                    candidates = []
                if len(candidates) == 0:
                    return []
                ids_filtered, data_candidates_filtered = zip(*candidates)
                data_candidates = np.array(data_candidates_filtered)
                neighs = min(self.k, len(data_candidates))
                distances = distance_function(data_candidates, querypoint)
                knn_ids = np.argpartition(
                    distances,
                    neighs - 1,
                )[
                    :neighs
                ]  # neighs - 1 in argpartition, because of 0-based indexing
                k_distances = distances[knn_ids]
                final_ids = np.array(ids_filtered)[knn_ids]
                if len(final_ids) == 0:
                    return []
                # argpartition gives no ordering; sort the k winners by distance.
                sorted_dists, sorted_ids_by_dist = zip(
                    *sorted(zip(k_distances, final_ids))
                )
                return list(zip(sorted_ids_by_dist, sorted_dists))

        knn_result: pw.Table = compute_knns_transformer(  # type: ignore
            training_data=data, flattened=flattened
        ).flattened.select(flattened.query_id, knns_ids_with_dists=pw.this.knns)
        # Re-attach queries that had no candidates, with an empty result.
        knn_result_with_empty_results = queries.join_left(
            knn_result, queries.id == knn_result.query_id
        ).select(knn_result.knns_ids_with_dists, query_id=queries.id)
        result = knn_result_with_empty_results.with_columns(
            knns_ids_with_dists=pw.coalesce(pw.this.knns_ids_with_dists, ()),
        )
        if not with_distances:
            # Strip distances, keeping only the neighbor id tuple.
            result = result.select(
                pw.this.query_id,
                knns_ids=pw.apply(
                    lambda x: tuple(zip(*x))[0] if len(x) > 0 else (),
                    pw.this.knns_ids_with_dists,
                ),
            )
        return result

    return lsh_perform_query
def generate_euclidean_lsh_bucketer(d: int, M: int, L: int, A=1.0, seed=0):
    """Locality-sensitive hashing in the Euclidean space.
    See e.g. http://madscience.ucsd.edu/notes/lec9.pdf
    d - number of dimensions in the data
    M - number of ANDS
    L - number of ORS
    A - bucket length (after projecting on a line)

    Returns a function mapping a d-dimensional vector to L bucket ids.
    """
    gen = np.random.default_rng(seed=seed)
    total_lines = M * L
    # generate random unit vectors
    random_lines = gen.standard_normal((d, total_lines))
    random_lines = random_lines / np.linalg.norm(random_lines, axis=0)
    # Random per-line offsets in [0, A) so bucket boundaries are not aligned.
    shift = gen.random(size=total_lines) * A

    def bucketify(x: np.ndarray) -> np.ndarray:
        buckets = np.floor_divide(x @ random_lines + shift, A).astype(
            int
        )  # project on lines
        # Collapse each group of M AND-ed bucket ids into one fingerprint.
        # NOTE(review): `fingerprint` is not defined/imported in this chunk —
        # presumably a pathway helper; confirm.
        split = np.split(buckets, L)
        return np.hstack([fingerprint(X, format="i32") for X in split])

    return bucketify
The provided code snippet includes necessary dependencies for implementing the `knn_lsh_euclidean_classifier_train` function. Write a Python function `def knn_lsh_euclidean_classifier_train(data: pw.Table, d, M, L, A)` to solve the following problem:
Build the LSH index over data using the Euclidean distances. d is the dimension of the data, L the number of repetition of the LSH scheme, M and A are specific to LSH with Euclidean distance, M is the number of random projections done to create each bucket and A is the width of each bucket on each projection.
Here is the function:
def knn_lsh_euclidean_classifier_train(data: pw.Table, d, M, L, A):
    """
    Build the LSH index over data using the Euclidean distances.
    d is the dimension of the data, L the number of repetition of the LSH scheme,
    M and A are specific to LSH with Euclidean distance, M is the number of random projections
    done to create each bucket and A is the width of each bucket on each projection.
    """
    bucketer = generate_euclidean_lsh_bucketer(d, M, L, A)
    return knn_lsh_generic_classifier_train(data, bucketer, _euclidean_distance, L)
166,620 | from __future__ import annotations
import fnmatch
import logging
from statistics import mode
from typing import Literal
import jmespath
import jmespath.functions
import numpy as np
import pathway.internals as pw
from pathway.internals.helpers import StableSet
from pathway.stdlib.utils.col import groupby_reduce_majority, unpack_col
from ._lsh import generate_cosine_lsh_bucketer, generate_euclidean_lsh_bucketer
class take_majority_label:
    """Pathway transformer giving each query the most common label among its knns.

    NOTE(review): in upstream pathway code such classes usually carry a
    @pw.transformer decorator; none is visible in this chunk — confirm.
    """

    class labels(pw.ClassArg):
        label = pw.input_attribute()

    class knn_table(pw.ClassArg):
        knns_ids = pw.input_attribute()

    def predicted_label(self):
        try:
            # Touch each neighbor's label once so pathway can prefetch it.
            for x in self.knns_ids:
                self.transformer.labels[x].label
        except Exception:
            raise
        except BaseException:
            # Used to prefetch values
            # NOTE(review): presumably swallows a non-Exception control-flow
            # signal raised by pathway during prefetching — confirm.
            pass
        if self.knns_ids != ():
            # Majority vote; statistics.mode returns the first most-common value.
            return mode(self.transformer.labels[x].label for x in self.knns_ids)
        else:
            # No neighbors retrieved for this query.
            return None
The provided code snippet includes necessary dependencies for implementing the `knn_lsh_classify` function. Write a Python function `def knn_lsh_classify(knn_model, data_labels: pw.Table, queries: pw.Table, k)` to solve the following problem:
Classify the queries. Use the knn_model to extract the k closest datapoints. The queries are then labeled using a majority vote between the labels of the retrieved datapoints, using the labels provided in data_labels.
Here is the function:
def knn_lsh_classify(knn_model, data_labels: pw.Table, queries: pw.Table, k):
    """Label each query by majority vote among its k nearest neighbours.

    ``knn_model`` retrieves, for every query, the ids of the ``k`` closest
    datapoints; their labels (looked up in ``data_labels``) are then
    aggregated with a majority vote.
    """
    neighbours = knn_model(queries, k)
    voted = take_majority_label(labels=data_labels, knn_table=neighbours)  # type: ignore
    return (
        voted.knn_table.with_id(neighbours.query_id)
        .update_types(predicted_label=data_labels.typehints()["label"])
    )
166,621 | from __future__ import annotations
import fnmatch
import logging
from statistics import mode
from typing import Literal
import jmespath
import jmespath.functions
import numpy as np
import pathway.internals as pw
from pathway.internals.helpers import StableSet
from pathway.stdlib.utils.col import groupby_reduce_majority, unpack_col
from ._lsh import generate_cosine_lsh_bucketer, generate_euclidean_lsh_bucketer
def groupby_reduce_majority(
column_group: pw.ColumnReference, column_val: pw.ColumnReference
):
def process_knn_no_transformers(data_labels, knns):
    """Majority-vote labelling of queries, expressed without pw transformers."""
    # One row per (query, neighbour) pair, carrying the neighbour's label.
    flattened = knns.flatten(pw.this.knns_ids, row_id=pw.this.id)
    per_neighbour = flattened.join(knns, pw.this.row_id == knns.id).select(
        knns.query_id, data_labels.ix(pw.this.knns_ids).label
    )
    # Most frequent label per query, re-keyed by the query id.
    majority = groupby_reduce_majority(per_neighbour.query_id, per_neighbour.label)
    return majority.select(predicted_label=majority.majority).with_id(
        majority.query_id
    )
166,622 | from __future__ import annotations
import fnmatch
import logging
from statistics import mode
from typing import Literal
import jmespath
import jmespath.functions
import numpy as np
import pathway.internals as pw
from pathway.internals.helpers import StableSet
from pathway.stdlib.utils.col import groupby_reduce_majority, unpack_col
from ._lsh import generate_cosine_lsh_bucketer, generate_euclidean_lsh_bucketer
class substitute_with_labels:
    # NOTE(review): presumably decorated with @pw.transformer in the original
    # file; the decorator is not visible in this chunk — verify.
    class labels(pw.ClassArg):
        # Label attached to every datapoint.
        label = pw.input_attribute()

    class knn_table(pw.ClassArg):
        # Tuple of ids of the k closest datapoints found for a query.
        knns_ids = pw.input_attribute()

        def knn_labels(self):
            # Replace each neighbour id with that neighbour's label.
            return tuple(self.transformer.labels[x].label for x in self.knns_ids)
class compute_mode_empty_to_none:
    # NOTE(review): presumably decorated with @pw.transformer in the original
    # file; the decorator is not visible in this chunk — verify.
    class data(pw.ClassArg):
        # Tuple of values whose most frequent element is wanted.
        items = pw.input_attribute()

        def mode(self):
            # Most frequent element of ``items``, or None for an empty tuple
            # (statistics.mode raises on empty input).
            if self.items != ():
                return mode(self.items)
            else:
                return None
def process_knn_two_transformers(data_labels, knns):
    """Majority-vote labelling of queries using two pw transformers."""
    # Step 1: turn neighbour ids into tuples of neighbour labels, keyed by query.
    substituted = substitute_with_labels(labels=data_labels, knn_table=knns)  # type: ignore
    labelled = substituted.knn_table.select(labels=pw.this.knn_labels).with_id(
        knns.query_id
    )
    # Step 2: reduce each label tuple to its mode (None when empty).
    modes = compute_mode_empty_to_none(labelled.select(items=labelled.labels))  # type: ignore
    result: pw.Table = modes.data.select(predicted_label=pw.this.mode).with_id(
        labelled.id
    )
    return result
166,623 | from collections.abc import Callable
from typing import Any
import pandas as pd
import panel as pn
from bokeh.models import ColumnDataSource, Plot
import pathway as pw
from pathway.internals import api, parse_graph
from pathway.internals.graph_runner import GraphRunner
from pathway.internals.monitoring import MonitoringLevel
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.table_subscription import subscribe as internal_subscribe
from pathway.internals.trace import trace_user_frame
def _in_notebook():
try:
from IPython import get_ipython # noqa
if "IPKernelApp" not in get_ipython().config: # noqa
return False
except ImportError:
return False
except AttributeError:
return False
return True
class GraphRunner:
    """Runs evaluation of ParseGraph."""

    _graph: graph.ParseGraph
    debug: bool
    ignore_asserts: bool
    runtime_typechecking: bool
    telemetry: telemetry.Telemetry

    def __init__(
        self,
        input_graph: graph.ParseGraph,
        *,
        debug: bool = False,
        ignore_asserts: bool | None = None,
        monitoring_level: MonitoringLevel = MonitoringLevel.AUTO,
        with_http_server: bool = False,
        default_logging: bool = True,
        persistence_config: PersistenceConfig | None = None,
        runtime_typechecking: bool | None = None,
        license_key: str | None = None,
    ) -> None:
        # Options left as None fall back to the global pathway_config.
        self._graph = input_graph
        self.debug = debug
        if ignore_asserts is None:
            ignore_asserts = pathway_config.ignore_asserts
        self.ignore_asserts = ignore_asserts
        self.monitoring_level = monitoring_level
        self.with_http_server = with_http_server
        self.default_logging = default_logging
        self.persistence_config = persistence_config or pathway_config.replay_config
        if runtime_typechecking is None:
            self.runtime_typechecking = pathway_config.runtime_typechecking
        else:
            self.runtime_typechecking = runtime_typechecking
        if license_key is None:
            license_key = pathway_config.license_key
        self.license_key = license_key
        self.telemetry = telemetry.Telemetry.create(
            license_key=self.license_key,
            telemetry_server=pathway_config.telemetry_server,
        )

    def run_nodes(
        self,
        nodes: Iterable[Operator],
        /,
        *,
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
    ):
        # Run the given nodes together with everything they depend on.
        all_nodes = self._tree_shake(self._graph.global_scope, nodes)
        self._run(all_nodes, after_build=after_build)

    def run_tables(
        self,
        *tables: table.Table,
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
    ) -> list[api.CapturedStream]:
        # Evaluate only the subgraph needed to produce ``tables`` and
        # capture their output streams.
        nodes = self.tree_shake_tables(self._graph.global_scope, tables)
        return self._run(nodes, output_tables=tables, after_build=after_build)

    def run_all(
        self,
        *,
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
    ) -> None:
        # Evaluate every normal node of the global scope.
        self._run(
            self._graph.global_scope.normal_nodes, after_build=after_build, run_all=True
        )

    def run_outputs(
        self,
        *,
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
    ) -> None:
        # Evaluate only the output (sink) nodes and their dependencies.
        self.run_nodes(self._graph.global_scope.output_nodes, after_build=after_build)

    def has_bounded_input(self, table: table.Table) -> bool:
        # True iff every input source feeding ``table`` is bounded (static).
        nodes = self.tree_shake_tables(self._graph.global_scope, [table])
        for node in nodes:
            if isinstance(node, InputOperator) and not node.datasource.is_bounded():
                return False
        return True

    def _run(
        self,
        nodes: Iterable[Operator],
        /,
        *,
        output_tables: Collection[table.Table] = (),
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
        run_all: bool = False,
    ) -> list[api.CapturedStream]:
        with self.telemetry.tracer.start_as_current_span("graph_runner.run"):
            trace_context, trace_parent = telemetry.get_current_context()
            context = ScopeContext(
                nodes=StableSet(nodes),
                runtime_typechecking=self.runtime_typechecking,
                run_all=run_all,
            )
            storage_graph = OperatorStorageGraph.from_scope_context(
                context, self, output_tables
            )
            # NOTE(review): the lines below appear garbled — they look like the
            # argument list of a telemetry span call (e.g.
            # ``tracer.start_as_current_span("graph_runner.build", ...)``)
            # whose opening line is missing from this chunk; verify against
            # the original file.
            "graph_runner.build",
                context=trace_context,
                attributes=dict(
                    graph=repr(self._graph),
                    debug=self.debug,
                ),
            )

            def logic(
                scope: api.Scope,
                /,
                *,
                storage_graph: OperatorStorageGraph = storage_graph,
                output_tables: Collection[table.Table] = output_tables,
            ) -> list[tuple[api.Table, list[ColumnPath]]]:
                # Build callback executed by the engine inside a fresh scope.
                state = ScopeState(scope)
                storage_graph.build_scope(scope, state, self)
                if after_build is not None:
                    after_build(state, storage_graph)
                return storage_graph.get_output_tables(output_tables, state)

            node_names = [
                (operator.id, operator.label())
                for operator in context.nodes
                if isinstance(operator, ContextualizedIntermediateOperator)
            ]
            monitoring_level = self.monitoring_level.to_internal()
            with (
                new_event_loop() as event_loop,
                monitor_stats(
                    monitoring_level, node_names, self.default_logging
                ) as stats_monitor,
                self.telemetry.with_logging_handler(),
                get_persistence_engine_config(
                    self.persistence_config
                ) as persistence_engine_config,
            ):
                try:
                    return api.run_with_new_graph(
                        logic,
                        event_loop=event_loop,
                        ignore_asserts=self.ignore_asserts,
                        stats_monitor=stats_monitor,
                        monitoring_level=monitoring_level,
                        with_http_server=self.with_http_server,
                        persistence_config=persistence_engine_config,
                        license_key=self.license_key,
                        telemetry_server=pathway_config.telemetry_server,
                        trace_parent=trace_parent,
                    )
                except api.EngineErrorWithTrace as e:
                    # Attach a user-level stack frame to engine errors and
                    # re-raise without the internal traceback context.
                    error, frame = e.args
                    if frame is not None:
                        trace.add_pathway_trace_note(
                            error,
                            trace.Frame(
                                filename=frame.file_name,
                                line_number=frame.line_number,
                                line=frame.line,
                                function=frame.function,
                            ),
                        )
                    raise error from None

    def tree_shake_tables(
        self, graph_scope: graph.Scope, tables: Iterable[table.Table]
    ) -> StableSet[Operator]:
        # Operators required to compute the given tables.
        starting_nodes = (table._source.operator for table in tables)
        return self._tree_shake(graph_scope, starting_nodes)

    def _tree_shake(
        self,
        graph_scope: graph.Scope,
        starting_nodes: Iterable[Operator],
    ) -> StableSet[Operator]:
        # In debug mode the debug-only nodes are kept alive as well.
        if self.debug:
            starting_nodes = chain(starting_nodes, graph_scope.debug_nodes)
        nodes = StableSet(graph_scope.relevant_nodes(starting_nodes))
        return nodes
class MonitoringLevel(Enum):
    """Specifies a verbosity of Pathway monitoring mechanism."""

    AUTO = 0
    """
    Automatically sets IN_OUT in an interactive terminal and jupyter notebook.
    Sets NONE otherwise.
    """
    AUTO_ALL = 1
    """
    Automatically sets ALL in an interactive terminal and jupyter notebook.
    Sets NONE otherwise.
    """
    NONE = 2
    """No monitoring."""
    IN_OUT = 3
    """
    Monitor input connectors and input and output latency. The latency is measured as
    the difference between the time when the operator processed the data and the time
    when pathway acquired the data.
    """
    ALL = 4
    """
    Monitor input connectors and latency for each operator in the execution graph. The
    latency is measured as the difference between the time when the operator processed
    the data and the time when pathway acquired the data.
    """

    def to_internal(self) -> api.MonitoringLevel:
        """Map this user-facing level onto the engine's monitoring level."""
        # The AUTO variants degrade to NONE outside an interactive session.
        if (
            self in {MonitoringLevel.AUTO, MonitoringLevel.AUTO_ALL}
            and _disable_monitoring_when_auto()
        ):
            return api.MonitoringLevel.NONE
        return {
            MonitoringLevel.AUTO: api.MonitoringLevel.IN_OUT,
            MonitoringLevel.AUTO_ALL: api.MonitoringLevel.ALL,
            MonitoringLevel.NONE: api.MonitoringLevel.NONE,
            MonitoringLevel.IN_OUT: api.MonitoringLevel.IN_OUT,
            MonitoringLevel.ALL: api.MonitoringLevel.ALL,
        }[self]
The provided code snippet includes necessary dependencies for implementing the `plot` function. Write a Python function `def plot( self: pw.Table, plotting_function: Callable[[ColumnDataSource], Plot], sorting_col=None, ) -> pn.Column` to solve the following problem:
Allows for plotting contents of the table visually in e.g. jupyter. If the table depends only on the bounded data sources, the plot will be generated right away. Otherwise (in streaming scenario), the plot will be auto-updating after running pw.run() Args: self (pw.Table): a table serving as a source of data plotting_function (Callable[[ColumnDataSource], Plot]): function for creating plot from ColumnDataSource Returns: pn.Column: visualization which can be displayed immediately or passed as a dashboard widget Example: >>> import pathway as pw >>> from bokeh.plotting import figure >>> def func(source): ... plot = figure(height=400, width=400, title="CPU usage over time") ... plot.scatter('a', 'b', source=source, line_width=3, line_alpha=0.6) ... return plot >>> viz = pw.debug.table_from_pandas(pd.DataFrame({"a":[1,2,3],"b":[3,1,2]})).plot(func) >>> type(viz) <class 'panel.layout.base.Column'>
Here is the function:
def plot(
    self: pw.Table,
    plotting_function: Callable[[ColumnDataSource], Plot],
    sorting_col=None,
) -> pn.Column:
    """
    Allows for plotting contents of the table visually in e.g. jupyter. If the table
    depends only on the bounded data sources, the plot will be generated right away.
    Otherwise (in streaming scenario), the plot will be auto-updating after running pw.run()
    Args:
        self (pw.Table): a table serving as a source of data
        plotting_function (Callable[[ColumnDataSource], Plot]): function for creating plot
            from ColumnDataSource
        sorting_col: optional column name used to sort rows before plotting;
            by default rows are sorted by id.
    Returns:
        pn.Column: visualization which can be displayed immediately or passed as a dashboard widget
    Example:
    >>> import pathway as pw
    >>> from bokeh.plotting import figure
    >>> def func(source):
    ...     plot = figure(height=400, width=400, title="CPU usage over time")
    ...     plot.scatter('a', 'b', source=source, line_width=3, line_alpha=0.6)
    ...     return plot
    >>> viz = pw.debug.table_from_pandas(pd.DataFrame({"a":[1,2,3],"b":[3,1,2]})).plot(func)
    >>> type(viz)
    <class 'panel.layout.base.Column'>
    """
    col_names = self.schema.column_names()
    gr = GraphRunner(parse_graph.G, debug=False, monitoring_level=MonitoringLevel.NONE)
    bounded = gr.has_bounded_input(self)
    # Empty data source, filled either once (bounded) or incrementally (streaming).
    source = ColumnDataSource(data={colname: [] for colname in col_names})
    plot = plotting_function(source)
    viz = pn.Column(
        pn.Row(
            "Static preview" if bounded else "Streaming mode",
            pn.widgets.TooltipIcon(
                value=(
                    "Immediate table preview is possible as the table depends only on static inputs"
                    if bounded
                    else "Table depends on streaming inputs. Please run pw.run()"
                )
            ),
        ),
        plot,
    )
    if bounded:
        # Static case: run the graph now and push all rows to the plot at once.
        [captured] = gr.run_tables(self)
        output_data = api.squash_updates(captured)
        keys = list(output_data.keys())
        if sorting_col:
            sorting_i = list(self._columns.keys()).index(sorting_col)
            keys.sort(key=lambda k: output_data[k][sorting_i])  # type: ignore
        dict_data = {
            name: [output_data[key][index] for key in keys]
            for index, name in enumerate(self._columns.keys())
        }
        source.stream(dict_data, rollover=len(output_data))  # type: ignore
    else:
        # Streaming case: maintain an integrated snapshot of the table and
        # re-stream it into the plot on every change.
        integrated: dict[api.Pointer, Any] = {}
        in_notebook = _in_notebook()

        def stream_updates():
            # Rebuild the full frame from the snapshot and replace the plot
            # contents (rollover drops previously streamed rows).
            df = pd.DataFrame.from_dict(integrated, orient="index", columns=col_names)
            if sorting_col:
                df = df.sort_values(sorting_col)
            else:
                df = df.sort_index()
            df = df.reset_index(drop=True)
            source.stream(
                df.to_dict("list"), rollover=len(df)  # type:ignore[arg-type]
            )
            if in_notebook:
                pn.io.push_notebook(viz)

        def _update(key, row, time, is_addition):
            # Apply a single row addition/deletion to the snapshot.
            if is_addition:
                integrated[key] = row
            else:
                del integrated[key]
            if plot.document is not None:
                # Schedule on the bokeh session loop when one is attached;
                # otherwise update synchronously.
                if plot.document.session_context:
                    plot.document.add_next_tick_callback(stream_updates)
                else:
                    stream_updates()

        internal_subscribe(self, on_change=_update, skip_persisted_batch=True)
        pn.state.on_session_created(lambda _: stream_updates())
    return viz
166,624 | import os
import pandas as pd
import panel as pn
import pathway as pw
from pathway.internals import api, parse_graph
from pathway.internals.graph_runner import GraphRunner
from pathway.internals.monitoring import MonitoringLevel
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.table_subscription import subscribe as internal_subscribe
from pathway.internals.trace import trace_user_frame
def show(
self: pw.Table, *, snapshot=True, include_id=True, short_pointers=True, sorters=None
) -> pn.Column:
def _repr_mimebundle_(self: pw.Table, include, exclude):
    """Render the table in notebook frontends via its snapshot visualization."""
    viz = self.show(snapshot=True)
    return viz._repr_mimebundle_(include, exclude)
166,625 | from collections.abc import Callable
import pathway as pw
from pathway.internals.table import T, TSchema
TSchema = TypeVar("TSchema", bound=Schema)
T = TypeVar("T", bound=api.Value)
The provided code snippet includes necessary dependencies for implementing the `deduplicate` function. Write a Python function `def deduplicate( table: pw.Table[TSchema], *, col: pw.ColumnReference, instance: pw.ColumnExpression | None = None, acceptor: Callable[[T, T], bool], ) -> pw.Table[TSchema]` to solve the following problem:
Deduplicates rows in `table` on the `col` column using the acceptor function. It keeps rows which were accepted by the acceptor function. The acceptor operates on two arguments - the current value and the previously accepted value. Args: table (pw.Table[TSchema]): table to deduplicate col (pw.ColumnReference): column used for deduplication acceptor (Callable[[T, T], bool]): callback telling whether two values are different instance (pw.ColumnExpression, optional): Group column for which deduplication will be performed separately. Defaults to None. Returns: pw.Table[TSchema]:
Here is the function:
def deduplicate(
    table: pw.Table[TSchema],
    *,
    col: pw.ColumnReference,
    instance: pw.ColumnExpression | None = None,
    acceptor: Callable[[T, T], bool],
) -> pw.Table[TSchema]:
    """Drop rows of ``table`` that the acceptor rejects on column ``col``.

    A row is kept when the acceptor, called with the current value of ``col``
    and the previously accepted value, returns True.

    Args:
        table (pw.Table[TSchema]): table to deduplicate
        col (pw.ColumnReference): column used for deduplication
        acceptor (Callable[[T, T], bool]): callback telling whether two values
            are different
        instance (pw.ColumnExpression, optional): group column; deduplication
            is performed separately within each group. Defaults to None.

    Returns:
        pw.Table[TSchema]:
    """
    # Thin wrapper over the Table method of the same name.
    return table.deduplicate(value=col, instance=instance, acceptor=acceptor)
166,626 | from __future__ import annotations
import pathway.internals as pw
from pathway.internals import expression as expr
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableSubstitutionDesugaring,
desugar,
)
from pathway.internals.joins import validate_join_condition
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.thisclass import ThisMetaclass
from pathway.internals.trace import trace_user_frame
class AsofNowJoinResult(DesugaringContext):
    """Result of an asof now join between tables."""

    _original_left: pw.Table
    _left_with_forgetting: pw.Table
    _original_right: pw.Table
    _join_result: pw.JoinResult
    _table_substitution: dict[pw.TableLike, pw.Table]
    _mode: pw.JoinMode
    _id: expr.ColumnReference | None
    _substitution: dict[ThisMetaclass, pw.Joinable]

    def __init__(
        self,
        original_left: pw.Table,
        left: pw.Table,
        right: pw.Table,
        join_result: pw.JoinResult,
        table_substitution: dict[pw.TableLike, pw.Table],
        mode: pw.JoinMode,
        id: expr.ColumnReference | None,
    ):
        self._original_left = original_left
        self._left_with_forgetting = left
        self._original_right = right
        self._join_result = join_result
        self._table_substitution = table_substitution
        self._mode = mode
        self._id = id
        # Mapping used to desugar pw.left / pw.right / pw.this in expressions.
        self._substitution = {pw.left: left, pw.right: right, pw.this: join_result}

    # NOTE(review): defined without ``self`` — presumably decorated with
    # @staticmethod in the original file; the decorator is not visible here.
    def _asof_now_join(
        left: pw.Table,
        right: pw.Table,
        *on: expr.ColumnExpression,
        mode: pw.JoinMode,
        id: expr.ColumnReference | None,
        left_instance: expr.ColumnReference | None = None,
        right_instance: expr.ColumnReference | None = None,
    ) -> AsofNowJoinResult:
        # TODO assert that left is append-only
        if mode != pw.JoinMode.INNER and mode != pw.JoinMode.LEFT:
            raise ValueError(
                "asof_now_join can only use modes pathway.JoinMode.INNER or pathway.JoinMode.LEFT"
            )
        # Rows of ``left`` are joined at their processing time and then
        # immediately forgotten, so past results are never updated.
        left_with_forgetting = left._forget_immediately()
        if left_instance is not None and right_instance is not None:
            on = (*on, left_instance == right_instance)
        else:
            assert left_instance is None and right_instance is None
        # Rewrite the left side of each join condition (and the optional id)
        # to reference the forgetting variant of ``left``.
        for cond in on:
            cond_left, _, cond = validate_join_condition(cond, left, right)
            cond._left = left_with_forgetting[cond_left._name]
        if id is not None and id.table == left:
            id = left_with_forgetting[id._name]
        table_substitution: dict[pw.TableLike, pw.Table] = {
            left: left_with_forgetting,
        }
        join_result = left_with_forgetting.join(right, *on, id=id, how=mode)
        return AsofNowJoinResult(
            original_left=left,
            left=left_with_forgetting,
            right=right,
            join_result=join_result,
            table_substitution=table_substitution,
            mode=mode,
            id=id,
        )

    # NOTE(review): presumably a @property in the original file; the
    # decorator is not visible in this chunk.
    def _desugaring(self) -> TableSubstitutionDesugaring:
        return TableSubstitutionDesugaring(self._table_substitution)

    def select(
        self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
    ) -> pw.Table:
        """
        Computes a result of an asof now join.
        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.
        Returns:
            Table: Created table.
        Example:
        >>> import pathway as pw
        >>> data = pw.debug.table_from_markdown(
        ...     '''
        ...     id | value | instance | __time__ | __diff__
        ...     2 | 4 | 1 | 4 | 1
        ...     2 | 4 | 1 | 10 | -1
        ...     5 | 5 | 1 | 10 | 1
        ...     7 | 2 | 2 | 14 | 1
        ...     7 | 2 | 2 | 22 | -1
        ...     11 | 3 | 2 | 26 | 1
        ...     5 | 5 | 1 | 30 | -1
        ...     14 | 9 | 1 | 32 | 1
        ...     '''
        ... )
        >>> queries = pw.debug.table_from_markdown(
        ...     '''
        ...     value | instance | __time__
        ...     1 | 1 | 2
        ...     2 | 1 | 6
        ...     4 | 1 | 12
        ...     5 | 2 | 16
        ...     10 | 1 | 26
        ...     '''
        ... )
        >>> result = queries.asof_now_join(
        ...     data, pw.left.instance == pw.right.instance
        ... ).select(query=pw.left.value, ans=pw.right.value)
        >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
        query | ans | __time__ | __diff__
        2 | 4 | 6 | 1
        4 | 5 | 12 | 1
        5 | 2 | 16 | 1
        10 | 5 | 26 | 1
        """
        result = self._join_result.select(*args, **kwargs)
        # Drop the deletion events produced by the forget-immediately stage.
        result = result._filter_out_results_of_forgetting()
        if (
            self._id is not None
            and self._id._column == self._left_with_forgetting._id_column
        ):
            if self._mode == pw.JoinMode.INNER:
                pw.universes.promise_is_subset_of(result, self._original_left)
            elif self._mode == pw.JoinMode.LEFT:
                # FIXME if original_left is append-only (should be) then result is
                # also append-only (promise that). Then with_universe_of should be able
                # to operate in const memory.
                result = result.with_universe_of(self._original_left)
        return result
The provided code snippet includes necessary dependencies for implementing the `asof_now_join` function. Write a Python function `def asof_now_join( self: pw.Table, other: pw.Table, *on: pw.ColumnExpression, how: pw.JoinMode = pw.JoinMode.INNER, id: expr.ColumnReference | None = None, left_instance: pw.ColumnReference | None = None, right_instance: pw.ColumnReference | None = None, ) -> AsofNowJoinResult` to solve the following problem:
Performs asof now join of self with other using join expressions. Each row of self is joined with rows from other at a given processing time. Rows from self are not stored. They are joined with rows of other at their processing time. If other is updated in the future, rows from self from the past won't be updated. Rows from other are stored. They can be joined with future rows of self. Args: other: the right side of a join. on: a list of column expressions. Each must have == as the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. id: optional argument for id of result, can be only self.id or other.id how: by default, inner join is performed. Possible values are JoinMode.{INNER,LEFT} which correspond to inner and left join respectively. Returns: AsofNowJoinResult: an object on which `.select()` may be called to extract relevant columns from the result of the join. Example: >>> import pathway as pw >>> data = pw.debug.table_from_markdown( ... ''' ... id | value | instance | __time__ | __diff__ ... 2 | 4 | 1 | 4 | 1 ... 2 | 4 | 1 | 10 | -1 ... 5 | 5 | 1 | 10 | 1 ... 7 | 2 | 2 | 14 | 1 ... 7 | 2 | 2 | 22 | -1 ... 11 | 3 | 2 | 26 | 1 ... 5 | 5 | 1 | 30 | -1 ... 14 | 9 | 1 | 32 | 1 ... ''' ... ) >>> queries = pw.debug.table_from_markdown( ... ''' ... value | instance | __time__ ... 1 | 1 | 2 ... 2 | 1 | 6 ... 4 | 1 | 12 ... 5 | 2 | 16 ... 10 | 1 | 26 ... ''' ... ) >>> result = queries.asof_now_join( ... data, pw.left.instance == pw.right.instance, how=pw.JoinMode.LEFT ... ).select(query=pw.left.value, ans=pw.right.value) >>> pw.debug.compute_and_print_update_stream(result, include_id=False) query | ans | __time__ | __diff__ 1 | | 2 | 1 2 | 4 | 6 | 1 4 | 5 | 12 | 1 5 | 2 | 16 | 1 10 | 5 | 26 | 1
Here is the function:
def asof_now_join(
    self: pw.Table,
    other: pw.Table,
    *on: pw.ColumnExpression,
    how: pw.JoinMode = pw.JoinMode.INNER,
    id: expr.ColumnReference | None = None,
    left_instance: pw.ColumnReference | None = None,
    right_instance: pw.ColumnReference | None = None,
) -> AsofNowJoinResult:
    """
    Performs asof now join of self with other using join expressions. Each row of self
    is joined with rows from other at a given processing time. Rows from self are not stored.
    They are joined with rows of other at their processing time. If other is updated
    in the future, rows from self from the past won't be updated.
    Rows from other are stored. They can be joined with future rows of self.
    Args:
        other: the right side of a join.
        on: a list of column expressions. Each must have == as the top level operation
            and be of the form LHS: ColumnReference == RHS: ColumnReference.
        id: optional argument for id of result, can be only self.id or other.id
        how: by default, inner join is performed. Possible values are JoinMode.{INNER,LEFT}
            which correspond to inner and left join respectively.
        left_instance: optional column of ``self`` used as a grouping instance.
        right_instance: optional column of ``other`` used as a grouping instance.
    Returns:
        AsofNowJoinResult: an object on which `.select()` may be called to extract relevant
        columns from the result of the join.
    Example:
    >>> import pathway as pw
    >>> data = pw.debug.table_from_markdown(
    ...     '''
    ...     id | value | instance | __time__ | __diff__
    ...     2 | 4 | 1 | 4 | 1
    ...     2 | 4 | 1 | 10 | -1
    ...     5 | 5 | 1 | 10 | 1
    ...     7 | 2 | 2 | 14 | 1
    ...     7 | 2 | 2 | 22 | -1
    ...     11 | 3 | 2 | 26 | 1
    ...     5 | 5 | 1 | 30 | -1
    ...     14 | 9 | 1 | 32 | 1
    ...     '''
    ... )
    >>> queries = pw.debug.table_from_markdown(
    ...     '''
    ...     value | instance | __time__
    ...     1 | 1 | 2
    ...     2 | 1 | 6
    ...     4 | 1 | 12
    ...     5 | 2 | 16
    ...     10 | 1 | 26
    ...     '''
    ... )
    >>> result = queries.asof_now_join(
    ...     data, pw.left.instance == pw.right.instance, how=pw.JoinMode.LEFT
    ... ).select(query=pw.left.value, ans=pw.right.value)
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    query | ans | __time__ | __diff__
    1 | | 2 | 1
    2 | 4 | 6 | 1
    4 | 5 | 12 | 1
    5 | 2 | 16 | 1
    10 | 5 | 26 | 1
    """
    # Thin wrapper delegating to the AsofNowJoinResult construction helper.
    return AsofNowJoinResult._asof_now_join(
        self,
        other,
        *on,
        mode=how,
        id=id,
        left_instance=left_instance,
        right_instance=right_instance,
    )
166,627 | from __future__ import annotations
import pathway.internals as pw
from pathway.internals import expression as expr
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableSubstitutionDesugaring,
desugar,
)
from pathway.internals.joins import validate_join_condition
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.thisclass import ThisMetaclass
from pathway.internals.trace import trace_user_frame
class AsofNowJoinResult(DesugaringContext):
"""Result of an asof now join between tables."""
_original_left: pw.Table
_left_with_forgetting: pw.Table
_original_right: pw.Table
_join_result: pw.JoinResult
_table_substitution: dict[pw.TableLike, pw.Table]
_mode: pw.JoinMode
_id: expr.ColumnReference | None
_substitution: dict[ThisMetaclass, pw.Joinable]
def __init__(
self,
original_left: pw.Table,
left: pw.Table,
right: pw.Table,
join_result: pw.JoinResult,
table_substitution: dict[pw.TableLike, pw.Table],
mode: pw.JoinMode,
id: expr.ColumnReference | None,
):
self._original_left = original_left
self._left_with_forgetting = left
self._original_right = right
self._join_result = join_result
self._table_substitution = table_substitution
self._mode = mode
self._id = id
self._substitution = {pw.left: left, pw.right: right, pw.this: join_result}
def _asof_now_join(
left: pw.Table,
right: pw.Table,
*on: expr.ColumnExpression,
mode: pw.JoinMode,
id: expr.ColumnReference | None,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> AsofNowJoinResult:
# TODO assert that left is append-only
if mode != pw.JoinMode.INNER and mode != pw.JoinMode.LEFT:
raise ValueError(
"asof_now_join can only use modes pathway.JoinMode.INNER or pathway.JoinMode.LEFT"
)
left_with_forgetting = left._forget_immediately()
if left_instance is not None and right_instance is not None:
on = (*on, left_instance == right_instance)
else:
assert left_instance is None and right_instance is None
for cond in on:
cond_left, _, cond = validate_join_condition(cond, left, right)
cond._left = left_with_forgetting[cond_left._name]
if id is not None and id.table == left:
id = left_with_forgetting[id._name]
table_substitution: dict[pw.TableLike, pw.Table] = {
left: left_with_forgetting,
}
join_result = left_with_forgetting.join(right, *on, id=id, how=mode)
return AsofNowJoinResult(
original_left=left,
left=left_with_forgetting,
right=right,
join_result=join_result,
table_substitution=table_substitution,
mode=mode,
id=id,
)
def _desugaring(self) -> TableSubstitutionDesugaring:
    """Build the desugaring pass rewriting original-table references."""
    substitution = self._table_substitution
    return TableSubstitutionDesugaring(substitution)
def select(
    self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
) -> pw.Table:
    """
    Computes a result of an asof now join.

    Args:
        args: Column references.
        kwargs: Column expressions with their new assigned names.

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> data = pw.debug.table_from_markdown(
    ...     '''
    ...     id | value | instance | __time__ | __diff__
    ...     2 | 4 | 1 | 4 | 1
    ...     2 | 4 | 1 | 10 | -1
    ...     5 | 5 | 1 | 10 | 1
    ...     7 | 2 | 2 | 14 | 1
    ...     7 | 2 | 2 | 22 | -1
    ...     11 | 3 | 2 | 26 | 1
    ...     5 | 5 | 1 | 30 | -1
    ...     14 | 9 | 1 | 32 | 1
    ...     '''
    ... )
    >>> queries = pw.debug.table_from_markdown(
    ...     '''
    ...     value | instance | __time__
    ...     1 | 1 | 2
    ...     2 | 1 | 6
    ...     4 | 1 | 12
    ...     5 | 2 | 16
    ...     10 | 1 | 26
    ...     '''
    ... )
    >>> result = queries.asof_now_join(
    ...     data, pw.left.instance == pw.right.instance
    ... ).select(query=pw.left.value, ans=pw.right.value)
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    query | ans | __time__ | __diff__
    2 | 4 | 6 | 1
    4 | 5 | 12 | 1
    5 | 2 | 16 | 1
    10 | 5 | 26 | 1
    """
    result = self._join_result.select(*args, **kwargs)
    # Drop the retraction events produced by the forgetting mechanism so that
    # matched rows stay in the output.
    result = result._filter_out_results_of_forgetting()
    if (
        self._id is not None
        and self._id._column == self._left_with_forgetting._id_column
    ):
        # Result keeps the ids of the left table, so its key set relates to
        # the original left table's universe.
        if self._mode == pw.JoinMode.INNER:
            pw.universes.promise_is_subset_of(result, self._original_left)
        elif self._mode == pw.JoinMode.LEFT:
            # FIXME if original_left is append-only (should be) then result is
            # also append-only (promise that). Then with_universe_of should be able
            # to operate in const memory.
            result = result.with_universe_of(self._original_left)
    return result
The provided code snippet includes necessary dependencies for implementing the `asof_now_join_inner` function. Write a Python function `def asof_now_join_inner( self: pw.Table, other: pw.Table, *on: pw.ColumnExpression, id: expr.ColumnReference | None = None, left_instance: pw.ColumnReference | None = None, right_instance: pw.ColumnReference | None = None, ) -> AsofNowJoinResult` to solve the following problem:
Performs asof now join of self with other using join expressions. Each row of self is joined with rows from other at a given processing time. Rows from self are not stored. They are joined with rows of other at their processing time. If other is updated in the future, rows from self from the past won't be updated. Rows from other are stored. They can be joined with future rows of self. Args: other: the right side of a join. on: a list of column expressions. Each must have == as the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. id: optional argument for id of result, can be only self.id or other.id Returns: AsofNowJoinResult: an object on which `.select()` may be called to extract relevant columns from the result of the join. Example: >>> import pathway as pw >>> data = pw.debug.table_from_markdown( ... ''' ... id | value | instance | __time__ | __diff__ ... 2 | 4 | 1 | 4 | 1 ... 2 | 4 | 1 | 10 | -1 ... 5 | 5 | 1 | 10 | 1 ... 7 | 2 | 2 | 14 | 1 ... 7 | 2 | 2 | 22 | -1 ... 11 | 3 | 2 | 26 | 1 ... 5 | 5 | 1 | 30 | -1 ... 14 | 9 | 1 | 32 | 1 ... ''' ... ) >>> queries = pw.debug.table_from_markdown( ... ''' ... value | instance | __time__ ... 1 | 1 | 2 ... 2 | 1 | 6 ... 4 | 1 | 12 ... 5 | 2 | 16 ... 10 | 1 | 26 ... ''' ... ) >>> result = queries.asof_now_join_inner( ... data, pw.left.instance == pw.right.instance ... ).select(query=pw.left.value, ans=pw.right.value) >>> pw.debug.compute_and_print_update_stream(result, include_id=False) query | ans | __time__ | __diff__ 2 | 4 | 6 | 1 4 | 5 | 12 | 1 5 | 2 | 16 | 1 10 | 5 | 26 | 1
Here is the function:
def asof_now_join_inner(
    self: pw.Table,
    other: pw.Table,
    *on: pw.ColumnExpression,
    id: expr.ColumnReference | None = None,
    left_instance: pw.ColumnReference | None = None,
    right_instance: pw.ColumnReference | None = None,
) -> AsofNowJoinResult:
    """Perform an inner asof-now join of ``self`` with ``other``.

    Each row of ``self`` is joined with the rows of ``other`` present at its
    processing time, and is then forgotten: later updates to ``other`` never
    revisit past rows of ``self``.  Rows of ``other`` are stored and can match
    future rows of ``self``.

    Args:
        other: the right side of the join.
        on: equality conditions, each of the form
            ``LHS: ColumnReference == RHS: ColumnReference``.
        id: optional id of the result; may only be ``self.id`` or ``other.id``.
        left_instance/right_instance: optional partitioning columns.

    Returns:
        AsofNowJoinResult: call ``.select()`` on it to extract the relevant
        columns of the joined rows.
    """
    # Pure delegation; the INNER mode is the only thing fixed here.
    join_kwargs = {
        "id": id,
        "left_instance": left_instance,
        "right_instance": right_instance,
    }
    return AsofNowJoinResult._asof_now_join(
        self, other, *on, mode=pw.JoinMode.INNER, **join_kwargs
    )
166,628 | from __future__ import annotations
import pathway.internals as pw
from pathway.internals import expression as expr
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableSubstitutionDesugaring,
desugar,
)
from pathway.internals.joins import validate_join_condition
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.thisclass import ThisMetaclass
from pathway.internals.trace import trace_user_frame
class AsofNowJoinResult(DesugaringContext):
    """Result of an asof now join between tables."""

    # Left input as passed by the user (before forgetting is applied).
    _original_left: pw.Table
    # Left input wrapped with _forget_immediately(): each row is retracted
    # right after its processing time, so old left rows are never re-joined.
    _left_with_forgetting: pw.Table
    # Right input; its rows are stored and may match future left rows.
    _original_right: pw.Table
    # Underlying join of _left_with_forgetting with _original_right.
    _join_result: pw.JoinResult
    # Maps user-visible tables to their internal (forgetting) counterparts
    # so that expressions written against the originals can be desugared.
    _table_substitution: dict[pw.TableLike, pw.Table]
    # pw.JoinMode.INNER or pw.JoinMode.LEFT (the only supported modes).
    _mode: pw.JoinMode
    # Optional id column requested for the result, or None.
    _id: expr.ColumnReference | None
    # Resolution of pw.left / pw.right / pw.this inside select() expressions.
    _substitution: dict[ThisMetaclass, pw.Joinable]

    def __init__(
        self,
        original_left: pw.Table,
        left: pw.Table,
        right: pw.Table,
        join_result: pw.JoinResult,
        table_substitution: dict[pw.TableLike, pw.Table],
        mode: pw.JoinMode,
        id: expr.ColumnReference | None,
    ):
        # Store inputs verbatim; `left` is expected to be the forgetting
        # variant of `original_left` (see _asof_now_join below).
        self._original_left = original_left
        self._left_with_forgetting = left
        self._original_right = right
        self._join_result = join_result
        self._table_substitution = table_substitution
        self._mode = mode
        self._id = id
        # pw.left / pw.right / pw.this resolution for select() desugaring.
        self._substitution = {pw.left: left, pw.right: right, pw.this: join_result}

    # NOTE(review): no `self` parameter — behaves like a staticmethod;
    # presumably decorated with @staticmethod upstream — confirm.
    def _asof_now_join(
        left: pw.Table,
        right: pw.Table,
        *on: expr.ColumnExpression,
        mode: pw.JoinMode,
        id: expr.ColumnReference | None,
        left_instance: expr.ColumnReference | None = None,
        right_instance: expr.ColumnReference | None = None,
    ) -> AsofNowJoinResult:
        """Build an AsofNowJoinResult joining `left` with `right`.

        Wraps `left` with forgetting semantics, rewrites the join
        conditions (and the optional `id`) to point at the wrapped table,
        then performs an ordinary join in the requested mode.
        """
        # TODO assert that left is append-only
        if mode != pw.JoinMode.INNER and mode != pw.JoinMode.LEFT:
            raise ValueError(
                "asof_now_join can only use modes pathway.JoinMode.INNER or pathway.JoinMode.LEFT"
            )
        # Each left row is retracted immediately after being processed, so it
        # only joins with the right-side rows present at its processing time.
        left_with_forgetting = left._forget_immediately()
        if left_instance is not None and right_instance is not None:
            on = (*on, left_instance == right_instance)
        else:
            # Instance columns must be given for both sides or for neither.
            assert left_instance is None and right_instance is None
        for cond in on:
            cond_left, _, cond = validate_join_condition(cond, left, right)
            # Redirect the left side of the condition to the forgetting table
            # (mutates the condition object in place).
            cond._left = left_with_forgetting[cond_left._name]
        if id is not None and id.table == left:
            # The requested id column must also come from the forgetting table.
            id = left_with_forgetting[id._name]
        table_substitution: dict[pw.TableLike, pw.Table] = {
            left: left_with_forgetting,
        }
        join_result = left_with_forgetting.join(right, *on, id=id, how=mode)
        return AsofNowJoinResult(
            original_left=left,
            left=left_with_forgetting,
            right=right,
            join_result=join_result,
            table_substitution=table_substitution,
            mode=mode,
            id=id,
        )

    def _desugaring(self) -> TableSubstitutionDesugaring:
        # Rewrites references to the original tables into their internal
        # (forgetting) counterparts.
        return TableSubstitutionDesugaring(self._table_substitution)

    def select(
        self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
    ) -> pw.Table:
        """
        Computes a result of an asof now join.

        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.

        Returns:
            Table: Created table.

        Example:

        >>> import pathway as pw
        >>> data = pw.debug.table_from_markdown(
        ...     '''
        ...     id | value | instance | __time__ | __diff__
        ...     2 | 4 | 1 | 4 | 1
        ...     2 | 4 | 1 | 10 | -1
        ...     5 | 5 | 1 | 10 | 1
        ...     7 | 2 | 2 | 14 | 1
        ...     7 | 2 | 2 | 22 | -1
        ...     11 | 3 | 2 | 26 | 1
        ...     5 | 5 | 1 | 30 | -1
        ...     14 | 9 | 1 | 32 | 1
        ...     '''
        ... )
        >>> queries = pw.debug.table_from_markdown(
        ...     '''
        ...     value | instance | __time__
        ...     1 | 1 | 2
        ...     2 | 1 | 6
        ...     4 | 1 | 12
        ...     5 | 2 | 16
        ...     10 | 1 | 26
        ...     '''
        ... )
        >>> result = queries.asof_now_join(
        ...     data, pw.left.instance == pw.right.instance
        ... ).select(query=pw.left.value, ans=pw.right.value)
        >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
        query | ans | __time__ | __diff__
        2 | 4 | 6 | 1
        4 | 5 | 12 | 1
        5 | 2 | 16 | 1
        10 | 5 | 26 | 1
        """
        result = self._join_result.select(*args, **kwargs)
        # Drop the retraction events produced by the forgetting mechanism so
        # that matched rows stay in the output.
        result = result._filter_out_results_of_forgetting()
        if (
            self._id is not None
            and self._id._column == self._left_with_forgetting._id_column
        ):
            # Result keeps the ids of the left table, so its key set relates
            # to the original left table's universe.
            if self._mode == pw.JoinMode.INNER:
                pw.universes.promise_is_subset_of(result, self._original_left)
            elif self._mode == pw.JoinMode.LEFT:
                # FIXME if original_left is append-only (should be) then result is
                # also append-only (promise that). Then with_universe_of should be able
                # to operate in const memory.
                result = result.with_universe_of(self._original_left)
        return result
The provided code snippet includes necessary dependencies for implementing the `asof_now_join_left` function. Write a Python function `def asof_now_join_left( self: pw.Table, other: pw.Table, *on: pw.ColumnExpression, id: expr.ColumnReference | None = None, left_instance: pw.ColumnReference | None = None, right_instance: pw.ColumnReference | None = None, ) -> AsofNowJoinResult` to solve the following problem:
Performs asof now join of self with other using join expressions. Each row of self is joined with rows from other at a given processing time. If there are no matching rows in other, missing values on the right side are replaced with `None`. Rows from self are not stored. They are joined with rows of other at their processing time. If other is updated in the future, rows from self from the past won't be updated. Rows from other are stored. They can be joined with future rows of self. Args: other: the right side of a join. on: a list of column expressions. Each must have == as the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. id: optional argument for id of result, can be only self.id or other.id Returns: AsofNowJoinResult: an object on which `.select()` may be called to extract relevant columns from the result of the join. Example: >>> import pathway as pw >>> data = pw.debug.table_from_markdown( ... ''' ... id | value | instance | __time__ | __diff__ ... 2 | 4 | 1 | 4 | 1 ... 2 | 4 | 1 | 10 | -1 ... 5 | 5 | 1 | 10 | 1 ... 7 | 2 | 2 | 14 | 1 ... 7 | 2 | 2 | 22 | -1 ... 11 | 3 | 2 | 26 | 1 ... 5 | 5 | 1 | 30 | -1 ... 14 | 9 | 1 | 32 | 1 ... ''' ... ) >>> queries = pw.debug.table_from_markdown( ... ''' ... value | instance | __time__ ... 1 | 1 | 2 ... 2 | 1 | 6 ... 4 | 1 | 12 ... 5 | 2 | 16 ... 10 | 1 | 26 ... ''' ... ) >>> result = queries.asof_now_join_left( ... data, pw.left.instance == pw.right.instance ... ).select(query=pw.left.value, ans=pw.right.value) >>> pw.debug.compute_and_print_update_stream(result, include_id=False) query | ans | __time__ | __diff__ 1 | | 2 | 1 2 | 4 | 6 | 1 4 | 5 | 12 | 1 5 | 2 | 16 | 1 10 | 5 | 26 | 1
Here is the function:
def asof_now_join_left(
    self: pw.Table,
    other: pw.Table,
    *on: pw.ColumnExpression,
    id: expr.ColumnReference | None = None,
    left_instance: pw.ColumnReference | None = None,
    right_instance: pw.ColumnReference | None = None,
) -> AsofNowJoinResult:
    """Perform a left asof-now join of ``self`` with ``other``.

    Each row of ``self`` is joined with the rows of ``other`` present at its
    processing time; when no right row matches, the right-side columns are
    filled with ``None``.  Rows of ``self`` are then forgotten: later updates
    to ``other`` never revisit past rows of ``self``.  Rows of ``other`` are
    stored and can match future rows of ``self``.

    Args:
        other: the right side of the join.
        on: equality conditions, each of the form
            ``LHS: ColumnReference == RHS: ColumnReference``.
        id: optional id of the result; may only be ``self.id`` or ``other.id``.
        left_instance/right_instance: optional partitioning columns.

    Returns:
        AsofNowJoinResult: call ``.select()`` on it to extract the relevant
        columns of the joined rows.
    """
    # Pure delegation; the LEFT mode is the only thing fixed here.
    join_kwargs = {
        "id": id,
        "left_instance": left_instance,
        "right_instance": right_instance,
    }
    return AsofNowJoinResult._asof_now_join(
        self, other, *on, mode=pw.JoinMode.LEFT, **join_kwargs
    )
166,629 | import datetime
from typing import Any, Union
import pandas as pd
from dateutil import tz
from pathway.internals import dtype as dt
from pathway.internals.type_interpreter import eval_type
TimeEventType = Union[int, float, datetime.datetime]
def get_default_origin(time_event_type: dt.DType) -> TimeEventType:
    """Return the default window origin for the given time dtype.

    Ints/floats start at zero; datetimes start at 1973-01-01, which fell on
    a Monday, so week-wide windows begin on Monday by default.

    Raises:
        KeyError: for an unsupported time dtype.
    """
    naive_monday_1973 = pd.Timestamp(year=1973, month=1, day=1, tz=None)
    utc_monday_1973 = pd.Timestamp(year=1973, month=1, day=1, tz=tz.UTC)
    defaults: dict[Any, TimeEventType] = {
        dt.INT: 0,
        dt.FLOAT: 0.0,
        dt.DATE_TIME_NAIVE: naive_monday_1973,
        dt.DATE_TIME_UTC: utc_monday_1973,
    }
    return defaults[time_event_type]
166,630 | import datetime
from typing import Any, Union
import pandas as pd
from dateutil import tz
from pathway.internals import dtype as dt
from pathway.internals.type_interpreter import eval_type
IntervalType = Union[int, float, datetime.timedelta]
def zero_length_interval(interval_type: type[IntervalType]) -> IntervalType:
    """Return the zero-length interval of the given concrete interval type.

    Args:
        interval_type: one of ``int``, ``float`` or ``datetime.timedelta``
            (subclasses are accepted as well).

    Returns:
        ``0``, ``0.0`` or ``datetime.timedelta(0)`` respectively.

    Raises:
        TypeError: if ``interval_type`` is not one of the supported types.
    """
    # timedelta is checked first; note that bool passes the int check and
    # yields 0, which is harmless for interval arithmetic.
    if issubclass(interval_type, datetime.timedelta):
        return datetime.timedelta(0)
    elif issubclass(interval_type, int):
        return 0
    elif issubclass(interval_type, float):
        return 0.0
    else:
        # Was a bare `Exception`; TypeError is more precise and remains
        # backward compatible for callers catching Exception.
        raise TypeError(f"unsupported interval type: {interval_type!r}")
166,631 | from __future__ import annotations
from typing import Any
import pathway.internals as pw
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableSubstitutionDesugaring,
desugar,
)
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.thisclass import ThisMetaclass
from pathway.internals.trace import trace_user_frame
from pathway.stdlib import temporal
class WindowJoinResult(DesugaringContext):
    """
    Result of a window join between tables.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...     | t
    ...     1 | 1
    ...     2 | 2
    ...     3 | 3
    ...     4 | 7
    ...     5 | 13
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...     | t
    ...     1 | 2
    ...     2 | 5
    ...     3 | 6
    ...     4 | 7
    ...     '''
    ... )
    >>> join_result = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2))
    >>> isinstance(join_result, pw.temporal.WindowJoinResult)
    True
    >>> pw.debug.compute_and_print(
    ...     join_result.select(left_t=t1.t, right_t=t2.t), include_id=False
    ... )
    left_t | right_t
    | 5
    1 |
    2 | 2
    3 | 2
    7 | 6
    7 | 7
    13 |
    """

    # Underlying join of the two (windowed) inputs.
    _join_result: pw.JoinResult
    # Maps the user-visible input tables to the tables actually joined,
    # so expressions written against the originals can be desugared.
    _table_substitution: dict[pw.TableLike, pw.Table]
    # Resolution of pw.left / pw.right / pw.this inside select() expressions.
    _substitution: dict[ThisMetaclass, pw.Joinable]

    def __init__(
        self,
        join_result: pw.JoinResult,
        left_original: pw.Table,
        right_original: pw.Table,
        left_new: pw.Table,
        right_new: pw.Table,
    ):
        # `left_new` / `right_new` are presumably the window-expanded
        # variants of the originals produced by the window's join —
        # TODO confirm against the Window._join implementation.
        self._join_result = join_result
        self._universe = join_result._universe
        self._table_substitution = {
            left_original: left_new,
            right_original: right_new,
        }
        self._substitution = {
            pw.left: left_new,
            pw.right: right_new,
            pw.this: join_result,
        }

    def _desugaring(self) -> TableSubstitutionDesugaring:
        # Rewrites references to the original tables into the joined ones.
        return TableSubstitutionDesugaring(self._table_substitution)

    def select(self, *args: pw.ColumnReference, **kwargs: Any) -> pw.Table:
        """Computes a result of a window join.

        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.

        Returns:
            Table: Created table.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown(
        ...     '''
        ...     | a | t
        ...     1 | 1 | 1
        ...     2 | 1 | 2
        ...     3 | 1 | 3
        ...     4 | 1 | 7
        ...     5 | 1 | 13
        ...     6 | 2 | 1
        ...     7 | 2 | 2
        ...     8 | 3 | 4
        ...     '''
        ... )
        >>> t2 = pw.debug.table_from_markdown(
        ...     '''
        ...     | b | t
        ...     1 | 1 | 2
        ...     2 | 1 | 5
        ...     3 | 1 | 6
        ...     4 | 1 | 7
        ...     5 | 2 | 2
        ...     6 | 2 | 3
        ...     7 | 4 | 3
        ...     '''
        ... )
        >>> t3 = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select(
        ...     key=pw.coalesce(t1.a, t2.b), left_t=t1.t, right_t=t2.t
        ... )
        >>> pw.debug.compute_and_print(t3, include_id=False)
        key | left_t | right_t
        1 | | 5
        1 | 1 |
        1 | 2 | 2
        1 | 3 | 2
        1 | 7 | 6
        1 | 7 | 7
        1 | 13 |
        2 | 1 |
        2 | 2 | 2
        2 | 2 | 3
        3 | 4 |
        4 | | 3
        """
        # Delegate straight to the underlying join result (desugaring of
        # pw.left / pw.right / pw.this is handled by the surrounding machinery).
        return self._join_result.select(*args, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `window_join` function. Write a Python function `def window_join( self: pw.Table, other: pw.Table, self_time: pw.ColumnExpression, other_time: pw.ColumnExpression, window: temporal.Window, *on: pw.ColumnExpression, how: pw.JoinMode = pw.JoinMode.INNER, left_instance: pw.ColumnReference | None = None, right_instance: pw.ColumnReference | None = None, ) -> WindowJoinResult` to solve the following problem:
Performs a window join of self with other using a window and join expressions. If two records belong to the same window and meet the conditions specified in the `on` clause, they will be joined. Note that if a sliding window is used and there are pairs of matching records that appear in more than one window, they will be included in the result multiple times (equal to the number of windows they appear in). When using a session window, the function creates sessions by concatenating records from both sides of a join. Only pairs of records that meet the conditions specified in the `on` clause can be part of the same session. The result of a given session will include all records from the left side of a join that belong to this session, joined with all records from the right side of a join that belong to this session. Args: other: the right side of a join. self_time: time expression in self. other_time: time expression in other. window: a window to use. on: a list of column expressions. Each must have == on the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. how: decides whether to run `window_join_inner`, `window_join_left`, `window_join_right` or `window_join_outer`. Default is INNER. left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Returns: WindowJoinResult: a result of the window join. A method `.select()` can be called on it to extract relevant columns from the result of a join. Examples: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 1 ... 2 | 2 ... 3 | 3 ... 4 | 7 ... 5 | 13 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 2 ... 2 | 5 ... 3 | 6 ... 4 | 7 ... ''' ... ) >>> t3 = t1.window_join(t2, t1.t, t2.t, pw.temporal.tumbling(2)).select( ... left_t=t1.t, right_t=t2.t ... 
) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 2 | 2 3 | 2 7 | 6 7 | 7 >>> t4 = t1.window_join(t2, t1.t, t2.t, pw.temporal.sliding(1, 2)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t4, include_id=False) left_t | right_t 1 | 2 2 | 2 2 | 2 3 | 2 7 | 6 7 | 7 7 | 7 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 2 ... 3 | 1 | 3 ... 4 | 1 | 7 ... 5 | 1 | 13 ... 6 | 2 | 1 ... 7 | 2 | 2 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 2 ... 2 | 1 | 5 ... 3 | 1 | 6 ... 4 | 1 | 7 ... 5 | 2 | 2 ... 6 | 2 | 3 ... 7 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select( ... key=t1.a, left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 2 | 2 1 | 3 | 2 1 | 7 | 6 1 | 7 | 7 2 | 2 | 2 2 | 2 | 3 >>> >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | 0 ... 1 | 5 ... 2 | 10 ... 3 | 15 ... 4 | 17 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | -3 ... 1 | 2 ... 2 | 3 ... 3 | 6 ... 4 | 16 ... ''' ... ) >>> t3 = t1.window_join( ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2) ... ).select(left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 0 | 2 0 | 3 0 | 6 5 | 2 5 | 3 5 | 6 15 | 16 17 | 16 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 4 ... 3 | 1 | 7 ... 4 | 2 | 0 ... 5 | 2 | 3 ... 6 | 2 | 4 ... 7 | 2 | 7 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | -1 ... 2 | 1 | 6 ... 3 | 2 | 2 ... 4 | 2 | 10 ... 5 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join( ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2), t1.a == t2.b ... 
).select(key=t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 1 | -1 1 | 4 | 6 1 | 7 | 6 2 | 0 | 2 2 | 3 | 2 2 | 4 | 2
Here is the function:
def window_join(
    self: pw.Table,
    other: pw.Table,
    self_time: pw.ColumnExpression,
    other_time: pw.ColumnExpression,
    window: temporal.Window,
    *on: pw.ColumnExpression,
    how: pw.JoinMode = pw.JoinMode.INNER,
    left_instance: pw.ColumnReference | None = None,
    right_instance: pw.ColumnReference | None = None,
) -> WindowJoinResult:
    """Join ``self`` with ``other`` on records falling into the same window.

    Two records are joined when they belong to the same window produced by
    ``window`` and satisfy every equality condition in ``on``.  With a
    sliding window, a matching pair appears once per window it shares.
    With a session window, sessions are built by concatenating records from
    both sides; only records satisfying ``on`` may share a session, and all
    left records of a session are joined with all right records of that
    session.

    Args:
        other: the right side of the join.
        self_time: time expression in ``self``.
        other_time: time expression in ``other``.
        window: the window to use.
        on: equality conditions, each of the form
            ``LHS: ColumnReference == RHS: ColumnReference``.
        how: join mode deciding between the inner/left/right/outer variant;
            defaults to ``pw.JoinMode.INNER``.
        left_instance/right_instance: optional partitioning columns.

    Returns:
        WindowJoinResult: call ``.select()`` on it to extract the relevant
        columns of the joined rows.
    """
    # The window object owns the pairing logic; this is a thin dispatcher.
    instance_kwargs = {
        "left_instance": left_instance,
        "right_instance": right_instance,
    }
    return window._join(
        self, other, self_time, other_time, *on, mode=how, **instance_kwargs
    )
) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 2 | 2 3 | 2 7 | 6 7 | 7 >>> t4 = t1.window_join(t2, t1.t, t2.t, pw.temporal.sliding(1, 2)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t4, include_id=False) left_t | right_t 1 | 2 2 | 2 2 | 2 3 | 2 7 | 6 7 | 7 7 | 7 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 2 ... 3 | 1 | 3 ... 4 | 1 | 7 ... 5 | 1 | 13 ... 6 | 2 | 1 ... 7 | 2 | 2 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 2 ... 2 | 1 | 5 ... 3 | 1 | 6 ... 4 | 1 | 7 ... 5 | 2 | 2 ... 6 | 2 | 3 ... 7 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select( ... key=t1.a, left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 2 | 2 1 | 3 | 2 1 | 7 | 6 1 | 7 | 7 2 | 2 | 2 2 | 2 | 3 >>> >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | 0 ... 1 | 5 ... 2 | 10 ... 3 | 15 ... 4 | 17 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | -3 ... 1 | 2 ... 2 | 3 ... 3 | 6 ... 4 | 16 ... ''' ... ) >>> t3 = t1.window_join( ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2) ... ).select(left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 0 | 2 0 | 3 0 | 6 5 | 2 5 | 3 5 | 6 15 | 16 17 | 16 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 4 ... 3 | 1 | 7 ... 4 | 2 | 0 ... 5 | 2 | 3 ... 6 | 2 | 4 ... 7 | 2 | 7 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | -1 ... 2 | 1 | 6 ... 3 | 2 | 2 ... 4 | 2 | 10 ... 5 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join( ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2), t1.a == t2.b ... 
).select(key=t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 1 | -1 1 | 4 | 6 1 | 7 | 6 2 | 0 | 2 2 | 3 | 2 2 | 4 | 2 |
166,632 | from __future__ import annotations
from typing import Any
import pathway.internals as pw
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableSubstitutionDesugaring,
desugar,
)
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.thisclass import ThisMetaclass
from pathway.internals.trace import trace_user_frame
from pathway.stdlib import temporal
class WindowJoinResult(DesugaringContext):
    """
    Result of a window join between tables.

    A thin wrapper around a :py:class:`pw.JoinResult` that additionally
    remembers how the user's original tables map onto their window-expanded
    counterparts, so that column references written against the originals can
    be desugared when selecting from the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 1
    ...     2 | 2
    ...     3 | 3
    ...     4 | 7
    ...     5 | 13
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 2
    ...     2 | 5
    ...     3 | 6
    ...     4 | 7
    ...     '''
    ... )
    >>> join_result = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2))
    >>> isinstance(join_result, pw.temporal.WindowJoinResult)
    True
    >>> pw.debug.compute_and_print(
    ...     join_result.select(left_t=t1.t, right_t=t2.t), include_id=False
    ... )
    left_t | right_t
           | 5
    1      |
    2      | 2
    3      | 2
    7      | 6
    7      | 7
    13     |
    """

    _join_result: pw.JoinResult
    _table_substitution: dict[pw.TableLike, pw.Table]
    _substitution: dict[ThisMetaclass, pw.Joinable]

    def __init__(
        self,
        join_result: pw.JoinResult,
        left_original: pw.Table,
        right_original: pw.Table,
        left_new: pw.Table,
        right_new: pw.Table,
    ):
        # Keep a handle to the wrapped join and share its universe.
        self._join_result = join_result
        self._universe = join_result._universe
        # References to the original user tables get rewritten to point at
        # their window-expanded versions during desugaring.
        self._table_substitution = dict(
            [(left_original, left_new), (right_original, right_new)]
        )
        substitution: dict[ThisMetaclass, pw.Joinable] = {}
        substitution[pw.left] = left_new
        substitution[pw.right] = right_new
        substitution[pw.this] = join_result
        self._substitution = substitution

    def _desugaring(self) -> TableSubstitutionDesugaring:
        # Swap original tables for their window-expanded counterparts in
        # user-provided expressions.
        return TableSubstitutionDesugaring(self._table_substitution)

    def select(self, *args: pw.ColumnReference, **kwargs: Any) -> pw.Table:
        """Computes a result of a window join.

        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.

        Returns:
            Table: Created table.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown(
        ...     '''
        ...       | a | t
        ...     1 | 1 | 1
        ...     2 | 1 | 2
        ...     3 | 1 | 3
        ...     4 | 1 | 7
        ...     5 | 1 | 13
        ...     6 | 2 | 1
        ...     7 | 2 | 2
        ...     8 | 3 | 4
        ...     '''
        ... )
        >>> t2 = pw.debug.table_from_markdown(
        ...     '''
        ...       | b | t
        ...     1 | 1 | 2
        ...     2 | 1 | 5
        ...     3 | 1 | 6
        ...     4 | 1 | 7
        ...     5 | 2 | 2
        ...     6 | 2 | 3
        ...     7 | 4 | 3
        ...     '''
        ... )
        >>> t3 = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select(
        ...     key=pw.coalesce(t1.a, t2.b), left_t=t1.t, right_t=t2.t
        ... )
        >>> pw.debug.compute_and_print(t3, include_id=False)
        key | left_t | right_t
        1   |        | 5
        1   | 1      |
        1   | 2      | 2
        1   | 3      | 2
        1   | 7      | 6
        1   | 7      | 7
        1   | 13     |
        2   | 1      |
        2   | 2      | 2
        2   | 2      | 3
        3   | 4      |
        4   |        | 3
        """
        underlying = self._join_result
        return underlying.select(*args, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `window_join_inner` function. Write a Python function `def window_join_inner( self: pw.Table, other: pw.Table, self_time: pw.ColumnExpression, other_time: pw.ColumnExpression, window: temporal.Window, *on: pw.ColumnExpression, left_instance: pw.ColumnReference | None = None, right_instance: pw.ColumnReference | None = None, ) -> WindowJoinResult` to solve the following problem:
Performs a window join of self with other using a window and join expressions. If two records belong to the same window and meet the conditions specified in the `on` clause, they will be joined. Note that if a sliding window is used and there are pairs of matching records that appear in more than one window, they will be included in the result multiple times (equal to the number of windows they appear in). When using a session window, the function creates sessions by concatenating records from both sides of a join. Only pairs of records that meet the conditions specified in the `on` clause can be part of the same session. The result of a given session will include all records from the left side of a join that belong to this session, joined with all records from the right side of a join that belong to this session. Args: other: the right side of a join. self_time: time expression in self. other_time: time expression in other. window: a window to use. on: a list of column expressions. Each must have == on the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Returns: WindowJoinResult: a result of the window join. A method `.select()` can be called on it to extract relevant columns from the result of a join. Examples: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 1 ... 2 | 2 ... 3 | 3 ... 4 | 7 ... 5 | 13 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 2 ... 2 | 5 ... 3 | 6 ... 4 | 7 ... ''' ... ) >>> t3 = t1.window_join_inner(t2, t1.t, t2.t, pw.temporal.tumbling(2)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 2 | 2 3 | 2 7 | 6 7 | 7 >>> t4 = t1.window_join_inner(t2, t1.t, t2.t, pw.temporal.sliding(1, 2)).select( ... left_t=t1.t, right_t=t2.t ... 
) >>> pw.debug.compute_and_print(t4, include_id=False) left_t | right_t 1 | 2 2 | 2 2 | 2 3 | 2 7 | 6 7 | 7 7 | 7 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 2 ... 3 | 1 | 3 ... 4 | 1 | 7 ... 5 | 1 | 13 ... 6 | 2 | 1 ... 7 | 2 | 2 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 2 ... 2 | 1 | 5 ... 3 | 1 | 6 ... 4 | 1 | 7 ... 5 | 2 | 2 ... 6 | 2 | 3 ... 7 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_inner(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select( ... key=t1.a, left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 2 | 2 1 | 3 | 2 1 | 7 | 6 1 | 7 | 7 2 | 2 | 2 2 | 2 | 3 >>> >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | 0 ... 1 | 5 ... 2 | 10 ... 3 | 15 ... 4 | 17 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | -3 ... 1 | 2 ... 2 | 3 ... 3 | 6 ... 4 | 16 ... ''' ... ) >>> t3 = t1.window_join_inner( ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2) ... ).select(left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 0 | 2 0 | 3 0 | 6 5 | 2 5 | 3 5 | 6 15 | 16 17 | 16 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 4 ... 3 | 1 | 7 ... 4 | 2 | 0 ... 5 | 2 | 3 ... 6 | 2 | 4 ... 7 | 2 | 7 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | -1 ... 2 | 1 | 6 ... 3 | 2 | 2 ... 4 | 2 | 10 ... 5 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_inner( ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2), t1.a == t2.b ... ).select(key=t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 1 | -1 1 | 4 | 6 1 | 7 | 6 2 | 0 | 2 2 | 3 | 2 2 | 4 | 2
Here is the function:
def window_join_inner(
    self: pw.Table,
    other: pw.Table,
    self_time: pw.ColumnExpression,
    other_time: pw.ColumnExpression,
    window: temporal.Window,
    *on: pw.ColumnExpression,
    left_instance: pw.ColumnReference | None = None,
    right_instance: pw.ColumnReference | None = None,
) -> WindowJoinResult:
    """Performs a window join of self with other using a window and join expressions.

    Two records are joined when they belong to the same window and satisfy
    every equality condition from the ``on`` clause.  With a sliding window, a
    matching pair appears in the result once for every window that contains it.

    With a session window, sessions are built from records of both sides of
    the join taken together; only pairs of records satisfying the ``on``
    clause may belong to the same session.  Every record of a session coming
    from the left side is joined with every record of that session coming from
    the right side.

    Args:
        other: the right side of a join.
        self_time: time expression in self.
        other_time: time expression in other.
        window: a window to use.
        on: a list of column expressions. Each must have == on the top level
            operation and be of the form LHS: ColumnReference == RHS: ColumnReference.
        left_instance/right_instance: optional arguments describing partitioning
            of the data into separate instances.

    Returns:
        WindowJoinResult: a result of the window join. A method `.select()`
        can be called on it to extract relevant columns from the result of a join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 1
    ...     2 | 2
    ...     3 | 3
    ...     4 | 7
    ...     5 | 13
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 2
    ...     2 | 5
    ...     3 | 6
    ...     4 | 7
    ...     '''
    ... )
    >>> t3 = t1.window_join_inner(t2, t1.t, t2.t, pw.temporal.tumbling(2)).select(
    ...     left_t=t1.t, right_t=t2.t
    ... )
    >>> pw.debug.compute_and_print(t3, include_id=False)
    left_t | right_t
    2      | 2
    3      | 2
    7      | 6
    7      | 7
    """
    # All window-join flavors share one implementation; only the mode differs.
    join_mode = pw.JoinMode.INNER
    return window._join(
        self,
        other,
        self_time,
        other_time,
        *on,
        mode=join_mode,
        left_instance=left_instance,
        right_instance=right_instance,
    )
) >>> pw.debug.compute_and_print(t4, include_id=False) left_t | right_t 1 | 2 2 | 2 2 | 2 3 | 2 7 | 6 7 | 7 7 | 7 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 2 ... 3 | 1 | 3 ... 4 | 1 | 7 ... 5 | 1 | 13 ... 6 | 2 | 1 ... 7 | 2 | 2 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 2 ... 2 | 1 | 5 ... 3 | 1 | 6 ... 4 | 1 | 7 ... 5 | 2 | 2 ... 6 | 2 | 3 ... 7 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_inner(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select( ... key=t1.a, left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 2 | 2 1 | 3 | 2 1 | 7 | 6 1 | 7 | 7 2 | 2 | 2 2 | 2 | 3 >>> >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | 0 ... 1 | 5 ... 2 | 10 ... 3 | 15 ... 4 | 17 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | -3 ... 1 | 2 ... 2 | 3 ... 3 | 6 ... 4 | 16 ... ''' ... ) >>> t3 = t1.window_join_inner( ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2) ... ).select(left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 0 | 2 0 | 3 0 | 6 5 | 2 5 | 3 5 | 6 15 | 16 17 | 16 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 4 ... 3 | 1 | 7 ... 4 | 2 | 0 ... 5 | 2 | 3 ... 6 | 2 | 4 ... 7 | 2 | 7 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | -1 ... 2 | 1 | 6 ... 3 | 2 | 2 ... 4 | 2 | 10 ... 5 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_inner( ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2), t1.a == t2.b ... ).select(key=t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 1 | -1 1 | 4 | 6 1 | 7 | 6 2 | 0 | 2 2 | 3 | 2 2 | 4 | 2 |
from __future__ import annotations
from typing import Any
import pathway.internals as pw
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableSubstitutionDesugaring,
desugar,
)
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.thisclass import ThisMetaclass
from pathway.internals.trace import trace_user_frame
from pathway.stdlib import temporal
class WindowJoinResult(DesugaringContext):
    """
    Result of a window join between tables.

    A thin wrapper around a :py:class:`pw.JoinResult` that additionally
    remembers how the user's original tables map onto their window-expanded
    counterparts, so that column references written against the originals can
    be desugared when selecting from the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 1
    ...     2 | 2
    ...     3 | 3
    ...     4 | 7
    ...     5 | 13
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 2
    ...     2 | 5
    ...     3 | 6
    ...     4 | 7
    ...     '''
    ... )
    >>> join_result = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2))
    >>> isinstance(join_result, pw.temporal.WindowJoinResult)
    True
    >>> pw.debug.compute_and_print(
    ...     join_result.select(left_t=t1.t, right_t=t2.t), include_id=False
    ... )
    left_t | right_t
           | 5
    1      |
    2      | 2
    3      | 2
    7      | 6
    7      | 7
    13     |
    """

    _join_result: pw.JoinResult
    _table_substitution: dict[pw.TableLike, pw.Table]
    _substitution: dict[ThisMetaclass, pw.Joinable]

    def __init__(
        self,
        join_result: pw.JoinResult,
        left_original: pw.Table,
        right_original: pw.Table,
        left_new: pw.Table,
        right_new: pw.Table,
    ):
        # Keep a handle to the wrapped join and share its universe.
        self._join_result = join_result
        self._universe = join_result._universe
        # References to the original user tables get rewritten to point at
        # their window-expanded versions during desugaring.
        self._table_substitution = dict(
            [(left_original, left_new), (right_original, right_new)]
        )
        substitution: dict[ThisMetaclass, pw.Joinable] = {}
        substitution[pw.left] = left_new
        substitution[pw.right] = right_new
        substitution[pw.this] = join_result
        self._substitution = substitution

    def _desugaring(self) -> TableSubstitutionDesugaring:
        # Swap original tables for their window-expanded counterparts in
        # user-provided expressions.
        return TableSubstitutionDesugaring(self._table_substitution)

    def select(self, *args: pw.ColumnReference, **kwargs: Any) -> pw.Table:
        """Computes a result of a window join.

        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.

        Returns:
            Table: Created table.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown(
        ...     '''
        ...       | a | t
        ...     1 | 1 | 1
        ...     2 | 1 | 2
        ...     3 | 1 | 3
        ...     4 | 1 | 7
        ...     5 | 1 | 13
        ...     6 | 2 | 1
        ...     7 | 2 | 2
        ...     8 | 3 | 4
        ...     '''
        ... )
        >>> t2 = pw.debug.table_from_markdown(
        ...     '''
        ...       | b | t
        ...     1 | 1 | 2
        ...     2 | 1 | 5
        ...     3 | 1 | 6
        ...     4 | 1 | 7
        ...     5 | 2 | 2
        ...     6 | 2 | 3
        ...     7 | 4 | 3
        ...     '''
        ... )
        >>> t3 = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select(
        ...     key=pw.coalesce(t1.a, t2.b), left_t=t1.t, right_t=t2.t
        ... )
        >>> pw.debug.compute_and_print(t3, include_id=False)
        key | left_t | right_t
        1   |        | 5
        1   | 1      |
        1   | 2      | 2
        1   | 3      | 2
        1   | 7      | 6
        1   | 7      | 7
        1   | 13     |
        2   | 1      |
        2   | 2      | 2
        2   | 2      | 3
        3   | 4      |
        4   |        | 3
        """
        underlying = self._join_result
        return underlying.select(*args, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `window_join_left` function. Write a Python function `def window_join_left( self: pw.Table, other: pw.Table, self_time: pw.ColumnExpression, other_time: pw.ColumnExpression, window: temporal.Window, *on: pw.ColumnExpression, left_instance: pw.ColumnReference | None = None, right_instance: pw.ColumnReference | None = None, ) -> WindowJoinResult` to solve the following problem:
Performs a window left join of self with other using a window and join expressions. If two records belong to the same window and meet the conditions specified in the `on` clause, they will be joined. Note that if a sliding window is used and there are pairs of matching records that appear in more than one window, they will be included in the result multiple times (equal to the number of windows they appear in). When using a session window, the function creates sessions by concatenating records from both sides of a join. Only pairs of records that meet the conditions specified in the `on` clause can be part of the same session. The result of a given session will include all records from the left side of a join that belong to this session, joined with all records from the right side of a join that belong to this session. Rows from the left side that didn't match with any record on the right side in a given window, are returned with missing values on the right side replaced with `None`. The multiplicity of such rows equals the number of windows they belong to and don't have a match in them. Args: other: the right side of a join. self_time: time expression in self. other_time: time expression in other. window: a window to use. on: a list of column expressions. Each must have == on the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Returns: WindowJoinResult: a result of the window join. A method `.select()` can be called on it to extract relevant columns from the result of a join. Examples: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 1 ... 2 | 2 ... 3 | 3 ... 4 | 7 ... 5 | 13 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 2 ... 2 | 5 ... 3 | 6 ... 4 | 7 ... ''' ... ) >>> t3 = t1.window_join_left(t2, t1.t, t2.t, pw.temporal.tumbling(2)).select( ... 
left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 1 | 2 | 2 3 | 2 7 | 6 7 | 7 13 | >>> t4 = t1.window_join_left(t2, t1.t, t2.t, pw.temporal.sliding(1, 2)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t4, include_id=False) left_t | right_t 1 | 1 | 2 2 | 2 2 | 2 3 | 3 | 2 7 | 6 7 | 7 7 | 7 13 | 13 | >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 2 ... 3 | 1 | 3 ... 4 | 1 | 7 ... 5 | 1 | 13 ... 6 | 2 | 1 ... 7 | 2 | 2 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 2 ... 2 | 1 | 5 ... 3 | 1 | 6 ... 4 | 1 | 7 ... 5 | 2 | 2 ... 6 | 2 | 3 ... 7 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_left(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select( ... key=t1.a, left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 1 | 1 | 2 | 2 1 | 3 | 2 1 | 7 | 6 1 | 7 | 7 1 | 13 | 2 | 1 | 2 | 2 | 2 2 | 2 | 3 3 | 4 | >>> >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | 0 ... 1 | 5 ... 2 | 10 ... 3 | 15 ... 4 | 17 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | -3 ... 1 | 2 ... 2 | 3 ... 3 | 6 ... 4 | 16 ... ''' ... ) >>> t3 = t1.window_join_left( ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2) ... ).select(left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 0 | 2 0 | 3 0 | 6 5 | 2 5 | 3 5 | 6 10 | 15 | 16 17 | 16 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 4 ... 3 | 1 | 7 ... 4 | 2 | 0 ... 5 | 2 | 3 ... 6 | 2 | 4 ... 7 | 2 | 7 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | -1 ... 2 | 1 | 6 ... 3 | 2 | 2 ... 4 | 2 | 10 ... 5 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_left( ... 
t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2), t1.a == t2.b ... ).select(key=t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 1 | -1 1 | 4 | 6 1 | 7 | 6 2 | 0 | 2 2 | 3 | 2 2 | 4 | 2 2 | 7 | 3 | 4 |
Here is the function:
def window_join_left(
    self: pw.Table,
    other: pw.Table,
    self_time: pw.ColumnExpression,
    other_time: pw.ColumnExpression,
    window: temporal.Window,
    *on: pw.ColumnExpression,
    left_instance: pw.ColumnReference | None = None,
    right_instance: pw.ColumnReference | None = None,
) -> WindowJoinResult:
    """Performs a window left join of self with other using a window and join expressions.

    Two records are joined when they belong to the same window and satisfy
    every equality condition from the ``on`` clause.  With a sliding window, a
    matching pair appears in the result once for every window that contains it.

    With a session window, sessions are built from records of both sides of
    the join taken together; only pairs of records satisfying the ``on``
    clause may belong to the same session.  Every record of a session coming
    from the left side is joined with every record of that session coming from
    the right side.

    Left-side rows without a match on the right side in a given window are
    kept, with the missing right-side values replaced by `None`.  The
    multiplicity of such rows equals the number of windows they belong to
    without finding a match there.

    Args:
        other: the right side of a join.
        self_time: time expression in self.
        other_time: time expression in other.
        window: a window to use.
        on: a list of column expressions. Each must have == on the top level
            operation and be of the form LHS: ColumnReference == RHS: ColumnReference.
        left_instance/right_instance: optional arguments describing partitioning
            of the data into separate instances.

    Returns:
        WindowJoinResult: a result of the window join. A method `.select()`
        can be called on it to extract relevant columns from the result of a join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 1
    ...     2 | 2
    ...     3 | 3
    ...     4 | 7
    ...     5 | 13
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 2
    ...     2 | 5
    ...     3 | 6
    ...     4 | 7
    ...     '''
    ... )
    >>> t3 = t1.window_join_left(t2, t1.t, t2.t, pw.temporal.tumbling(2)).select(
    ...     left_t=t1.t, right_t=t2.t
    ... )
    >>> pw.debug.compute_and_print(t3, include_id=False)
    left_t | right_t
    1      |
    2      | 2
    3      | 2
    7      | 6
    7      | 7
    13     |
    """
    # All window-join flavors share one implementation; only the mode differs.
    join_mode = pw.JoinMode.LEFT
    return window._join(
        self,
        other,
        self_time,
        other_time,
        *on,
        mode=join_mode,
        left_instance=left_instance,
        right_instance=right_instance,
    )
left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 1 | 2 | 2 3 | 2 7 | 6 7 | 7 13 | >>> t4 = t1.window_join_left(t2, t1.t, t2.t, pw.temporal.sliding(1, 2)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t4, include_id=False) left_t | right_t 1 | 1 | 2 2 | 2 2 | 2 3 | 3 | 2 7 | 6 7 | 7 7 | 7 13 | 13 | >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 2 ... 3 | 1 | 3 ... 4 | 1 | 7 ... 5 | 1 | 13 ... 6 | 2 | 1 ... 7 | 2 | 2 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 2 ... 2 | 1 | 5 ... 3 | 1 | 6 ... 4 | 1 | 7 ... 5 | 2 | 2 ... 6 | 2 | 3 ... 7 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_left(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select( ... key=t1.a, left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 1 | 1 | 2 | 2 1 | 3 | 2 1 | 7 | 6 1 | 7 | 7 1 | 13 | 2 | 1 | 2 | 2 | 2 2 | 2 | 3 3 | 4 | >>> >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | 0 ... 1 | 5 ... 2 | 10 ... 3 | 15 ... 4 | 17 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | -3 ... 1 | 2 ... 2 | 3 ... 3 | 6 ... 4 | 16 ... ''' ... ) >>> t3 = t1.window_join_left( ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2) ... ).select(left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 0 | 2 0 | 3 0 | 6 5 | 2 5 | 3 5 | 6 10 | 15 | 16 17 | 16 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 4 ... 3 | 1 | 7 ... 4 | 2 | 0 ... 5 | 2 | 3 ... 6 | 2 | 4 ... 7 | 2 | 7 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | -1 ... 2 | 1 | 6 ... 3 | 2 | 2 ... 4 | 2 | 10 ... 5 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_left( ... 
t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2), t1.a == t2.b ... ).select(key=t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 1 | -1 1 | 4 | 6 1 | 7 | 6 2 | 0 | 2 2 | 3 | 2 2 | 4 | 2 2 | 7 | 3 | 4 | |
from __future__ import annotations
from typing import Any
import pathway.internals as pw
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableSubstitutionDesugaring,
desugar,
)
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.thisclass import ThisMetaclass
from pathway.internals.trace import trace_user_frame
from pathway.stdlib import temporal
class WindowJoinResult(DesugaringContext):
    """
    Result of a window join between tables.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 1 | 1
    ... 2 | 2
    ... 3 | 3
    ... 4 | 7
    ... 5 | 13
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 1 | 2
    ... 2 | 5
    ... 3 | 6
    ... 4 | 7
    ... '''
    ... )
    >>> join_result = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2))
    >>> isinstance(join_result, pw.temporal.WindowJoinResult)
    True
    >>> pw.debug.compute_and_print(
    ... join_result.select(left_t=t1.t, right_t=t2.t), include_id=False
    ... )
    left_t | right_t
    | 5
    1 |
    2 | 2
    3 | 2
    7 | 6
    7 | 7
    13 |
    """
    # The plain JoinResult produced by the underlying join; select() below
    # forwards straight to it.
    _join_result: pw.JoinResult
    # Maps each original input table to the (windowed) table that replaced it,
    # so expressions written against the originals can be rewritten.
    _table_substitution: dict[pw.TableLike, pw.Table]
    # Resolves pw.left / pw.right / pw.this inside user expressions.
    _substitution: dict[ThisMetaclass, pw.Joinable]
    def __init__(
        self,
        join_result: pw.JoinResult,
        left_original: pw.Table,
        right_original: pw.Table,
        left_new: pw.Table,
        right_new: pw.Table,
    ):
        """Wrap ``join_result`` and record the substitutions used to desugar
        expressions later passed to :meth:`select`."""
        self._join_result = join_result
        # The result lives in the universe of the underlying join result.
        self._universe = join_result._universe
        self._table_substitution = {
            left_original: left_new,
            right_original: right_new,
        }
        self._substitution = {
            pw.left: left_new,
            pw.right: right_new,
            pw.this: join_result,
        }
    def _desugaring(self) -> TableSubstitutionDesugaring:
        # Desugaring rewrites references to the original tables into
        # references to the replacement tables prepared in __init__.
        return TableSubstitutionDesugaring(self._table_substitution)
    def select(self, *args: pw.ColumnReference, **kwargs: Any) -> pw.Table:
        """Computes a result of a window join.
        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.
        Returns:
            Table: Created table.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown(
        ... '''
        ... | a | t
        ... 1 | 1 | 1
        ... 2 | 1 | 2
        ... 3 | 1 | 3
        ... 4 | 1 | 7
        ... 5 | 1 | 13
        ... 6 | 2 | 1
        ... 7 | 2 | 2
        ... 8 | 3 | 4
        ... '''
        ... )
        >>> t2 = pw.debug.table_from_markdown(
        ... '''
        ... | b | t
        ... 1 | 1 | 2
        ... 2 | 1 | 5
        ... 3 | 1 | 6
        ... 4 | 1 | 7
        ... 5 | 2 | 2
        ... 6 | 2 | 3
        ... 7 | 4 | 3
        ... '''
        ... )
        >>> t3 = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select(
        ... key=pw.coalesce(t1.a, t2.b), left_t=t1.t, right_t=t2.t
        ... )
        >>> pw.debug.compute_and_print(t3, include_id=False)
        key | left_t | right_t
        1 | | 5
        1 | 1 |
        1 | 2 | 2
        1 | 3 | 2
        1 | 7 | 6
        1 | 7 | 7
        1 | 13 |
        2 | 1 |
        2 | 2 | 2
        2 | 2 | 3
        3 | 4 |
        4 | | 3
        """
        # Forward to the wrapped JoinResult; the expressions are desugared
        # using the substitutions recorded in __init__.
        return self._join_result.select(*args, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `window_join_right` function. Write a Python function `def window_join_right( self: pw.Table, other: pw.Table, self_time: pw.ColumnExpression, other_time: pw.ColumnExpression, window: temporal.Window, *on: pw.ColumnExpression, left_instance: pw.ColumnReference | None = None, right_instance: pw.ColumnReference | None = None, ) -> WindowJoinResult` to solve the following problem:
Performs a window right join of self with other using a window and join expressions. If two records belong to the same window and meet the conditions specified in the `on` clause, they will be joined. Note that if a sliding window is used and there are pairs of matching records that appear in more than one window, they will be included in the result multiple times (equal to the number of windows they appear in). When using a session window, the function creates sessions by concatenating records from both sides of a join. Only pairs of records that meet the conditions specified in the `on` clause can be part of the same session. The result of a given session will include all records from the left side of a join that belong to this session, joined with all records from the right side of a join that belong to this session. Rows from the right side that didn't match with any record on the left side in a given window, are returned with missing values on the left side replaced with `None`. The multiplicity of such rows equals the number of windows they belong to and don't have a match in them. Args: other: the right side of a join. self_time: time expression in self. other_time: time expression in other. window: a window to use. on: a list of column expressions. Each must have == on the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Returns: WindowJoinResult: a result of the window join. A method `.select()` can be called on it to extract relevant columns from the result of a join. Examples: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 1 ... 2 | 2 ... 3 | 3 ... 4 | 7 ... 5 | 13 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 2 ... 2 | 5 ... 3 | 6 ... 4 | 7 ... ''' ... ) >>> t3 = t1.window_join_right(t2, t1.t, t2.t, pw.temporal.tumbling(2)).select( ... 
left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t | 5 2 | 2 3 | 2 7 | 6 7 | 7 >>> t4 = t1.window_join_right(t2, t1.t, t2.t, pw.temporal.sliding(1, 2)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t4, include_id=False) left_t | right_t | 5 | 5 | 6 1 | 2 2 | 2 2 | 2 3 | 2 7 | 6 7 | 7 7 | 7 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 2 ... 3 | 1 | 3 ... 4 | 1 | 7 ... 5 | 1 | 13 ... 6 | 2 | 1 ... 7 | 2 | 2 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 2 ... 2 | 1 | 5 ... 3 | 1 | 6 ... 4 | 1 | 7 ... 5 | 2 | 2 ... 6 | 2 | 3 ... 7 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_right(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select( ... key=t2.b, left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | | 5 1 | 2 | 2 1 | 3 | 2 1 | 7 | 6 1 | 7 | 7 2 | 2 | 2 2 | 2 | 3 4 | | 3 >>> >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | 0 ... 1 | 5 ... 2 | 10 ... 3 | 15 ... 4 | 17 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | -3 ... 1 | 2 ... 2 | 3 ... 3 | 6 ... 4 | 16 ... ''' ... ) >>> t3 = t1.window_join_right( ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2) ... ).select(left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t | -3 0 | 2 0 | 3 0 | 6 5 | 2 5 | 3 5 | 6 15 | 16 17 | 16 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 4 ... 3 | 1 | 7 ... 4 | 2 | 0 ... 5 | 2 | 3 ... 6 | 2 | 4 ... 7 | 2 | 7 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | -1 ... 2 | 1 | 6 ... 3 | 2 | 2 ... 4 | 2 | 10 ... 5 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_right( ... 
t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2), t1.a == t2.b ... ).select(key=t2.b, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 1 | -1 1 | 4 | 6 1 | 7 | 6 2 | | 10 2 | 0 | 2 2 | 3 | 2 2 | 4 | 2 4 | | 3
Here is the function:
def window_join_right(
    self: pw.Table,
    other: pw.Table,
    self_time: pw.ColumnExpression,
    other_time: pw.ColumnExpression,
    window: temporal.Window,
    *on: pw.ColumnExpression,
    left_instance: pw.ColumnReference | None = None,
    right_instance: pw.ColumnReference | None = None,
) -> WindowJoinResult:
    """Performs a window right join of self with other using a window and join expressions.
    If two records belong to the same window and meet the conditions specified in
    the `on` clause, they will be joined. Note that if a sliding window is used and
    there are pairs of matching records that appear in more than one window,
    they will be included in the result multiple times (equal to the number of
    windows they appear in).
    When using a session window, the function creates sessions by concatenating
    records from both sides of a join. Only pairs of records that meet
    the conditions specified in the `on` clause can be part of the same session.
    The result of a given session will include all records from the left side of
    a join that belong to this session, joined with all records from the right
    side of a join that belong to this session.
    Rows from the right side that didn't match with any record on the left side in
    a given window, are returned with missing values on the left side replaced
    with `None`. The multiplicity of such rows equals the number of windows they
    belong to and don't have a match in them.
    Args:
        other: the right side of a join.
        self_time: time expression in self.
        other_time: time expression in other.
        window: a window to use.
        on: a list of column expressions. Each must have == on the top level
            operation and be of the form LHS: ColumnReference == RHS: ColumnReference.
        left_instance/right_instance: optional arguments describing partitioning of the data into separate instances
    Returns:
        WindowJoinResult: a result of the window join. A method `.select()`
        can be called on it to extract relevant columns from the result of a join.
    Examples:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 1 | 1
    ... 2 | 2
    ... 3 | 3
    ... 4 | 7
    ... 5 | 13
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 1 | 2
    ... 2 | 5
    ... 3 | 6
    ... 4 | 7
    ... '''
    ... )
    >>> t3 = t1.window_join_right(t2, t1.t, t2.t, pw.temporal.tumbling(2)).select(
    ... left_t=t1.t, right_t=t2.t
    ... )
    >>> pw.debug.compute_and_print(t3, include_id=False)
    left_t | right_t
    | 5
    2 | 2
    3 | 2
    7 | 6
    7 | 7
    >>> t4 = t1.window_join_right(t2, t1.t, t2.t, pw.temporal.sliding(1, 2)).select(
    ... left_t=t1.t, right_t=t2.t
    ... )
    >>> pw.debug.compute_and_print(t4, include_id=False)
    left_t | right_t
    | 5
    | 5
    | 6
    1 | 2
    2 | 2
    2 | 2
    3 | 2
    7 | 6
    7 | 7
    7 | 7
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | a | t
    ... 1 | 1 | 1
    ... 2 | 1 | 2
    ... 3 | 1 | 3
    ... 4 | 1 | 7
    ... 5 | 1 | 13
    ... 6 | 2 | 1
    ... 7 | 2 | 2
    ... 8 | 3 | 4
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | b | t
    ... 1 | 1 | 2
    ... 2 | 1 | 5
    ... 3 | 1 | 6
    ... 4 | 1 | 7
    ... 5 | 2 | 2
    ... 6 | 2 | 3
    ... 7 | 4 | 3
    ... '''
    ... )
    >>> t3 = t1.window_join_right(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select(
    ... key=t2.b, left_t=t1.t, right_t=t2.t
    ... )
    >>> pw.debug.compute_and_print(t3, include_id=False)
    key | left_t | right_t
    1 | | 5
    1 | 2 | 2
    1 | 3 | 2
    1 | 7 | 6
    1 | 7 | 7
    2 | 2 | 2
    2 | 2 | 3
    4 | | 3
    >>>
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 0 | 0
    ... 1 | 5
    ... 2 | 10
    ... 3 | 15
    ... 4 | 17
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 0 | -3
    ... 1 | 2
    ... 2 | 3
    ... 3 | 6
    ... 4 | 16
    ... '''
    ... )
    >>> t3 = t1.window_join_right(
    ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2)
    ... ).select(left_t=t1.t, right_t=t2.t)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    left_t | right_t
    | -3
    0 | 2
    0 | 3
    0 | 6
    5 | 2
    5 | 3
    5 | 6
    15 | 16
    17 | 16
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | a | t
    ... 1 | 1 | 1
    ... 2 | 1 | 4
    ... 3 | 1 | 7
    ... 4 | 2 | 0
    ... 5 | 2 | 3
    ... 6 | 2 | 4
    ... 7 | 2 | 7
    ... 8 | 3 | 4
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | b | t
    ... 1 | 1 | -1
    ... 2 | 1 | 6
    ... 3 | 2 | 2
    ... 4 | 2 | 10
    ... 5 | 4 | 3
    ... '''
    ... )
    >>> t3 = t1.window_join_right(
    ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2), t1.a == t2.b
    ... ).select(key=t2.b, left_t=t1.t, right_t=t2.t)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    key | left_t | right_t
    1 | 1 | -1
    1 | 4 | 6
    1 | 7 | 6
    2 | | 10
    2 | 0 | 2
    2 | 3 | 2
    2 | 4 | 2
    4 | | 3
    """
    # Delegate to the window's join implementation; JoinMode.RIGHT keeps
    # right-side rows with no left-side match in a window (left side -> None).
    return window._join(
        self,
        other,
        self_time,
        other_time,
        *on,
        mode=pw.JoinMode.RIGHT,
        left_instance=left_instance,
        right_instance=right_instance,
    )
left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t | 5 2 | 2 3 | 2 7 | 6 7 | 7 >>> t4 = t1.window_join_right(t2, t1.t, t2.t, pw.temporal.sliding(1, 2)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t4, include_id=False) left_t | right_t | 5 | 5 | 6 1 | 2 2 | 2 2 | 2 3 | 2 7 | 6 7 | 7 7 | 7 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 2 ... 3 | 1 | 3 ... 4 | 1 | 7 ... 5 | 1 | 13 ... 6 | 2 | 1 ... 7 | 2 | 2 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 2 ... 2 | 1 | 5 ... 3 | 1 | 6 ... 4 | 1 | 7 ... 5 | 2 | 2 ... 6 | 2 | 3 ... 7 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_right(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select( ... key=t2.b, left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | | 5 1 | 2 | 2 1 | 3 | 2 1 | 7 | 6 1 | 7 | 7 2 | 2 | 2 2 | 2 | 3 4 | | 3 >>> >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | 0 ... 1 | 5 ... 2 | 10 ... 3 | 15 ... 4 | 17 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | -3 ... 1 | 2 ... 2 | 3 ... 3 | 6 ... 4 | 16 ... ''' ... ) >>> t3 = t1.window_join_right( ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2) ... ).select(left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t | -3 0 | 2 0 | 3 0 | 6 5 | 2 5 | 3 5 | 6 15 | 16 17 | 16 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 4 ... 3 | 1 | 7 ... 4 | 2 | 0 ... 5 | 2 | 3 ... 6 | 2 | 4 ... 7 | 2 | 7 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | -1 ... 2 | 1 | 6 ... 3 | 2 | 2 ... 4 | 2 | 10 ... 5 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_right( ... 
t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2), t1.a == t2.b ... ).select(key=t2.b, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 1 | -1 1 | 4 | 6 1 | 7 | 6 2 | | 10 2 | 0 | 2 2 | 3 | 2 2 | 4 | 2 4 | | 3 |
166,635 | from __future__ import annotations
from typing import Any
import pathway.internals as pw
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableSubstitutionDesugaring,
desugar,
)
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.thisclass import ThisMetaclass
from pathway.internals.trace import trace_user_frame
from pathway.stdlib import temporal
class WindowJoinResult(DesugaringContext):
    """
    Result of a window join between tables.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 1 | 1
    ... 2 | 2
    ... 3 | 3
    ... 4 | 7
    ... 5 | 13
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 1 | 2
    ... 2 | 5
    ... 3 | 6
    ... 4 | 7
    ... '''
    ... )
    >>> join_result = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2))
    >>> isinstance(join_result, pw.temporal.WindowJoinResult)
    True
    >>> pw.debug.compute_and_print(
    ... join_result.select(left_t=t1.t, right_t=t2.t), include_id=False
    ... )
    left_t | right_t
    | 5
    1 |
    2 | 2
    3 | 2
    7 | 6
    7 | 7
    13 |
    """
    # The plain JoinResult produced by the underlying join; select() below
    # forwards straight to it.
    _join_result: pw.JoinResult
    # Maps each original input table to the (windowed) table that replaced it,
    # so expressions written against the originals can be rewritten.
    _table_substitution: dict[pw.TableLike, pw.Table]
    # Resolves pw.left / pw.right / pw.this inside user expressions.
    _substitution: dict[ThisMetaclass, pw.Joinable]
    def __init__(
        self,
        join_result: pw.JoinResult,
        left_original: pw.Table,
        right_original: pw.Table,
        left_new: pw.Table,
        right_new: pw.Table,
    ):
        """Wrap ``join_result`` and record the substitutions used to desugar
        expressions later passed to :meth:`select`."""
        self._join_result = join_result
        # The result lives in the universe of the underlying join result.
        self._universe = join_result._universe
        self._table_substitution = {
            left_original: left_new,
            right_original: right_new,
        }
        self._substitution = {
            pw.left: left_new,
            pw.right: right_new,
            pw.this: join_result,
        }
    def _desugaring(self) -> TableSubstitutionDesugaring:
        # Desugaring rewrites references to the original tables into
        # references to the replacement tables prepared in __init__.
        return TableSubstitutionDesugaring(self._table_substitution)
    def select(self, *args: pw.ColumnReference, **kwargs: Any) -> pw.Table:
        """Computes a result of a window join.
        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.
        Returns:
            Table: Created table.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown(
        ... '''
        ... | a | t
        ... 1 | 1 | 1
        ... 2 | 1 | 2
        ... 3 | 1 | 3
        ... 4 | 1 | 7
        ... 5 | 1 | 13
        ... 6 | 2 | 1
        ... 7 | 2 | 2
        ... 8 | 3 | 4
        ... '''
        ... )
        >>> t2 = pw.debug.table_from_markdown(
        ... '''
        ... | b | t
        ... 1 | 1 | 2
        ... 2 | 1 | 5
        ... 3 | 1 | 6
        ... 4 | 1 | 7
        ... 5 | 2 | 2
        ... 6 | 2 | 3
        ... 7 | 4 | 3
        ... '''
        ... )
        >>> t3 = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select(
        ... key=pw.coalesce(t1.a, t2.b), left_t=t1.t, right_t=t2.t
        ... )
        >>> pw.debug.compute_and_print(t3, include_id=False)
        key | left_t | right_t
        1 | | 5
        1 | 1 |
        1 | 2 | 2
        1 | 3 | 2
        1 | 7 | 6
        1 | 7 | 7
        1 | 13 |
        2 | 1 |
        2 | 2 | 2
        2 | 2 | 3
        3 | 4 |
        4 | | 3
        """
        # Forward to the wrapped JoinResult; the expressions are desugared
        # using the substitutions recorded in __init__.
        return self._join_result.select(*args, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `window_join_outer` function. Write a Python function `def window_join_outer( self: pw.Table, other: pw.Table, self_time: pw.ColumnExpression, other_time: pw.ColumnExpression, window: temporal.Window, *on: pw.ColumnExpression, left_instance: pw.ColumnReference | None = None, right_instance: pw.ColumnReference | None = None, ) -> WindowJoinResult` to solve the following problem:
Performs a window outer join of self with other using a window and join expressions. If two records belong to the same window and meet the conditions specified in the `on` clause, they will be joined. Note that if a sliding window is used and there are pairs of matching records that appear in more than one window, they will be included in the result multiple times (equal to the number of windows they appear in). When using a session window, the function creates sessions by concatenating records from both sides of a join. Only pairs of records that meet the conditions specified in the `on` clause can be part of the same session. The result of a given session will include all records from the left side of a join that belong to this session, joined with all records from the right side of a join that belong to this session. Rows from both sides that didn't match with any record on the other side in a given window, are returned with missing values on the other side replaced with `None`. The multiplicity of such rows equals the number of windows they belong to and don't have a match in them. Args: other: the right side of a join. self_time: time expression in self. other_time: time expression in other. window: a window to use. on: a list of column expressions. Each must have == on the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Returns: WindowJoinResult: a result of the window join. A method `.select()` can be called on it to extract relevant columns from the result of a join. Examples: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 1 ... 2 | 2 ... 3 | 3 ... 4 | 7 ... 5 | 13 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 2 ... 2 | 5 ... 3 | 6 ... 4 | 7 ... ''' ... ) >>> t3 = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2)).select( ... 
left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t | 5 1 | 2 | 2 3 | 2 7 | 6 7 | 7 13 | >>> t4 = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.sliding(1, 2)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t4, include_id=False) left_t | right_t | 5 | 5 | 6 1 | 1 | 2 2 | 2 2 | 2 3 | 3 | 2 7 | 6 7 | 7 7 | 7 13 | 13 | >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 2 ... 3 | 1 | 3 ... 4 | 1 | 7 ... 5 | 1 | 13 ... 6 | 2 | 1 ... 7 | 2 | 2 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 2 ... 2 | 1 | 5 ... 3 | 1 | 6 ... 4 | 1 | 7 ... 5 | 2 | 2 ... 6 | 2 | 3 ... 7 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select( ... key=pw.coalesce(t1.a, t2.b), left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | | 5 1 | 1 | 1 | 2 | 2 1 | 3 | 2 1 | 7 | 6 1 | 7 | 7 1 | 13 | 2 | 1 | 2 | 2 | 2 2 | 2 | 3 3 | 4 | 4 | | 3 >>> >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | 0 ... 1 | 5 ... 2 | 10 ... 3 | 15 ... 4 | 17 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | -3 ... 1 | 2 ... 2 | 3 ... 3 | 6 ... 4 | 16 ... ''' ... ) >>> t3 = t1.window_join_outer( ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2) ... ).select(left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t | -3 0 | 2 0 | 3 0 | 6 5 | 2 5 | 3 5 | 6 10 | 15 | 16 17 | 16 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 4 ... 3 | 1 | 7 ... 4 | 2 | 0 ... 5 | 2 | 3 ... 6 | 2 | 4 ... 7 | 2 | 7 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | -1 ... 2 | 1 | 6 ... 3 | 2 | 2 ... 4 | 2 | 10 ... 5 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_outer( ... 
t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2), t1.a == t2.b ... ).select(key=pw.coalesce(t1.a, t2.b), left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 1 | -1 1 | 4 | 6 1 | 7 | 6 2 | | 10 2 | 0 | 2 2 | 3 | 2 2 | 4 | 2 2 | 7 | 3 | 4 | 4 | | 3
Here is the function:
def window_join_outer(
    self: pw.Table,
    other: pw.Table,
    self_time: pw.ColumnExpression,
    other_time: pw.ColumnExpression,
    window: temporal.Window,
    *on: pw.ColumnExpression,
    left_instance: pw.ColumnReference | None = None,
    right_instance: pw.ColumnReference | None = None,
) -> WindowJoinResult:
    """Performs a window outer join of self with other using a window and join expressions.
    If two records belong to the same window and meet the conditions specified in
    the `on` clause, they will be joined. Note that if a sliding window is used and
    there are pairs of matching records that appear in more than one window,
    they will be included in the result multiple times (equal to the number of
    windows they appear in).
    When using a session window, the function creates sessions by concatenating
    records from both sides of a join. Only pairs of records that meet
    the conditions specified in the `on` clause can be part of the same session.
    The result of a given session will include all records from the left side of
    a join that belong to this session, joined with all records from the right
    side of a join that belong to this session.
    Rows from both sides that didn't match with any record on the other side in
    a given window, are returned with missing values on the other side replaced
    with `None`. The multiplicity of such rows equals the number of windows they
    belong to and don't have a match in them.
    Args:
        other: the right side of a join.
        self_time: time expression in self.
        other_time: time expression in other.
        window: a window to use.
        on: a list of column expressions. Each must have == on the top level
            operation and be of the form LHS: ColumnReference == RHS: ColumnReference.
        left_instance/right_instance: optional arguments describing partitioning of the data into separate instances
    Returns:
        WindowJoinResult: a result of the window join. A method `.select()`
        can be called on it to extract relevant columns from the result of a join.
    Examples:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 1 | 1
    ... 2 | 2
    ... 3 | 3
    ... 4 | 7
    ... 5 | 13
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 1 | 2
    ... 2 | 5
    ... 3 | 6
    ... 4 | 7
    ... '''
    ... )
    >>> t3 = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2)).select(
    ... left_t=t1.t, right_t=t2.t
    ... )
    >>> pw.debug.compute_and_print(t3, include_id=False)
    left_t | right_t
    | 5
    1 |
    2 | 2
    3 | 2
    7 | 6
    7 | 7
    13 |
    >>> t4 = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.sliding(1, 2)).select(
    ... left_t=t1.t, right_t=t2.t
    ... )
    >>> pw.debug.compute_and_print(t4, include_id=False)
    left_t | right_t
    | 5
    | 5
    | 6
    1 |
    1 | 2
    2 | 2
    2 | 2
    3 |
    3 | 2
    7 | 6
    7 | 7
    7 | 7
    13 |
    13 |
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | a | t
    ... 1 | 1 | 1
    ... 2 | 1 | 2
    ... 3 | 1 | 3
    ... 4 | 1 | 7
    ... 5 | 1 | 13
    ... 6 | 2 | 1
    ... 7 | 2 | 2
    ... 8 | 3 | 4
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | b | t
    ... 1 | 1 | 2
    ... 2 | 1 | 5
    ... 3 | 1 | 6
    ... 4 | 1 | 7
    ... 5 | 2 | 2
    ... 6 | 2 | 3
    ... 7 | 4 | 3
    ... '''
    ... )
    >>> t3 = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select(
    ... key=pw.coalesce(t1.a, t2.b), left_t=t1.t, right_t=t2.t
    ... )
    >>> pw.debug.compute_and_print(t3, include_id=False)
    key | left_t | right_t
    1 | | 5
    1 | 1 |
    1 | 2 | 2
    1 | 3 | 2
    1 | 7 | 6
    1 | 7 | 7
    1 | 13 |
    2 | 1 |
    2 | 2 | 2
    2 | 2 | 3
    3 | 4 |
    4 | | 3
    >>>
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 0 | 0
    ... 1 | 5
    ... 2 | 10
    ... 3 | 15
    ... 4 | 17
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 0 | -3
    ... 1 | 2
    ... 2 | 3
    ... 3 | 6
    ... 4 | 16
    ... '''
    ... )
    >>> t3 = t1.window_join_outer(
    ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2)
    ... ).select(left_t=t1.t, right_t=t2.t)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    left_t | right_t
    | -3
    0 | 2
    0 | 3
    0 | 6
    5 | 2
    5 | 3
    5 | 6
    10 |
    15 | 16
    17 | 16
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | a | t
    ... 1 | 1 | 1
    ... 2 | 1 | 4
    ... 3 | 1 | 7
    ... 4 | 2 | 0
    ... 5 | 2 | 3
    ... 6 | 2 | 4
    ... 7 | 2 | 7
    ... 8 | 3 | 4
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | b | t
    ... 1 | 1 | -1
    ... 2 | 1 | 6
    ... 3 | 2 | 2
    ... 4 | 2 | 10
    ... 5 | 4 | 3
    ... '''
    ... )
    >>> t3 = t1.window_join_outer(
    ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2), t1.a == t2.b
    ... ).select(key=pw.coalesce(t1.a, t2.b), left_t=t1.t, right_t=t2.t)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    key | left_t | right_t
    1 | 1 | -1
    1 | 4 | 6
    1 | 7 | 6
    2 | | 10
    2 | 0 | 2
    2 | 3 | 2
    2 | 4 | 2
    2 | 7 |
    3 | 4 |
    4 | | 3
    """
    # Delegate to the window's join implementation; JoinMode.OUTER keeps
    # unmatched rows from both sides (missing side filled with None).
    return window._join(
        self,
        other,
        self_time,
        other_time,
        *on,
        mode=pw.JoinMode.OUTER,
        left_instance=left_instance,
        right_instance=right_instance,
    )
left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t | 5 1 | 2 | 2 3 | 2 7 | 6 7 | 7 13 | >>> t4 = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.sliding(1, 2)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t4, include_id=False) left_t | right_t | 5 | 5 | 6 1 | 1 | 2 2 | 2 2 | 2 3 | 3 | 2 7 | 6 7 | 7 7 | 7 13 | 13 | >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 2 ... 3 | 1 | 3 ... 4 | 1 | 7 ... 5 | 1 | 13 ... 6 | 2 | 1 ... 7 | 2 | 2 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 2 ... 2 | 1 | 5 ... 3 | 1 | 6 ... 4 | 1 | 7 ... 5 | 2 | 2 ... 6 | 2 | 3 ... 7 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_outer(t2, t1.t, t2.t, pw.temporal.tumbling(2), t1.a == t2.b).select( ... key=pw.coalesce(t1.a, t2.b), left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | | 5 1 | 1 | 1 | 2 | 2 1 | 3 | 2 1 | 7 | 6 1 | 7 | 7 1 | 13 | 2 | 1 | 2 | 2 | 2 2 | 2 | 3 3 | 4 | 4 | | 3 >>> >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | 0 ... 1 | 5 ... 2 | 10 ... 3 | 15 ... 4 | 17 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 0 | -3 ... 1 | 2 ... 2 | 3 ... 3 | 6 ... 4 | 16 ... ''' ... ) >>> t3 = t1.window_join_outer( ... t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2) ... ).select(left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t | -3 0 | 2 0 | 3 0 | 6 5 | 2 5 | 3 5 | 6 10 | 15 | 16 17 | 16 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 1 ... 2 | 1 | 4 ... 3 | 1 | 7 ... 4 | 2 | 0 ... 5 | 2 | 3 ... 6 | 2 | 4 ... 7 | 2 | 7 ... 8 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | -1 ... 2 | 1 | 6 ... 3 | 2 | 2 ... 4 | 2 | 10 ... 5 | 4 | 3 ... ''' ... ) >>> t3 = t1.window_join_outer( ... 
t2, t1.t, t2.t, pw.temporal.session(predicate=lambda a, b: abs(a - b) <= 2), t1.a == t2.b ... ).select(key=pw.coalesce(t1.a, t2.b), left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) key | left_t | right_t 1 | 1 | -1 1 | 4 | 6 1 | 7 | 6 2 | | 10 2 | 0 | 2 2 | 3 | 2 2 | 4 | 2 2 | 7 | 3 | 4 | 4 | | 3 |
166,636 | from dataclasses import dataclass
import pathway.internals as pw
from .utils import IntervalType
class CommonBehavior(Behavior):
    """Defines temporal behavior of windows and temporal joins."""
    # Delay of the initial output relative to the window start / join time;
    # ``None`` disables the delaying mechanism (see ``common_behavior`` docs below).
    delay: IntervalType | None
    # Entries older than (max seen time - cutoff) are ignored and their state may
    # be freed; ``None`` disables the cutoff mechanism.
    cutoff: IntervalType | None
    # If False, results older than (max seen time - cutoff) are dropped from output.
    keep_results: bool
IntervalType = Union[int, float, datetime.timedelta]
The provided code snippet includes necessary dependencies for implementing the `common_behavior` function. Write a Python function `def common_behavior( delay: IntervalType | None = None, cutoff: IntervalType | None = None, keep_results: bool = True, ) -> CommonBehavior` to solve the following problem:
Creates an instance of ``CommonBehavior``, which contains a basic configuration of a behavior of temporal operators (like ``windowby`` or ``asof_join``). Each temporal operator tracks its own time (defined as a maximum time that arrived to the operator) and this configuration tells it that some of its inputs or outputs may be delayed or ignored. The decisions are based on the current time of the operator and the time associated with an input/output entry. Additionally, it allows the operator to free up memory by removing parts of internal state that cannot interact with any future input entries. Remark: for the sake of temporal behavior, the current time of each operator is updated only after it processes all the data that arrived on input. In other words, if several new input entries arrived to the system simultaneously, each of those entries will be processed using last recorded time, and the recorded time is upda Args: delay: Optional. For windows, delays initial output by ``delay`` with respect to the beginning of the window. Setting it to ``None`` does not enable delaying mechanism. For interval joins and asof joins, it delays the time the record is joined by ``delay``. Using `delay` is useful when updates are too frequent. cutoff: Optional. For windows, stops updating windows which end earlier than maximal seen time minus ``cutoff``. Setting cutoff to ``None`` does not enable cutoff mechanism. For interval joins and asof joins, it ignores entries that are older than maximal seen time minus ``cutoff``. This parameter is also used to clear memory. It allows to release memory used by entries that won't change. keep_results: If set to True, keeps all results of the operator. If set to False, keeps only results that are newer than maximal seen time minus ``cutoff``. Can't be set to ``False``, when ``cutoff`` is ``None``.
Here is the function:
def common_behavior(
    delay: IntervalType | None = None,
    cutoff: IntervalType | None = None,
    keep_results: bool = True,
) -> CommonBehavior:
    """Creates an instance of ``CommonBehavior``, which contains a basic configuration of
    a behavior of temporal operators (like ``windowby`` or ``asof_join``).
    Each temporal operator tracks its own time (defined as a maximum time that arrived to
    the operator) and this configuration tells it that some of its inputs or outputs may
    be delayed or ignored.
    The decisions are based on the current time of the operator and the time associated
    with an input/output entry. Additionally, it allows the operator to free up memory by
    removing parts of internal state that cannot interact with any future input entries.
    Remark: for the sake of temporal behavior, the current time of each operator is
    updated only after it processes all the data that arrived on input. In other words,
    if several new input entries arrived to the system simultaneously, each of those
    entries will be processed using last recorded time, and the recorded time is updated
    only afterwards.
    Args:
        delay:
            Optional.
            For windows, delays initial output by ``delay`` with respect to the
            beginning of the window. Setting it to ``None`` does not enable
            delaying mechanism.
            For interval joins and asof joins, it delays the time the record is joined by ``delay``.
            Using `delay` is useful when updates are too frequent.
        cutoff:
            Optional.
            For windows, stops updating windows which end earlier than maximal
            seen time minus ``cutoff``. Setting cutoff to ``None`` does not enable
            cutoff mechanism.
            For interval joins and asof joins, it ignores entries that are older
            than maximal seen time minus ``cutoff``. This parameter is also used to clear
            memory. It allows to release memory used by entries that won't change.
        keep_results: If set to True, keeps all results of the operator. If set to False,
            keeps only results that are newer than maximal seen time minus ``cutoff``.
            Can't be set to ``False``, when ``cutoff`` is ``None``.
    """
    # ``keep_results=False`` is only meaningful together with a concrete ``cutoff``.
    # NOTE(review): ``assert`` is stripped under ``python -O``; consider raising
    # ``ValueError`` here instead — confirm with API owners before changing.
    assert not (cutoff is None and not keep_results)
return CommonBehavior(delay, cutoff, keep_results) | Creates an instance of ``CommonBehavior``, which contains a basic configuration of a behavior of temporal operators (like ``windowby`` or ``asof_join``). Each temporal operator tracks its own time (defined as a maximum time that arrived to the operator) and this configuration tells it that some of its inputs or outputs may be delayed or ignored. The decisions are based on the current time of the operator and the time associated with an input/output entry. Additionally, it allows the operator to free up memory by removing parts of internal state that cannot interact with any future input entries. Remark: for the sake of temporal behavior, the current time of each operator is updated only after it processes all the data that arrived on input. In other words, if several new input entries arrived to the system simultaneously, each of those entries will be processed using last recorded time, and the recorded time is upda Args: delay: Optional. For windows, delays initial output by ``delay`` with respect to the beginning of the window. Setting it to ``None`` does not enable delaying mechanism. For interval joins and asof joins, it delays the time the record is joined by ``delay``. Using `delay` is useful when updates are too frequent. cutoff: Optional. For windows, stops updating windows which end earlier than maximal seen time minus ``cutoff``. Setting cutoff to ``None`` does not enable cutoff mechanism. For interval joins and asof joins, it ignores entries that are older than maximal seen time minus ``cutoff``. This parameter is also used to clear memory. It allows to release memory used by entries that won't change. keep_results: If set to True, keeps all results of the operator. If set to False, keeps only results that are newer than maximal seen time minus ``cutoff``. Can't be set to ``False``, when ``cutoff`` is ``None``. |
166,637 | from dataclasses import dataclass
import pathway.internals as pw
from .utils import IntervalType
class ExactlyOnceBehavior(Behavior):
    """Behavior requesting that each non-empty window produce exactly one output."""
    # The window stops accepting data and emits its single result when time reaches
    # ``window end + shift``; ``None`` is interpreted as a shift of 0
    # (see ``exactly_once_behavior`` below).
    shift: IntervalType | None
IntervalType = Union[int, float, datetime.timedelta]
The provided code snippet includes necessary dependencies for implementing the `exactly_once_behavior` function. Write a Python function `def exactly_once_behavior(shift: IntervalType | None = None)` to solve the following problem:
Creates an instance of class ExactlyOnceBehavior, indicating that each non empty window should produce exactly one output. Args: shift: optional, defines the moment in time (``window end + shift``) in which the window stops accepting the data and sends the results to the output. Setting it to ``None`` is interpreted as ``shift=0``. Remark: note that setting a non-zero shift and demanding exactly one output results in the output being delivered only when the time in the time column reaches ``window end + shift``.
Here is the function:
def exactly_once_behavior(shift: IntervalType | None = None) -> ExactlyOnceBehavior:
    """Creates an instance of class ExactlyOnceBehavior, indicating that each non empty
    window should produce exactly one output.
    Args:
        shift: optional, defines the moment in time (``window end + shift``) in which
            the window stops accepting the data and sends the results to the output.
            Setting it to ``None`` is interpreted as ``shift=0``.
    Returns:
        ExactlyOnceBehavior: configuration object to pass to temporal operators.
    Remark:
        note that setting a non-zero shift and demanding exactly one output results in
        the output being delivered only when the time in the time column reaches
        ``window end + shift``.
    """
return ExactlyOnceBehavior(shift) | Creates an instance of class ExactlyOnceBehavior, indicating that each non empty window should produce exactly one output. Args: shift: optional, defines the moment in time (``window end + shift``) in which the window stops accepting the data and sends the results to the output. Setting it to ``None`` is interpreted as ``shift=0``. Remark: note that setting a non-zero shift and demanding exactly one output results in the output being delivered only when the time in the time column reaches ``window end + shift``. |
166,638 | from __future__ import annotations
import dataclasses
import datetime
from abc import ABC, abstractmethod
from collections.abc import Callable, Sequence
from typing import Any
import pathway.internals as pw
from pathway.internals import dtype as dt
from pathway.internals.arg_handlers import (
arg_handler,
offset_deprecation,
shard_deprecation,
windowby_handler,
)
from pathway.internals.desugaring import desugar
from pathway.internals.joins import validate_join_condition
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import eval_type
from ._interval_join import interval, interval_join
from ._window_join import WindowJoinResult
from .temporal_behavior import (
Behavior,
CommonBehavior,
ExactlyOnceBehavior,
common_behavior,
)
from .utils import (
IntervalType,
TimeEventType,
check_joint_types,
get_default_origin,
zero_length_interval,
)
class Window(ABC):
    """Abstract interface of a temporal window.

    Concrete subclasses in this file (e.g. ``_SessionWindow``, ``_SlidingWindow``)
    implement grouping (``_apply``) and window joins (``_join``).
    """
    # Groups rows of ``table`` into windows determined by the time expression ``key``;
    # ``behavior`` tunes delaying/cutoff, ``instance`` partitions the data.
    def _apply(
        self,
        table: pw.Table,
        key: pw.ColumnExpression,
        behavior: Behavior | None,
        instance: pw.ColumnExpression | None,
    ) -> pw.GroupedTable: ...
    # Joins rows of ``left`` and ``right`` that fall into the same window,
    # subject to the extra equality conditions in ``on``.
    def _join(
        self,
        left: pw.Table,
        right: pw.Table,
        left_time_expression: pw.ColumnExpression,
        right_time_expression: pw.ColumnExpression,
        *on: pw.ColumnExpression,
        mode: pw.JoinMode,
        left_instance: pw.ColumnReference | None = None,
        right_instance: pw.ColumnReference | None = None,
    ) -> WindowJoinResult: ...
_SessionPredicateType = Callable[[Any, Any], bool]
class _SessionWindow(Window):
    """Session window: chains adjacent (time-ordered) events into one window for as
    long as consecutive events satisfy ``predicate`` or lie closer than ``max_gap``.
    """
    # Exactly one of the two fields is set (enforced by the ``session`` factory).
    predicate: _SessionPredicateType | None
    max_gap: IntervalType | None
    def _merge(
        self, cur: pw.ColumnExpression, next: pw.ColumnExpression
    ) -> pw.ColumnExpression:
        """Tell whether two adjacent events ``cur`` and ``next`` share a session."""
        if self.predicate is not None:
            return pw.apply_with_type(self.predicate, bool, cur, next)
        else:
            return next - cur < self.max_gap
    def _compute_group_repr(
        self,
        table: pw.Table,
        key: pw.ColumnExpression,
        instance: pw.ColumnExpression | None,
    ) -> pw.Table:
        """Compute, per row, a representative id (``_pw_window``) of its session.

        Rows are sorted by ``key`` (presumably within ``instance`` — determined by
        ``build_sorted_index``; TODO confirm), each row points to its successor when
        ``_merge`` allows joining them, and iterated pointer-jumping collapses every
        chain of mergeable events to a single representative.
        """
        target = table.select(key=key, instance=instance)
        import pathway.stdlib.indexing
        target = target + pathway.stdlib.indexing.sort_from_index(
            **pathway.stdlib.indexing.build_sorted_index(target)
        )
        # Time value of each row's successor in sort order (None at the end).
        sel_key = target.select(next_key=target.ix(target.next, optional=True).key)
        # Point to the successor if mergeable, otherwise to self.
        target += target.select(
            _pw_window=pw.if_else(
                sel_key.next_key.is_not_none(),
                pw.if_else(
                    self._merge(target.key, pw.unwrap(sel_key.next_key)),
                    target.next,
                    target.id,
                ),
                target.id,
            ),
        ).update_types(_pw_window=pw.Pointer)
        def merge_ccs(data):
            # Pointer jumping: every row adopts its target's representative;
            # ``pw.iterate`` repeats this until the assignment stabilizes.
            data = data.with_columns(_pw_window=data.ix(data._pw_window)._pw_window)
            return data
        return pw.iterate(merge_ccs, data=target)
    def _apply(
        self,
        table: pw.Table,
        key: pw.ColumnExpression,
        behavior: Behavior | None,
        instance: pw.ColumnExpression | None,
    ) -> pw.GroupedTable:
        """Group ``table`` into session windows.

        Window bounds are the min/max ``key`` over rows sharing a representative.
        Note: ``behavior`` is accepted but not used in this implementation.
        """
        if self.max_gap is not None:
            check_joint_types(
                {
                    "time_expr": (key, TimeEventType),
                    "window.max_gap": (self.max_gap, IntervalType),
                }
            )
        target = self._compute_group_repr(table, key, instance)
        # Per-session bounds: earliest and latest event time.
        tmp = target.groupby(target._pw_window).reduce(
            _pw_window_start=pw.reducers.min(key),
            _pw_window_end=pw.reducers.max(key),
        )
        gb = table.with_columns(
            target._pw_window,
            tmp.ix_ref(target._pw_window)._pw_window_start,
            tmp.ix_ref(target._pw_window)._pw_window_end,
            _pw_instance=instance,
        ).groupby(
            pw.this._pw_window,
            pw.this._pw_window_start,
            pw.this._pw_window_end,
            instance=pw.this._pw_instance,
        )
        return gb
    def _join(
        self,
        left: pw.Table,
        right: pw.Table,
        left_time_expression: pw.ColumnExpression,
        right_time_expression: pw.ColumnExpression,
        *on: pw.ColumnExpression,
        mode: pw.JoinMode,
        left_instance: pw.ColumnReference | None = None,
        right_instance: pw.ColumnReference | None = None,
    ) -> WindowJoinResult:
        """Session window join.

        Sessions are computed over the union of both sides' events; rows are joined
        when they land in the same session and satisfy the ``on`` conditions.
        """
        def maybe_make_tuple(
            conditions: Sequence[pw.ColumnExpression],
        ) -> pw.ColumnExpression:
            # Pack multiple join keys into one tuple column (None when no keys).
            if len(conditions) > 1:
                return pw.make_tuple(*conditions)
            elif len(conditions) == 1:
                return conditions[0]
            else:
                return None # type: ignore
        check_joint_types(
            {
                "left_time_expression": (left_time_expression, TimeEventType),
                "right_time_expression": (right_time_expression, TimeEventType),
                "window.max_gap": (self.max_gap, IntervalType),
            }
        )
        # Instance columns are treated as an additional equality condition.
        if left_instance is not None and right_instance is not None:
            on = (*on, left_instance == right_instance)
        else:
            assert left_instance is None and right_instance is None
        left_on: list[pw.ColumnReference] = []
        right_on: list[pw.ColumnReference] = []
        for cond in on:
            cond_left, cond_right, _ = validate_join_condition(cond, left, right)
            left_on.append(cond_left)
            right_on.append(cond_right)
        # Sessions must span events from both sides, so compute them on the union.
        concatenated_events = pw.Table.concat_reindex(
            left.select(
                key=left_time_expression,
                instance=maybe_make_tuple(left_on),
                is_left=True,
                original_id=left.id,
            ),
            right.select(
                key=right_time_expression,
                instance=maybe_make_tuple(right_on),
                is_left=False,
                original_id=right.id,
            ),
        )
        group_repr = self._compute_group_repr(
            concatenated_events, concatenated_events.key, concatenated_events.instance
        )
        tmp = group_repr.groupby(group_repr._pw_window).reduce(
            _pw_window_start=pw.reducers.min(concatenated_events.key),
            _pw_window_end=pw.reducers.max(concatenated_events.key),
        )
        session_ids = concatenated_events.with_columns(
            group_repr._pw_window,
            tmp.ix_ref(group_repr._pw_window)._pw_window_start,
            tmp.ix_ref(group_repr._pw_window)._pw_window_end,
        )
        # Split the per-event session assignment back to the original tables.
        left_session_ids = (
            session_ids.filter(session_ids.is_left)
            .with_id(pw.this.original_id)
            .with_universe_of(left)
        )
        right_session_ids = (
            session_ids.filter(~session_ids.is_left)
            .with_id(pw.this.original_id)
            .with_universe_of(right)
        )
        left_with_session_id = left.with_columns(
            left_session_ids._pw_window,
            left_session_ids._pw_window_start,
            left_session_ids._pw_window_end,
        )
        right_with_session_id = right.with_columns(
            right_session_ids._pw_window,
            right_session_ids._pw_window_start,
            right_session_ids._pw_window_end,
        )
        join_result = pw.JoinResult._table_join(
            left_with_session_id,
            right_with_session_id,
            left_with_session_id._pw_window_start
            == right_with_session_id._pw_window_start,
            left_with_session_id._pw_window_end == right_with_session_id._pw_window_end,
            left_with_session_id._pw_window == right_with_session_id._pw_window,
            *[
                left_with_session_id[left_cond.name]
                == right_with_session_id[right_cond.name]
                for left_cond, right_cond in zip(left_on, right_on)
            ],
            mode=mode,
        )
        return WindowJoinResult(
            join_result, left, right, left_with_session_id, right_with_session_id
        )
The provided code snippet includes necessary dependencies for implementing the `session` function. Write a Python function `def session( *, predicate: _SessionPredicateType | None = None, max_gap: int | float | datetime.timedelta | None = None, ) -> Window` to solve the following problem:
Allows grouping together elements within a window across ordered time-like data column by locally grouping adjacent elements either based on a maximum time difference or using a custom predicate. Note: Usually used as an argument of `.windowby()`. Exactly one of the arguments `predicate` or `max_gap` should be provided. Args: predicate: function taking two adjacent entries that returns a boolean saying whether the two entries should be grouped max_gap: Two adjacent entries will be grouped if `b - a < max_gap` Returns: Window: object to pass as an argument to `.windowby()` Examples: >>> import pathway as pw >>> t = pw.debug.table_from_markdown( ... ''' ... | instance | t | v ... 1 | 0 | 1 | 10 ... 2 | 0 | 2 | 1 ... 3 | 0 | 4 | 3 ... 4 | 0 | 8 | 2 ... 5 | 0 | 9 | 4 ... 6 | 0 | 10| 8 ... 7 | 1 | 1 | 9 ... 8 | 1 | 2 | 16 ... ''') >>> result = t.windowby( ... t.t, window=pw.temporal.session(predicate=lambda a, b: abs(a-b) <= 1), instance=t.instance ... ).reduce( ... pw.this._pw_instance, ... pw.this._pw_window_start, ... pw.this._pw_window_end, ... min_t=pw.reducers.min(pw.this.t), ... max_v=pw.reducers.max(pw.this.v), ... count=pw.reducers.count(), ... ) >>> pw.debug.compute_and_print(result, include_id=False) _pw_instance | _pw_window_start | _pw_window_end | min_t | max_v | count 0 | 1 | 2 | 1 | 10 | 2 0 | 4 | 4 | 4 | 3 | 1 0 | 8 | 10 | 8 | 8 | 3 1 | 1 | 2 | 1 | 16 | 2
Here is the function:
def session(
    *,
    predicate: _SessionPredicateType | None = None,
    max_gap: int | float | datetime.timedelta | None = None,
) -> Window:
    """Allows grouping together elements within a window across ordered time-like
    data column by locally grouping adjacent elements either based on a maximum time
    difference or using a custom predicate.
    Note:
        Usually used as an argument of `.windowby()`.
        Exactly one of the arguments `predicate` or `max_gap` should be provided.
    Args:
        predicate: function taking two adjacent entries that returns a boolean saying
            whether the two entries should be grouped
        max_gap: Two adjacent entries will be grouped if `b - a < max_gap`
    Returns:
        Window: object to pass as an argument to `.windowby()`
    Examples:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown(
    ... '''
    ...     | instance | t | v
    ... 1   | 0        | 1 | 10
    ... 2   | 0        | 2 | 1
    ... 3   | 0        | 4 | 3
    ... 4   | 0        | 8 | 2
    ... 5   | 0        | 9 | 4
    ... 6   | 0        | 10| 8
    ... 7   | 1        | 1 | 9
    ... 8   | 1        | 2 | 16
    ... ''')
    >>> result = t.windowby(
    ...     t.t, window=pw.temporal.session(predicate=lambda a, b: abs(a-b) <= 1), instance=t.instance
    ... ).reduce(
    ...     pw.this._pw_instance,
    ...     pw.this._pw_window_start,
    ...     pw.this._pw_window_end,
    ...     min_t=pw.reducers.min(pw.this.t),
    ...     max_v=pw.reducers.max(pw.this.v),
    ...     count=pw.reducers.count(),
    ... )
    >>> pw.debug.compute_and_print(result, include_id=False)
    _pw_instance | _pw_window_start | _pw_window_end | min_t | max_v | count
    0 | 1 | 2 | 1 | 10 | 2
    0 | 4 | 4 | 4 | 3 | 1
    0 | 8 | 10 | 8 | 8 | 3
    1 | 1 | 2 | 1 | 16 | 2
    """
    # Exactly one of ``predicate``/``max_gap`` must be supplied (XOR validation).
    if predicate is None and max_gap is None:
        raise ValueError(
            "At least one of the parameters [predicate, max_gap] should be provided."
        )
    elif predicate is not None and max_gap is not None:
        raise ValueError("Cannot provide both [predicate, max_gap] at the same time.")
return _SessionWindow(predicate=predicate, max_gap=max_gap) | Allows grouping together elements within a window across ordered time-like data column by locally grouping adjacent elements either based on a maximum time difference or using a custom predicate. Note: Usually used as an argument of `.windowby()`. Exactly one of the arguments `predicate` or `max_gap` should be provided. Args: predicate: function taking two adjacent entries that returns a boolean saying whether the two entries should be grouped max_gap: Two adjacent entries will be grouped if `b - a < max_gap` Returns: Window: object to pass as an argument to `.windowby()` Examples: >>> import pathway as pw >>> t = pw.debug.table_from_markdown( ... ''' ... | instance | t | v ... 1 | 0 | 1 | 10 ... 2 | 0 | 2 | 1 ... 3 | 0 | 4 | 3 ... 4 | 0 | 8 | 2 ... 5 | 0 | 9 | 4 ... 6 | 0 | 10| 8 ... 7 | 1 | 1 | 9 ... 8 | 1 | 2 | 16 ... ''') >>> result = t.windowby( ... t.t, window=pw.temporal.session(predicate=lambda a, b: abs(a-b) <= 1), instance=t.instance ... ).reduce( ... pw.this._pw_instance, ... pw.this._pw_window_start, ... pw.this._pw_window_end, ... min_t=pw.reducers.min(pw.this.t), ... max_v=pw.reducers.max(pw.this.v), ... count=pw.reducers.count(), ... ) >>> pw.debug.compute_and_print(result, include_id=False) _pw_instance | _pw_window_start | _pw_window_end | min_t | max_v | count 0 | 1 | 2 | 1 | 10 | 2 0 | 4 | 4 | 4 | 3 | 1 0 | 8 | 10 | 8 | 8 | 3 1 | 1 | 2 | 1 | 16 | 2 |
166,639 | from __future__ import annotations
import dataclasses
import datetime
from abc import ABC, abstractmethod
from collections.abc import Callable, Sequence
from typing import Any
import pathway.internals as pw
from pathway.internals import dtype as dt
from pathway.internals.arg_handlers import (
arg_handler,
offset_deprecation,
shard_deprecation,
windowby_handler,
)
from pathway.internals.desugaring import desugar
from pathway.internals.joins import validate_join_condition
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import eval_type
from ._interval_join import interval, interval_join
from ._window_join import WindowJoinResult
from .temporal_behavior import (
Behavior,
CommonBehavior,
ExactlyOnceBehavior,
common_behavior,
)
from .utils import (
IntervalType,
TimeEventType,
check_joint_types,
get_default_origin,
zero_length_interval,
)
class Window(ABC):
    """Abstract interface of a temporal window.

    Concrete subclasses in this file (e.g. ``_SlidingWindow``) implement grouping
    (``_apply``) and window joins (``_join``).
    """
    # Groups rows of ``table`` into windows determined by the time expression ``key``;
    # ``behavior`` tunes delaying/cutoff, ``instance`` partitions the data.
    def _apply(
        self,
        table: pw.Table,
        key: pw.ColumnExpression,
        behavior: Behavior | None,
        instance: pw.ColumnExpression | None,
    ) -> pw.GroupedTable: ...
    # Joins rows of ``left`` and ``right`` that fall into the same window,
    # subject to the extra equality conditions in ``on``.
    def _join(
        self,
        left: pw.Table,
        right: pw.Table,
        left_time_expression: pw.ColumnExpression,
        right_time_expression: pw.ColumnExpression,
        *on: pw.ColumnExpression,
        mode: pw.JoinMode,
        left_instance: pw.ColumnReference | None = None,
        right_instance: pw.ColumnReference | None = None,
    ) -> WindowJoinResult: ...
class _SlidingWindow(Window):
    """Sliding window advancing every ``hop``, with length ``duration`` or
    ``ratio * hop``, optionally anchored at ``origin``.
    """
    # Exactly one of ``duration``/``ratio`` is set (enforced by the factories below).
    hop: IntervalType
    duration: IntervalType | None
    ratio: int | None
    # Optional anchor; when None a dtype-dependent default origin is used
    # (see ``get_default_origin`` in ``_window_assignment_function``).
    origin: TimeEventType | None
    def __init__(
        self,
        hop: IntervalType,
        duration: IntervalType | None,
        origin: TimeEventType | None,
        ratio: int | None,
    ) -> None:
        self.hop = hop
        self.duration = duration
        self.ratio = ratio
        self.origin = origin
    def _window_assignment_function(
        self, key_dtype: dt.DType
    ) -> Callable[[Any, TimeEventType], list[tuple[Any, TimeEventType, TimeEventType]]]:
        """Build a function mapping ``(instance, time)`` to all windows containing it.

        Window bounds are derived as multiples of ``hop`` from ``origin`` so that
        repeated addition does not accumulate floating-point error.
        """
        if self.origin is None:
            origin = get_default_origin(key_dtype)
        else:
            origin = self.origin
        def kth_stable_window(k):
            """Numerically stable k-th window."""
            start = k * self.hop + origin
            if self.ratio is not None:
                end = (k + self.ratio) * self.hop + origin
            else:
                end = k * self.hop + origin + self.duration
            return (start, end)
        def assign_windows(
            instance: Any, key: TimeEventType
        ) -> list[tuple[Any, TimeEventType, TimeEventType]]:
            """Returns the list of all the windows the given key belongs to.
            Each window is a tuple (window_start, window_end) describing the range
            of the window (window_start inclusive, window_end exclusive).
            """
            # compute lower and upper bound for multipliers (first_k and last_k) of hop
            # for which corresponding windows could contain key.
            last_k = int((key - origin) // self.hop) + 1 # type: ignore[operator, arg-type]
            if self.ratio is not None:
                first_k = last_k - self.ratio - 1
            else:
                assert self.duration is not None
                first_k = last_k - int(self.duration // self.hop) - 1 # type: ignore[operator, arg-type]
            first_k -= 1 # safety to avoid off-by one
            candidate_windows = [
                kth_stable_window(k) for k in range(first_k, last_k + 1)
            ]
            # filtering below is needed to handle case when hop > duration
            return [
                (instance, start, end)
                for (start, end) in candidate_windows
                if start <= key
                and key < end
                and (self.origin is None or start >= self.origin)
            ]
        return assign_windows
    def _apply(
        self,
        table: pw.Table,
        key: pw.ColumnExpression,
        behavior: Behavior | None,
        instance: pw.ColumnExpression | None,
    ) -> pw.GroupedTable:
        """Assign each row to every window containing its ``key``, flatten one row
        per (row, window) pair, apply ``behavior`` (common or exactly-once), and
        group by window.
        """
        check_joint_types(
            {
                "time_expr": (key, TimeEventType),
                "window.hop": (self.hop, IntervalType),
                "window.duration": (self.duration, IntervalType),
                "window.origin": (self.origin, TimeEventType),
            }
        )
        key_dtype = eval_type(key)
        assign_windows = self._window_assignment_function(key_dtype)
        # ``_pw_window`` holds the list of (instance, start, end) window tuples.
        target = table.select(
            _pw_window=pw.apply_with_type(
                assign_windows,
                dt.List(
                    dt.Tuple(
                        eval_type(instance), # type: ignore
                        key_dtype,
                        key_dtype,
                    )
                ),
                instance,
                key,
            ),
            _pw_key=key,
        )
        target = target.flatten(target._pw_window, _pw_key=target._pw_key, *table)
        target = target.with_columns(
            _pw_instance=pw.this._pw_window.get(0),
            _pw_window_start=pw.this._pw_window.get(1),
            _pw_window_end=pw.this._pw_window.get(2),
        )
        if behavior is not None:
            # Exactly-once is expressed as a CommonBehavior with delay = duration+shift
            # and cutoff = shift, so the window emits once when it closes.
            if isinstance(behavior, ExactlyOnceBehavior):
                duration: IntervalType
                # that is split in two if-s, as it helps mypy figure out proper types
                # one if impl left either self.ratio or self.duration as optionals
                # which won't fit into the duration variable of type IntervalType
                if self.duration is not None:
                    duration = self.duration
                elif self.ratio is not None:
                    duration = self.ratio * self.hop
                shift = (
                    behavior.shift
                    if behavior.shift is not None
                    else zero_length_interval(type(duration))
                )
                behavior = common_behavior(
                    duration + shift, shift, True # type:ignore
                )
            elif not isinstance(behavior, CommonBehavior):
                raise ValueError(
                    f"behavior {behavior} unsupported in sliding/tumbling window"
                )
            if behavior.cutoff is not None:
                cutoff_threshold = pw.this._pw_window_end + behavior.cutoff
                target = target._freeze(cutoff_threshold, pw.this._pw_key)
            if behavior.delay is not None:
                target = target._buffer(
                    target._pw_window_start + behavior.delay, target._pw_key
                )
                target = target.with_columns(
                    _pw_key=pw.if_else(
                        target._pw_key > target._pw_window_start + behavior.delay,
                        target._pw_key,
                        target._pw_window_start + behavior.delay,
                    )
                )
            if behavior.cutoff is not None:
                cutoff_threshold = pw.this._pw_window_end + behavior.cutoff
                target = target._forget(
                    cutoff_threshold, pw.this._pw_key, behavior.keep_results
                )
        filter_out_results_of_forgetting = (
            behavior is not None
            and behavior.cutoff is not None
            and behavior.keep_results
        )
        target = target.groupby(
            target._pw_window,
            target._pw_window_start,
            target._pw_window_end,
            instance=target._pw_instance,
            _filter_out_results_of_forgetting=filter_out_results_of_forgetting,
        )
        return target
    def _join(
        self,
        left: pw.Table,
        right: pw.Table,
        left_time_expression: pw.ColumnExpression,
        right_time_expression: pw.ColumnExpression,
        *on: pw.ColumnExpression,
        mode: pw.JoinMode,
        left_instance: pw.ColumnReference | None = None,
        right_instance: pw.ColumnReference | None = None,
    ) -> WindowJoinResult:
        """Window join: rows of ``left`` and ``right`` are joined when assigned to
        the same window. The instance slot of the window tuple is unused here
        (``dt.NONE`` / ``None``).
        """
        check_joint_types(
            {
                "left_time_expression": (left_time_expression, TimeEventType),
                "right_time_expression": (right_time_expression, TimeEventType),
                "window.hop": (self.hop, IntervalType),
                "window.duration": (self.duration, IntervalType),
                "window.origin": (self.origin, TimeEventType),
            }
        )
        time_expression_dtype = eval_type(left_time_expression)
        assert time_expression_dtype == eval_type(
            right_time_expression
        ) # checked in check_joint_types
        _pw_window_dtype = dt.List(
            dt.Tuple(
                dt.NONE,
                time_expression_dtype,
                time_expression_dtype,
            )
        )
        assign_windows = self._window_assignment_function(time_expression_dtype)
        left_window = left.select(
            _pw_window=pw.apply_with_type(
                assign_windows, _pw_window_dtype, None, left_time_expression
            )
        )
        left_window = left_window.flatten(left_window._pw_window, *left)
        left_window = left_window.with_columns(
            _pw_window_start=pw.this._pw_window.get(1),
            _pw_window_end=pw.this._pw_window.get(2),
        )
        right_window = right.select(
            _pw_window=pw.apply_with_type(
                assign_windows, _pw_window_dtype, None, right_time_expression
            )
        )
        right_window = right_window.flatten(right_window._pw_window, *right)
        right_window = right_window.with_columns(
            _pw_window_start=pw.this._pw_window.get(1),
            _pw_window_end=pw.this._pw_window.get(2),
        )
        # Rebind join conditions from the original tables to the flattened ones.
        for cond in on:
            cond_left, cond_right, cond = validate_join_condition(cond, left, right)
            cond._left = left_window[cond_left._name]
            cond._right = right_window[cond_right._name]
        join_result = pw.JoinResult._table_join(
            left_window,
            right_window,
            left_window._pw_window_start == right_window._pw_window_start,
            left_window._pw_window_end == right_window._pw_window_end,
            left_window._pw_window == right_window._pw_window,
            *on,
            mode=mode,
            left_instance=left_instance,
            right_instance=right_instance,
        )
        return WindowJoinResult(join_result, left, right, left_window, right_window)
The provided code snippet includes necessary dependencies for implementing the `sliding` function. Write a Python function `def sliding( hop: int | float | datetime.timedelta, duration: int | float | datetime.timedelta | None = None, ratio: int | None = None, origin: int | float | datetime.datetime | None = None, ) -> Window` to solve the following problem:
Allows grouping together elements within a window of a given length sliding across ordered time-like data column according to a specified interval (hop) starting from a given origin. Note: Usually used as an argument of `.windowby()`. Exactly one of the arguments `hop` or `ratio` should be provided. Args: hop: frequency of a window duration: length of the window ratio: used as an alternative way to specify duration as hop * ratio origin: a point in time at which the first window begins Returns: Window: object to pass as an argument to `.windowby()` Examples: >>> import pathway as pw >>> t = pw.debug.table_from_markdown( ... ''' ... | instance | t ... 1 | 0 | 12 ... 2 | 0 | 13 ... 3 | 0 | 14 ... 4 | 0 | 15 ... 5 | 0 | 16 ... 6 | 0 | 17 ... 7 | 1 | 10 ... 8 | 1 | 11 ... ''') >>> result = t.windowby( ... t.t, window=pw.temporal.sliding(duration=10, hop=3), instance=t.instance ... ).reduce( ... pw.this._pw_instance, ... pw.this._pw_window_start, ... pw.this._pw_window_end, ... min_t=pw.reducers.min(pw.this.t), ... max_t=pw.reducers.max(pw.this.t), ... count=pw.reducers.count(), ... ) >>> pw.debug.compute_and_print(result, include_id=False) _pw_instance | _pw_window_start | _pw_window_end | min_t | max_t | count 0 | 3 | 13 | 12 | 12 | 1 0 | 6 | 16 | 12 | 15 | 4 0 | 9 | 19 | 12 | 17 | 6 0 | 12 | 22 | 12 | 17 | 6 0 | 15 | 25 | 15 | 17 | 3 1 | 3 | 13 | 10 | 11 | 2 1 | 6 | 16 | 10 | 11 | 2 1 | 9 | 19 | 10 | 11 | 2
Here is the function:
def sliding(
    hop: int | float | datetime.timedelta,
    duration: int | float | datetime.timedelta | None = None,
    ratio: int | None = None,
    origin: int | float | datetime.datetime | None = None,
) -> Window:
    """Allows grouping together elements within a window of a given length sliding
    across ordered time-like data column according to a specified interval (hop)
    starting from a given origin.
    Note:
        Usually used as an argument of `.windowby()`.
        Exactly one of the arguments `duration` or `ratio` should be provided.
    Args:
        hop: frequency of a window
        duration: length of the window
        ratio: used as an alternative way to specify duration as hop * ratio
        origin: a point in time at which the first window begins
    Returns:
        Window: object to pass as an argument to `.windowby()`
    Examples:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown(
    ... '''
    ...     | instance | t
    ... 1   | 0        | 12
    ... 2   | 0        | 13
    ... 3   | 0        | 14
    ... 4   | 0        | 15
    ... 5   | 0        | 16
    ... 6   | 0        | 17
    ... 7   | 1        | 10
    ... 8   | 1        | 11
    ... ''')
    >>> result = t.windowby(
    ...     t.t, window=pw.temporal.sliding(duration=10, hop=3), instance=t.instance
    ... ).reduce(
    ...     pw.this._pw_instance,
    ...     pw.this._pw_window_start,
    ...     pw.this._pw_window_end,
    ...     min_t=pw.reducers.min(pw.this.t),
    ...     max_t=pw.reducers.max(pw.this.t),
    ...     count=pw.reducers.count(),
    ... )
    >>> pw.debug.compute_and_print(result, include_id=False)
    _pw_instance | _pw_window_start | _pw_window_end | min_t | max_t | count
    0 | 3 | 13 | 12 | 12 | 1
    0 | 6 | 16 | 12 | 15 | 4
    0 | 9 | 19 | 12 | 17 | 6
    0 | 12 | 22 | 12 | 17 | 6
    0 | 15 | 25 | 15 | 17 | 3
    1 | 3 | 13 | 10 | 11 | 2
    1 | 6 | 16 | 10 | 11 | 2
    1 | 9 | 19 | 10 | 11 | 2
    """
    # Exactly one of ``duration``/``ratio`` must be supplied (XOR validation).
    if duration is None and ratio is None:
        raise ValueError(
            "At least one of the parameters [duration, ratio] should be provided."
        )
    elif duration is not None and ratio is not None:
        raise ValueError("Cannot provide both [duration, ratio] at the same time.")
    return _SlidingWindow(
        duration=duration,
        hop=hop,
        ratio=ratio,
        origin=origin,
) | Allows grouping together elements within a window of a given length sliding across ordered time-like data column according to a specified interval (hop) starting from a given origin. Note: Usually used as an argument of `.windowby()`. Exactly one of the arguments `hop` or `ratio` should be provided. Args: hop: frequency of a window duration: length of the window ratio: used as an alternative way to specify duration as hop * ratio origin: a point in time at which the first window begins Returns: Window: object to pass as an argument to `.windowby()` Examples: >>> import pathway as pw >>> t = pw.debug.table_from_markdown( ... ''' ... | instance | t ... 1 | 0 | 12 ... 2 | 0 | 13 ... 3 | 0 | 14 ... 4 | 0 | 15 ... 5 | 0 | 16 ... 6 | 0 | 17 ... 7 | 1 | 10 ... 8 | 1 | 11 ... ''') >>> result = t.windowby( ... t.t, window=pw.temporal.sliding(duration=10, hop=3), instance=t.instance ... ).reduce( ... pw.this._pw_instance, ... pw.this._pw_window_start, ... pw.this._pw_window_end, ... min_t=pw.reducers.min(pw.this.t), ... max_t=pw.reducers.max(pw.this.t), ... count=pw.reducers.count(), ... ) >>> pw.debug.compute_and_print(result, include_id=False) _pw_instance | _pw_window_start | _pw_window_end | min_t | max_t | count 0 | 3 | 13 | 12 | 12 | 1 0 | 6 | 16 | 12 | 15 | 4 0 | 9 | 19 | 12 | 17 | 6 0 | 12 | 22 | 12 | 17 | 6 0 | 15 | 25 | 15 | 17 | 3 1 | 3 | 13 | 10 | 11 | 2 1 | 6 | 16 | 10 | 11 | 2 1 | 9 | 19 | 10 | 11 | 2 |
166,640 | from __future__ import annotations
import dataclasses
import datetime
from abc import ABC, abstractmethod
from collections.abc import Callable, Sequence
from typing import Any
import pathway.internals as pw
from pathway.internals import dtype as dt
from pathway.internals.arg_handlers import (
arg_handler,
offset_deprecation,
shard_deprecation,
windowby_handler,
)
from pathway.internals.desugaring import desugar
from pathway.internals.joins import validate_join_condition
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import eval_type
from ._interval_join import interval, interval_join
from ._window_join import WindowJoinResult
from .temporal_behavior import (
Behavior,
CommonBehavior,
ExactlyOnceBehavior,
common_behavior,
)
from .utils import (
IntervalType,
TimeEventType,
check_joint_types,
get_default_origin,
zero_length_interval,
)
class Window(ABC):
    """Abstract strategy for assigning records of a time-like column to windows.

    Concrete subclasses (e.g. the sliding/tumbling and intervals-over windows
    below) implement `_apply` (windowed group-by) and `_join` (windowed join).

    NOTE(review): in an ABC these `...` stubs would normally carry
    @abstractmethod; no decorators are visible in this copy -- confirm
    against the original source.
    """

    def _apply(
        self,
        table: pw.Table,
        key: pw.ColumnExpression,
        behavior: Behavior | None,
        instance: pw.ColumnExpression | None,
    ) -> pw.GroupedTable: ...

    def _join(
        self,
        left: pw.Table,
        right: pw.Table,
        left_time_expression: pw.ColumnExpression,
        right_time_expression: pw.ColumnExpression,
        *on: pw.ColumnExpression,
        mode: pw.JoinMode,
        left_instance: pw.ColumnReference | None = None,
        right_instance: pw.ColumnReference | None = None,
    ) -> WindowJoinResult: ...
class _SlidingWindow(Window):
    """Sliding window: windows of a fixed length placed every `hop` time units,
    starting from `origin` (or a type-dependent default origin).

    The window length is given either directly as `duration` or as
    `ratio * hop`; callers set exactly one of the two (see the `sliding` and
    `tumbling` constructors -- `tumbling` uses ratio=1).
    """

    hop: IntervalType                  # distance between consecutive window starts
    duration: IntervalType | None      # explicit window length, or None if ratio is used
    ratio: int | None                  # window length as a multiple of hop, or None
    origin: TimeEventType | None       # start of the first window; None = default origin

    def __init__(
        self,
        hop: IntervalType,
        duration: IntervalType | None,
        origin: TimeEventType | None,
        ratio: int | None,
    ) -> None:
        self.hop = hop
        self.duration = duration
        self.ratio = ratio
        self.origin = origin
    def _window_assignment_function(
        self, key_dtype: dt.DType
    ) -> Callable[[Any, TimeEventType], list[tuple[Any, TimeEventType, TimeEventType]]]:
        """Build a function mapping (instance, key) to all windows containing key.

        Window bounds are always computed as multiples of `hop` from `origin`
        (see `kth_stable_window`) rather than by repeated addition, which keeps
        the arithmetic numerically stable for float keys.
        """
        if self.origin is None:
            origin = get_default_origin(key_dtype)
        else:
            origin = self.origin

        def kth_stable_window(k):
            """Numerically stable k-th window."""
            start = k * self.hop + origin
            if self.ratio is not None:
                end = (k + self.ratio) * self.hop + origin
            else:
                end = k * self.hop + origin + self.duration
            return (start, end)

        def assign_windows(
            instance: Any, key: TimeEventType
        ) -> list[tuple[Any, TimeEventType, TimeEventType]]:
            """Returns the list of all the windows the given key belongs to.
            Each window is a tuple (window_start, window_end) describing the range
            of the window (window_start inclusive, window_end exclusive).
            """
            # compute lower and upper bound for multipliers (first_k and last_k) of hop
            # for which corresponding windows could contain key.
            last_k = int((key - origin) // self.hop) + 1  # type: ignore[operator, arg-type]
            if self.ratio is not None:
                first_k = last_k - self.ratio - 1
            else:
                assert self.duration is not None
                first_k = last_k - int(self.duration // self.hop) - 1  # type: ignore[operator, arg-type]
            first_k -= 1  # safety to avoid off-by one
            candidate_windows = [
                kth_stable_window(k) for k in range(first_k, last_k + 1)
            ]
            # filtering below is needed to handle case when hop > duration
            return [
                (instance, start, end)
                for (start, end) in candidate_windows
                if start <= key
                and key < end
                and (self.origin is None or start >= self.origin)
            ]

        return assign_windows
    def _apply(
        self,
        table: pw.Table,
        key: pw.ColumnExpression,
        behavior: Behavior | None,
        instance: pw.ColumnExpression | None,
    ) -> pw.GroupedTable:
        """Assign every row of `table` to each window containing its `key`, then
        group by (window, instance), optionally applying temporal `behavior`
        (delay / cutoff / forgetting) first.
        """
        check_joint_types(
            {
                "time_expr": (key, TimeEventType),
                "window.hop": (self.hop, IntervalType),
                "window.duration": (self.duration, IntervalType),
                "window.origin": (self.origin, TimeEventType),
            }
        )
        key_dtype = eval_type(key)
        assign_windows = self._window_assignment_function(key_dtype)
        # one output row per (row, window) pair: compute the list of windows a
        # row belongs to, then flatten it
        target = table.select(
            _pw_window=pw.apply_with_type(
                assign_windows,
                dt.List(
                    dt.Tuple(
                        eval_type(instance),  # type: ignore
                        key_dtype,
                        key_dtype,
                    )
                ),
                instance,
                key,
            ),
            _pw_key=key,
        )
        target = target.flatten(target._pw_window, _pw_key=target._pw_key, *table)
        # unpack the (instance, start, end) triple into separate columns
        target = target.with_columns(
            _pw_instance=pw.this._pw_window.get(0),
            _pw_window_start=pw.this._pw_window.get(1),
            _pw_window_end=pw.this._pw_window.get(2),
        )
        if behavior is not None:
            if isinstance(behavior, ExactlyOnceBehavior):
                duration: IntervalType
                # that is split in two if-s, as it helps mypy figure out proper types
                # one if impl left either self.ratio or self.duration as optionals
                # which won't fit into the duration variable of type IntervalType
                if self.duration is not None:
                    duration = self.duration
                elif self.ratio is not None:
                    duration = self.ratio * self.hop
                shift = (
                    behavior.shift
                    if behavior.shift is not None
                    else zero_length_interval(type(duration))
                )
                # translate exactly-once semantics into an equivalent
                # CommonBehavior: cutoff = duration + shift, delay = shift,
                # keep_results = True
                behavior = common_behavior(
                    duration + shift, shift, True  # type:ignore
                )
            elif not isinstance(behavior, CommonBehavior):
                raise ValueError(
                    f"behavior {behavior} unsupported in sliding/tumbling window"
                )
            if behavior.cutoff is not None:
                cutoff_threshold = pw.this._pw_window_end + behavior.cutoff
                target = target._freeze(cutoff_threshold, pw.this._pw_key)
            if behavior.delay is not None:
                target = target._buffer(
                    target._pw_window_start + behavior.delay, target._pw_key
                )
                # clamp keys to at least window_start + delay
                # NOTE(review): assumed to align buffered rows on a common
                # release time -- confirm against the original sources
                target = target.with_columns(
                    _pw_key=pw.if_else(
                        target._pw_key > target._pw_window_start + behavior.delay,
                        target._pw_key,
                        target._pw_window_start + behavior.delay,
                    )
                )
            if behavior.cutoff is not None:
                cutoff_threshold = pw.this._pw_window_end + behavior.cutoff
                target = target._forget(
                    cutoff_threshold, pw.this._pw_key, behavior.keep_results
                )
        # True only when forgetting is active (cutoff set) but results are kept
        filter_out_results_of_forgetting = (
            behavior is not None
            and behavior.cutoff is not None
            and behavior.keep_results
        )
        target = target.groupby(
            target._pw_window,
            target._pw_window_start,
            target._pw_window_end,
            instance=target._pw_instance,
            _filter_out_results_of_forgetting=filter_out_results_of_forgetting,
        )
        return target
    def _join(
        self,
        left: pw.Table,
        right: pw.Table,
        left_time_expression: pw.ColumnExpression,
        right_time_expression: pw.ColumnExpression,
        *on: pw.ColumnExpression,
        mode: pw.JoinMode,
        left_instance: pw.ColumnReference | None = None,
        right_instance: pw.ColumnReference | None = None,
    ) -> WindowJoinResult:
        """Join `left` and `right` rows that fall into the same window: each side
        is flattened into (row, window) pairs and the pairs are equi-joined on
        the window, plus any extra `on` conditions.
        """
        check_joint_types(
            {
                "left_time_expression": (left_time_expression, TimeEventType),
                "right_time_expression": (right_time_expression, TimeEventType),
                "window.hop": (self.hop, IntervalType),
                "window.duration": (self.duration, IntervalType),
                "window.origin": (self.origin, TimeEventType),
            }
        )
        time_expression_dtype = eval_type(left_time_expression)
        assert time_expression_dtype == eval_type(
            right_time_expression
        )  # checked in check_joint_types
        # windows in a join carry no instance value, hence dt.NONE in the tuple
        _pw_window_dtype = dt.List(
            dt.Tuple(
                dt.NONE,
                time_expression_dtype,
                time_expression_dtype,
            )
        )
        assign_windows = self._window_assignment_function(time_expression_dtype)
        left_window = left.select(
            _pw_window=pw.apply_with_type(
                assign_windows, _pw_window_dtype, None, left_time_expression
            )
        )
        left_window = left_window.flatten(left_window._pw_window, *left)
        left_window = left_window.with_columns(
            _pw_window_start=pw.this._pw_window.get(1),
            _pw_window_end=pw.this._pw_window.get(2),
        )
        right_window = right.select(
            _pw_window=pw.apply_with_type(
                assign_windows, _pw_window_dtype, None, right_time_expression
            )
        )
        right_window = right_window.flatten(right_window._pw_window, *right)
        right_window = right_window.with_columns(
            _pw_window_start=pw.this._pw_window.get(1),
            _pw_window_end=pw.this._pw_window.get(2),
        )
        # rebind the user's join conditions from the original tables to the
        # flattened per-window tables
        for cond in on:
            cond_left, cond_right, cond = validate_join_condition(cond, left, right)
            cond._left = left_window[cond_left._name]
            cond._right = right_window[cond_right._name]
        join_result = pw.JoinResult._table_join(
            left_window,
            right_window,
            left_window._pw_window_start == right_window._pw_window_start,
            left_window._pw_window_end == right_window._pw_window_end,
            left_window._pw_window == right_window._pw_window,
            *on,
            mode=mode,
            left_instance=left_instance,
            right_instance=right_instance,
        )
        return WindowJoinResult(join_result, left, right, left_window, right_window)
The provided code snippet includes necessary dependencies for implementing the `tumbling` function. Write a Python function `def tumbling( duration: int | float | datetime.timedelta, origin: int | float | datetime.datetime | None = None, ) -> Window` to solve the following problem:
Allows grouping together elements within a window of a given length tumbling across ordered time-like data column starting from a given origin. Note: Usually used as an argument of `.windowby()`. Args: duration: length of the window origin: a point in time at which the first window begins Returns: Window: object to pass as an argument to `.windowby()` Examples: >>> import pathway as pw >>> t = pw.debug.table_from_markdown( ... ''' ... | instance | t ... 1 | 0 | 12 ... 2 | 0 | 13 ... 3 | 0 | 14 ... 4 | 0 | 15 ... 5 | 0 | 16 ... 6 | 0 | 17 ... 7 | 1 | 12 ... 8 | 1 | 13 ... ''') >>> result = t.windowby( ... t.t, window=pw.temporal.tumbling(duration=5), instance=t.instance ... ).reduce( ... pw.this._pw_instance, ... pw.this._pw_window_start, ... pw.this._pw_window_end, ... min_t=pw.reducers.min(pw.this.t), ... max_t=pw.reducers.max(pw.this.t), ... count=pw.reducers.count(), ... ) >>> pw.debug.compute_and_print(result, include_id=False) _pw_instance | _pw_window_start | _pw_window_end | min_t | max_t | count 0 | 10 | 15 | 12 | 14 | 3 0 | 15 | 20 | 15 | 17 | 3 1 | 10 | 15 | 12 | 13 | 2
Here is the function:
def tumbling(
    duration: int | float | datetime.timedelta,
    origin: int | float | datetime.datetime | None = None,
) -> Window:
    """Allows grouping together elements within a window of a given length tumbling
    across ordered time-like data column starting from a given origin.

    Note:
        Usually used as an argument of `.windowby()`.

    Args:
        duration: length of the window
        origin: a point in time at which the first window begins

    Returns:
        Window: object to pass as an argument to `.windowby()`

    Examples:

    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown(
    ...     '''
    ... | instance | t
    ... 1 | 0 | 12
    ... 2 | 0 | 13
    ... 3 | 0 | 14
    ... 4 | 0 | 15
    ... 5 | 0 | 16
    ... 6 | 0 | 17
    ... 7 | 1 | 12
    ... 8 | 1 | 13
    ... ''')
    >>> result = t.windowby(
    ... t.t, window=pw.temporal.tumbling(duration=5), instance=t.instance
    ... ).reduce(
    ... pw.this._pw_instance,
    ... pw.this._pw_window_start,
    ... pw.this._pw_window_end,
    ... min_t=pw.reducers.min(pw.this.t),
    ... max_t=pw.reducers.max(pw.this.t),
    ... count=pw.reducers.count(),
    ... )
    >>> pw.debug.compute_and_print(result, include_id=False)
    _pw_instance | _pw_window_start | _pw_window_end | min_t | max_t | count
    0 | 10 | 15 | 12 | 14 | 3
    0 | 15 | 20 | 15 | 17 | 3
    1 | 10 | 15 | 12 | 13 | 2
    """
    # A tumbling window is a sliding window whose length equals its hop:
    # ratio=1 makes every window exactly one hop long, so consecutive
    # windows never overlap.
    return _SlidingWindow(
        duration=None,
        hop=duration,
        ratio=1,
        origin=origin,
) | Allows grouping together elements within a window of a given length tumbling across ordered time-like data column starting from a given origin. Note: Usually used as an argument of `.windowby()`. Args: duration: length of the window origin: a point in time at which the first window begins Returns: Window: object to pass as an argument to `.windowby()` Examples: >>> import pathway as pw >>> t = pw.debug.table_from_markdown( ... ''' ... | instance | t ... 1 | 0 | 12 ... 2 | 0 | 13 ... 3 | 0 | 14 ... 4 | 0 | 15 ... 5 | 0 | 16 ... 6 | 0 | 17 ... 7 | 1 | 12 ... 8 | 1 | 13 ... ''') >>> result = t.windowby( ... t.t, window=pw.temporal.tumbling(duration=5), instance=t.instance ... ).reduce( ... pw.this._pw_instance, ... pw.this._pw_window_start, ... pw.this._pw_window_end, ... min_t=pw.reducers.min(pw.this.t), ... max_t=pw.reducers.max(pw.this.t), ... count=pw.reducers.count(), ... ) >>> pw.debug.compute_and_print(result, include_id=False) _pw_instance | _pw_window_start | _pw_window_end | min_t | max_t | count 0 | 10 | 15 | 12 | 14 | 3 0 | 15 | 20 | 15 | 17 | 3 1 | 10 | 15 | 12 | 13 | 2 |
166,641 | from __future__ import annotations
import dataclasses
import datetime
from abc import ABC, abstractmethod
from collections.abc import Callable, Sequence
from typing import Any
import pathway.internals as pw
from pathway.internals import dtype as dt
from pathway.internals.arg_handlers import (
arg_handler,
offset_deprecation,
shard_deprecation,
windowby_handler,
)
from pathway.internals.desugaring import desugar
from pathway.internals.joins import validate_join_condition
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import eval_type
from ._interval_join import interval, interval_join
from ._window_join import WindowJoinResult
from .temporal_behavior import (
Behavior,
CommonBehavior,
ExactlyOnceBehavior,
common_behavior,
)
from .utils import (
IntervalType,
TimeEventType,
check_joint_types,
get_default_origin,
zero_length_interval,
)
class Window(ABC):
    """Abstract strategy for assigning records of a time-like column to windows.

    Concrete subclasses implement `_apply` (windowed group-by) and `_join`
    (windowed join).

    NOTE(review): in an ABC these `...` stubs would normally carry
    @abstractmethod; no decorators are visible in this copy -- confirm
    against the original source.
    """

    def _apply(
        self,
        table: pw.Table,
        key: pw.ColumnExpression,
        behavior: Behavior | None,
        instance: pw.ColumnExpression | None,
    ) -> pw.GroupedTable: ...

    def _join(
        self,
        left: pw.Table,
        right: pw.Table,
        left_time_expression: pw.ColumnExpression,
        right_time_expression: pw.ColumnExpression,
        *on: pw.ColumnExpression,
        mode: pw.JoinMode,
        left_instance: pw.ColumnReference | None = None,
        right_instance: pw.ColumnReference | None = None,
    ) -> WindowJoinResult: ...
class _IntervalsOverWindow(Window):
    """Window strategy backing `intervals_over`: for every time t in the probe
    column `at`, groups the rows of the windowed table whose key lies in
    [t + lower_bound, t + upper_bound].
    """

    at: pw.ColumnReference
    lower_bound: int | float | datetime.timedelta
    upper_bound: int | float | datetime.timedelta
    is_outer: bool

    def __init__(
        self,
        at: pw.ColumnReference,
        lower_bound: int | float | datetime.timedelta,
        upper_bound: int | float | datetime.timedelta,
        is_outer: bool,
    ) -> None:
        # Fix: `intervals_over` constructs this class with four positional
        # arguments, but no constructor was defined (attribute annotations
        # alone do not create one), so instantiation raised TypeError.
        # Mirrors the explicit __init__ style of _SlidingWindow.
        self.at = at
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.is_outer = is_outer

    def _apply(
        self,
        table: pw.Table,
        key: pw.ColumnExpression,
        behavior: CommonBehavior | None,
        instance: pw.ColumnExpression | None,
    ) -> pw.GroupedTable:
        """Group rows of `table` into per-probe interval windows around `at`.

        `behavior` is accepted for interface compatibility but not used here.
        """
        # Resolve which table the probe column `at` lives in; copy it when it
        # is the windowed table itself so the self-join below has two distinct
        # sides.
        if not isinstance(self.at.table, pw.Table):
            at_table = table
            at = table[self.at]
        elif self.at.table == table:
            at_table = self.at.table.copy()
            at = at_table[self.at.name]
        else:
            at_table = self.at.table
            at = self.at
        check_joint_types(
            {
                "time_expr": (key, TimeEventType),
                "window.lower_bound": (self.lower_bound, IntervalType),
                "window.upper_bound": (self.upper_bound, IntervalType),
                "window.at": (at, TimeEventType),
            }
        )
        # LEFT join keeps empty windows (is_outer=True); INNER drops them.
        return (
            interval_join(
                at_table,
                table,
                at,
                key,
                interval(self.lower_bound, self.upper_bound),  # type: ignore[arg-type]
                how=pw.JoinMode.LEFT if self.is_outer else pw.JoinMode.INNER,
            )
            .select(
                _pw_window_location=pw.left[at.name],
                _pw_window_start=pw.left[at.name] + self.lower_bound,
                _pw_window_end=pw.left[at.name] + self.upper_bound,
                _pw_instance=instance,
                _pw_key=key,
                *pw.right,
            )
            .groupby(
                pw.this._pw_window_location,
                pw.this._pw_window_start,
                pw.this._pw_window_end,
                instance=pw.this._pw_instance,
                sort_by=pw.this._pw_key,
            )
        )

    def _join(
        self,
        left: pw.Table,
        right: pw.Table,
        left_time_expression: pw.ColumnExpression,
        right_time_expression: pw.ColumnExpression,
        *on: pw.ColumnExpression,
        mode: pw.JoinMode,
        left_instance: pw.ColumnReference | None = None,
        right_instance: pw.ColumnReference | None = None,
    ) -> WindowJoinResult:
        """Interval-over windows cannot be used in window joins."""
        raise NotImplementedError(
            "window_join doesn't support windows of type intervals_over"
        )
The provided code snippet includes necessary dependencies for implementing the `intervals_over` function. Write a Python function `def intervals_over( *, at: pw.ColumnReference, lower_bound: int | float | datetime.timedelta, upper_bound: int | float | datetime.timedelta, is_outer: bool = True, ) -> Window` to solve the following problem:
Allows grouping together elements within a window. Windows are created for each time t in at, by taking values with times within [t+lower_bound, t+upper_bound]. Note: If a tuple reducer will be used on grouped elements within a window, values in the tuple will be sorted according to their time column. Args: lower_bound: lower bound for interval upper_bound: upper bound for interval at: column of times for which windows are to be created is_outer: decides whether empty windows should return None or be omitted Returns: Window: object to pass as an argument to `.windowby()` Examples: >>> import pathway as pw >>> t = pw.debug.table_from_markdown( ... ''' ... | t | v ... 1 | 1 | 10 ... 2 | 2 | 1 ... 3 | 4 | 3 ... 4 | 8 | 2 ... 5 | 9 | 4 ... 6 | 10| 8 ... 7 | 1 | 9 ... 8 | 2 | 16 ... ''') >>> probes = pw.debug.table_from_markdown( ... ''' ... t ... 2 ... 4 ... 6 ... 8 ... 10 ... ''') >>> result = ( ... pw.temporal.windowby(t, t.t, window=pw.temporal.intervals_over( ... at=probes.t, lower_bound=-2, upper_bound=1 ... )) ... .reduce(pw.this._pw_window_location, v=pw.reducers.tuple(pw.this.v)) ... ) >>> pw.debug.compute_and_print(result, include_id=False) _pw_window_location | v 2 | (9, 10, 16, 1) 4 | (16, 1, 3) 6 | (3,) 8 | (2, 4) 10 | (2, 4, 8)
Here is the function:
def intervals_over(
    *,
    at: pw.ColumnReference,
    lower_bound: int | float | datetime.timedelta,
    upper_bound: int | float | datetime.timedelta,
    is_outer: bool = True,
) -> Window:
    """Allows grouping together elements within a window.

    Windows are created for each time t in at, by taking values with times
    within [t+lower_bound, t+upper_bound].

    Note: If a tuple reducer will be used on grouped elements within a window, values
    in the tuple will be sorted according to their time column.
    The probe time t is exposed in the result as the `_pw_window_location` column.

    Args:
        lower_bound: lower bound for interval
        upper_bound: upper bound for interval
        at: column of times for which windows are to be created
        is_outer: decides whether empty windows should return None or be omitted

    Returns:
        Window: object to pass as an argument to `.windowby()`

    Examples:

    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown(
    ...     '''
    ... | t | v
    ... 1 | 1 | 10
    ... 2 | 2 | 1
    ... 3 | 4 | 3
    ... 4 | 8 | 2
    ... 5 | 9 | 4
    ... 6 | 10| 8
    ... 7 | 1 | 9
    ... 8 | 2 | 16
    ... ''')
    >>> probes = pw.debug.table_from_markdown(
    ...     '''
    ... t
    ... 2
    ... 4
    ... 6
    ... 8
    ... 10
    ... ''')
    >>> result = (
    ...     pw.temporal.windowby(t, t.t, window=pw.temporal.intervals_over(
    ...         at=probes.t, lower_bound=-2, upper_bound=1
    ...     ))
    ...     .reduce(pw.this._pw_window_location, v=pw.reducers.tuple(pw.this.v))
    ... )
    >>> pw.debug.compute_and_print(result, include_id=False)
    _pw_window_location | v
    2 | (9, 10, 16, 1)
    4 | (16, 1, 3)
    6 | (3,)
    8 | (2, 4)
    10 | (2, 4, 8)
    """
return _IntervalsOverWindow(at, lower_bound, upper_bound, is_outer) | Allows grouping together elements within a window. Windows are created for each time t in at, by taking values with times within [t+lower_bound, t+upper_bound]. Note: If a tuple reducer will be used on grouped elements within a window, values in the tuple will be sorted according to their time column. Args: lower_bound: lower bound for interval upper_bound: upper bound for interval at: column of times for which windows are to be created is_outer: decides whether empty windows should return None or be omitted Returns: Window: object to pass as an argument to `.windowby()` Examples: >>> import pathway as pw >>> t = pw.debug.table_from_markdown( ... ''' ... | t | v ... 1 | 1 | 10 ... 2 | 2 | 1 ... 3 | 4 | 3 ... 4 | 8 | 2 ... 5 | 9 | 4 ... 6 | 10| 8 ... 7 | 1 | 9 ... 8 | 2 | 16 ... ''') >>> probes = pw.debug.table_from_markdown( ... ''' ... t ... 2 ... 4 ... 6 ... 8 ... 10 ... ''') >>> result = ( ... pw.temporal.windowby(t, t.t, window=pw.temporal.intervals_over( ... at=probes.t, lower_bound=-2, upper_bound=1 ... )) ... .reduce(pw.this._pw_window_location, v=pw.reducers.tuple(pw.this.v)) ... ) >>> pw.debug.compute_and_print(result, include_id=False) _pw_window_location | v 2 | (9, 10, 16, 1) 4 | (16, 1, 3) 6 | (3,) 8 | (2, 4) 10 | (2, 4, 8) |
166,642 | from __future__ import annotations
import dataclasses
import datetime
from abc import ABC, abstractmethod
from collections.abc import Callable, Sequence
from typing import Any
import pathway.internals as pw
from pathway.internals import dtype as dt
from pathway.internals.arg_handlers import (
arg_handler,
offset_deprecation,
shard_deprecation,
windowby_handler,
)
from pathway.internals.desugaring import desugar
from pathway.internals.joins import validate_join_condition
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import eval_type
from ._interval_join import interval, interval_join
from ._window_join import WindowJoinResult
from .temporal_behavior import (
Behavior,
CommonBehavior,
ExactlyOnceBehavior,
common_behavior,
)
from .utils import (
IntervalType,
TimeEventType,
check_joint_types,
get_default_origin,
zero_length_interval,
)
class Window(ABC):
    """Abstract strategy for assigning records of a time-like column to windows.

    Concrete subclasses implement `_apply` (windowed group-by) and `_join`
    (windowed join).

    NOTE(review): in an ABC these `...` stubs would normally carry
    @abstractmethod; no decorators are visible in this copy -- confirm
    against the original source.
    """

    def _apply(
        self,
        table: pw.Table,
        key: pw.ColumnExpression,
        behavior: Behavior | None,
        instance: pw.ColumnExpression | None,
    ) -> pw.GroupedTable: ...

    def _join(
        self,
        left: pw.Table,
        right: pw.Table,
        left_time_expression: pw.ColumnExpression,
        right_time_expression: pw.ColumnExpression,
        *on: pw.ColumnExpression,
        mode: pw.JoinMode,
        left_instance: pw.ColumnReference | None = None,
        right_instance: pw.ColumnReference | None = None,
    ) -> WindowJoinResult: ...
class Behavior:
    """Common base class for temporal-behavior configuration.

    Subclasses of this marker type let several temporal operators delay
    their outputs, ignore entries that arrive too late, and clean memory
    that is no longer needed.
    """
The provided code snippet includes necessary dependencies for implementing the `windowby` function. Write a Python function `def windowby( self: pw.Table, time_expr: pw.ColumnExpression, *, window: Window, behavior: Behavior | None = None, instance: pw.ColumnExpression | None = None, ) -> pw.GroupedTable` to solve the following problem:
Create a GroupedTable by windowing the table (based on `expr` and `window`), optionally with `instance` argument. Args: time_expr (pw.ColumnExpression[int | float | datetime]): Column expression used for windowing window: type window to use instance: optional column expression to act as a shard key Examples: >>> import pathway as pw >>> t = pw.debug.table_from_markdown( ... ''' ... | instance | t | v ... 1 | 0 | 1 | 10 ... 2 | 0 | 2 | 1 ... 3 | 0 | 4 | 3 ... 4 | 0 | 8 | 2 ... 5 | 0 | 9 | 4 ... 6 | 0 | 10| 8 ... 7 | 1 | 1 | 9 ... 8 | 1 | 2 | 16 ... ''') >>> result = t.windowby( ... t.t, window=pw.temporal.session(predicate=lambda a, b: abs(a-b) <= 1), instance=t.instance ... ).reduce( ... pw.this.instance, ... min_t=pw.reducers.min(pw.this.t), ... max_v=pw.reducers.max(pw.this.v), ... count=pw.reducers.count(), ... ) >>> pw.debug.compute_and_print(result, include_id=False) instance | min_t | max_v | count 0 | 1 | 10 | 2 0 | 4 | 3 | 1 0 | 8 | 8 | 3 1 | 1 | 16 | 2
Here is the function:
def windowby(
    self: pw.Table,
    time_expr: pw.ColumnExpression,
    *,
    window: Window,
    behavior: Behavior | None = None,
    instance: pw.ColumnExpression | None = None,
) -> pw.GroupedTable:
    """
    Create a GroupedTable by windowing the table (based on `time_expr` and `window`),
    optionally with `instance` argument.

    Args:
        time_expr (pw.ColumnExpression[int | float | datetime]): Column expression used for windowing
        window: the window strategy to use (e.g. sliding, tumbling or session)
        behavior: optional temporal behavior letting the operator delay outputs,
            ignore late entries and clean memory (see `Behavior` subclasses)
        instance: optional column expression to act as a shard key

    Examples:

    >>> import pathway as pw
    >>> t = pw.debug.table_from_markdown(
    ...     '''
    ... | instance | t | v
    ... 1 | 0 | 1 | 10
    ... 2 | 0 | 2 | 1
    ... 3 | 0 | 4 | 3
    ... 4 | 0 | 8 | 2
    ... 5 | 0 | 9 | 4
    ... 6 | 0 | 10| 8
    ... 7 | 1 | 1 | 9
    ... 8 | 1 | 2 | 16
    ... ''')
    >>> result = t.windowby(
    ... t.t, window=pw.temporal.session(predicate=lambda a, b: abs(a-b) <= 1), instance=t.instance
    ... ).reduce(
    ... pw.this.instance,
    ... min_t=pw.reducers.min(pw.this.t),
    ... max_v=pw.reducers.max(pw.this.v),
    ... count=pw.reducers.count(),
    ... )
    >>> pw.debug.compute_and_print(result, include_id=False)
    instance | min_t | max_v | count
    0 | 1 | 10 | 2
    0 | 4 | 3 | 1
    0 | 8 | 8 | 3
    1 | 1 | 16 | 2
    """
return window._apply(self, time_expr, behavior, instance) | Create a GroupedTable by windowing the table (based on `expr` and `window`), optionally with `instance` argument. Args: time_expr (pw.ColumnExpression[int | float | datetime]): Column expression used for windowing window: type window to use instance: optional column expression to act as a shard key Examples: >>> import pathway as pw >>> t = pw.debug.table_from_markdown( ... ''' ... | instance | t | v ... 1 | 0 | 1 | 10 ... 2 | 0 | 2 | 1 ... 3 | 0 | 4 | 3 ... 4 | 0 | 8 | 2 ... 5 | 0 | 9 | 4 ... 6 | 0 | 10| 8 ... 7 | 1 | 1 | 9 ... 8 | 1 | 2 | 16 ... ''') >>> result = t.windowby( ... t.t, window=pw.temporal.session(predicate=lambda a, b: abs(a-b) <= 1), instance=t.instance ... ).reduce( ... pw.this.instance, ... min_t=pw.reducers.min(pw.this.t), ... max_v=pw.reducers.max(pw.this.v), ... count=pw.reducers.count(), ... ) >>> pw.debug.compute_and_print(result, include_id=False) instance | min_t | max_v | count 0 | 1 | 10 | 2 0 | 4 | 3 | 1 0 | 8 | 8 | 3 1 | 1 | 16 | 2 |
166,643 | from __future__ import annotations
import datetime
from abc import abstractmethod
from dataclasses import dataclass
from typing import Any, Generic, TypeVar, overload
import pathway.internals as pw
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableReplacementWithNoneDesugaring,
TableSubstitutionDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import eval_type
from .temporal_behavior import CommonBehavior, apply_temporal_behavior
from .utils import IntervalType, TimeEventType, check_joint_types, get_default_origin
class Interval(Generic[T]):
    """Generic container declaring a lower and an upper time-like bound."""

    # both bounds share one concrete type T (int, float or datetime.timedelta)
    lower_bound: T
    upper_bound: T


# NOTE(review): this reads like a typing @overload stub of `interval` for int
# bounds; the @overload decorator is not visible in this copy -- confirm
# against the original source.
def interval(
    lower_bound: int,
    upper_bound: int,
) -> Interval[int]: ... | null |
166,644 | from __future__ import annotations
import datetime
from abc import abstractmethod
from dataclasses import dataclass
from typing import Any, Generic, TypeVar, overload
import pathway.internals as pw
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableReplacementWithNoneDesugaring,
TableSubstitutionDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import eval_type
from .temporal_behavior import CommonBehavior, apply_temporal_behavior
from .utils import IntervalType, TimeEventType, check_joint_types, get_default_origin
# NOTE(review): in this copy `Interval` appears with its attribute
# declarations stripped; fuller copies in this file declare
# lower_bound/upper_bound fields.
class Interval(Generic[T]):
    """Generic container for a pair of time-like bounds."""


# NOTE(review): likely a typing @overload stub of `interval` for float bounds;
# the @overload decorator is not visible here.
def interval(
    lower_bound: float,
    upper_bound: float,
) -> Interval[float]: ... | null |
166,645 | from __future__ import annotations
import datetime
from abc import abstractmethod
from dataclasses import dataclass
from typing import Any, Generic, TypeVar, overload
import pathway.internals as pw
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableReplacementWithNoneDesugaring,
TableSubstitutionDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import eval_type
from .temporal_behavior import CommonBehavior, apply_temporal_behavior
from .utils import IntervalType, TimeEventType, check_joint_types, get_default_origin
class Interval(Generic[T]):
    """Generic container declaring a lower and an upper time-like bound."""

    # both bounds share one concrete type T (int, float or datetime.timedelta)
    lower_bound: T
    upper_bound: T


# NOTE(review): likely a typing @overload stub of `interval` for
# datetime.timedelta bounds; the @overload decorator is not visible here.
def interval(
    lower_bound: datetime.timedelta,
    upper_bound: datetime.timedelta,
) -> Interval[datetime.timedelta]: ... | null |
166,646 | from __future__ import annotations
import datetime
from abc import abstractmethod
from dataclasses import dataclass
from typing import Any, Generic, TypeVar, overload
import pathway.internals as pw
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableReplacementWithNoneDesugaring,
TableSubstitutionDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import eval_type
from .temporal_behavior import CommonBehavior, apply_temporal_behavior
from .utils import IntervalType, TimeEventType, check_joint_types, get_default_origin
# Type parameter for Interval bounds (int, float, or datetime.timedelta).
# Fix: `T` was referenced by `Generic[T]` but never declared in this copy.
T = TypeVar("T")


# Fix: restored a constructor -- `interval()` builds this class with keyword
# arguments (Interval(lower_bound=..., upper_bound=...)), which fails when the
# class has only attribute annotations; @dataclass generates the needed
# __init__ (plus __repr__/__eq__).
@dataclass
class Interval(Generic[T]):
    """Pair of bounds on `other_time - self_time`, built by `interval()` and
    consumed by interval joins.
    """

    lower_bound: T  # lower bound on the allowed time difference
    upper_bound: T  # upper bound on the allowed time difference
The provided code snippet includes necessary dependencies for implementing the `interval` function. Write a Python function `def interval( lower_bound: int | float | datetime.timedelta, upper_bound: int | float | datetime.timedelta, ) -> Interval` to solve the following problem:
Allows testing whether two times are within a certain distance. Note: Usually used as an argument of `.interval_join()`. Args: lower_bound: a lower bound on `other_time - self_time`. upper_bound: an upper bound on `other_time - self_time`. Returns: Window: object to pass as an argument to `.interval_join()` Examples: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 3 ... 2 | 4 ... 3 | 5 ... 4 | 11 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 0 ... 2 | 1 ... 3 | 4 ... 4 | 7 ... ''' ... ) >>> t3 = t1.interval_join(t2, t1.t, t2.t, pw.temporal.interval(-2, 1)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 3 | 1 3 | 4 4 | 4 5 | 4
Here is the function:
def interval(
    lower_bound: int | float | datetime.timedelta,
    upper_bound: int | float | datetime.timedelta,
) -> Interval:
    """Allows testing whether two times are within a certain distance.

    Note:
        Usually used as an argument of `.interval_join()`.
        Both bounds are inclusive: `other_time - self_time` may equal either
        bound (in the example below, differences of -2 and +1 both match).

    Args:
        lower_bound: a lower bound on `other_time - self_time`.
        upper_bound: an upper bound on `other_time - self_time`.

    Returns:
        Window: object to pass as an argument to `.interval_join()`

    Examples:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ... | t
    ... 1 | 3
    ... 2 | 4
    ... 3 | 5
    ... 4 | 11
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ... | t
    ... 1 | 0
    ... 2 | 1
    ... 3 | 4
    ... 4 | 7
    ... '''
    ... )
    >>> t3 = t1.interval_join(t2, t1.t, t2.t, pw.temporal.interval(-2, 1)).select(
    ... left_t=t1.t, right_t=t2.t
    ... )
    >>> pw.debug.compute_and_print(t3, include_id=False)
    left_t | right_t
    3 | 1
    3 | 4
    4 | 4
    5 | 4
    """
return Interval(lower_bound=lower_bound, upper_bound=upper_bound) | Allows testing whether two times are within a certain distance. Note: Usually used as an argument of `.interval_join()`. Args: lower_bound: a lower bound on `other_time - self_time`. upper_bound: an upper bound on `other_time - self_time`. Returns: Window: object to pass as an argument to `.interval_join()` Examples: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 3 ... 2 | 4 ... 3 | 5 ... 4 | 11 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 0 ... 2 | 1 ... 3 | 4 ... 4 | 7 ... ''' ... ) >>> t3 = t1.interval_join(t2, t1.t, t2.t, pw.temporal.interval(-2, 1)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 3 | 1 3 | 4 4 | 4 5 | 4 |
166,647 | from __future__ import annotations
import datetime
from abc import abstractmethod
from dataclasses import dataclass
from typing import Any, Generic, TypeVar, overload
import pathway.internals as pw
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableReplacementWithNoneDesugaring,
TableSubstitutionDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import eval_type
from .temporal_behavior import CommonBehavior, apply_temporal_behavior
from .utils import IntervalType, TimeEventType, check_joint_types, get_default_origin
class Interval(Generic[T]):
    """Pair of time-like bounds used by `_interval_join` below."""

    # NOTE(review): no __init__/@dataclass is visible in this copy although
    # other code constructs Interval with keyword arguments -- the decorator
    # was probably stripped; confirm against the original source.
    lower_bound: T
    upper_bound: T
class IntervalJoinResult(DesugaringContext):
    """
    Result of an interval join between tables.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 1 | 3
    ... 2 | 4
    ... 3 | 5
    ... 4 | 11
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 1 | 0
    ... 2 | 1
    ... 3 | 4
    ... 4 | 7
    ... '''
    ... )
    >>> join_result = t1.interval_join_inner(t2, t1.t, t2.t, pw.temporal.interval(-2, 1))
    >>> isinstance(join_result, pw.temporal.IntervalJoinResult)
    True
    >>> pw.debug.compute_and_print(
    ...     join_result.select(left_t=t1.t, right_t=t2.t), include_id=False
    ... )
    left_t | right_t
    3 | 1
    3 | 4
    4 | 4
    5 | 4
    """

    # Maps original user-visible tables to their (possibly preprocessed)
    # replacements, consumed by the desugaring machinery.
    _table_substitution: dict[pw.TableLike, pw.Table]
    # Whether retraction rows produced by temporal forgetting should be
    # filtered out of the final result.
    _filter_out_results_of_forgetting: bool

    def __init__(
        self,
        left: pw.Table,
        right: pw.Table,
        table_substitution: dict[pw.TableLike, pw.Table],
        _filter_out_results_of_forgetting: bool,
    ):
        # pw.left / pw.right resolve to the join sides inside expressions;
        # pw.this is kept as-is (resolved later by the desugaring context).
        self._substitution = {
            pw.left: left,
            pw.right: right,
            pw.this: pw.this,  # type: ignore[dict-item]
        }
        self._table_substitution = table_substitution
        self._filter_out_results_of_forgetting = _filter_out_results_of_forgetting

    def _should_filter_out_results_of_forgetting(
        behavior: CommonBehavior | None,
    ) -> bool:
        # NOTE(review): no ``self`` parameter — presumably a @staticmethod in
        # the original; the decorator is not visible in this chunk.
        # Forgetting retractions are filtered out only when a cutoff is set
        # (records are forgotten at all) and the user asked to keep results.
        return (
            behavior is not None
            and behavior.cutoff is not None
            and behavior.keep_results
        )
def _interval_join(
    left: pw.Table,
    right: pw.Table,
    left_time_expression: pw.ColumnExpression,
    right_time_expression: pw.ColumnExpression,
    interval: Interval[int] | Interval[float] | Interval[datetime.timedelta],
    *on: pw.ColumnExpression,
    behavior: CommonBehavior | None = None,
    mode: pw.JoinMode,
    left_instance: pw.ColumnReference | None = None,
    right_instance: pw.ColumnReference | None = None,
) -> IntervalJoinResult:
    """Creates an IntervalJoinResult. To perform an interval join it uses two
    tumbling windows of size `lower_bound` + `upper_bound` and then filters the result.
    """
    # NOTE(review): no ``self``/``cls`` parameter — presumably a @staticmethod
    # factory in the original; the decorator is not visible in this chunk.
    # Both time expressions and both interval bounds must belong to one
    # consistent temporal type family (int / float / datetime-based).
    check_joint_types(
        {
            "self_time_expression": (left_time_expression, TimeEventType),
            "other_time_expression": (right_time_expression, TimeEventType),
            "lower_bound": (interval.lower_bound, IntervalType),
            "upper_bound": (interval.upper_bound, IntervalType),
        }
    )
    if left == right:
        raise ValueError(
            "Cannot join table with itself. Use <table>.copy() as one of the arguments of the join."
        )
    if interval.lower_bound > interval.upper_bound:  # type: ignore[operator]
        raise ValueError(
            "lower_bound has to be less than or equal to the upper_bound in the Table.interval_join()."
        )
    # A degenerate interval (lower == upper) reduces to an exact time match
    # and is dispatched to a specialized implementation.
    if interval.lower_bound == interval.upper_bound:
        cls: type[IntervalJoinResult] = _ZeroDifferenceIntervalJoinResult
    else:
        cls = _NonZeroDifferenceIntervalJoinResult
    return cls._interval_join(
        left,
        right,
        left_time_expression,
        right_time_expression,
        interval,
        *on,
        behavior=behavior,
        mode=mode,
        left_instance=left_instance,
        right_instance=right_instance,
    )
def _desugaring(self) -> TableSubstitutionDesugaring:
    """Desugaring step that rewrites references to original tables into
    references to their substituted counterparts.

    NOTE(review): presumably a ``@property`` (DesugaringContext interface);
    the decorator is not visible in this chunk.
    """
    return TableSubstitutionDesugaring(self._table_substitution)

def select(self, *args: pw.ColumnReference, **kwargs: Any) -> pw.Table:
    """
    Computes a result of an interval join.
    Args:
        args: Column references.
        kwargs: Column expressions with their new assigned names.
    Returns:
        Table: Created table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | a | t
    ... 1 | 1 | 3
    ... 2 | 1 | 4
    ... 3 | 1 | 5
    ... 4 | 1 | 11
    ... 5 | 2 | 2
    ... 6 | 2 | 3
    ... 7 | 3 | 4
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | b | t
    ... 1 | 1 | 0
    ... 2 | 1 | 1
    ... 3 | 1 | 4
    ... 4 | 1 | 7
    ... 5 | 2 | 0
    ... 6 | 2 | 2
    ... 7 | 4 | 2
    ... '''
    ... )
    >>> t3 = t1.interval_join_inner(
    ...     t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b
    ... ).select(t1.a, left_t=t1.t, right_t=t2.t)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    a | left_t | right_t
    1 | 3 | 1
    1 | 3 | 4
    1 | 4 | 4
    1 | 5 | 4
    2 | 2 | 0
    2 | 2 | 2
    2 | 3 | 2
    """
    # Interface-only declaration: body is elided (``...``) — presumably
    # @abstractmethod in the original (``abstractmethod`` is imported at the
    # top of the file); concrete subclasses implement it.
    ...
class CommonBehavior(Behavior):
    """Defines temporal behavior of windows and temporal joins."""

    # NOTE(review): field semantics inferred from names and from their use in
    # IntervalJoinResult._should_filter_out_results_of_forgetting — confirm
    # against the temporal_behavior module, which is not visible here.
    delay: IntervalType | None  # processing delay; None means no delay
    cutoff: IntervalType | None  # forgetting threshold; None disables forgetting
    keep_results: bool  # whether results survive after their inputs are forgotten
class Table(
    Joinable,
    OperatorInput,
    Generic[TSchema],
):
    """Collection of named columns over identical universes.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> isinstance(t1, pw.Table)
    True
    """

    # These methods are implemented in stdlib modules and attached to Table
    # elsewhere; the imports below exist only so static type checkers see them.
    if TYPE_CHECKING:
        from pathway.stdlib.ordered import diff  # type: ignore[misc]
        from pathway.stdlib.statistical import interpolate  # type: ignore[misc]
        from pathway.stdlib.temporal import (  # type: ignore[misc]
            asof_join,
            asof_join_left,
            asof_join_outer,
            asof_join_right,
            asof_now_join,
            asof_now_join_inner,
            asof_now_join_left,
            interval_join,
            interval_join_inner,
            interval_join_left,
            interval_join_outer,
            interval_join_right,
            window_join,
            window_join_inner,
            window_join_left,
            window_join_outer,
            window_join_right,
            windowby,
        )
        from pathway.stdlib.viz import (  # type: ignore[misc]
            _repr_mimebundle_,
            plot,
            show,
        )

    _columns: dict[str, clmn.Column]  # column name -> internal column object
    _schema: type[Schema]  # schema describing column names and dtypes
    _id_column: clmn.IdColumn  # pseudocolumn of row ids
    _rowwise_context: clmn.RowwiseContext  # evaluation context over this table's universe
    _source: SetOnceProperty[OutputHandle] = SetOnceProperty()
    """Lateinit by operator."""
def __init__(
    self,
    _columns: Mapping[str, clmn.Column],
    _context: clmn.Context,
    _schema: type[Schema] | None = None,
):
    """Build a table from internal column objects over a shared context.

    Args:
        _columns: mapping from column names to internal column objects.
        _context: evaluation context supplying the table's universe / id column.
        _schema: optional precomputed schema; derived from ``_columns`` when None.
    """
    if _schema is None:
        _schema = schema_from_columns(_columns)
    super().__init__(_context)
    # Copy into a plain dict so later mutation of the caller's mapping
    # cannot affect this table.
    self._columns = dict(_columns)
    self._schema = _schema
    self._id_column = _context.id_column
    # pw.this resolves to this table inside expressions evaluated on it.
    self._substitution = {thisclass.this: self}
    self._rowwise_context = clmn.RowwiseContext(self._id_column)
def id(self) -> expr.ColumnReference:
    """Get reference to pseudocolumn containing id's of a table.

    NOTE(review): a ``@property`` in the original — the doctest below accesses
    ``t1.id`` without calling it; the decorator is not visible in this chunk.

    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.select(ids = t1.id)
    >>> t2.typehints()['ids']
    <class 'pathway.engine.Pointer'>
    >>> pw.debug.compute_and_print(t2.select(test=t2.id == t2.ids), include_id=False)
    test
    True
    True
    True
    True
    """
    return expr.ColumnReference(_table=self, _column=self._id_column, _name="id")
def column_names(self):
    """Return the table's column names (alias of ``keys()``)."""
    return self.keys()

def keys(self):
    """Return a view of the column names, in definition order."""
    return self._columns.keys()

def _get_column(self, name: str) -> clmn.Column:
    """Return the internal column object registered under ``name``.

    Raises:
        KeyError: if no such column exists.
    """
    return self._columns[name]

def _ipython_key_completions_(self):
    # Enables tab-completion of table["<column>"] in IPython/Jupyter.
    return list(self.column_names())

def __dir__(self):
    # Expose column names alongside regular attributes for interactive
    # discovery (dir(), tab-completion).
    return list(super().__dir__()) + list(self.column_names())

def _C(self) -> TSchema:
    # Typed alias of the column namespace; presumably a @property in the
    # original (decorator not visible in this chunk).
    return self.C  # type: ignore
def schema(self) -> type[Schema]:
    """Get schema of the table.

    NOTE(review): a ``@property`` in the original — the doctest below accesses
    ``t1.schema`` without calling it; the decorator is not visible in this chunk.

    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t1.schema
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
    >>> t1.typehints()['age']
    <class 'int'>
    """
    return self._schema
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference:
    """Resolve ``name`` to a reference on this table.

    ``"id"`` resolves to the id pseudocolumn; unknown names raise
    ``exception_type`` (callers pick KeyError or AttributeError).
    """
    # Apply any deprecation renames before lookup.
    name = self._column_deprecation_rename(name)
    if name == "id":
        return self.id
    if name in self.keys():
        return expr.ColumnReference(
            _table=self, _column=self._get_column(name), _name=name
        )
    raise exception_type(f"Table has no column with name {name}.")
# NOTE(review): the two bare signatures below are presumably ``@overload``
# declarations in the original — decorators are not visible in this chunk.
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...

def __getitem__(self, args: list[str | expr.ColumnReference]) -> Table: ...

def __getitem__(
    self, args: str | expr.ColumnReference | list[str | expr.ColumnReference]
) -> expr.ColumnReference | Table:
    """Get columns by name.
    Warning:
    - Does not allow repetitions of columns.
    - Fails if tries to access nonexistent column.
    Args:
        names: a singe column name or list of columns names to be extracted from `self`.
    Returns:
        Table with specified columns, or column expression (if single argument given).
        Instead of column names, column references are valid here.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1[["age", "pet"]]
    >>> t2 = t1[["age", t1.pet]]
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | pet
    7 | dog
    8 | cat
    9 | dog
    10 | dog
    """
    if isinstance(args, expr.ColumnReference):
        # Accept references to this table or to pw.this; a reference to a
        # different table is a user error.
        if (args.table is not self) and not isinstance(
            args.table, thisclass.ThisMetaclass
        ):
            raise ValueError(
                "Table.__getitem__ argument has to be a ColumnReference to the same table or pw.this, or a string "
                + "(or a list of those)."
            )
        return self._get_colref_by_name(args.name, KeyError)
    elif isinstance(args, str):
        return self._get_colref_by_name(args, KeyError)
    else:
        # A list of names/references selects a sub-table via select().
        return self.select(*[self[name] for name in args])
def from_columns(
    *args: expr.ColumnReference, **kwargs: expr.ColumnReference
) -> Table:
    """Build a table from columns.
    All columns must have the same ids. Columns' names must be pairwise distinct.
    Args:
        args: List of columns.
        kwargs: Columns with their new names.
    Returns:
        Table: Created table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> t2 = pw.Table.empty(foo=float, bar=float).with_universe_of(t1)
    >>> t3 = pw.Table.from_columns(t1.pet, qux=t2.foo)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | qux
    """
    named_columns = cast(
        dict[str, expr.ColumnReference], combine_args_kwargs(args, kwargs)
    )
    if not named_columns:
        raise ValueError("Table.from_columns() cannot have empty arguments list")
    references = list(named_columns.values())
    # The first column's table hosts the select; every argument must live
    # on a provably equal universe.
    table: Table = references[0].table
    for reference in references:
        if not G.universe_solver.query_are_equal(
            table._universe, reference.table._universe
        ):
            raise ValueError(
                "Universes of all arguments of Table.from_columns() have to be equal.\n"
                + "Consider using Table.promise_universes_are_equal() to assert it.\n"
                + "(However, untrue assertion might result in runtime errors.)"
            )
    return table.select(*args, **kwargs)
def concat_reindex(self, *tables: Table) -> Table:
    """Concatenate contents of several tables.
    This is similar to PySpark union. All tables must have the same schema. Each row is reindexed.
    Args:
        tables: List of tables to concatenate. All tables must have the same schema.
    Returns:
        Table: The concatenated table. It will have new, synthetic ids.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | pet
    ... 1 | Dog
    ... 7 | Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | pet
    ... 1 | Manul
    ... 8 | Octopus
    ... ''')
    >>> t3 = t1.concat_reindex(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet
    Cat
    Dog
    Manul
    Octopus
    """
    # Reindex each table with ids derived from (old id, table position) so
    # the resulting key sets are disjoint by construction.
    reindexed = [
        table.with_id_from(table.id, i) for i, table in enumerate([self, *tables])
    ]
    universes.promise_are_pairwise_disjoint(*reindexed)
    return Table.concat(*reindexed)
def empty(**kwargs: dt.DType) -> Table:
    """Creates an empty table with a schema specified by kwargs.

    NOTE(review): no ``self``/``cls`` parameter — presumably a @staticmethod
    in the original (the doctest calls ``pw.Table.empty``); the decorator is
    not visible in this chunk.

    Args:
        kwargs: Dict whose keys are column names and values are column types.
    Returns:
        Table: Created empty table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> pw.debug.compute_and_print(t1, include_id=False)
    age | pet
    """
    from pathway.internals import table_io

    ret = table_io.empty_from_schema(schema_from_types(None, **kwargs))
    # Tell the universe solver this key set is provably empty, enabling
    # downstream optimizations/assertions.
    G.universe_solver.register_as_empty(ret._universe)
    return ret
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Build a new table with columns specified by kwargs.
    Output columns' names are keys(kwargs). values(kwargs) can be raw values, boxed
    values, columns. Assigning to id reindexes the table.
    Args:
        args: Column references.
        kwargs: Column expressions with their new assigned names.
    Returns:
        Table: Created table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... pet
    ... Dog
    ... Cat
    ... ''')
    >>> t2 = t1.select(animal=t1.pet, desc="fluffy")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    animal | desc
    Cat | fluffy
    Dog | fluffy
    """
    named_expressions = combine_args_kwargs(args, kwargs)
    evaluated_columns = []
    for alias, expression in named_expressions.items():
        # Validate before evaluating so errors point at the user expression.
        self._validate_expression(expression)
        evaluated_columns.append((alias, self._eval(expression)))
    return self._with_same_universe(*evaluated_columns)
def __add__(self, other: Table) -> Table:
    """Build a union of `self` with `other`.
    Semantics: Returns a table C, such that
    - C.columns == self.columns + other.columns
    - C.id == self.id == other.id
    Args:
        other: The other table. `self.id` must be equal `other.id` and
            `self.columns` and `other.columns` must be disjoint (or overlapping names
            are THE SAME COLUMN)
    Returns:
        Table: Created table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... pet
    ... 1 Dog
    ... 7 Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age
    ... 1 10
    ... 7 3
    ... ''')
    >>> t3 = t1 + t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | age
    Cat | 3
    Dog | 10
    """
    if not G.universe_solver.query_are_equal(self._universe, other._universe):
        raise ValueError(
            "Universes of all arguments of Table.__add__() have to be equal.\n"
            + "Consider using Table.promise_universes_are_equal() to assert it.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    # Iterating a table yields its column references; this selects all
    # columns of self followed by all columns of other.
    return self.select(*self, *other)
def slice(self) -> TableSlice:
    """Creates a collection of references to self columns.
    Supports basic column manipulation methods.

    NOTE(review): presumably a ``@property`` in the original — the doctest
    accesses ``t1.slice`` without calling it; decorator not visible here.

    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t1.slice.without("age")
    TableSlice({'owner': <table1>.owner, 'pet': <table1>.pet})
    """
    return TableSlice(dict(**self), self)
def filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    """Filter a table according to `filter_expression` condition.
    Args:
        filter_expression: `ColumnExpression` that specifies the filtering condition.
    Returns:
        Table: Result has the same schema as `self` and its ids are subset of `self.id`.
    Example:
    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ... 1 3
    ... 7 0
    ... ''')
    >>> filtered = vertices.filter(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(filtered, include_id=False)
    label | outdegree
    7 | 0
    """
    filter_type = self.eval_type(filter_expression)
    if filter_type != dt.BOOL:
        raise TypeError(
            f"Filter argument of Table.filter() has to be bool, found {filter_type}."
        )
    ret = self._filter(filter_expression)
    # If the condition is of the form `col is not None` on a column of this
    # table, the surviving rows cannot hold None in that column — narrow the
    # column's type from Optional[T] to T.
    if (
        filter_col := expr.get_column_filtered_by_is_none(filter_expression)
    ) is not None and filter_col.table == self:
        name = filter_col.name
        dtype = self._columns[name].dtype
        ret = ret.update_types(**{name: dt.unoptionalize(dtype)})
    return ret
def split(
    self, split_expression: expr.ColumnExpression
) -> tuple[Table[TSchema], Table[TSchema]]:
    """Split a table according to `split_expression` condition.
    Args:
        split_expression: `ColumnExpression` that specifies the split condition.
    Returns:
        positive_table, negative_table: tuple of tables,
        with the same schemas as `self` and with ids that are subsets of `self.id`,
        and provably disjoint.
    Example:
    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ... 1 3
    ... 7 0
    ... ''')
    >>> positive, negative = vertices.split(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(positive, include_id=False)
    label | outdegree
    7 | 0
    >>> pw.debug.compute_and_print(negative, include_id=False)
    label | outdegree
    1 | 3
    """
    positive = self.filter(split_expression)
    negative = self.filter(~split_expression)
    # Register with the universe solver what is true by construction: the
    # two halves are disjoint and together cover self.
    universes.promise_are_pairwise_disjoint(positive, negative)
    universes.promise_are_equal(
        self, Table.concat(positive, negative)
    )  # TODO: add API method for this
    return positive, negative
def _filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    """Internal filter: wrap this table in a FilterContext keyed on the
    evaluated boolean mask (no dtype checks — callers validate)."""
    self._validate_expression(filter_expression)
    mask_column = self._eval(filter_expression)
    # The mask must be defined over exactly this table's key set.
    assert self._universe == mask_column.universe
    return self._table_with_context(
        clmn.FilterContext(mask_column, self._id_column)
    )
def _gradual_broadcast(
    self,
    threshold_table,
    lower_column,
    value_column,
    upper_column,
) -> Table:
    """Return self extended with an ``apx_value`` column computed by a
    gradual broadcast from ``threshold_table``.

    NOTE(review): the exact semantics of GradualBroadcastContext are not
    visible in this chunk — confirm against pathway.internals.column.
    """
    # Name-mangled call: resolves to _Table__gradual_broadcast within the class.
    return self + self.__gradual_broadcast(
        threshold_table, lower_column, value_column, upper_column
    )

def __gradual_broadcast(
    self,
    threshold_table,
    lower_column,
    value_column,
    upper_column,
):
    """Build the one-column (``apx_value``) table for _gradual_broadcast."""
    # lower/value/upper expressions are evaluated on the threshold table,
    # not on self.
    context = clmn.GradualBroadcastContext(
        self._id_column,
        threshold_table._eval(lower_column),
        threshold_table._eval(value_column),
        threshold_table._eval(upper_column),
    )
    return Table(_columns={"apx_value": context.apx_value_column}, _context=context)
def _forget(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
    mark_forgetting_records: bool,
) -> Table:
    """Wrap the table in a ForgetContext: rows whose time falls below the
    threshold are forgotten; optionally the produced retractions are marked."""
    context = clmn.ForgetContext(
        self._id_column,
        self._eval(threshold_column),
        self._eval(time_column),
        mark_forgetting_records,
    )
    return self._table_with_context(context)

def _forget_immediately(
    self,
) -> Table:
    """Wrap the table in a ForgetImmediatelyContext (rows are forgotten as
    soon as they are processed)."""
    context = clmn.ForgetImmediatelyContext(self._id_column)
    return self._table_with_context(context)

def _filter_out_results_of_forgetting(
    self,
) -> Table:
    """Remove the retraction entries produced by forgetting."""
    # The output universe is a superset of input universe because forgetting entries
    # are filtered out. At each point in time, the set of keys with +1 diff can be
    # bigger than a set of keys with +1 diff in an input table.
    context = clmn.FilterOutForgettingContext(self._id_column)
    return self._table_with_context(context)

def _freeze(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    """Wrap the table in a FreezeContext (entries below the threshold stop
    updating)."""
    context = clmn.FreezeContext(
        self._id_column,
        self._eval(threshold_column),
        self._eval(time_column),
    )
    return self._table_with_context(context)

def _buffer(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    """Wrap the table in a BufferContext (entries are withheld until the
    threshold passes)."""
    context = clmn.BufferContext(
        self._id_column,
        self._eval(threshold_column),
        self._eval(time_column),
    )
    return self._table_with_context(context)
def difference(self, other: Table) -> Table[TSchema]:
    r"""Restrict self universe to keys not appearing in the other table.
    Args:
        other: table with ids to remove from self.
    Returns:
        Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.difference(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    10 | Alice | 1
    """
    # Only key sets matter here; other's column values are never read.
    context = clmn.DifferenceContext(
        left=self._id_column,
        right=other._id_column,
    )
    return self._table_with_context(context)
def intersect(self, *tables: Table) -> Table[TSchema]:
    """Restrict self universe to keys appearing in all of the tables.
    Args:
        tables: tables keys of which are used to restrict universe.
    Returns:
        Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.intersect(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    """
    intersecting_universes = (
        self._universe,
        *tuple(table._universe for table in tables),
    )
    universe = G.universe_solver.get_intersection(*intersecting_universes)
    # If the solver proves the intersection equals one of the inputs, a
    # cheaper restriction to that known universe suffices; otherwise a full
    # runtime intersection of the id columns is built.
    if universe in intersecting_universes:
        context: clmn.Context = clmn.RestrictContext(self._id_column, universe)
    else:
        intersecting_ids = (
            self._id_column,
            *tuple(table._id_column for table in tables),
        )
        context = clmn.IntersectContext(
            intersecting_ids=intersecting_ids,
        )
    return self._table_with_context(context)
def restrict(self, other: TableLike) -> Table[TSchema]:
    """Restrict self universe to keys appearing in other.
    Args:
        other: table which universe is used to restrict universe of self.
    Returns:
        Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | cost
    ... 2 | 100
    ... 3 | 200
    ... '''
    ... )
    >>> t2.promise_universe_is_subset_of(t1)
    <pathway.Table schema={'cost': <class 'int'>}>
    >>> t3 = t1.restrict(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    """
    if not G.universe_solver.query_is_subset(other._universe, self._universe):
        # Fix: the two sentences previously concatenated without a separator
        # ("...self universe.Consider using..."); join with "\n" like the
        # sibling error messages (__add__, from_columns, _concat).
        raise ValueError(
            "Table.restrict(): other universe has to be a subset of self universe.\n"
            + "Consider using Table.promise_universe_is_subset_of() to assert it."
        )
    context = clmn.RestrictContext(self._id_column, other._universe)
    # Re-wrap every column of self in the restricting context; names and
    # dtypes are preserved.
    columns = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
    }
    return Table(
        _columns=columns,
        _context=context,
    )
def copy(self) -> Table[TSchema]:
    """Returns a copy of a table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.copy()
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | owner | pet
    7 | Bob | dog
    8 | Alice | cat
    9 | Bob | dog
    10 | Alice | dog
    >>> t1 is t2
    False
    """
    # Delegates to _copy_as with the concrete (possibly subclassed) type.
    return self._copy_as(type(self))
def _copy_as(self, table_type: type[TTable], /, **kwargs) -> TTable:
    """Rebuild this table as ``table_type`` over the same rowwise context,
    forwarding any extra constructor kwargs."""
    rowwise = self._rowwise_context
    wrapped_columns = {
        name: self._wrap_column_in_context(rowwise, column, name)
        for name, column in self._columns.items()
    }
    return table_type(_columns=wrapped_columns, _context=rowwise, **kwargs)
def groupby(
    self,
    *args: expr.ColumnReference,
    id: expr.ColumnReference | None = None,
    sort_by: expr.ColumnReference | None = None,
    _filter_out_results_of_forgetting: bool = False,
    instance: expr.ColumnReference | None = None,
) -> groupbys.GroupedTable:
    """Groups table by columns from args.
    Note:
    Usually followed by `.reduce()` that aggregates the result and returns a table.
    Args:
        args: columns to group by.
        id: if provided, is the column used to set id's of the rows of the result
        sort_by: if provided, column values are used as sorting keys for particular reducers
        instance: optional argument describing partitioning of the data into separate instances
    Returns:
        GroupedTable: Groupby object.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.groupby(t1.pet, t1.owner).reduce(t1.owner, t1.pet, ageagg=pw.reducers.sum(t1.age))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | pet | ageagg
    Alice | cat | 8
    Alice | dog | 10
    Bob | dog | 16
    """
    # `instance` is just an additional grouping column.
    if instance is not None:
        args = (*args, instance)
    # `id` is only meaningful when grouping by that single column: adopt it
    # as the grouping column if none given, otherwise require consistency.
    if id is not None:
        if len(args) == 0:
            args = (id,)
        elif len(args) > 1:
            raise ValueError(
                "Table.groupby() cannot have id argument when grouping by multiple columns."
            )
        elif args[0]._column != id._column:
            raise ValueError(
                "Table.groupby() received id argument and is grouped by a single column,"
                + " but the arguments are not equal.\n"
                + "Consider using <table>.groupby(id=...), skipping the positional argument."
            )
    for arg in args:
        if not isinstance(arg, expr.ColumnReference):
            # Special-case strings to give a more helpful hint.
            if isinstance(arg, str):
                raise ValueError(
                    f"Expected a ColumnReference, found a string. Did you mean <table>.{arg}"
                    + f" instead of {repr(arg)}?"
                )
            else:
                raise ValueError(
                    "All Table.groupby() arguments have to be a ColumnReference."
                )
    return groupbys.GroupedTable.create(
        table=self,
        grouping_columns=args,
        set_id=id is not None,
        sort_by=sort_by,
        _filter_out_results_of_forgetting=_filter_out_results_of_forgetting,
    )
def reduce(
    self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
) -> Table:
    """Reduce a table to a single row.
    Equivalent to `self.groupby().reduce(*args, **kwargs)`.
    Args:
        args: reducer to reduce the table with
        kwargs: reducer to reduce the table with. Its key is the new name of a column.
    Returns:
        Table: Reduced table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.reduce(ageagg=pw.reducers.argmin(t1.age))
    >>> pw.debug.compute_and_print(t2, include_id=False) # doctest: +ELLIPSIS
    ageagg
    ^...
    >>> t3 = t2.select(t1.ix(t2.ageagg).age, t1.ix(t2.ageagg).pet)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | pet
    7 | dog
    """
    # Grouping by no columns puts every row into a single group.
    return self.groupby().reduce(*args, **kwargs)
def deduplicate(
    self,
    *,
    value: expr.ColumnExpression,
    instance: expr.ColumnExpression | None = None,
    acceptor: Callable[[T, T], bool],
    persistent_id: str | None = None,
) -> Table:
    """Deduplicates rows in `self` on `value` column using acceptor function.
    It keeps rows which where accepted by the acceptor function.
    Acceptor operates on two arguments - current value and the previously accepted value.
    Args:
        value: column expression used for deduplication.
        instance: Grouping column. For rows with different
            values in this column, deduplication will be performed separately.
            Defaults to None.
        acceptor: callback telling whether two values are different.
        persistent_id: (unstable) An identifier, under which the state of the table
            will be persisted or ``None``, if there is no need to persist the state of this table.
            When a program restarts, it restores the state for all input tables according to what
            was saved for their ``persistent_id``. This way it's possible to configure the start of
            computations from the moment they were terminated last time.
    Returns:
        Table: the result of deduplication.
    Example:
    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown(
    ... '''
    ... val | __time__
    ... 1 | 2
    ... 2 | 4
    ... 3 | 6
    ... 4 | 8
    ... '''
    ... )
    >>>
    >>> def acceptor(new_value, old_value) -> bool:
    ...     return new_value >= old_value + 2
    ...
    >>>
    >>> result = table.deduplicate(value=pw.this.val, acceptor=acceptor)
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    val | __time__ | __diff__
    1 | 2 | 1
    1 | 6 | -1
    3 | 6 | 1
    >>>
    >>> table = pw.debug.table_from_markdown(
    ... '''
    ... val | instance | __time__
    ... 1 | 1 | 2
    ... 2 | 1 | 4
    ... 3 | 2 | 6
    ... 4 | 1 | 8
    ... 4 | 2 | 8
    ... 5 | 1 | 10
    ... '''
    ... )
    >>>
    >>> def acceptor(new_value, old_value) -> bool:
    ...     return new_value >= old_value + 2
    ...
    >>>
    >>> result = table.deduplicate(
    ...     value=pw.this.val, instance=pw.this.instance, acceptor=acceptor
    ... )
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    val | instance | __time__ | __diff__
    1 | 1 | 2 | 1
    3 | 2 | 6 | 1
    1 | 1 | 8 | -1
    4 | 1 | 8 | 1
    """
    # With no instance column, all rows share the single constant instance
    # None, i.e. deduplication runs over the whole table.
    if instance is None:
        instance = expr.ColumnConstExpression(None)
    self._validate_expression(value)
    self._validate_expression(instance)
    value_col = self._eval(value)
    instance_col = self._eval(instance)
    context = clmn.DeduplicateContext(
        value_col,
        (instance_col,),
        acceptor,
        self._id_column,
        persistent_id,
    )
    return self._table_with_context(context)
def ix(
    self, expression: expr.ColumnExpression, *, optional: bool = False, context=None
) -> Table:
    """Reindexes the table using expression values as keys. Uses keys from context, or tries to infer
    proper context from the expression.
    If optional is True, then None in expression values result in None values in the result columns.
    Missing values in table keys result in RuntimeError.
    Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
    (latter results in returning a delayed operation, and should be only used when using `ix` inside
    join().select() or groupby().reduce() sequence).
    Returns:
        Reindexed table with the same set of columns.
    Example:
    >>> import pathway as pw
    >>> t_animals = pw.debug.table_from_markdown('''
    ... | epithet | genus
    ... 1 | upupa | epops
    ... 2 | acherontia | atropos
    ... 3 | bubo | scandiacus
    ... 4 | dynastes | hercules
    ... ''')
    >>> t_birds = pw.debug.table_from_markdown('''
    ... | desc
    ... 2 | hoopoe
    ... 4 | owl
    ... ''')
    >>> ret = t_birds.select(t_birds.desc, latin=t_animals.ix(t_birds.id).genus)
    >>> pw.debug.compute_and_print(ret, include_id=False)
    desc | latin
    hoopoe | atropos
    owl | hercules
    """
    # Context inference: if the expression touches exactly one table, that
    # table is the context; several distinct tables are accepted only when
    # their universes are provably equal.
    if context is None:
        all_tables = collect_tables(expression)
        if len(all_tables) == 0:
            context = thisclass.this
        elif all(tab == all_tables[0] for tab in all_tables):
            context = all_tables[0]
    if context is None:
        for tab in all_tables:
            if not isinstance(tab, Table):
                raise ValueError("Table expected here.")
        if len(all_tables) == 0:
            raise ValueError("Const value provided.")
        context = all_tables[0]
        for tab in all_tables:
            assert context._universe.is_equal_to(tab._universe)
    if isinstance(context, groupbys.GroupedJoinable):
        context = thisclass.this
    # pw.this context: defer the whole operation until the enclosing
    # join/groupby resolves pw.this to a concrete table.
    if isinstance(context, thisclass.ThisMetaclass):
        return context._delayed_op(
            lambda table, expression: self.ix(
                expression=expression, optional=optional, context=table
            ),
            expression=expression,
            qualname=f"{self}.ix(...)",
            name="ix",
        )
    restrict_universe = RestrictUniverseDesugaring(context)
    expression = restrict_universe.eval_expression(expression)
    key_col = context.select(tmp=expression).tmp
    key_dtype = self.eval_type(key_col)
    if (
        optional and not dt.dtype_issubclass(key_dtype, dt.Optional(dt.POINTER))
    ) or (not optional and not isinstance(key_dtype, dt.Pointer)):
        raise TypeError(
            f"Pathway supports indexing with Pointer type only. The type used was {key_dtype}."
        )
    # Optional pointers propagate None into every output column, so all
    # column types must be widened to Optional.
    if optional and isinstance(key_dtype, dt.Optional):
        self_ = self.update_types(
            **{name: dt.Optional(self.typehints()[name]) for name in self.keys()}
        )
    else:
        self_ = self
    return self_._ix(key_col, optional)
def _ix(
    self,
    key_expression: expr.ColumnReference,
    optional: bool,
) -> Table:
    """Reindex this table by the pointer column underlying ``key_expression``
    (type checks already done by the public ``ix``)."""
    lookup_context = clmn.IxContext(
        key_expression._column, self._id_column, optional
    )
    return self._table_with_context(lookup_context)
def __lshift__(self, other: Table) -> Table:
    """Alias to update_cells method.
    Updates cells of `self`, breaking ties in favor of the values in `other`.
    Semantics:
    - result.columns == self.columns
    - result.id == self.id
    - conflicts are resolved preferring other's values
    Requires:
    - other.columns ⊆ self.columns
    - other.id ⊆ self.id
    Args:
        other: the other table.
    Returns:
        Table: `self` updated with cells from `other`.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 30
    ... ''')
    >>> pw.universes.promise_is_subset_of(t2, t1)
    >>> t3 = t1 << t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    """
    # _stacklevel=2 so warnings emitted by update_cells point at the `<<`
    # call site rather than at this wrapper.
    return self.update_cells(other, _stacklevel=2)
def concat(self, *others: Table[TSchema]) -> Table[TSchema]:
    """Concats `self` with every `other` ∊ `others`.
    Semantics:
    - result.columns == self.columns == other.columns
    - result.id == self.id ∪ other.id
    if self.id and other.id collide, throws an exception.
    Requires:
    - other.columns == self.columns
    - self.id disjoint with other.id
    Args:
        other: the other table.
    Returns:
        Table: The concatenated table. Id's of rows from original tables are preserved.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 11 | 11 | Alice | 30
    ... 12 | 12 | Tom | 40
    ... ''')
    >>> pw.universes.promise_are_pairwise_disjoint(t1, t2)
    >>> t3 = t1.concat(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 1
    11 | Alice | 30
    12 | Tom | 40
    """
    for other in others:
        if other.keys() != self.keys():
            raise ValueError(
                "columns do not match in the argument of Table.concat()"
            )
    # Per column, fold the least common ancestor of the dtypes across all
    # tables, then cast every table to that common schema before the
    # low-level concat.
    schema = {
        key: functools.reduce(
            dt.types_lca,
            [other.schema._dtypes()[key] for other in others],
            self.schema._dtypes()[key],
        )
        for key in self.keys()
    }
    return Table._concat(
        self.cast_to_types(**schema),
        *[other.cast_to_types(**schema) for other in others],
    )
def _concat(self, *others: Table[TSchema]) -> Table[TSchema]:
    # Unsafe concat: assumes columns already match; still verifies that the
    # universes are provably disjoint before building the context.
    union_ids = (self._id_column,) + tuple(t._id_column for t in others)
    disjoint = G.universe_solver.query_are_disjoint(
        *(c.universe for c in union_ids)
    )
    if not disjoint:
        raise ValueError(
            "Universes of the arguments of Table.concat() have to be disjoint.\n"
            + "Consider using Table.promise_universes_are_disjoint() to assert it.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    updates = tuple(
        {name: t._columns[name] for name in self.keys()} for t in others
    )
    context = clmn.ConcatUnsafeContext(union_ids=union_ids, updates=updates)
    return self._table_with_context(context)
def update_cells(self, other: Table, _stacklevel: int = 1) -> Table:
    """Updates cells of `self`, breaking ties in favor of the values in `other`.

    Semantics:
        - result.columns == self.columns
        - result.id == self.id
        - conflicts are resolved preferring other's values

    Requires:
        - other.columns ⊆ self.columns
        - other.id ⊆ self.id

    Args:
        other: the table providing the updated values.
        _stacklevel: internal; adjusts where the possible warning points.

    Returns:
        Table: `self` updated with cells from `other`.
    """
    extra = set(other.keys()) - set(self.keys())
    if extra:
        raise ValueError(
            f"Columns of the argument in Table.update_cells() not present in the updated table: {list(extra)}."
        )
    if self._universe == other._universe:
        # Identical key sets: the update degenerates to with_columns.
        warnings.warn(
            "Key sets of self and other in update_cells are the same."
            + " Using with_columns instead of update_cells.",
            stacklevel=_stacklevel + 4,
        )
        return self.with_columns(*(other[name] for name in other))
    # Align dtypes of the overlapping columns before the unsafe update.
    schema = {
        name: dt.types_lca(
            self.schema.__dtypes__[name], other.schema.__dtypes__[name]
        )
        for name in other.keys()
    }
    return Table._update_cells(
        self.cast_to_types(**schema), other.cast_to_types(**schema)
    )
def _update_cells(self, other: Table) -> Table:
    # The updating table may only carry keys that already exist in self.
    if not other._universe.is_subset_of(self._universe):
        raise ValueError(
            "Universe of the argument of Table.update_cells() needs to be "
            + "a subset of the universe of the updated table.\n"
            + "Consider using Table.promise_is_subset_of() to assert this.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    updates = {name: other._columns[name] for name in other.keys()}
    return self._table_with_context(
        clmn.UpdateCellsContext(
            left=self._id_column,
            right=other._id_column,
            updates=updates,
        )
    )
def update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
    """Updates rows of `self`, breaking ties in favor of the rows in `other`.

    Semantics:
        - result.columns == self.columns == other.columns
        - result.id == self.id ∪ other.id

    Requires:
        - other.columns == self.columns

    Args:
        other: the table providing the new or updated rows.

    Returns:
        Table: `self` updated with rows from `other`.
    """
    if self.keys() != other.keys():
        raise ValueError(
            "Columns do not match between argument of Table.update_rows() and the updated table."
        )
    if self._universe.is_subset_of(other._universe):
        # Every key of self also occurs in other, so other wins everywhere.
        warnings.warn(
            "Universe of self is a subset of universe of other in update_rows. Returning other.",
            stacklevel=5,
        )
        return other
    schema = {
        name: dt.types_lca(
            self.schema.__dtypes__[name], other.schema.__dtypes__[name]
        )
        for name in self.keys()
    }
    left = self.cast_to_types(**schema)
    right = other.cast_to_types(**schema)
    union_universes = (self._universe, other._universe)
    if G.universe_solver.get_union(*union_universes) == self._universe:
        # No genuinely new keys: a cell-level update suffices.
        return Table._update_cells(left, right)
    return Table._update_rows(left, right)
def _update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
    # Unsafe variant: assumes columns match and dtypes are already aligned.
    updates = {name: other._columns[name] for name in self.keys()}
    context = clmn.UpdateRowsContext(
        updates=updates,
        union_ids=(self._id_column, other._id_column),
    )
    return self._table_with_context(context)
def with_columns(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Updates columns of `self`, according to args and kwargs.

    See `table.select` specification for evaluation of args and kwargs.

    Args:
        args: column references to add or overwrite.
        kwargs: new columns given as name=expression pairs.

    Returns:
        Table: `self` with the given columns added or replaced.
    """
    overrides = self.select(*args, **kwargs)
    # Existing columns first; the freshly selected ones take precedence.
    merged = dict(self)
    merged.update(overrides)
    return self.select(**merged)
def with_id(self, new_index: expr.ColumnReference) -> Table:
    """Set new ids based on another column containing id-typed values.

    To generate ids based on arbitrary valued columns, use `with_id_from`.
    Values assigned must be row-wise unique.

    Args:
        new_index: column to be used as the new index.

    Returns:
        Table with updated ids.
    """
    return self._with_new_index(new_index)
def with_id_from(
    self,
    *args: expr.ColumnExpression | Value,
    instance: expr.ColumnReference | None = None,
) -> Table:
    """Compute new ids based on values in columns.

    Ids computed from the given expressions must be row-wise unique.

    Args:
        args: expressions used as primary keys.
        instance: optional partitioning column mixed into the key.

    Returns:
        Table: `self` updated with recomputed ids.
    """
    # The new index has to be an actual column, so materialize it via select.
    key_table = self.select(
        ref_column=self.pointer_from(*args, instance=instance)
    )
    return self._with_new_index(new_index=key_table.ref_column)
def _with_new_index(
    self,
    new_index: expr.ColumnExpression,
) -> Table:
    # The reindexing expression must live in this table's universe and
    # evaluate to a Pointer.
    self._validate_expression(new_index)
    index_type = self.eval_type(new_index)
    if not isinstance(index_type, dt.Pointer):
        raise TypeError(
            f"Pathway supports reindexing Tables with Pointer type only. The type used was {index_type}."
        )
    reindex_column = self._eval(new_index)
    assert self._universe == reindex_column.universe
    return self._table_with_context(clmn.ReindexContext(reindex_column))
def rename_columns(self, **kwargs: str | expr.ColumnReference) -> Table:
    """Rename columns according to kwargs.

    Columns not in keys(kwargs) are not changed. New name of a column must
    not be `id`.

    Args:
        kwargs: mapping from new column names to old names (given either
            as strings or as column references).

    Returns:
        Table: `self` with columns renamed.

    Raises:
        ValueError: if an old column name does not exist in the table.
    """
    mapping: dict[str, str] = {}
    for new_name, old_name_col in kwargs.items():
        if isinstance(old_name_col, expr.ColumnReference):
            old_name = old_name_col.name
        else:
            old_name = old_name_col
        if old_name not in self._columns:
            raise ValueError(f"Column {old_name} does not exist in a given table.")
        mapping[new_name] = old_name
    renamed_columns = self._columns.copy()
    # Drop all renamed source columns first so that swaps (e.g. a<->b)
    # do not clobber each other, then re-add them under their new names.
    for old_name in mapping.values():
        renamed_columns.pop(old_name)
    for new_name, old_name in mapping.items():
        renamed_columns[new_name] = self._columns[old_name]
    columns_wrapped = {
        name: self._wrap_column_in_context(
            self._rowwise_context,
            column,
            mapping.get(name, name),
        )
        for name, column in renamed_columns.items()
    }
    return self._with_same_universe(*columns_wrapped.items())
def rename_by_dict(
    self, names_mapping: dict[str | expr.ColumnReference, str]
) -> Table:
    """Rename columns according to a dictionary.

    Columns not present in the mapping are not changed. New name of a
    column must not be `id`.

    Args:
        names_mapping: mapping from old column names to new names.

    Returns:
        Table: `self` with columns renamed.
    """
    # Invert the mapping into the new_name=old_column form expected by
    # rename_columns.
    renames = {
        new_name: self[old_name]
        for old_name, new_name in names_mapping.items()
    }
    return self.rename_columns(**renames)
def with_prefix(self, prefix: str) -> Table:
    """Rename columns by adding `prefix` to each column name.

    Args:
        prefix: string prepended to every column name.

    Returns:
        Table: `self` with columns renamed.
    """
    renaming = {name: prefix + name for name in self.keys()}
    return self.rename_by_dict(renaming)
def with_suffix(self, suffix: str) -> Table:
    """Rename columns by adding `suffix` to each column name.

    Args:
        suffix: string appended to every column name.

    Returns:
        Table: `self` with columns renamed.
    """
    renaming = {name: name + suffix for name in self.keys()}
    return self.rename_by_dict(renaming)
def rename(
    self,
    names_mapping: dict[str | expr.ColumnReference, str] | None = None,
    **kwargs: expr.ColumnExpression,
) -> Table:
    """Rename columns according either to a dictionary or kwargs.

    If a mapping is provided using a dictionary, ``rename_by_dict`` will be
    used. Otherwise, ``rename_columns`` will be used with kwargs.

    Columns not mentioned are not changed. New name of a column must not
    be ``id``.

    Args:
        names_mapping: mapping from old column names to new names.
        kwargs: mapping from new column names to old columns.

    Returns:
        Table: `self` with columns renamed.
    """
    if names_mapping is None:
        return self.rename_columns(**kwargs)
    return self.rename_by_dict(names_mapping=names_mapping)
def without(self, *columns: str | expr.ColumnReference) -> Table:
    """Selects all columns except the named ones.

    Args:
        columns: columns to be dropped, given either by name or by
            `table.column_name` reference.

    Returns:
        Table: `self` without the specified columns.
    """
    remaining = self._columns.copy()
    for col in columns:
        if isinstance(col, expr.ColumnReference):
            remaining.pop(col.name)
        else:
            assert isinstance(col, str)
            remaining.pop(col)
    wrapped = {
        name: self._wrap_column_in_context(self._rowwise_context, column, name)
        for name, column in remaining.items()
    }
    return self._with_same_universe(*wrapped.items())
def having(self, *indexers: expr.ColumnReference) -> Table[TSchema]:
    """Removes rows so that indexed.ix(indexer) is possible when some rows
    are missing, for each indexer in indexers."""
    restricted = [self._having(indexer) for indexer in indexers]
    if not restricted:
        return self
    first, *rest = restricted
    if not rest:
        return first
    return first.intersect(*rest)
def update_types(self, **kwargs: Any) -> Table:
    """Updates types in schema. Has no effect on the runtime.

    Args:
        kwargs: mapping from existing column names to their new declared types.

    Returns:
        Table: `self` with the column types redeclared.

    Raises:
        ValueError: if a given name is not an existing column.
    """
    for name in kwargs.keys():
        if name not in self.keys():
            # Name the offending column so the caller can find the typo.
            raise ValueError(
                f"Table.update_types() argument name has to be an existing table column name, but {name!r} is not."
            )
    from pathway.internals.common import declare_type

    return self.with_columns(
        **{key: declare_type(val, self[key]) for key, val in kwargs.items()}
    )
def cast_to_types(self, **kwargs: Any) -> Table:
    """Casts columns to types.

    Args:
        kwargs: mapping from existing column names to target types.

    Returns:
        Table: `self` with the columns cast.

    Raises:
        ValueError: if a given name is not an existing column.
    """
    for name in kwargs.keys():
        if name not in self.keys():
            # Name the offending column so the caller can find the typo.
            raise ValueError(
                f"Table.cast_to_types() argument name has to be an existing table column name, but {name!r} is not."
            )
    from pathway.internals.common import cast

    return self.with_columns(
        **{key: cast(val, self[key]) for key, val in kwargs.items()}
    )
def _having(self, indexer: expr.ColumnReference) -> Table[TSchema]:
    # Restrict self to the keys that are present in the indexer's column.
    return self._table_with_context(
        clmn.HavingContext(
            orig_id_column=self._id_column, key_column=indexer._column
        )
    )
def with_universe_of(self, other: TableLike) -> Table:
    """Returns a copy of self with exactly the same universe as `other`.

    Semantics: required precondition self.universe == other.universe.
    Used in situations where Pathway cannot deduce equality of universes,
    but those are equal as verified during runtime.

    Args:
        other: the table-like object whose universe is promised to be equal.

    Returns:
        Table: a copy of `self` bound to `other`'s universe.
    """
    if self._universe == other._universe:
        return self.copy()
    # Record the promise so the runtime can verify it, then rebind.
    universes.promise_are_equal(self, other)
    return self._unsafe_promise_universe(other)
def flatten(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Performs a flatmap operation on a column or expression given as the
    first argument.

    Datatype of this column or expression has to be iterable or a Json
    array. Other columns specified in the method arguments are duplicated
    as many times as the length of the iterable.

    It is possible to get ids of source rows by using the `table.id`
    column, e.g. `table.flatten(table.column_to_be_flattened, original_id=table.id)`.

    Args:
        args: the first reference is flattened; the rest are carried along.
        kwargs: additional named columns carried along.

    Returns:
        Table: the flattened table.
    """
    intermediate_table = self.select(*args, **kwargs)
    all_args = combine_args_kwargs(args, kwargs)
    if not all_args:
        raise ValueError("Table.flatten() cannot have empty arguments list.")
    # The first selected column is the one being flattened.
    flatten_name = next(iter(all_args.keys()))
    return intermediate_table._flatten(flatten_name)
def _flatten(
    self,
    flatten_name: str,
) -> Table:
    flatten_column = self._columns[flatten_name]
    assert isinstance(flatten_column, clmn.ColumnWithExpression)
    context = clmn.FlattenContext(
        orig_universe=self._universe,
        flatten_column=flatten_column,
    )
    # Re-wrap every other column in the flatten context; the flattened
    # column itself is produced by the context.
    carried = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
        if name != flatten_name
    }
    return Table(
        _columns={flatten_name: context.flatten_result_column, **carried},
        _context=context,
    )
def sort(
    self,
    key: expr.ColumnExpression,
    instance: expr.ColumnExpression | None = None,
) -> Table:
    """
    Sorts a table by the specified keys.

    Args:
        key (ColumnExpression[int | float | datetime | str | bytes]):
            An expression to sort by.
        instance : ColumnReference or None
            An expression with instance. Rows are sorted within an instance.
            ``prev`` and ``next`` columns will only point to rows that have
            the same instance.

    Returns:
        pw.Table: The sorted table. Contains two columns: ``prev`` and
        ``next``, containing the pointers to the previous and next rows.
    """
    instance = clmn.ColumnExpression._wrap(instance)
    sorting_context = clmn.SortingContext(
        self._eval(key),
        self._eval(instance),
    )
    # The result carries only the linked-list pointers produced by sorting.
    return Table(
        _columns={
            "prev": sorting_context.prev_column,
            "next": sorting_context.next_column,
        },
        _context=sorting_context,
    )
def _set_source(self, source: OutputHandle):
    # Attach lineage information lazily: only to objects that lack it.
    self._source = source
    if not hasattr(self._id_column, "lineage"):
        self._id_column.lineage = clmn.ColumnLineage(name="id", source=source)
    for name, column in self._columns.items():
        if not hasattr(column, "lineage"):
            column.lineage = clmn.ColumnLineage(name=name, source=source)
    if not hasattr(self._universe, "lineage"):
        self._universe.lineage = clmn.Lineage(source=source)
def _unsafe_promise_universe(self, other: TableLike) -> Table:
    # Rebind self's rows to other's universe without any runtime check.
    return self._table_with_context(
        clmn.PromiseSameUniverseContext(self._id_column, other._id_column)
    )
def _validate_expression(self, expression: expr.ColumnExpression):
    # Every column the expression depends on (above any reducer) must live
    # in this table's universe.
    for dep in expression._dependencies_above_reducer():
        if self._universe == dep._column.universe:
            continue
        raise ValueError(
            f"You cannot use {dep.to_column_expression()} in this context."
            + " Its universe is different than the universe of the table the method"
            + " was called on. You can use <table1>.with_universe_of(<table2>)"
            + " to assign universe of <table2> to <table1> if you're sure their"
            + " sets of keys are equal."
        )
def _wrap_column_in_context(
    self,
    context: clmn.Context,
    column: clmn.Column,
    name: str,
    lineage: clmn.Lineage | None = None,
) -> clmn.Column:
    """Contextualize column by wrapping it in an expression."""
    reference = expr.ColumnReference(_table=self, _column=column, _name=name)
    return reference._column_with_expression_cls(
        context=context,
        universe=context.universe,
        expression=reference,
        lineage=lineage,
    )
def _table_with_context(self, context: clmn.Context) -> Table:
    # Re-wrap every column of self in the new context.
    wrapped = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
    }
    return Table(_columns=wrapped, _context=context)
def _table_restricted_context(self) -> clmn.TableRestrictedRowwiseContext:
    # Rowwise context restricted to referencing columns of this table only.
    return clmn.TableRestrictedRowwiseContext(self._id_column, self)
def _eval(
    self, expression: expr.ColumnExpression, context: clmn.Context | None = None
) -> clmn.ColumnWithExpression:
    """Desugar expression and wrap it in the given context (rowwise by default)."""
    if context is None:
        context = self._rowwise_context
    return expression._column_with_expression_cls(
        context=context,
        universe=context.universe,
        expression=expression,
    )
def _from_schema(cls: type[TTable], schema: type[Schema]) -> TTable:
    # Build a fresh materialized table with one column per schema field.
    universe = Universe()
    context = clmn.MaterializedContext(universe, schema.universe_properties)
    columns = {
        name: clmn.MaterializedColumn(universe, schema.column_properties(name))
        for name in schema.column_names()
    }
    return cls(_columns=columns, _schema=schema, _context=context)
def __repr__(self) -> str:
    schema_description = dict(self.typehints())
    return f"<pathway.Table schema={schema_description}>"
def _with_same_universe(
    self,
    *columns: tuple[str, clmn.Column],
    schema: type[Schema] | None = None,
) -> Table:
    # Keep the current rowwise context; only the column set changes.
    new_columns = dict(columns)
    return Table(
        _columns=new_columns,
        _schema=schema,
        _context=self._rowwise_context,
    )
def _sort_columns_by_other(self, other: Table):
    # Reorder self's columns in place to follow other's column order.
    assert self.keys() == other.keys()
    ordered = other.keys()
    self._columns = {name: self._columns[name] for name in ordered}
def _operator_dependencies(self) -> StableSet[Table]:
    # A concrete table depends only on itself.
    return StableSet((self,))
def debug(self, name: str):
    """Register a debug operator observing this table and return self."""

    def make_operator(id):
        return DebugOperator(name, id)

    G.add_operator(make_operator, lambda operator: operator(self))
    return self
def to(self, sink: DataSink) -> None:
    """Send this table's contents to the given data sink."""
    from pathway.internals import table_io
    table_io.table_to_datasink(self, sink)
def _materialize(self, universe: Universe):
    # Replace every column with a materialized counterpart living in the
    # given universe; the schema is preserved.
    context = clmn.MaterializedContext(universe)
    materialized = {
        name: clmn.MaterializedColumn(universe, column.properties)
        for name, column in self._columns.items()
    }
    return Table(
        _columns=materialized,
        _schema=self.schema,
        _context=context,
    )
def pointer_from(
    self, *args: Any, optional=False, instance: expr.ColumnReference | None = None
):
    """Pseudo-random hash of its arguments. Produces pointer types.
    Applied column-wise.

    Args:
        args: expressions hashed into the pointer.
        optional: if True, allows optional (None-able) inputs.
        instance: optional partitioning column appended to the key.
    """
    key_parts = args if instance is None else (*args, instance)
    # XXX verify types for the table primary_keys
    return expr.PointerExpression(self, *key_parts, optional=optional)
def ix_ref(
    self,
    *args: expr.ColumnExpression | Value,
    optional: bool = False,
    context=None,
    instance: expr.ColumnReference | None = None,
):
    """Reindexes the table using expressions as primary keys.

    Uses keys from context, or tries to infer a proper context from the
    expression. If `optional` is True, then None in expression values
    results in None values in the result columns. Missing values in table
    keys result in RuntimeError.

    `context` can be anything that allows for `select` or `reduce`, or a
    `pathway.this` construct (the latter results in returning a delayed
    operation, and should only be used when using `ix` inside a
    join().select() or groupby().reduce() sequence).

    Args:
        args: column references or values forming the primary key.
        optional: propagate None instead of failing on None keys.
        context: evaluation context for the lookup.
        instance: optional partitioning column.

    Returns:
        Row: indexed row.
    """
    key = self.pointer_from(*args, optional=optional, instance=instance)
    return self.ix(key, optional=optional, context=context)
def _subtables(self) -> StableSet[Table]:
    # A concrete table is its own only subtable.
    return StableSet((self,))
def _substitutions(
    self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
    # A concrete table requires no expression substitutions.
    return self, {}
def typehints(self) -> Mapping[str, Any]:
    """
    Return the types of the columns as a dictionary.

    Returns:
        Mapping from column name to its Python type hint.
    """
    return self.schema.typehints()
def eval_type(self, expression: expr.ColumnExpression) -> dt.DType:
    # Type the expression within this table's rowwise context.
    interpreter = self._rowwise_context._get_type_interpreter()
    typed = interpreter.eval_expression(expression, state=TypeInterpreterState())
    return typed._dtype
def _auto_live(self) -> Table:
    """Make self automatically live in interactive mode."""
    from pathway.internals.interactive import is_interactive_mode_enabled

    return self.live() if is_interactive_mode_enabled() else self
def live(self) -> LiveTable[TSchema]:
    """Return a live view of this table (experimental feature)."""
    from pathway.internals.interactive import LiveTable

    warnings.warn("live tables are an experimental feature", stacklevel=2)
    return LiveTable._create(self)
The provided code snippet includes necessary dependencies for implementing the `interval_join` function. Write a Python function `def interval_join( self: pw.Table, other: pw.Table, self_time: pw.ColumnExpression, other_time: pw.ColumnExpression, interval: Interval[int] | Interval[float] | Interval[datetime.timedelta], *on: pw.ColumnExpression, behavior: CommonBehavior | None = None, how: pw.JoinMode = pw.JoinMode.INNER, left_instance: pw.ColumnReference | None = None, right_instance: pw.ColumnReference | None = None, ) -> IntervalJoinResult` to solve the following problem:
Performs an interval join of self with other using a time difference and join expressions. If `self_time + lower_bound <= other_time <= self_time + upper_bound` and conditions in `on` are satisfied, the rows are joined. Args: other: the right side of a join. self_time (pw.ColumnExpression[int | float | datetime]): time expression in self. other_time (pw.ColumnExpression[int | float | datetime]): time expression in other. lower_bound: a lower bound on time difference between other_time and self_time. upper_bound: an upper bound on time difference between other_time and self_time. on: a list of column expressions. Each must have == as the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. behavior: defines a temporal behavior of a join - features like delaying entries or ignoring late entries. You can see examples below or read more in the `temporal behavior of interval join tutorial </developers/user-guide/temporal-data/temporal_behavior>`_ . how: decides whether to run `interval_join_inner`, `interval_join_left`, `interval_join_right` or `interval_join_outer`. Default is INNER. left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Returns: IntervalJoinResult: a result of the interval join. A method `.select()` can be called on it to extract relevant columns from the result of a join. Examples: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 3 ... 2 | 4 ... 3 | 5 ... 4 | 11 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 0 ... 2 | 1 ... 3 | 4 ... 4 | 7 ... ''' ... ) >>> t3 = t1.interval_join(t2, t1.t, t2.t, pw.temporal.interval(-2, 1)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 3 | 1 3 | 4 4 | 4 5 | 4 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 3 ... 2 | 1 | 4 ... 3 | 1 | 5 ... 4 | 1 | 11 ... 5 | 2 | 2 ... 
6 | 2 | 3 ... 7 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 0 ... 2 | 1 | 1 ... 3 | 1 | 4 ... 4 | 1 | 7 ... 5 | 2 | 0 ... 6 | 2 | 2 ... 7 | 4 | 2 ... ''' ... ) >>> t3 = t1.interval_join( ... t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b, how=pw.JoinMode.INNER ... ).select(t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) a | left_t | right_t 1 | 3 | 1 1 | 3 | 4 1 | 4 | 4 1 | 5 | 4 2 | 2 | 0 2 | 2 | 2 2 | 3 | 2 Setting `behavior` allows to control temporal behavior of an interval join. Then, each side of the interval join keeps track of the maximal already seen time (`self_time` and `other_time`). The arguments of `behavior` mean in the context of an interval join what follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, interval join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Example without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 1 | 1 | 0 | 2 ... 2 | 2 | 2 | 4 ... 3 | 1 | 4 | 4 ... 4 | 2 | 8 | 8 ... 5 | 1 | 0 | 10 ... 6 | 1 | 4 | 10 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 42 | 1 | 2 | 2 ... 8 | 2 | 10 | 14 ... 10 | 2 | 4 | 30 ... ''' ... ) >>> result_without_cutoff = t1.interval_join( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... ).select( ... 
left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 5 | 42 | 1 | 0 | 2 | 10 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | 8 | 2 | 8 | 10 | 14 | 1 2 | 10 | 2 | 2 | 4 | 30 | 1 >>> result_with_cutoff = t1.interval_join( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... behavior=pw.temporal.common_behavior(cutoff=6), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | 8 | 2 | 8 | 10 | 14 | 1 The record with ``value=5`` from table ``t1`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``0 <= 8-6``). The record with ``value=10`` from table ``t2`` was not joined because its ``event_time`` was equal to the maximal already seen time minus ``cutoff`` (``4 <= 10-6``).
Here is the function:
def interval_join(
    self: pw.Table,
    other: pw.Table,
    self_time: pw.ColumnExpression,
    other_time: pw.ColumnExpression,
    interval: Interval[int] | Interval[float] | Interval[datetime.timedelta],
    *on: pw.ColumnExpression,
    behavior: CommonBehavior | None = None,
    how: pw.JoinMode = pw.JoinMode.INNER,
    left_instance: pw.ColumnReference | None = None,
    right_instance: pw.ColumnReference | None = None,
) -> IntervalJoinResult:
    """Performs an interval join of self with other using a time difference
    and join expressions. If `self_time + lower_bound <=
    other_time <= self_time + upper_bound`
    and conditions in `on` are satisfied, the rows are joined.

    Args:
        other: the right side of a join.
        self_time (pw.ColumnExpression[int | float | datetime]):
            time expression in self.
        other_time (pw.ColumnExpression[int | float | datetime]):
            time expression in other.
        lower_bound: a lower bound on time difference between other_time
            and self_time.
        upper_bound: an upper bound on time difference between other_time
            and self_time.
        on: a list of column expressions. Each must have == as the top level
            operation and be of the form LHS: ColumnReference == RHS: ColumnReference.
        behavior: defines a temporal behavior of a join - features like delaying entries
            or ignoring late entries. You can see examples below or read more in the
            `temporal behavior of interval join tutorial </developers/user-guide/temporal-data/temporal_behavior>`_ .
        how: decides whether to run `interval_join_inner`, `interval_join_left`, `interval_join_right`
            or `interval_join_outer`. Default is INNER.
        left_instance/right_instance: optional arguments describing partitioning of the data into separate instances

    Returns:
        IntervalJoinResult: a result of the interval join. A method `.select()`
        can be called on it to extract relevant columns from the result of a join.

    Examples:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 3
    ...     2 | 4
    ...     3 | 5
    ...     4 | 11
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 0
    ...     2 | 1
    ...     3 | 4
    ...     4 | 7
    ...     '''
    ... )
    >>> t3 = t1.interval_join(t2, t1.t, t2.t, pw.temporal.interval(-2, 1)).select(
    ...     left_t=t1.t, right_t=t2.t
    ... )
    >>> pw.debug.compute_and_print(t3, include_id=False)
    left_t | right_t
    3      | 1
    3      | 4
    4      | 4
    5      | 4
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | a | t
    ...     1 | 1 | 3
    ...     2 | 1 | 4
    ...     3 | 1 | 5
    ...     4 | 1 | 11
    ...     5 | 2 | 2
    ...     6 | 2 | 3
    ...     7 | 3 | 4
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | b | t
    ...     1 | 1 | 0
    ...     2 | 1 | 1
    ...     3 | 1 | 4
    ...     4 | 1 | 7
    ...     5 | 2 | 0
    ...     6 | 2 | 2
    ...     7 | 4 | 2
    ...     '''
    ... )
    >>> t3 = t1.interval_join(
    ...     t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b, how=pw.JoinMode.INNER
    ... ).select(t1.a, left_t=t1.t, right_t=t2.t)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    a | left_t | right_t
    1 | 3      | 1
    1 | 3      | 4
    1 | 4      | 4
    1 | 5      | 4
    2 | 2      | 0
    2 | 2      | 2
    2 | 3      | 2

    Setting `behavior` allows to control temporal behavior of an interval join. Then, each side of
    the interval join keeps track of the maximal already seen time (`self_time` and `other_time`).
    The arguments of `behavior` mean in the context of an interval join what follows:

    - **delay** - buffers results until the maximal already seen time is greater than \
        or equal to their time plus `delay`.
    - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \
        it is also used to garbage collect records that have times lower or equal to the above threshold. \
        When `cutoff` is not set, interval join will remember all records from both sides.
    - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \
        keeps only results that are newer than the maximal seen time minus `cutoff`.

    Example without and with forgetting:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...     value | instance | event_time | __time__
    ...       1   |    1     |     0      |     2
    ...       2   |    2     |     2      |     4
    ...       3   |    1     |     4      |     4
    ...       4   |    2     |     8      |     8
    ...       5   |    1     |     0      |    10
    ...       6   |    1     |     4      |    10
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...     value | instance | event_time | __time__
    ...      42   |    1     |     2      |     2
    ...       8   |    2     |    10      |    14
    ...      10   |    2     |     4      |    30
    ...     '''
    ... )
    >>> result_without_cutoff = t1.interval_join(
    ...     t2,
    ...     t1.event_time,
    ...     t2.event_time,
    ...     pw.temporal.interval(-2, 2),
    ...     t1.instance == t2.instance,
    ... ).select(
    ...     left_value=t1.value,
    ...     right_value=t2.value,
    ...     instance=t1.instance,
    ...     left_time=t1.event_time,
    ...     right_time=t2.event_time,
    ... )
    >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False)
    left_value | right_value | instance | left_time | right_time | __time__ | __diff__
    1          | 42          | 1        | 0         | 2          | 2        | 1
    3          | 42          | 1        | 4         | 2          | 4        | 1
    5          | 42          | 1        | 0         | 2          | 10       | 1
    6          | 42          | 1        | 4         | 2          | 10       | 1
    4          | 8           | 2        | 8         | 10         | 14       | 1
    2          | 10          | 2        | 2         | 4          | 30       | 1
    >>> result_with_cutoff = t1.interval_join(
    ...     t2,
    ...     t1.event_time,
    ...     t2.event_time,
    ...     pw.temporal.interval(-2, 2),
    ...     t1.instance == t2.instance,
    ...     behavior=pw.temporal.common_behavior(cutoff=6),
    ... ).select(
    ...     left_value=t1.value,
    ...     right_value=t2.value,
    ...     instance=t1.instance,
    ...     left_time=t1.event_time,
    ...     right_time=t2.event_time,
    ... )
    >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False)
    left_value | right_value | instance | left_time | right_time | __time__ | __diff__
    1          | 42          | 1        | 0         | 2          | 2        | 1
    3          | 42          | 1        | 4         | 2          | 4        | 1
    6          | 42          | 1        | 4         | 2          | 10       | 1
    4          | 8           | 2        | 8         | 10         | 14       | 1

    The record with ``value=5`` from table ``t1`` was not joined because its ``event_time``
    was less than the maximal already seen time minus ``cutoff`` (``0 <= 8-6``).
    The record with ``value=10`` from table ``t2`` was not joined because its ``event_time``
    was equal to the maximal already seen time minus ``cutoff`` (``4 <= 10-6``).
    """
    # All optional knobs travel as keywords; IntervalJoinResult._interval_join
    # validates the arguments and dispatches to the proper implementation.
    join_kwargs = {
        "behavior": behavior,
        "mode": how,
        "left_instance": left_instance,
        "right_instance": right_instance,
    }
    return IntervalJoinResult._interval_join(
        self,
        other,
        self_time,
        other_time,
        interval,
        *on,
        **join_kwargs,
    )
6 | 2 | 3 ... 7 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 0 ... 2 | 1 | 1 ... 3 | 1 | 4 ... 4 | 1 | 7 ... 5 | 2 | 0 ... 6 | 2 | 2 ... 7 | 4 | 2 ... ''' ... ) >>> t3 = t1.interval_join( ... t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b, how=pw.JoinMode.INNER ... ).select(t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) a | left_t | right_t 1 | 3 | 1 1 | 3 | 4 1 | 4 | 4 1 | 5 | 4 2 | 2 | 0 2 | 2 | 2 2 | 3 | 2 Setting `behavior` allows to control temporal behavior of an interval join. Then, each side of the interval join keeps track of the maximal already seen time (`self_time` and `other_time`). The arguments of `behavior` mean in the context of an interval join what follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, interval join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Example without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 1 | 1 | 0 | 2 ... 2 | 2 | 2 | 4 ... 3 | 1 | 4 | 4 ... 4 | 2 | 8 | 8 ... 5 | 1 | 0 | 10 ... 6 | 1 | 4 | 10 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 42 | 1 | 2 | 2 ... 8 | 2 | 10 | 14 ... 10 | 2 | 4 | 30 ... ''' ... ) >>> result_without_cutoff = t1.interval_join( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... ).select( ... 
left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 5 | 42 | 1 | 0 | 2 | 10 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | 8 | 2 | 8 | 10 | 14 | 1 2 | 10 | 2 | 2 | 4 | 30 | 1 >>> result_with_cutoff = t1.interval_join( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... behavior=pw.temporal.common_behavior(cutoff=6), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | 8 | 2 | 8 | 10 | 14 | 1 The record with ``value=5`` from table ``t1`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``0 <= 8-6``). The record with ``value=10`` from table ``t2`` was not joined because its ``event_time`` was equal to the maximal already seen time minus ``cutoff`` (``4 <= 10-6``). |
from __future__ import annotations
import datetime
from abc import abstractmethod
from dataclasses import dataclass
from typing import Any, Generic, TypeVar, overload
import pathway.internals as pw
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableReplacementWithNoneDesugaring,
TableSubstitutionDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import eval_type
from .temporal_behavior import CommonBehavior, apply_temporal_behavior
from .utils import IntervalType, TimeEventType, check_joint_types, get_default_origin
class Interval(Generic[T]):
    """Pair of bounds describing the allowed ``other_time - self_time`` range.

    NOTE(review): in this chunk the class carries only annotations; a
    ``@dataclass`` decorator (``dataclass`` is imported at the top of the
    file) appears to have been stripped by extraction — confirm against the
    original module, since without it the class has no generated ``__init__``.
    """

    # Lower end of the accepted time difference (compared against
    # upper_bound in _interval_join's validation).
    lower_bound: T
    # Upper end of the accepted time difference.
    upper_bound: T
class IntervalJoinResult(DesugaringContext):
    """
    Result of an interval join between tables.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 3
    ...     2 | 4
    ...     3 | 5
    ...     4 | 11
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 0
    ...     2 | 1
    ...     3 | 4
    ...     4 | 7
    ...     '''
    ... )
    >>> join_result = t1.interval_join_inner(t2, t1.t, t2.t, pw.temporal.interval(-2, 1))
    >>> isinstance(join_result, pw.temporal.IntervalJoinResult)
    True
    >>> pw.debug.compute_and_print(
    ...     join_result.select(left_t=t1.t, right_t=t2.t), include_id=False
    ... )
    left_t | right_t
    3      | 1
    3      | 4
    4      | 4
    5      | 4
    """

    # Maps original user tables to their internal substitutes, consumed by
    # the desugaring machinery in _desugaring().
    _table_substitution: dict[pw.TableLike, pw.Table]
    # Whether retraction entries produced by forgetting should be removed
    # from the final result (see _should_filter_out_results_of_forgetting).
    _filter_out_results_of_forgetting: bool

    def __init__(
        self,
        left: pw.Table,
        right: pw.Table,
        table_substitution: dict[pw.TableLike, pw.Table],
        _filter_out_results_of_forgetting: bool,
    ):
        # Lets pw.left / pw.right / pw.this resolve inside .select() et al.
        self._substitution = {
            pw.left: left,
            pw.right: right,
            pw.this: pw.this,  # type: ignore[dict-item]
        }
        self._table_substitution = table_substitution
        self._filter_out_results_of_forgetting = _filter_out_results_of_forgetting

    # NOTE(review): no `self`/`cls` parameter — a @staticmethod decorator
    # appears to have been stripped in this chunk; confirm with the original.
    def _should_filter_out_results_of_forgetting(
        behavior: CommonBehavior | None,
    ) -> bool:
        """Return True when a cutoff is set but the user asked to keep all
        results, so forgetting-induced retractions must be filtered out."""
        return (
            behavior is not None
            and behavior.cutoff is not None
            and behavior.keep_results
        )

    # NOTE(review): also missing `self`/`cls` — likely @staticmethod in the
    # original; the subclass dispatch below relies on the subclasses
    # overriding _interval_join.
    def _interval_join(
        left: pw.Table,
        right: pw.Table,
        left_time_expression: pw.ColumnExpression,
        right_time_expression: pw.ColumnExpression,
        interval: Interval[int] | Interval[float] | Interval[datetime.timedelta],
        *on: pw.ColumnExpression,
        behavior: CommonBehavior | None = None,
        mode: pw.JoinMode,
        left_instance: pw.ColumnReference | None = None,
        right_instance: pw.ColumnReference | None = None,
    ) -> IntervalJoinResult:
        """Creates an IntervalJoinResult. To perform an interval join it uses two
        tumbling windows of size `lower_bound` + `upper_bound` and then filters the result.
        """
        # Both time expressions and both bounds must have compatible temporal types.
        check_joint_types(
            {
                "self_time_expression": (left_time_expression, TimeEventType),
                "other_time_expression": (right_time_expression, TimeEventType),
                "lower_bound": (interval.lower_bound, IntervalType),
                "upper_bound": (interval.upper_bound, IntervalType),
            }
        )
        if left == right:
            raise ValueError(
                "Cannot join table with itself. Use <table>.copy() as one of the arguments of the join."
            )
        if interval.lower_bound > interval.upper_bound:  # type: ignore[operator]
            raise ValueError(
                "lower_bound has to be less than or equal to the upper_bound in the Table.interval_join()."
            )
        # A degenerate (zero-width) interval reduces to an equality join on
        # time and gets a dedicated, cheaper implementation.
        if interval.lower_bound == interval.upper_bound:
            cls: type[IntervalJoinResult] = _ZeroDifferenceIntervalJoinResult
        else:
            cls = _NonZeroDifferenceIntervalJoinResult
        return cls._interval_join(
            left,
            right,
            left_time_expression,
            right_time_expression,
            interval,
            *on,
            behavior=behavior,
            mode=mode,
            left_instance=left_instance,
            right_instance=right_instance,
        )

    # NOTE(review): likely decorated (e.g. @property/@abstractmethod) in the
    # original — decorators appear stripped in this chunk.
    def _desugaring(self) -> TableSubstitutionDesugaring:
        # Rewrites user-facing table references into the internal join tables.
        return TableSubstitutionDesugaring(self._table_substitution)

    # Abstract: concrete subclasses implement the actual projection.
    def select(self, *args: pw.ColumnReference, **kwargs: Any) -> pw.Table:
        """
        Computes a result of an interval join.

        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.

        Returns:
            Table: Created table.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown(
        ...     '''
        ...       | a | t
        ...     1 | 1 | 3
        ...     2 | 1 | 4
        ...     3 | 1 | 5
        ...     4 | 1 | 11
        ...     5 | 2 | 2
        ...     6 | 2 | 3
        ...     7 | 3 | 4
        ...     '''
        ... )
        >>> t2 = pw.debug.table_from_markdown(
        ...     '''
        ...       | b | t
        ...     1 | 1 | 0
        ...     2 | 1 | 1
        ...     3 | 1 | 4
        ...     4 | 1 | 7
        ...     5 | 2 | 0
        ...     6 | 2 | 2
        ...     7 | 4 | 2
        ...     '''
        ... )
        >>> t3 = t1.interval_join_inner(
        ...     t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b
        ... ).select(t1.a, left_t=t1.t, right_t=t2.t)
        >>> pw.debug.compute_and_print(t3, include_id=False)
        a | left_t | right_t
        1 | 3      | 1
        1 | 3      | 4
        1 | 4      | 4
        1 | 5      | 4
        2 | 2      | 0
        2 | 2      | 2
        2 | 3      | 2
        """
        ...
class CommonBehavior(Behavior):
    """Defines temporal behavior of windows and temporal joins.

    NOTE(review): attributes only in this chunk — a ``@dataclass`` decorator
    appears stripped; confirm against the original module.
    """

    # Buffer results until the maximal seen time reaches entry time + delay.
    delay: IntervalType | None
    # Ignore / garbage-collect entries older than max seen time minus cutoff;
    # None means remember everything.
    cutoff: IntervalType | None
    # If False, results older than the cutoff threshold are dropped from output.
    keep_results: bool
class Table(
    Joinable,
    OperatorInput,
    Generic[TSchema],
):
    """Collection of named columns over identical universes.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> isinstance(t1, pw.Table)
    True
    """

    # These stdlib extension methods are presumably attached to Table at
    # runtime elsewhere; the guard only makes them visible to type-checkers.
    if TYPE_CHECKING:
        from pathway.stdlib.ordered import diff  # type: ignore[misc]
        from pathway.stdlib.statistical import interpolate  # type: ignore[misc]
        from pathway.stdlib.temporal import (  # type: ignore[misc]
            asof_join,
            asof_join_left,
            asof_join_outer,
            asof_join_right,
            asof_now_join,
            asof_now_join_inner,
            asof_now_join_left,
            interval_join,
            interval_join_inner,
            interval_join_left,
            interval_join_outer,
            interval_join_right,
            window_join,
            window_join_inner,
            window_join_left,
            window_join_outer,
            window_join_right,
            windowby,
        )
        from pathway.stdlib.viz import (  # type: ignore[misc]
            _repr_mimebundle_,
            plot,
            show,
        )

    # Mapping from column name to its internal column object.
    _columns: dict[str, clmn.Column]
    # Schema class describing column names and types.
    _schema: type[Schema]
    # Pseudocolumn holding row ids; exposed through the `id` accessor.
    _id_column: clmn.IdColumn
    # Evaluation context for row-wise expressions over this table.
    _rowwise_context: clmn.RowwiseContext
    _source: SetOnceProperty[OutputHandle] = SetOnceProperty()
    """Lateinit by operator."""
def __init__(
    self,
    _columns: Mapping[str, clmn.Column],
    _context: clmn.Context,
    _schema: type[Schema] | None = None,
):
    """Build a table over ``_context`` from named columns.

    When ``_schema`` is not given it is derived from the columns.
    """
    if _schema is None:
        _schema = schema_from_columns(_columns)
    super().__init__(_context)
    # Private copy: later mutation of the caller's mapping cannot leak in.
    self._columns = dict(_columns)
    self._schema = _schema
    self._id_column = _context.id_column
    # Allows `pw.this` to desugar to this table.
    self._substitution = {thisclass.this: self}
    self._rowwise_context = clmn.RowwiseContext(self._id_column)
# NOTE(review): the doctest accesses `t1.id` without parentheses, so this is
# almost certainly an @property in the original — the decorator appears
# stripped in this chunk.
def id(self) -> expr.ColumnReference:
    """Get reference to pseudocolumn containing id's of a table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1.select(ids = t1.id)
    >>> t2.typehints()['ids']
    <class 'pathway.engine.Pointer'>
    >>> pw.debug.compute_and_print(t2.select(test=t2.id == t2.ids), include_id=False)
    test
    True
    True
    True
    True
    """
    return expr.ColumnReference(_table=self, _column=self._id_column, _name="id")
def column_names(self):
    """Return a view of this table's column names."""
    # Same dict-keys view that keys() yields.
    return self._columns.keys()
def keys(self):
    """Return a view of the column names (a dict-keys view)."""
    return self._columns.keys()
def _get_column(self, name: str) -> clmn.Column:
    """Look up the internal column object for ``name``; raises KeyError if absent."""
    return self._columns[name]
def _ipython_key_completions_(self):
    # IPython hook: offer column names when completing table["<tab>"].
    return [*self.column_names()]
def __dir__(self):
    # Expose columns as attribute candidates alongside regular members.
    return [*super().__dir__(), *self.column_names()]
# NOTE(review): likely decorated (e.g. @property) in the original; decorators
# appear stripped in this chunk.
def _C(self) -> TSchema:
    # Typed façade over the `C` accessor — a cast only, no runtime effect.
    return self.C  # type: ignore
# NOTE(review): the doctest accesses `t1.schema` without parentheses, so this
# is almost certainly an @property in the original — decorator stripped here.
def schema(self) -> type[Schema]:
    """Get schema of the table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t1.schema
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
    >>> t1.typehints()['age']
    <class 'int'>
    """
    return self._schema
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference:
    """Resolve ``name`` to a column reference, raising ``exception_type`` if unknown."""
    name = self._column_deprecation_rename(name)
    # "id" is a pseudocolumn and is handled separately.
    if name == "id":
        return self.id
    if name in self.keys():
        return expr.ColumnReference(
            _table=self, _column=self._get_column(name), _name=name
        )
    raise exception_type(f"Table has no column with name {name}.")
# NOTE(review): these two bodiless signatures are typing overloads for
# __getitem__; the @overload decorators appear stripped in this chunk.
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...
def __getitem__(self, args: list[str | expr.ColumnReference]) -> Table: ...
def __getitem__(
    self, args: str | expr.ColumnReference | list[str | expr.ColumnReference]
) -> expr.ColumnReference | Table:
    """Get columns by name.

    Warning:
        - Does not allow repetitions of columns.
        - Fails if tries to access nonexistent column.

    Args:
        names: a singe column name or list of columns names to be extracted from `self`.

    Returns:
        Table with specified columns, or column expression (if single argument given).
        Instead of column names, column references are valid here.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1[["age", "pet"]]
    >>> t2 = t1[["age", t1.pet]]
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | pet
    7   | dog
    8   | cat
    9   | dog
    10  | dog
    """
    # Single name given as a plain string.
    if isinstance(args, str):
        return self._get_colref_by_name(args, KeyError)
    # Single name given as a column reference — it must point at this table
    # (or be a pw.this-style placeholder).
    if isinstance(args, expr.ColumnReference):
        points_elsewhere = (args.table is not self) and not isinstance(
            args.table, thisclass.ThisMetaclass
        )
        if points_elsewhere:
            raise ValueError(
                "Table.__getitem__ argument has to be a ColumnReference to the same table or pw.this, or a string "
                "(or a list of those)."
            )
        return self._get_colref_by_name(args.name, KeyError)
    # A list of names/references: build a sub-table via select.
    return self.select(*[self[name] for name in args])
# NOTE(review): no `self` parameter and the doctest calls
# `pw.Table.from_columns(...)` — a @staticmethod decorator appears stripped.
def from_columns(
    *args: expr.ColumnReference, **kwargs: expr.ColumnReference
) -> Table:
    """Build a table from columns.

    All columns must have the same ids. Columns' names must be pairwise distinct.

    Args:
        args: List of columns.
        kwargs: Columns with their new names.

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> t2 = pw.Table.empty(foo=float, bar=float).with_universe_of(t1)
    >>> t3 = pw.Table.from_columns(t1.pet, qux=t2.foo)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | qux
    """
    all_args = cast(
        dict[str, expr.ColumnReference], combine_args_kwargs(args, kwargs)
    )
    if not all_args:
        raise ValueError("Table.from_columns() cannot have empty arguments list")
    else:
        # The first column's table hosts the select; all other columns must
        # live on a provably equal universe.
        arg = next(iter(all_args.values()))
        table: Table = arg.table
        for arg in all_args.values():
            if not G.universe_solver.query_are_equal(
                table._universe, arg.table._universe
            ):
                raise ValueError(
                    "Universes of all arguments of Table.from_columns() have to be equal.\n"
                    + "Consider using Table.promise_universes_are_equal() to assert it.\n"
                    + "(However, untrue assertion might result in runtime errors.)"
                )
        return table.select(*args, **kwargs)
def concat_reindex(self, *tables: Table) -> Table:
    """Concatenate contents of several tables.

    This is similar to PySpark union. All tables must have the same schema. Each row is reindexed.

    Args:
        tables: List of tables to concatenate. All tables must have the same schema.

    Returns:
        Table: The concatenated table. It will have new, synthetic ids.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    | pet
    ...  1 | Dog
    ...  7 | Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | pet
    ...  1 | Manul
    ...  8 | Octopus
    ... ''')
    >>> t3 = t1.concat_reindex(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet
    Cat
    Dog
    Manul
    Octopus
    """
    # Re-key every input with its position so ids cannot collide, then
    # promise disjointness and concatenate.
    relabeled = []
    for position, source in enumerate((self, *tables)):
        relabeled.append(source.with_id_from(source.id, position))
    universes.promise_are_pairwise_disjoint(*relabeled)
    return Table.concat(*relabeled)
def empty(**kwargs: dt.DType) -> Table:
    """Creates an empty table with a schema specified by kwargs.

    Args:
        kwargs: Dict whose keys are column names and values are column types.

    Returns:
        Table: Created empty table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> pw.debug.compute_and_print(t1, include_id=False)
    age | pet
    """
    from pathway.internals import table_io

    # Materialize a table for the requested schema and record that its
    # universe is provably empty.
    empty_table = table_io.empty_from_schema(schema_from_types(None, **kwargs))
    G.universe_solver.register_as_empty(empty_table._universe)
    return empty_table
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Build a new table with columns specified by kwargs.

    Output columns' names are keys(kwargs). values(kwargs) can be raw values, boxed
    values, columns. Assigning to id reindexes the table.

    Args:
        args: Column references.
        kwargs: Column expressions with their new assigned names.

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... pet
    ... Dog
    ... Cat
    ... ''')
    >>> t2 = t1.select(animal=t1.pet, desc="fluffy")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    animal | desc
    Cat    | fluffy
    Dog    | fluffy
    """
    # Validate and evaluate every expression, pairing it with its output name.
    combined = combine_args_kwargs(args, kwargs)
    evaluated = []
    for fresh_name, expression in combined.items():
        self._validate_expression(expression)
        evaluated.append((fresh_name, self._eval(expression)))
    return self._with_same_universe(*evaluated)
def __add__(self, other: Table) -> Table:
    """Build a union of `self` with `other`.

    Semantics: Returns a table C, such that
        - C.columns == self.columns + other.columns
        - C.id == self.id == other.id

    Args:
        other: The other table. `self.id` must be equal `other.id` and
            `self.columns` and `other.columns` must be disjoint (or overlapping names
            are THE SAME COLUMN)

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    pet
    ... 1  Dog
    ... 7  Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    age
    ... 1   10
    ... 7    3
    ... ''')
    >>> t3 = t1 + t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | age
    Cat | 3
    Dog | 10
    """
    # Fast path: both tables provably share a universe — just merge columns.
    if G.universe_solver.query_are_equal(self._universe, other._universe):
        return self.select(*self, *other)
    raise ValueError(
        "Universes of all arguments of Table.__add__() have to be equal.\n"
        "Consider using Table.promise_universes_are_equal() to assert it.\n"
        "(However, untrue assertion might result in runtime errors.)"
    )
# NOTE(review): the doctest uses `t1.slice.without(...)` without parentheses,
# so this is almost certainly an @property — decorator stripped in this chunk.
def slice(self) -> TableSlice:
    """Creates a collection of references to self columns.
    Supports basic column manipulation methods.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t1.slice.without("age")
    TableSlice({'owner': <table1>.owner, 'pet': <table1>.pet})
    """
    return TableSlice(dict(**self), self)
def filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    """Filter a table according to `filter_expression` condition.

    Args:
        filter_expression: `ColumnExpression` that specifies the filtering condition.

    Returns:
        Table: Result has the same schema as `self` and its ids are subset of `self.id`.

    Example:

    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ...     1         3
    ...     7         0
    ... ''')
    >>> filtered = vertices.filter(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(filtered, include_id=False)
    label | outdegree
    7     | 0
    """
    condition_type = self.eval_type(filter_expression)
    if condition_type != dt.BOOL:
        raise TypeError(
            f"Filter argument of Table.filter() has to be bool, found {condition_type}."
        )
    filtered = self._filter(filter_expression)
    # If the predicate is an `is not None` test on one of our own columns,
    # that column can be narrowed to its non-optional type in the result.
    narrowing_col = expr.get_column_filtered_by_is_none(filter_expression)
    if narrowing_col is not None and narrowing_col.table == self:
        col_name = narrowing_col.name
        col_dtype = self._columns[col_name].dtype
        filtered = filtered.update_types(**{col_name: dt.unoptionalize(col_dtype)})
    return filtered
def split(
    self, split_expression: expr.ColumnExpression
) -> tuple[Table[TSchema], Table[TSchema]]:
    """Split a table according to `split_expression` condition.

    Args:
        split_expression: `ColumnExpression` that specifies the split condition.

    Returns:
        positive_table, negative_table: tuple of tables,
        with the same schemas as `self` and with ids that are subsets of `self.id`,
        and provably disjoint.

    Example:

    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ...     1         3
    ...     7         0
    ... ''')
    >>> positive, negative = vertices.split(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(positive, include_id=False)
    label | outdegree
    7     | 0
    >>> pw.debug.compute_and_print(negative, include_id=False)
    label | outdegree
    1     | 3
    """
    # Filter on the predicate and its negation, then register the universe
    # facts: both halves are disjoint and together cover self.
    on_true = self.filter(split_expression)
    on_false = self.filter(~split_expression)
    universes.promise_are_pairwise_disjoint(on_true, on_false)
    # TODO: add API method for this
    universes.promise_are_equal(self, Table.concat(on_true, on_false))
    return on_true, on_false
def _filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    # Evaluate the predicate into a column over this table's universe and
    # derive the restricted table from a FilterContext built on it.
    self._validate_expression(filter_expression)
    mask = self._eval(filter_expression)
    assert self._universe == mask.universe
    return self._table_with_context(clmn.FilterContext(mask, self._id_column))
def _gradual_broadcast(
    self,
    threshold_table,
    lower_column,
    value_column,
    upper_column,
) -> Table:
    """Return self extended with the `apx_value` column computed by the
    gradual-broadcast helper below."""
    # `self + ...` merges the helper's column into this table (same universe).
    return self + self.__gradual_broadcast(
        threshold_table, lower_column, value_column, upper_column
    )
def __gradual_broadcast(
    self,
    threshold_table,
    lower_column,
    value_column,
    upper_column,
):
    """Build a one-column table (`apx_value`) from a GradualBroadcastContext
    over this table's ids; the three columns are evaluated on
    ``threshold_table``."""
    context = clmn.GradualBroadcastContext(
        self._id_column,
        threshold_table._eval(lower_column),
        threshold_table._eval(value_column),
        threshold_table._eval(upper_column),
    )
    return Table(_columns={"apx_value": context.apx_value_column}, _context=context)
def _forget(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
    mark_forgetting_records: bool,
) -> Table:
    # Wrap this table in a ForgetContext driven by the evaluated threshold
    # and time columns.
    return self._table_with_context(
        clmn.ForgetContext(
            self._id_column,
            self._eval(threshold_column),
            self._eval(time_column),
            mark_forgetting_records,
        )
    )
def _forget_immediately(
    self,
) -> Table:
    # Wrap this table in a forget-immediately context; the retraction
    # semantics live in clmn.ForgetImmediatelyContext.
    return self._table_with_context(clmn.ForgetImmediatelyContext(self._id_column))
def _filter_out_results_of_forgetting(
    self,
) -> Table:
    # Forgetting entries are dropped here, so the output universe is a
    # superset of the input universe: at any moment the set of keys with a
    # +1 diff may be larger than in the input table.
    return self._table_with_context(clmn.FilterOutForgettingContext(self._id_column))
def _freeze(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    # Wrap this table in a FreezeContext driven by the evaluated threshold
    # and time columns.
    return self._table_with_context(
        clmn.FreezeContext(
            self._id_column,
            self._eval(threshold_column),
            self._eval(time_column),
        )
    )
def _buffer(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    # Wrap this table in a BufferContext driven by the evaluated threshold
    # and time columns.
    return self._table_with_context(
        clmn.BufferContext(
            self._id_column,
            self._eval(threshold_column),
            self._eval(time_column),
        )
    )
def difference(self, other: Table) -> Table[TSchema]:
    r"""Restrict self universe to keys not appearing in the other table.

    Args:
        other: table with ids to remove from self.

    Returns:
        Table: table with restricted universe, with the same set of columns

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.difference(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    10  | Alice | 1
    """
    # A DifferenceContext keeps exactly the ids of self absent from other.
    return self._table_with_context(
        clmn.DifferenceContext(
            left=self._id_column,
            right=other._id_column,
        )
    )
def intersect(self, *tables: Table) -> Table[TSchema]:
    """Restrict self universe to keys appearing in all of the tables.

    Args:
        tables: tables keys of which are used to restrict universe.

    Returns:
        Table: table with restricted universe, with the same set of columns

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.intersect(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8   | Alice | 2
    9   | Bob   | 1
    """
    intersecting_universes = (
        self._universe,
        *tuple(table._universe for table in tables),
    )
    universe = G.universe_solver.get_intersection(*intersecting_universes)
    # Fast path: when the solver proves the intersection equals one of the
    # input universes, a plain restriction suffices; otherwise build a true
    # id-level intersection.
    if universe in intersecting_universes:
        context: clmn.Context = clmn.RestrictContext(self._id_column, universe)
    else:
        intersecting_ids = (
            self._id_column,
            *tuple(table._id_column for table in tables),
        )
        context = clmn.IntersectContext(
            intersecting_ids=intersecting_ids,
        )
    return self._table_with_context(context)
def restrict(self, other: TableLike) -> Table[TSchema]:
"""Restrict self universe to keys appearing in other.
Args:
other: table which universe is used to restrict universe of self.
Returns:
Table: table with restricted universe, with the same set of columns
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown(
... '''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... '''
... )
>>> t2 = pw.debug.table_from_markdown(
... '''
... | cost
... 2 | 100
... 3 | 200
... '''
... )
>>> t2.promise_universe_is_subset_of(t1)
<pathway.Table schema={'cost': <class 'int'>}>
>>> t3 = t1.restrict(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet
8 | Alice | 2
9 | Bob | 1
"""
if not G.universe_solver.query_is_subset(other._universe, self._universe):
raise ValueError(
"Table.restrict(): other universe has to be a subset of self universe."
+ "Consider using Table.promise_universe_is_subset_of() to assert it."
)
context = clmn.RestrictContext(self._id_column, other._universe)
columns = {
name: self._wrap_column_in_context(context, column, name)
for name, column in self._columns.items()
}
return Table(
_columns=columns,
_context=context,
)
def copy(self) -> Table[TSchema]:
"""Returns a copy of a table.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | dog
... 9 | Bob | dog
... 8 | Alice | cat
... 7 | Bob | dog
... ''')
>>> t2 = t1.copy()
>>> pw.debug.compute_and_print(t2, include_id=False)
age | owner | pet
7 | Bob | dog
8 | Alice | cat
9 | Bob | dog
10 | Alice | dog
>>> t1 is t2
False
"""
return self._copy_as(type(self))
def _copy_as(self, table_type: type[TTable], /, **kwargs) -> TTable:
columns = {
name: self._wrap_column_in_context(self._rowwise_context, column, name)
for name, column in self._columns.items()
}
return table_type(_columns=columns, _context=self._rowwise_context, **kwargs)
    def groupby(
        self,
        *args: expr.ColumnReference,
        id: expr.ColumnReference | None = None,
        sort_by: expr.ColumnReference | None = None,
        _filter_out_results_of_forgetting: bool = False,
        instance: expr.ColumnReference | None = None,
    ) -> groupbys.GroupedTable:
        """Groups table by columns from args.

        Note:
            Usually followed by `.reduce()` that aggregates the result and returns a table.

        Args:
            args: columns to group by.
            id: if provided, is the column used to set id's of the rows of the result
            sort_by: if provided, column values are used as sorting keys for particular reducers
            instance: optional argument describing partitioning of the data into separate instances

        Returns:
            GroupedTable: Groupby object.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10  | Alice | dog
        ... 9   | Bob   | dog
        ... 8   | Alice | cat
        ... 7   | Bob   | dog
        ... ''')
        >>> t2 = t1.groupby(t1.pet, t1.owner).reduce(t1.owner, t1.pet, ageagg=pw.reducers.sum(t1.age))
        >>> pw.debug.compute_and_print(t2, include_id=False)
        owner | pet | ageagg
        Alice | cat | 8
        Alice | dog | 10
        Bob   | dog | 16
        """
        # `instance` is treated as one more grouping column appended at the end.
        if instance is not None:
            args = (*args, instance)
        # With `id` given, grouping must be either by nothing (then `id` becomes
        # the sole grouping column) or by exactly that same column.
        if id is not None:
            if len(args) == 0:
                args = (id,)
            elif len(args) > 1:
                raise ValueError(
                    "Table.groupby() cannot have id argument when grouping by multiple columns."
                )
            elif args[0]._column != id._column:
                raise ValueError(
                    "Table.groupby() received id argument and is grouped by a single column,"
                    + " but the arguments are not equal.\n"
                    + "Consider using <table>.groupby(id=...), skipping the positional argument."
                )
        # Reject anything that is not a ColumnReference, with a friendlier
        # message for the common mistake of passing a bare column name string.
        for arg in args:
            if not isinstance(arg, expr.ColumnReference):
                if isinstance(arg, str):
                    raise ValueError(
                        f"Expected a ColumnReference, found a string. Did you mean <table>.{arg}"
                        + f" instead of {repr(arg)}?"
                    )
                else:
                    raise ValueError(
                        "All Table.groupby() arguments have to be a ColumnReference."
                    )
        return groupbys.GroupedTable.create(
            table=self,
            grouping_columns=args,
            set_id=id is not None,
            sort_by=sort_by,
            _filter_out_results_of_forgetting=_filter_out_results_of_forgetting,
        )
def reduce(
self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
) -> Table:
"""Reduce a table to a single row.
Equivalent to `self.groupby().reduce(*args, **kwargs)`.
Args:
args: reducer to reduce the table with
kwargs: reducer to reduce the table with. Its key is the new name of a column.
Returns:
Table: Reduced table.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | dog
... 9 | Bob | dog
... 8 | Alice | cat
... 7 | Bob | dog
... ''')
>>> t2 = t1.reduce(ageagg=pw.reducers.argmin(t1.age))
>>> pw.debug.compute_and_print(t2, include_id=False) # doctest: +ELLIPSIS
ageagg
^...
>>> t3 = t2.select(t1.ix(t2.ageagg).age, t1.ix(t2.ageagg).pet)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | pet
7 | dog
"""
return self.groupby().reduce(*args, **kwargs)
    def deduplicate(
        self,
        *,
        value: expr.ColumnExpression,
        instance: expr.ColumnExpression | None = None,
        acceptor: Callable[[T, T], bool],
        persistent_id: str | None = None,
    ) -> Table:
        """Deduplicates rows in `self` on `value` column using acceptor function.

        It keeps rows which where accepted by the acceptor function.
        Acceptor operates on two arguments - current value and the previously accepted value.

        Args:
            value: column expression used for deduplication.
            instance: Grouping column. For rows with different
                values in this column, deduplication will be performed separately.
                Defaults to None.
            acceptor: callback telling whether two values are different.
            persistent_id: (unstable) An identifier, under which the state of the table
                will be persisted or ``None``, if there is no need to persist the state of this table.
                When a program restarts, it restores the state for all input tables according to what
                was saved for their ``persistent_id``. This way it's possible to configure the start of
                computations from the moment they were terminated last time.

        Returns:
            Table: the result of deduplication.

        Example:

        >>> import pathway as pw
        >>> table = pw.debug.table_from_markdown(
        ...     '''
        ...     val | __time__
        ...      1  |     2
        ...      2  |     4
        ...      3  |     6
        ...      4  |     8
        ... '''
        ... )
        >>>
        >>> def acceptor(new_value, old_value) -> bool:
        ...     return new_value >= old_value + 2
        ...
        >>>
        >>> result = table.deduplicate(value=pw.this.val, acceptor=acceptor)
        >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
        val | __time__ | __diff__
        1   | 2        | 1
        1   | 6        | -1
        3   | 6        | 1
        >>>
        >>> table = pw.debug.table_from_markdown(
        ...     '''
        ...     val | instance | __time__
        ...      1  |    1     |     2
        ...      2  |    1     |     4
        ...      3  |    2     |     6
        ...      4  |    1     |     8
        ...      4  |    2     |     8
        ...      5  |    1     |    10
        ... '''
        ... )
        >>>
        >>> def acceptor(new_value, old_value) -> bool:
        ...     return new_value >= old_value + 2
        ...
        >>>
        >>> result = table.deduplicate(
        ...     value=pw.this.val, instance=pw.this.instance, acceptor=acceptor
        ... )
        >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
        val | instance | __time__ | __diff__
        1   | 1        | 2        | 1
        3   | 2        | 6        | 1
        1   | 1        | 8        | -1
        4   | 1        | 8        | 1
        """
        # No instance column means a single global deduplication group; a
        # constant-None expression puts every row in the same group.
        if instance is None:
            instance = expr.ColumnConstExpression(None)
        # Both expressions must be evaluable over this table's universe.
        self._validate_expression(value)
        self._validate_expression(instance)
        value_col = self._eval(value)
        instance_col = self._eval(instance)
        context = clmn.DeduplicateContext(
            value_col,
            (instance_col,),
            acceptor,
            self._id_column,
            persistent_id,
        )
        return self._table_with_context(context)
    def ix(
        self, expression: expr.ColumnExpression, *, optional: bool = False, context=None
    ) -> Table:
        """Reindexes the table using expression values as keys. Uses keys from context, or tries to infer
        proper context from the expression.
        If optional is True, then None in expression values result in None values in the result columns.
        Missing values in table keys result in RuntimeError.

        Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
        (latter results in returning a delayed operation, and should be only used when using `ix` inside
        join().select() or groupby().reduce() sequence).

        Returns:
            Reindexed table with the same set of columns.

        Example:

        >>> import pathway as pw
        >>> t_animals = pw.debug.table_from_markdown('''
        ...   | epithet    | genus
        ... 1 | upupa      | epops
        ... 2 | acherontia | atropos
        ... 3 | bubo       | scandiacus
        ... 4 | dynastes   | hercules
        ... ''')
        >>> t_birds = pw.debug.table_from_markdown('''
        ...   | desc
        ... 2 | hoopoe
        ... 4 | owl
        ... ''')
        >>> ret = t_birds.select(t_birds.desc, latin=t_animals.ix(t_birds.id).genus)
        >>> pw.debug.compute_and_print(ret, include_id=False)
        desc   | latin
        hoopoe | atropos
        owl    | hercules
        """
        # Infer the context from the tables referenced by the expression when
        # the caller did not provide one explicitly.
        if context is None:
            all_tables = collect_tables(expression)
            if len(all_tables) == 0:
                # Constant expression — defer resolution via pw.this.
                context = thisclass.this
            elif all(tab == all_tables[0] for tab in all_tables):
                # All references point at one table — use it directly.
                context = all_tables[0]
        if context is None:
            # Mixed tables: pick the first one, but require all universes equal.
            for tab in all_tables:
                if not isinstance(tab, Table):
                    raise ValueError("Table expected here.")
            if len(all_tables) == 0:
                raise ValueError("Const value provided.")
            context = all_tables[0]
            for tab in all_tables:
                assert context._universe.is_equal_to(tab._universe)
        if isinstance(context, groupbys.GroupedJoinable):
            context = thisclass.this
        if isinstance(context, thisclass.ThisMetaclass):
            # Delayed operation: re-invoked later with the concrete table.
            return context._delayed_op(
                lambda table, expression: self.ix(
                    expression=expression, optional=optional, context=table
                ),
                expression=expression,
                qualname=f"{self}.ix(...)",
                name="ix",
            )
        restrict_universe = RestrictUniverseDesugaring(context)
        expression = restrict_universe.eval_expression(expression)
        key_col = context.select(tmp=expression).tmp
        key_dtype = self.eval_type(key_col)
        # Keys must be Pointers; Optional[Pointer] is allowed only with optional=True.
        if (
            optional and not dt.dtype_issubclass(key_dtype, dt.Optional(dt.POINTER))
        ) or (not optional and not isinstance(key_dtype, dt.Pointer)):
            raise TypeError(
                f"Pathway supports indexing with Pointer type only. The type used was {key_dtype}."
            )
        if optional and isinstance(key_dtype, dt.Optional):
            # Missing keys surface as None, so every result column becomes Optional.
            self_ = self.update_types(
                **{name: dt.Optional(self.typehints()[name]) for name in self.keys()}
            )
        else:
            self_ = self
        return self_._ix(key_col, optional)
def _ix(
self,
key_expression: expr.ColumnReference,
optional: bool,
) -> Table:
key_column = key_expression._column
context = clmn.IxContext(key_column, self._id_column, optional)
return self._table_with_context(context)
def __lshift__(self, other: Table) -> Table:
"""Alias to update_cells method.
Updates cells of `self`, breaking ties in favor of the values in `other`.
Semantics:
- result.columns == self.columns
- result.id == self.id
- conflicts are resolved preferring other's values
Requires:
- other.columns ⊆ self.columns
- other.id ⊆ self.id
Args:
other: the other table.
Returns:
Table: `self` updated with cells form `other`.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 30
... ''')
>>> pw.universes.promise_is_subset_of(t2, t1)
>>> t3 = t1 << t2
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet
8 | Alice | 2
9 | Bob | 1
10 | Alice | 30
"""
return self.update_cells(other, _stacklevel=2)
def concat(self, *others: Table[TSchema]) -> Table[TSchema]:
"""Concats `self` with every `other` ∊ `others`.
Semantics:
- result.columns == self.columns == other.columns
- result.id == self.id ∪ other.id
if self.id and other.id collide, throws an exception.
Requires:
- other.columns == self.columns
- self.id disjoint with other.id
Args:
other: the other table.
Returns:
Table: The concatenated table. Id's of rows from original tables are preserved.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 11 | 11 | Alice | 30
... 12 | 12 | Tom | 40
... ''')
>>> pw.universes.promise_are_pairwise_disjoint(t1, t2)
>>> t3 = t1.concat(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet
8 | Alice | 2
9 | Bob | 1
10 | Alice | 1
11 | Alice | 30
12 | Tom | 40
"""
for other in others:
if other.keys() != self.keys():
raise ValueError(
"columns do not match in the argument of Table.concat()"
)
schema = {
key: functools.reduce(
dt.types_lca,
[other.schema._dtypes()[key] for other in others],
self.schema._dtypes()[key],
)
for key in self.keys()
}
return Table._concat(
self.cast_to_types(**schema),
*[other.cast_to_types(**schema) for other in others],
)
def _concat(self, *others: Table[TSchema]) -> Table[TSchema]:
union_ids = (self._id_column, *(other._id_column for other in others))
if not G.universe_solver.query_are_disjoint(*(c.universe for c in union_ids)):
raise ValueError(
"Universes of the arguments of Table.concat() have to be disjoint.\n"
+ "Consider using Table.promise_universes_are_disjoint() to assert it.\n"
+ "(However, untrue assertion might result in runtime errors.)"
)
context = clmn.ConcatUnsafeContext(
union_ids=union_ids,
updates=tuple(
{col_name: other._columns[col_name] for col_name in self.keys()}
for other in others
),
)
return self._table_with_context(context)
def update_cells(self, other: Table, _stacklevel: int = 1) -> Table:
"""Updates cells of `self`, breaking ties in favor of the values in `other`.
Semantics:
- result.columns == self.columns
- result.id == self.id
- conflicts are resolved preferring other's values
Requires:
- other.columns ⊆ self.columns
- other.id ⊆ self.id
Args:
other: the other table.
Returns:
Table: `self` updated with cells form `other`.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... age | owner | pet
... 1 | 10 | Alice | 30
... ''')
>>> pw.universes.promise_is_subset_of(t2, t1)
>>> t3 = t1.update_cells(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet
8 | Alice | 2
9 | Bob | 1
10 | Alice | 30
"""
if names := (set(other.keys()) - set(self.keys())):
raise ValueError(
f"Columns of the argument in Table.update_cells() not present in the updated table: {list(names)}."
)
if self._universe == other._universe:
warnings.warn(
"Key sets of self and other in update_cells are the same."
+ " Using with_columns instead of update_cells.",
stacklevel=_stacklevel + 4,
)
return self.with_columns(*(other[name] for name in other))
schema = {
key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
for key in other.keys()
}
return Table._update_cells(
self.cast_to_types(**schema), other.cast_to_types(**schema)
)
def _update_cells(self, other: Table) -> Table:
if not other._universe.is_subset_of(self._universe):
raise ValueError(
"Universe of the argument of Table.update_cells() needs to be "
+ "a subset of the universe of the updated table.\n"
+ "Consider using Table.promise_is_subset_of() to assert this.\n"
+ "(However, untrue assertion might result in runtime errors.)"
)
context = clmn.UpdateCellsContext(
left=self._id_column,
right=other._id_column,
updates={name: other._columns[name] for name in other.keys()},
)
return self._table_with_context(context)
def update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
"""Updates rows of `self`, breaking ties in favor for the rows in `other`.
Semantics:
- result.columns == self.columns == other.columns
- result.id == self.id ∪ other.id
Requires:
- other.columns == self.columns
Args:
other: the other table.
Returns:
Table: `self` updated with rows form `other`.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 30
... 12 | 12 | Tom | 40
... ''')
>>> t3 = t1.update_rows(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet
8 | Alice | 2
9 | Bob | 1
10 | Alice | 30
12 | Tom | 40
"""
if other.keys() != self.keys():
raise ValueError(
"Columns do not match between argument of Table.update_rows() and the updated table."
)
if self._universe.is_subset_of(other._universe):
warnings.warn(
"Universe of self is a subset of universe of other in update_rows. Returning other.",
stacklevel=5,
)
return other
schema = {
key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
for key in self.keys()
}
union_universes = (self._universe, other._universe)
universe = G.universe_solver.get_union(*union_universes)
if universe == self._universe:
return Table._update_cells(
self.cast_to_types(**schema), other.cast_to_types(**schema)
)
else:
return Table._update_rows(
self.cast_to_types(**schema), other.cast_to_types(**schema)
)
def _update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
union_ids = (self._id_column, other._id_column)
context = clmn.UpdateRowsContext(
updates={col_name: other._columns[col_name] for col_name in self.keys()},
union_ids=union_ids,
)
return self._table_with_context(context)
def with_columns(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
"""Updates columns of `self`, according to args and kwargs.
See `table.select` specification for evaluation of args and kwargs.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | owner | pet | size
... 1 | Tom | 1 | 10
... 2 | Bob | 1 | 9
... 3 | Tom | 2 | 8
... ''')
>>> t3 = t1.with_columns(*t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet | size
8 | Tom | 2 | 8
9 | Bob | 1 | 9
10 | Tom | 1 | 10
"""
other = self.select(*args, **kwargs)
columns = dict(self)
columns.update(other)
return self.select(**columns)
    def with_id(self, new_index: expr.ColumnReference) -> Table:
        """Set new ids based on another column containing id-typed values.

        To generate ids based on arbitrary valued columns, use `with_id_from`.

        Values assigned must be row-wise unique.

        Args:
            new_index: column to be used as the new index.

        Returns:
            Table with updated ids.

        Example:

        >>> import pytest; pytest.xfail("with_id is hard to test")
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...   | age | owner | pet
        ... 1 | 10  | Alice | 1
        ... 2 | 9   | Bob   | 1
        ... 3 | 8   | Alice | 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...   | new_id
        ... 1 | 2
        ... 2 | 3
        ... 3 | 4
        ... ''')
        >>> t3 = t1.promise_universe_is_subset_of(t2).with_id(t2.new_id)
        >>> pw.debug.compute_and_print(t3)
        age owner pet
        ^2 10 Alice 1
        ^3 9 Bob 1
        ^4 8 Alice 2
        """
        return self._with_new_index(new_index)
    def with_id_from(
        self,
        *args: expr.ColumnExpression | Value,
        instance: expr.ColumnReference | None = None,
    ) -> Table:
        """Compute new ids based on values in columns.

        Ids computed from the given expressions must be row-wise unique.

        Args:
            args: expressions used as primary keys for the new ids.
            instance: optional expression describing partitioning of the data
                into separate instances.

        Returns:
            Table: `self` updated with recomputed ids.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...   | age | owner | pet
        ... 1 | 10  | Alice | 1
        ... 2 | 9   | Bob   | 1
        ... 3 | 8   | Alice | 2
        ... ''')
        >>> t2 = t1 + t1.select(old_id=t1.id)
        >>> t3 = t2.with_id_from(t2.age)
        >>> pw.debug.compute_and_print(t3) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
            | age | owner | pet | old_id
        ^... | 8   | Alice | 2   | ^...
        ^... | 9   | Bob   | 1   | ^...
        ^... | 10  | Alice | 1   | ^...
        >>> t4 = t3.select(t3.age, t3.owner, t3.pet, same_as_old=(t3.id == t3.old_id),
        ...     same_as_new=(t3.id == t3.pointer_from(t3.age)))
        >>> pw.debug.compute_and_print(t4) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
            | age | owner | pet | same_as_old | same_as_new
        ^... | 8   | Alice | 2   | False       | True
        ^... | 9   | Bob   | 1   | False       | True
        ^... | 10  | Alice | 1   | False       | True
        """
        # new_index should be a column, so a little workaround
        new_index = self.select(
            ref_column=self.pointer_from(*args, instance=instance)
        ).ref_column
        return self._with_new_index(
            new_index=new_index,
        )
def _with_new_index(
self,
new_index: expr.ColumnExpression,
) -> Table:
self._validate_expression(new_index)
index_type = self.eval_type(new_index)
if not isinstance(index_type, dt.Pointer):
raise TypeError(
f"Pathway supports reindexing Tables with Pointer type only. The type used was {index_type}."
)
reindex_column = self._eval(new_index)
assert self._universe == reindex_column.universe
context = clmn.ReindexContext(reindex_column)
return self._table_with_context(context)
def rename_columns(self, **kwargs: str | expr.ColumnReference) -> Table:
"""Rename columns according to kwargs.
Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.
Args:
kwargs: mapping from old column names to new names.
Returns:
Table: `self` with columns renamed.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = t1.rename_columns(years_old=t1.age, animal=t1.pet)
>>> pw.debug.compute_and_print(t2, include_id=False)
owner | years_old | animal
Alice | 8 | 2
Alice | 10 | 1
Bob | 9 | 1
"""
mapping: dict[str, str] = {}
for new_name, old_name_col in kwargs.items():
if isinstance(old_name_col, expr.ColumnReference):
old_name = old_name_col.name
else:
old_name = old_name_col
if old_name not in self._columns:
raise ValueError(f"Column {old_name} does not exist in a given table.")
mapping[new_name] = old_name
renamed_columns = self._columns.copy()
for new_name, old_name in mapping.items():
renamed_columns.pop(old_name)
for new_name, old_name in mapping.items():
renamed_columns[new_name] = self._columns[old_name]
columns_wrapped = {
name: self._wrap_column_in_context(
self._rowwise_context,
column,
mapping[name] if name in mapping else name,
)
for name, column in renamed_columns.items()
}
return self._with_same_universe(*columns_wrapped.items())
def rename_by_dict(
self, names_mapping: dict[str | expr.ColumnReference, str]
) -> Table:
"""Rename columns according to a dictionary.
Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.
Args:
names_mapping: mapping from old column names to new names.
Returns:
Table: `self` with columns renamed.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = t1.rename_by_dict({"age": "years_old", t1.pet: "animal"})
>>> pw.debug.compute_and_print(t2, include_id=False)
owner | years_old | animal
Alice | 8 | 2
Alice | 10 | 1
Bob | 9 | 1
"""
return self.rename_columns(
**{new_name: self[old_name] for old_name, new_name in names_mapping.items()}
)
def with_prefix(self, prefix: str) -> Table:
"""Rename columns by adding prefix to each name of column.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = t1.with_prefix("u_")
>>> pw.debug.compute_and_print(t2, include_id=False)
u_age | u_owner | u_pet
8 | Alice | 2
9 | Bob | 1
10 | Alice | 1
"""
return self.rename_by_dict({name: prefix + name for name in self.keys()})
def with_suffix(self, suffix: str) -> Table:
"""Rename columns by adding suffix to each name of column.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = t1.with_suffix("_current")
>>> pw.debug.compute_and_print(t2, include_id=False)
age_current | owner_current | pet_current
8 | Alice | 2
9 | Bob | 1
10 | Alice | 1
"""
return self.rename_by_dict({name: name + suffix for name in self.keys()})
def rename(
self,
names_mapping: dict[str | expr.ColumnReference, str] | None = None,
**kwargs: expr.ColumnExpression,
) -> Table:
"""Rename columns according either a dictionary or kwargs.
If a mapping is provided using a dictionary, ``rename_by_dict`` will be used.
Otherwise, ``rename_columns`` will be used with kwargs.
Columns not in keys(kwargs) are not changed. New name of a column must not be ``id``.
Args:
names_mapping: mapping from old column names to new names.
kwargs: mapping from old column names to new names.
Returns:
Table: `self` with columns renamed.
"""
if names_mapping is not None:
return self.rename_by_dict(names_mapping=names_mapping)
return self.rename_columns(**kwargs)
def without(self, *columns: str | expr.ColumnReference) -> Table:
"""Selects all columns without named column references.
Args:
columns: columns to be dropped provided by `table.column_name` notation.
Returns:
Table: `self` without specified columns.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = t1.without(t1.age, pw.this.pet)
>>> pw.debug.compute_and_print(t2, include_id=False)
owner
Alice
Alice
Bob
"""
new_columns = self._columns.copy()
for col in columns:
if isinstance(col, expr.ColumnReference):
new_columns.pop(col.name)
else:
assert isinstance(col, str)
new_columns.pop(col)
columns_wrapped = {
name: self._wrap_column_in_context(self._rowwise_context, column, name)
for name, column in new_columns.items()
}
return self._with_same_universe(*columns_wrapped.items())
def having(self, *indexers: expr.ColumnReference) -> Table[TSchema]:
"""Removes rows so that indexed.ix(indexer) is possible when some rows are missing,
for each indexer in indexers"""
rets: list[Table] = []
for indexer in indexers:
rets.append(self._having(indexer))
if len(rets) == 0:
return self
elif len(rets) == 1:
[ret] = rets
return ret
else:
return rets[0].intersect(*rets[1:])
def update_types(self, **kwargs: Any) -> Table:
"""Updates types in schema. Has no effect on the runtime."""
for name in kwargs.keys():
if name not in self.keys():
raise ValueError(
"Table.update_types() argument name has to be an existing table column name."
)
from pathway.internals.common import declare_type
return self.with_columns(
**{key: declare_type(val, self[key]) for key, val in kwargs.items()}
)
def cast_to_types(self, **kwargs: Any) -> Table:
"""Casts columns to types."""
for name in kwargs.keys():
if name not in self.keys():
raise ValueError(
"Table.cast_to_types() argument name has to be an existing table column name."
)
from pathway.internals.common import cast
return self.with_columns(
**{key: cast(val, self[key]) for key, val in kwargs.items()}
)
def _having(self, indexer: expr.ColumnReference) -> Table[TSchema]:
context = clmn.HavingContext(
orig_id_column=self._id_column, key_column=indexer._column
)
return self._table_with_context(context)
def with_universe_of(self, other: TableLike) -> Table:
"""Returns a copy of self with exactly the same universe as others.
Semantics: Required precondition self.universe == other.universe
Used in situations where Pathway cannot deduce equality of universes, but
those are equal as verified during runtime.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | pet
... 1 | Dog
... 7 | Cat
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | age
... 1 | 10
... 7 | 3
... 8 | 100
... ''')
>>> t3 = t2.filter(pw.this.age < 30).with_universe_of(t1)
>>> t4 = t1 + t3
>>> pw.debug.compute_and_print(t4, include_id=False)
pet | age
Cat | 3
Dog | 10
"""
if self._universe == other._universe:
return self.copy()
universes.promise_are_equal(self, other)
return self._unsafe_promise_universe(other)
def flatten(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
"""Performs a flatmap operation on a column or expression given as a first
argument. Datatype of this column or expression has to be iterable or Json array.
Other columns specified in the method arguments are duplicated
as many times as the length of the iterable.
It is possible to get ids of source rows by using `table.id` column, e.g.
`table.flatten(table.column_to_be_flattened, original_id = table.id)`.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | pet | age
... 1 | Dog | 2
... 7 | Cat | 5
... ''')
>>> t2 = t1.flatten(t1.pet)
>>> pw.debug.compute_and_print(t2, include_id=False)
pet
C
D
a
g
o
t
>>> t3 = t1.flatten(t1.pet, t1.age)
>>> pw.debug.compute_and_print(t3, include_id=False)
pet | age
C | 5
D | 2
a | 5
g | 2
o | 2
t | 5
"""
intermediate_table = self.select(*args, **kwargs)
all_args = combine_args_kwargs(args, kwargs)
if not all_args:
raise ValueError("Table.flatten() cannot have empty arguments list.")
all_names_iter = iter(all_args.keys())
flatten_name = next(all_names_iter)
return intermediate_table._flatten(flatten_name)
def _flatten(
self,
flatten_name: str,
) -> Table:
flatten_column = self._columns[flatten_name]
assert isinstance(flatten_column, clmn.ColumnWithExpression)
context = clmn.FlattenContext(
orig_universe=self._universe,
flatten_column=flatten_column,
)
columns = {
name: self._wrap_column_in_context(context, column, name)
for name, column in self._columns.items()
if name != flatten_name
}
return Table(
_columns={
flatten_name: context.flatten_result_column,
**columns,
},
_context=context,
)
def sort(
self,
key: expr.ColumnExpression,
instance: expr.ColumnExpression | None = None,
) -> Table:
"""
Sorts a table by the specified keys.
Args:
table : pw.Table
The table to be sorted.
key (ColumnExpression[int | float | datetime | str | bytes]):
An expression to sort by.
instance : ColumnReference or None
An expression with instance. Rows are sorted within an instance.
``prev`` and ``next`` columns will only point to rows that have the same instance.
Returns:
pw.Table: The sorted table. Contains two columns: ``prev`` and ``next``, containing the pointers
to the previous and next rows.
Example:
>>> import pathway as pw
>>> table = pw.debug.table_from_markdown('''
... name | age | score
... Alice | 25 | 80
... Bob | 20 | 90
... Charlie | 30 | 80
... ''')
>>> table = table.with_id_from(pw.this.name)
>>> table += table.sort(key=pw.this.age)
>>> pw.debug.compute_and_print(table, include_id=True)
| name | age | score | prev | next
^GBSDEEW... | Alice | 25 | 80 | ^EDPSSB1... | ^DS9AT95...
^EDPSSB1... | Bob | 20 | 90 | | ^GBSDEEW...
^DS9AT95... | Charlie | 30 | 80 | ^GBSDEEW... |
>>> table = pw.debug.table_from_markdown('''
... name | age | score
... Alice | 25 | 80
... Bob | 20 | 90
... Charlie | 30 | 80
... David | 35 | 90
... Eve | 15 | 80
... ''')
>>> table = table.with_id_from(pw.this.name)
>>> table += table.sort(key=pw.this.age, instance=pw.this.score)
>>> pw.debug.compute_and_print(table, include_id=True)
| name | age | score | prev | next
^GBSDEEW... | Alice | 25 | 80 | ^T0B95XH... | ^DS9AT95...
^EDPSSB1... | Bob | 20 | 90 | | ^RT0AZWX...
^DS9AT95... | Charlie | 30 | 80 | ^GBSDEEW... |
^RT0AZWX... | David | 35 | 90 | ^EDPSSB1... |
^T0B95XH... | Eve | 15 | 80 | | ^GBSDEEW...
"""
instance = clmn.ColumnExpression._wrap(instance)
context = clmn.SortingContext(
self._eval(key),
self._eval(instance),
)
return Table(
_columns={
"prev": context.prev_column,
"next": context.next_column,
},
_context=context,
)
def _set_source(self, source: OutputHandle):
self._source = source
if not hasattr(self._id_column, "lineage"):
self._id_column.lineage = clmn.ColumnLineage(name="id", source=source)
for name, column in self._columns.items():
if not hasattr(column, "lineage"):
column.lineage = clmn.ColumnLineage(name=name, source=source)
universe = self._universe
if not hasattr(universe, "lineage"):
universe.lineage = clmn.Lineage(source=source)
def _unsafe_promise_universe(self, other: TableLike) -> Table:
context = clmn.PromiseSameUniverseContext(self._id_column, other._id_column)
return self._table_with_context(context)
def _validate_expression(self, expression: expr.ColumnExpression):
for dep in expression._dependencies_above_reducer():
if self._universe != dep._column.universe:
raise ValueError(
f"You cannot use {dep.to_column_expression()} in this context."
+ " Its universe is different than the universe of the table the method"
+ " was called on. You can use <table1>.with_universe_of(<table2>)"
+ " to assign universe of <table2> to <table1> if you're sure their"
+ " sets of keys are equal."
)
def _wrap_column_in_context(
    self,
    context: clmn.Context,
    column: clmn.Column,
    name: str,
    lineage: clmn.Lineage | None = None,
) -> clmn.Column:
    """Contextualize column by wrapping it in expression.

    Builds a reference expression pointing at ``column`` and evaluates it in
    ``context``, so the resulting column lives in ``context.universe``.
    ``lineage`` is forwarded unchanged when provided.
    """
    expression = expr.ColumnReference(_table=self, _column=column, _name=name)
    return expression._column_with_expression_cls(
        context=context,
        universe=context.universe,
        expression=expression,
        lineage=lineage,
    )
def _table_with_context(self, context: clmn.Context) -> Table:
    """Rewrap every column of this table in ``context`` and build a new
    Table over that context (schema is re-derived from the columns)."""
    wrapped: dict[str, clmn.Column] = {}
    for name, column in self._columns.items():
        wrapped[name] = self._wrap_column_in_context(context, column, name)
    return Table(
        _columns=wrapped,
        _context=context,
    )
def _table_restricted_context(self) -> clmn.TableRestrictedRowwiseContext:
    # Rowwise context restricted to this table — used where expressions may
    # only reference this table's columns.
    return clmn.TableRestrictedRowwiseContext(self._id_column, self)
def _eval(
    self, expression: expr.ColumnExpression, context: clmn.Context | None = None
) -> clmn.ColumnWithExpression:
    """Desugar ``expression`` and wrap it in ``context`` (defaulting to this
    table's rowwise context)."""
    ctx = self._rowwise_context if context is None else context
    return expression._column_with_expression_cls(
        context=ctx,
        universe=ctx.universe,
        expression=expression,
    )
# NOTE(review): first parameter is ``cls`` — presumably a @classmethod whose
# decorator was lost in extraction; confirm against the full file.
def _from_schema(cls: type[TTable], schema: type[Schema]) -> TTable:
    """Build an empty materialized table whose columns follow ``schema``.

    A fresh Universe is created; one MaterializedColumn is added per schema
    column, carrying that column's declared properties.
    """
    universe = Universe()
    context = clmn.MaterializedContext(universe, schema.universe_properties)
    columns = {
        name: clmn.MaterializedColumn(
            universe,
            schema.column_properties(name),
        )
        for name in schema.column_names()
    }
    return cls(_columns=columns, _schema=schema, _context=context)
def __repr__(self) -> str:
    """Debug representation showing the table's column typehints."""
    schema_dict = dict(self.typehints())
    return f"<pathway.Table schema={schema_dict}>"
def _with_same_universe(
    self,
    *columns: tuple[str, clmn.Column],
    schema: type[Schema] | None = None,
) -> Table:
    """Build a table from ``(name, column)`` pairs over this table's universe.

    Reuses this table's rowwise context; when ``schema`` is None the Table
    constructor derives it from the columns.
    """
    return Table(
        _columns=dict(columns),
        _schema=schema,
        _context=self._rowwise_context,
    )
def _sort_columns_by_other(self, other: Table):
    """Reorder ``self._columns`` in place to match ``other``'s column order.

    Both tables must have exactly the same column names.
    """
    assert self.keys() == other.keys()
    reordered: dict[str, clmn.Column] = {}
    for name in other.keys():
        reordered[name] = self._columns[name]
    self._columns = reordered
def _operator_dependencies(self) -> StableSet[Table]:
    """A table's only operator dependency is itself."""
    return StableSet((self,))
def debug(self, name: str):
    """Register a DebugOperator on the global graph that prints this table
    under ``name``; returns ``self`` so the call can be chained."""
    G.add_operator(
        lambda id: DebugOperator(name, id),
        lambda operator: operator(self),
    )
    return self
def to(self, sink: DataSink) -> None:
    """Register an output of this table into ``sink``."""
    # Local import — presumably avoids a circular import; confirm.
    from pathway.internals import table_io
    table_io.table_to_datasink(self, sink)
def _materialize(self, universe: Universe):
    """Build a copy of this table backed by materialized columns over
    ``universe``, keeping the schema and per-column properties."""
    new_columns: dict[str, clmn.Column] = {}
    for name, column in self._columns.items():
        new_columns[name] = clmn.MaterializedColumn(universe, column.properties)
    return Table(
        _columns=new_columns,
        _schema=self.schema,
        _context=clmn.MaterializedContext(universe),
    )
def pointer_from(
    self, *args: Any, optional=False, instance: expr.ColumnReference | None = None
):
    """Pseudo-random hash of its argument. Produces pointer types. Applied column-wise.

    Args:
        args: expressions hashed into the pointer, column-wise.
        optional: forwarded to the pointer expression.
        instance: optional partitioning column; appended as the last hashed argument.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    age  owner  pet
    ... 1   10  Alice  dog
    ... 2    9    Bob  dog
    ... 3    8  Alice  cat
    ... 4    7    Bob  dog''')
    >>> g = t1.groupby(t1.owner).reduce(refcol = t1.pointer_from(t1.owner)) # g.id == g.refcol
    >>> pw.debug.compute_and_print(g.select(test = (g.id == g.refcol)), include_id=False)
    test
    True
    True
    """
    if instance is not None:
        # instance participates in the hash like any other argument.
        args = (*args, instance)
    # XXX verify types for the table primary_keys
    return expr.PointerExpression(self, *args, optional=optional)
def ix_ref(
    self,
    *args: expr.ColumnExpression | Value,
    optional: bool = False,
    context=None,
    instance: expr.ColumnReference | None = None,
):
    """Reindexes the table using expressions as primary keys.

    Uses keys from context, or tries to infer proper context from the expression.
    If optional is True, then None in expression values result in None values in the result columns.
    Missing values in table keys result in RuntimeError.

    Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
    (latter results in returning a delayed operation, and should be only used when using `ix` inside
    join().select() or groupby().reduce() sequence).

    Args:
        args: Column references.

    Returns:
        Row: indexed row.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... name   | pet
    ... Alice  | dog
    ... Bob    | cat
    ... Carole | cat
    ... David  | dog
    ... ''')
    >>> t2 = t1.with_id_from(pw.this.name)
    >>> t2 = t2.select(*pw.this, new_value=pw.this.ix_ref("Alice").pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    name   | pet | new_value
    Alice  | dog | dog
    Bob    | cat | dog
    Carole | cat | dog
    David  | dog | dog

    Tables obtained by a groupby/reduce scheme always have primary keys:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... name   | pet
    ... Alice  | dog
    ... Bob    | cat
    ... Carole | cat
    ... David  | cat
    ... ''')
    >>> t2 = t1.groupby(pw.this.pet).reduce(pw.this.pet, count=pw.reducers.count())
    >>> t3 = t1.select(*pw.this, new_value=t2.ix_ref(t1.pet).count)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    name   | pet | new_value
    Alice  | dog | 1
    Bob    | cat | 3
    Carole | cat | 3
    David  | cat | 3

    Single-row tables can be accessed via `ix_ref()`:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... name   | pet
    ... Alice  | dog
    ... Bob    | cat
    ... Carole | cat
    ... David  | cat
    ... ''')
    >>> t2 = t1.reduce(count=pw.reducers.count())
    >>> t3 = t1.select(*pw.this, new_value=t2.ix_ref(context=t1).count)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    name   | pet | new_value
    Alice  | dog | 4
    Bob    | cat | 4
    Carole | cat | 4
    David  | cat | 4
    """
    # Delegates to ix() with a pointer computed from the key expressions;
    # instance (if any) is folded into the pointer by pointer_from().
    return self.ix(
        self.pointer_from(*args, optional=optional, instance=instance),
        optional=optional,
        context=context,
    )
def _subtables(self) -> StableSet[Table]:
    """A plain table's only subtable is itself."""
    return StableSet((self,))
def _substitutions(
    self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
    # A plain table needs no substitutions: it desugars to itself.
    return self, {}
def typehints(self) -> Mapping[str, Any]:
    """
    Return the types of the columns as a dictionary.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t1.typehints()
    mappingproxy({'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>})
    """
    return self.schema.typehints()
def eval_type(self, expression: expr.ColumnExpression) -> dt.DType:
    """Infer the dtype of ``expression`` evaluated in this table's rowwise
    context."""
    interpreter = self._rowwise_context._get_type_interpreter()
    typed = interpreter.eval_expression(expression, state=TypeInterpreterState())
    return typed._dtype
def _auto_live(self) -> Table:
    """Make self automatically live in interactive mode."""
    from pathway.internals.interactive import is_interactive_mode_enabled

    if not is_interactive_mode_enabled():
        return self
    return self.live()
def live(self) -> LiveTable[TSchema]:
    """Wrap this table in a LiveTable (experimental feature; a warning is
    emitted on every call)."""
    from pathway.internals.interactive import LiveTable

    warnings.warn("live tables are an experimental feature", stacklevel=2)
    return LiveTable._create(self)
The provided code snippet includes necessary dependencies for implementing the `interval_join_inner` function. Write a Python function `def interval_join_inner( self: pw.Table, other: pw.Table, self_time: pw.ColumnExpression, other_time: pw.ColumnExpression, interval: Interval[int] | Interval[float] | Interval[datetime.timedelta], *on: pw.ColumnExpression, behavior: CommonBehavior | None = None, left_instance: pw.ColumnReference | None = None, right_instance: pw.ColumnReference | None = None, ) -> IntervalJoinResult` to solve the following problem:
Performs an interval join of self with other using a time difference and join expressions. If `self_time + lower_bound <= other_time <= self_time + upper_bound` and conditions in `on` are satisfied, the rows are joined. Args: other: the right side of a join. self_time: time expression in self. other_time: time expression in other. lower_bound: a lower bound on time difference between other_time and self_time. upper_bound: an upper bound on time difference between other_time and self_time. on: a list of column expressions. Each must have == as the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. behavior: defines temporal behavior of a join - features like delaying entries or ignoring late entries. left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Returns: IntervalJoinResult: a result of the interval join. A method `.select()` can be called on it to extract relevant columns from the result of a join. Examples: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 3 ... 2 | 4 ... 3 | 5 ... 4 | 11 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 0 ... 2 | 1 ... 3 | 4 ... 4 | 7 ... ''' ... ) >>> t3 = t1.interval_join_inner(t2, t1.t, t2.t, pw.temporal.interval(-2, 1)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 3 | 1 3 | 4 4 | 4 5 | 4 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 3 ... 2 | 1 | 4 ... 3 | 1 | 5 ... 4 | 1 | 11 ... 5 | 2 | 2 ... 6 | 2 | 3 ... 7 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 0 ... 2 | 1 | 1 ... 3 | 1 | 4 ... 4 | 1 | 7 ... 5 | 2 | 0 ... 6 | 2 | 2 ... 7 | 4 | 2 ... ''' ... ) >>> t3 = t1.interval_join_inner( ... t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b ... 
).select(t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) a | left_t | right_t 1 | 3 | 1 1 | 3 | 4 1 | 4 | 4 1 | 5 | 4 2 | 2 | 0 2 | 2 | 2 2 | 3 | 2 Setting `behavior` allows to control temporal behavior of an interval join. Then, each side of the interval join keeps track of the maximal already seen time (`self_time` and `other_time`). The arguments of `behavior` mean in the context of an interval join what follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, interval join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Example without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 1 | 1 | 0 | 2 ... 2 | 2 | 2 | 4 ... 3 | 1 | 4 | 4 ... 4 | 2 | 8 | 8 ... 5 | 1 | 0 | 10 ... 6 | 1 | 4 | 10 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 42 | 1 | 2 | 2 ... 8 | 2 | 10 | 14 ... 10 | 2 | 4 | 30 ... ''' ... ) >>> result_without_cutoff = t1.interval_join_inner( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... 
) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 5 | 42 | 1 | 0 | 2 | 10 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | 8 | 2 | 8 | 10 | 14 | 1 2 | 10 | 2 | 2 | 4 | 30 | 1 >>> result_with_cutoff = t1.interval_join_inner( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... behavior=pw.temporal.common_behavior(cutoff=6), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | 8 | 2 | 8 | 10 | 14 | 1 The record with ``value=5`` from table ``t1`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``0 <= 8-6``). The record with ``value=10`` from table ``t2`` was not joined because its ``event_time`` was equal to the maximal already seen time minus ``cutoff`` (``4 <= 10-6``).
Here is the function:
def interval_join_inner(
    self: pw.Table,
    other: pw.Table,
    self_time: pw.ColumnExpression,
    other_time: pw.ColumnExpression,
    interval: Interval[int] | Interval[float] | Interval[datetime.timedelta],
    *on: pw.ColumnExpression,
    behavior: CommonBehavior | None = None,
    left_instance: pw.ColumnReference | None = None,
    right_instance: pw.ColumnReference | None = None,
) -> IntervalJoinResult:
    """Performs an interval join of self with other using a time difference
    and join expressions. If `self_time + lower_bound <=
    other_time <= self_time + upper_bound`
    and conditions in `on` are satisfied, the rows are joined.

    Args:
        other: the right side of a join.
        self_time: time expression in self.
        other_time: time expression in other.
        lower_bound: a lower bound on time difference between other_time
            and self_time.
        upper_bound: an upper bound on time difference between other_time
            and self_time.
        on:  a list of column expressions. Each must have == as the top level
            operation and be of the form LHS: ColumnReference == RHS: ColumnReference.
        behavior: defines temporal behavior of a join - features like delaying entries
            or ignoring late entries.
        left_instance/right_instance: optional arguments describing partitioning of the data into separate instances

    Returns:
        IntervalJoinResult: a result of the interval join. A method `.select()`
        can be called on it to extract relevant columns from the result of a join.

    Examples:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 3
    ...     2 | 4
    ...     3 | 5
    ...     4 | 11
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 0
    ...     2 | 1
    ...     3 | 4
    ...     4 | 7
    ...     '''
    ... )
    >>> t3 = t1.interval_join_inner(t2, t1.t, t2.t, pw.temporal.interval(-2, 1)).select(
    ...     left_t=t1.t, right_t=t2.t
    ... )
    >>> pw.debug.compute_and_print(t3, include_id=False)
    left_t | right_t
    3      | 1
    3      | 4
    4      | 4
    5      | 4

    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | a | t
    ...     1 | 1 | 3
    ...     2 | 1 | 4
    ...     3 | 1 | 5
    ...     4 | 1 | 11
    ...     5 | 2 | 2
    ...     6 | 2 | 3
    ...     7 | 3 | 4
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | b | t
    ...     1 | 1 | 0
    ...     2 | 1 | 1
    ...     3 | 1 | 4
    ...     4 | 1 | 7
    ...     5 | 2 | 0
    ...     6 | 2 | 2
    ...     7 | 4 | 2
    ...     '''
    ... )
    >>> t3 = t1.interval_join_inner(
    ...     t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b
    ... ).select(t1.a, left_t=t1.t, right_t=t2.t)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    a | left_t | right_t
    1 | 3      | 1
    1 | 3      | 4
    1 | 4      | 4
    1 | 5      | 4
    2 | 2      | 0
    2 | 2      | 2
    2 | 3      | 2

    Setting `behavior` allows to control temporal behavior of an interval join. Then, each side of
    the interval join keeps track of the maximal already seen time (`self_time` and `other_time`).
    The arguments of `behavior` mean in the context of an interval join what follows:
    - **delay** - buffers results until the maximal already seen time is greater than \
        or equal to their time plus `delay`.
    - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \
        it is also used to garbage collect records that have times lower or equal to the above threshold. \
        When `cutoff` is not set, interval join will remember all records from both sides.
    - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \
        keeps only results that are newer than the maximal seen time minus `cutoff`.

    Example without and with forgetting:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...     value | instance | event_time | __time__
    ...       1   |    1     |     0      |     2
    ...       2   |    2     |     2      |     4
    ...       3   |    1     |     4      |     4
    ...       4   |    2     |     8      |     8
    ...       5   |    1     |     0      |    10
    ...       6   |    1     |     4      |    10
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...     value | instance | event_time | __time__
    ...      42   |    1     |     2      |     2
    ...       8   |    2     |    10      |    14
    ...      10   |    2     |     4      |    30
    ...     '''
    ... )
    >>> result_without_cutoff = t1.interval_join_inner(
    ...     t2,
    ...     t1.event_time,
    ...     t2.event_time,
    ...     pw.temporal.interval(-2, 2),
    ...     t1.instance == t2.instance,
    ... ).select(
    ...     left_value=t1.value,
    ...     right_value=t2.value,
    ...     instance=t1.instance,
    ...     left_time=t1.event_time,
    ...     right_time=t2.event_time,
    ... )
    >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False)
    left_value | right_value | instance | left_time | right_time | __time__ | __diff__
    1          | 42          | 1        | 0         | 2          | 2        | 1
    3          | 42          | 1        | 4         | 2          | 4        | 1
    5          | 42          | 1        | 0         | 2          | 10       | 1
    6          | 42          | 1        | 4         | 2          | 10       | 1
    4          | 8           | 2        | 8         | 10         | 14       | 1
    2          | 10          | 2        | 2         | 4          | 30       | 1
    >>> result_with_cutoff = t1.interval_join_inner(
    ...     t2,
    ...     t1.event_time,
    ...     t2.event_time,
    ...     pw.temporal.interval(-2, 2),
    ...     t1.instance == t2.instance,
    ...     behavior=pw.temporal.common_behavior(cutoff=6),
    ... ).select(
    ...     left_value=t1.value,
    ...     right_value=t2.value,
    ...     instance=t1.instance,
    ...     left_time=t1.event_time,
    ...     right_time=t2.event_time,
    ... )
    >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False)
    left_value | right_value | instance | left_time | right_time | __time__ | __diff__
    1          | 42          | 1        | 0         | 2          | 2        | 1
    3          | 42          | 1        | 4         | 2          | 4        | 1
    6          | 42          | 1        | 4         | 2          | 10       | 1
    4          | 8           | 2        | 8         | 10         | 14       | 1

    The record with ``value=5`` from table ``t1`` was not joined because its ``event_time``
    was less than the maximal already seen time minus ``cutoff`` (``0 <= 8-6``).
    The record with ``value=10`` from table ``t2`` was not joined because its ``event_time``
    was equal to the maximal already seen time minus ``cutoff`` (``4 <= 10-6``).
    """
    # Thin wrapper: all validation and dispatch happens in
    # IntervalJoinResult._interval_join; this variant only fixes INNER mode.
    return IntervalJoinResult._interval_join(
        self,
        other,
        self_time,
        other_time,
        interval,
        *on,
        behavior=behavior,
        mode=pw.JoinMode.INNER,
        left_instance=left_instance,
        right_instance=right_instance,
    )
).select(t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) a | left_t | right_t 1 | 3 | 1 1 | 3 | 4 1 | 4 | 4 1 | 5 | 4 2 | 2 | 0 2 | 2 | 2 2 | 3 | 2 Setting `behavior` allows to control temporal behavior of an interval join. Then, each side of the interval join keeps track of the maximal already seen time (`self_time` and `other_time`). The arguments of `behavior` mean in the context of an interval join what follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, interval join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Example without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 1 | 1 | 0 | 2 ... 2 | 2 | 2 | 4 ... 3 | 1 | 4 | 4 ... 4 | 2 | 8 | 8 ... 5 | 1 | 0 | 10 ... 6 | 1 | 4 | 10 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 42 | 1 | 2 | 2 ... 8 | 2 | 10 | 14 ... 10 | 2 | 4 | 30 ... ''' ... ) >>> result_without_cutoff = t1.interval_join_inner( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... 
) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 5 | 42 | 1 | 0 | 2 | 10 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | 8 | 2 | 8 | 10 | 14 | 1 2 | 10 | 2 | 2 | 4 | 30 | 1 >>> result_with_cutoff = t1.interval_join_inner( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... behavior=pw.temporal.common_behavior(cutoff=6), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | 8 | 2 | 8 | 10 | 14 | 1 The record with ``value=5`` from table ``t1`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``0 <= 8-6``). The record with ``value=10`` from table ``t2`` was not joined because its ``event_time`` was equal to the maximal already seen time minus ``cutoff`` (``4 <= 10-6``). |
166,649 | from __future__ import annotations
import datetime
from abc import abstractmethod
from dataclasses import dataclass
from typing import Any, Generic, TypeVar, overload
import pathway.internals as pw
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableReplacementWithNoneDesugaring,
TableSubstitutionDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import eval_type
from .temporal_behavior import CommonBehavior, apply_temporal_behavior
from .utils import IntervalType, TimeEventType, check_joint_types, get_default_origin
class Interval(Generic[T]):
    """Bounds of a temporal interval join: rows are matched when
    ``self_time + lower_bound <= other_time <= self_time + upper_bound``.
    """

    # NOTE(review): presumably a @dataclass whose decorator is outside this
    # view — confirm before relying on a generated __init__/__eq__.
    lower_bound: T
    upper_bound: T
class IntervalJoinResult(DesugaringContext):
    """
    Result of an interval join between tables.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 3
    ...     2 | 4
    ...     3 | 5
    ...     4 | 11
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 0
    ...     2 | 1
    ...     3 | 4
    ...     4 | 7
    ...     '''
    ... )
    >>> join_result = t1.interval_join_inner(t2, t1.t, t2.t, pw.temporal.interval(-2, 1))
    >>> isinstance(join_result, pw.temporal.IntervalJoinResult)
    True
    >>> pw.debug.compute_and_print(
    ...     join_result.select(left_t=t1.t, right_t=t2.t), include_id=False
    ... )
    left_t | right_t
    3      | 1
    3      | 4
    4      | 4
    5      | 4
    """

    # Maps original tables to their join-internal replacements (for desugaring).
    _table_substitution: dict[pw.TableLike, pw.Table]
    # Whether results produced by forgetting (cutoff) should be filtered out.
    _filter_out_results_of_forgetting: bool

    def __init__(
        self,
        left: pw.Table,
        right: pw.Table,
        table_substitution: dict[pw.TableLike, pw.Table],
        _filter_out_results_of_forgetting: bool,
    ):
        # pw.left / pw.right / pw.this placeholders resolve through this map.
        self._substitution = {
            pw.left: left,
            pw.right: right,
            pw.this: pw.this,  # type: ignore[dict-item]
        }
        self._table_substitution = table_substitution
        self._filter_out_results_of_forgetting = _filter_out_results_of_forgetting

    # NOTE(review): no ``self`` parameter — presumably a @staticmethod whose
    # decorator was lost in extraction; confirm against the full file.
    def _should_filter_out_results_of_forgetting(
        behavior: CommonBehavior | None,
    ) -> bool:
        # Filtering applies only when forgetting is active (cutoff set) but
        # the user asked to keep all results.
        return (
            behavior is not None
            and behavior.cutoff is not None
            and behavior.keep_results
        )

    # NOTE(review): no ``self``/``cls`` parameter — presumably a @staticmethod;
    # decorator not visible here.
    def _interval_join(
        left: pw.Table,
        right: pw.Table,
        left_time_expression: pw.ColumnExpression,
        right_time_expression: pw.ColumnExpression,
        interval: Interval[int] | Interval[float] | Interval[datetime.timedelta],
        *on: pw.ColumnExpression,
        behavior: CommonBehavior | None = None,
        mode: pw.JoinMode,
        left_instance: pw.ColumnReference | None = None,
        right_instance: pw.ColumnReference | None = None,
    ) -> IntervalJoinResult:
        """Creates an IntervalJoinResult. To perform an interval join it uses two
        tumbling windows of size `lower_bound` + `upper_bound` and then filters the result.
        """
        # Both time expressions and both bounds must use a consistent
        # (int/float/timedelta) time type.
        check_joint_types(
            {
                "self_time_expression": (left_time_expression, TimeEventType),
                "other_time_expression": (right_time_expression, TimeEventType),
                "lower_bound": (interval.lower_bound, IntervalType),
                "upper_bound": (interval.upper_bound, IntervalType),
            }
        )
        if left == right:
            raise ValueError(
                "Cannot join table with itself. Use <table>.copy() as one of the arguments of the join."
            )
        if interval.lower_bound > interval.upper_bound:  # type: ignore[operator]
            raise ValueError(
                "lower_bound has to be less than or equal to the upper_bound in the Table.interval_join()."
            )
        # Degenerate interval (equal bounds) has a cheaper specialized
        # implementation; both subclasses presumably override _interval_join.
        if interval.lower_bound == interval.upper_bound:
            cls: type[IntervalJoinResult] = _ZeroDifferenceIntervalJoinResult
        else:
            cls = _NonZeroDifferenceIntervalJoinResult
        return cls._interval_join(
            left,
            right,
            left_time_expression,
            right_time_expression,
            interval,
            *on,
            behavior=behavior,
            mode=mode,
            left_instance=left_instance,
            right_instance=right_instance,
        )

    def _desugaring(self) -> TableSubstitutionDesugaring:
        # Desugarer replacing user tables with their join-internal versions.
        return TableSubstitutionDesugaring(self._table_substitution)

    # NOTE(review): body is ``...`` — presumably @abstractmethod with the
    # decorator outside this view; subclasses provide the implementation.
    def select(self, *args: pw.ColumnReference, **kwargs: Any) -> pw.Table:
        """
        Computes a result of an interval join.

        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.

        Returns:
            Table: Created table.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown(
        ...     '''
        ...       | a | t
        ...     1 | 1 | 3
        ...     2 | 1 | 4
        ...     3 | 1 | 5
        ...     4 | 1 | 11
        ...     5 | 2 | 2
        ...     6 | 2 | 3
        ...     7 | 3 | 4
        ...     '''
        ... )
        >>> t2 = pw.debug.table_from_markdown(
        ...     '''
        ...       | b | t
        ...     1 | 1 | 0
        ...     2 | 1 | 1
        ...     3 | 1 | 4
        ...     4 | 1 | 7
        ...     5 | 2 | 0
        ...     6 | 2 | 2
        ...     7 | 4 | 2
        ...     '''
        ... )
        >>> t3 = t1.interval_join_inner(
        ...     t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b
        ... ).select(t1.a, left_t=t1.t, right_t=t2.t)
        >>> pw.debug.compute_and_print(t3, include_id=False)
        a | left_t | right_t
        1 | 3      | 1
        1 | 3      | 4
        1 | 4      | 4
        1 | 5      | 4
        2 | 2      | 0
        2 | 2      | 2
        2 | 3      | 2
        """
        ...
class CommonBehavior(Behavior):
    """Defines temporal behavior of windows and temporal joins."""

    # NOTE(review): presumably a @dataclass — decorator not visible here.
    # Buffer entries until max seen time >= entry time + delay.
    delay: IntervalType | None
    # Ignore/garbage-collect entries older than max seen time - cutoff.
    cutoff: IntervalType | None
    # If True, keep results even after they fall behind the cutoff threshold.
    keep_results: bool
class Table(
    Joinable,
    OperatorInput,
    Generic[TSchema],
):
    """Collection of named columns over identical universes.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> isinstance(t1, pw.Table)
    True
    """

    if TYPE_CHECKING:
        # Typing-only imports: these methods are attached to Table at runtime
        # by the stdlib modules themselves; listing them here gives IDEs and
        # type checkers their signatures without creating import cycles.
        from pathway.stdlib.ordered import diff  # type: ignore[misc]
        from pathway.stdlib.statistical import interpolate  # type: ignore[misc]
        from pathway.stdlib.temporal import (  # type: ignore[misc]
            asof_join,
            asof_join_left,
            asof_join_outer,
            asof_join_right,
            asof_now_join,
            asof_now_join_inner,
            asof_now_join_left,
            interval_join,
            interval_join_inner,
            interval_join_left,
            interval_join_outer,
            interval_join_right,
            window_join,
            window_join_inner,
            window_join_left,
            window_join_outer,
            window_join_right,
            windowby,
        )
        from pathway.stdlib.viz import (  # type: ignore[misc]
            _repr_mimebundle_,
            plot,
            show,
        )

    # Data columns by name, in insertion order.
    _columns: dict[str, clmn.Column]
    # Schema describing column names and types.
    _schema: type[Schema]
    # Column holding the row keys (ids).
    _id_column: clmn.IdColumn
    # Default context for evaluating per-row expressions on this table.
    _rowwise_context: clmn.RowwiseContext
    _source: SetOnceProperty[OutputHandle] = SetOnceProperty()
    """Lateinit by operator."""
def __init__(
    self,
    _columns: Mapping[str, clmn.Column],
    _context: clmn.Context,
    _schema: type[Schema] | None = None,
):
    """Build a table over ``_context`` from named columns.

    When ``_schema`` is None it is derived from the columns.
    """
    if _schema is None:
        _schema = schema_from_columns(_columns)
    super().__init__(_context)
    self._columns = dict(_columns)
    self._schema = _schema
    self._id_column = _context.id_column
    # pw.this resolves to this table in expressions evaluated against it.
    self._substitution = {thisclass.this: self}
    self._rowwise_context = clmn.RowwiseContext(self._id_column)
# NOTE(review): accessed as ``self.id`` (no call) in _get_colref_by_name,
# so presumably a @property whose decorator was lost in extraction.
def id(self) -> expr.ColumnReference:
    """Get reference to pseudocolumn containing id's of a table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1.select(ids = t1.id)
    >>> t2.typehints()['ids']
    <class 'pathway.engine.Pointer'>
    >>> pw.debug.compute_and_print(t2.select(test=t2.id == t2.ids), include_id=False)
    test
    True
    True
    True
    True
    """
    return expr.ColumnReference(_table=self, _column=self._id_column, _name="id")
def column_names(self):
    # Alias of keys(), kept for readability at call sites.
    return self.keys()
def keys(self):
    # View of column names, in insertion order.
    return self._columns.keys()
def _get_column(self, name: str) -> clmn.Column:
    # Raises KeyError for unknown names.
    return self._columns[name]
def _ipython_key_completions_(self):
    """IPython hook: offer column names when completing ``table[...]``."""
    return [*self.column_names()]
def __dir__(self):
    """Extend attribute listing with column names for tab completion."""
    return [*super().__dir__(), *self.column_names()]
# Typed alias of the column namespace ``self.C`` — presumably a @property
# (decorator not visible); the cast gives schema-aware attribute access.
def _C(self) -> TSchema:
    return self.C  # type: ignore
# NOTE(review): accessed as ``self.schema`` (no call) in _materialize, so
# presumably a @property whose decorator was lost in extraction.
def schema(self) -> type[Schema]:
    """Get schema of the table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t1.schema
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
    >>> t1.typehints()['age']
    <class 'int'>
    """
    return self._schema
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference:
    """Resolve ``name`` (after deprecation renaming) to a column reference;
    ``"id"`` maps to the id pseudocolumn, unknown names raise
    ``exception_type``."""
    name = self._column_deprecation_rename(name)
    if name == "id":
        return self.id
    if name in self.keys():
        return expr.ColumnReference(
            _table=self, _column=self._get_column(name), _name=name
        )
    raise exception_type(f"Table has no column with name {name}.")
# NOTE(review): the two stubs below are presumably decorated with
# @overload in the full file; the decorators are not visible here.
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...

def __getitem__(self, args: list[str | expr.ColumnReference]) -> Table: ...

def __getitem__(
    self, args: str | expr.ColumnReference | list[str | expr.ColumnReference]
) -> expr.ColumnReference | Table:
    """Get columns by name.

    Warning:
        - Does not allow repetitions of columns.
        - Fails if tries to access nonexistent column.

    Args:
        names: a singe column name or list of columns names to be extracted from `self`.

    Returns:
        Table with specified columns, or column expression (if single argument given).
        Instead of column names, column references are valid here.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1[["age", "pet"]]
    >>> t2 = t1[["age", t1.pet]]
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | pet
    7   | dog
    8   | cat
    9   | dog
    10  | dog
    """
    if isinstance(args, expr.ColumnReference):
        # A reference is accepted only if it points at this table or at the
        # pw.this placeholder; references to other tables are rejected.
        if (args.table is not self) and not isinstance(
            args.table, thisclass.ThisMetaclass
        ):
            raise ValueError(
                "Table.__getitem__ argument has to be a ColumnReference to the same table or pw.this, or a string "
                + "(or a list of those)."
            )
        return self._get_colref_by_name(args.name, KeyError)
    elif isinstance(args, str):
        return self._get_colref_by_name(args, KeyError)
    else:
        # List form: select each named column, producing a new table.
        return self.select(*[self[name] for name in args])
def from_columns(
    *args: expr.ColumnReference, **kwargs: expr.ColumnReference
) -> Table:
    """Build a table from columns.
    All columns must have the same ids. Columns' names must be pairwise distinct.
    Args:
    args: List of columns.
    kwargs: Columns with their new names.
    Returns:
    Table: Created table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> t2 = pw.Table.empty(foo=float, bar=float).with_universe_of(t1)
    >>> t3 = pw.Table.from_columns(t1.pet, qux=t2.foo)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | qux
    """
    # NOTE(review): defined without `self` — presumably wrapped as a static
    # method by a decorator outside this view; confirm upstream.
    all_args = cast(
        dict[str, expr.ColumnReference], combine_args_kwargs(args, kwargs)
    )
    if not all_args:
        raise ValueError("Table.from_columns() cannot have empty arguments list")
    else:
        # The first argument fixes the reference universe; every remaining
        # column must live in a provably equal universe.
        arg = next(iter(all_args.values()))
        table: Table = arg.table
        for arg in all_args.values():
            if not G.universe_solver.query_are_equal(
                table._universe, arg.table._universe
            ):
                raise ValueError(
                    "Universes of all arguments of Table.from_columns() have to be equal.\n"
                    + "Consider using Table.promise_universes_are_equal() to assert it.\n"
                    + "(However, untrue assertion might result in runtime errors.)"
                )
        return table.select(*args, **kwargs)
def concat_reindex(self, *tables: Table) -> Table:
    """Concatenate contents of several tables.
    This is similar to PySpark union. All tables must have the same schema. Each row is reindexed.
    Args:
    tables: List of tables to concatenate. All tables must have the same schema.
    Returns:
    Table: The concatenated table. It will have new, synthetic ids.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    | pet
    ...  1 | Dog
    ...  7 | Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | pet
    ...  1 | Manul
    ...  8 | Octopus
    ... ''')
    >>> t3 = t1.concat_reindex(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet
    Cat
    Dog
    Manul
    Octopus
    """
    # Salting each table's ids with its position `i` makes the new ids
    # collision-free across the inputs, so the plain concat below is safe.
    reindexed = [
        table.with_id_from(table.id, i) for i, table in enumerate([self, *tables])
    ]
    universes.promise_are_pairwise_disjoint(*reindexed)
    return Table.concat(*reindexed)
def empty(**kwargs: dt.DType) -> Table:
    """Creates an empty table with a schema specified by kwargs.
    Args:
    kwargs: Dict whose keys are column names and values are column types.
    Returns:
    Table: Created empty table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> pw.debug.compute_and_print(t1, include_id=False)
    age | pet
    """
    # NOTE(review): defined without `self`/`cls` — presumably a static method
    # via a decorator outside this view; confirm upstream.
    from pathway.internals import table_io

    ret = table_io.empty_from_schema(schema_from_types(None, **kwargs))
    # Record that this universe is empty so the solver can use the fact later.
    G.universe_solver.register_as_empty(ret._universe)
    return ret
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Build a new table with columns specified by kwargs.
    Output columns' names are keys(kwargs). values(kwargs) can be raw values, boxed
    values, columns. Assigning to id reindexes the table.
    Args:
    args: Column references.
    kwargs: Column expressions with their new assigned names.
    Returns:
    Table: Created table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... pet
    ... Dog
    ... Cat
    ... ''')
    >>> t2 = t1.select(animal=t1.pet, desc="fluffy")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    animal | desc
    Cat    | fluffy
    Dog    | fluffy
    """
    combined = combine_args_kwargs(args, kwargs)
    evaluated = []
    # Validate, then evaluate, each expression in declaration order.
    for target_name, expression in combined.items():
        self._validate_expression(expression)
        evaluated.append((target_name, self._eval(expression)))
    return self._with_same_universe(*evaluated)
def __add__(self, other: Table) -> Table:
    """Build a union of `self` with `other`.
    Semantics: Returns a table C, such that
    - C.columns == self.columns + other.columns
    - C.id == self.id == other.id
    Args:
    other: The other table. `self.id` must be equal `other.id` and
    `self.columns` and `other.columns` must be disjoint (or overlapping names
    are THE SAME COLUMN)
    Returns:
    Table: Created table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... pet
    ... 1 Dog
    ... 7 Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age
    ... 1 10
    ... 7 3
    ... ''')
    >>> t3 = t1 + t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | age
    Cat | 3
    Dog | 10
    """
    # Both operands must be proven to share a key set before their column
    # sets can be merged side by side.
    if not G.universe_solver.query_are_equal(self._universe, other._universe):
        raise ValueError(
            "Universes of all arguments of Table.__add__() have to be equal.\n"
            + "Consider using Table.promise_universes_are_equal() to assert it.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    return self.select(*self, *other)
def slice(self) -> TableSlice:
    """Creates a collection of references to self columns.
    Supports basic column manipulation methods.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t1.slice.without("age")
    TableSlice({'owner': <table1>.owner, 'pet': <table1>.pet})
    """
    # NOTE(review): the doctest accesses `t1.slice` without parentheses, so this
    # is presumably decorated with @property outside this view; confirm.
    return TableSlice(dict(**self), self)
def filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    """Filter a table according to `filter_expression` condition.
    Args:
    filter_expression: `ColumnExpression` that specifies the filtering condition.
    Returns:
    Table: Result has the same schema as `self` and its ids are subset of `self.id`.
    Example:
    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ... 1 3
    ... 7 0
    ... ''')
    >>> filtered = vertices.filter(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(filtered, include_id=False)
    label | outdegree
    7 | 0
    """
    filter_type = self.eval_type(filter_expression)
    if filter_type != dt.BOOL:
        raise TypeError(
            f"Filter argument of Table.filter() has to be bool, found {filter_type}."
        )
    ret = self._filter(filter_expression)
    # When the predicate is an is-None-based check on one of our own columns
    # (as detected by the helper), rows surviving the filter cannot hold None
    # in that column, so its type can be narrowed from Optional[T] to T.
    if (
        filter_col := expr.get_column_filtered_by_is_none(filter_expression)
    ) is not None and filter_col.table == self:
        name = filter_col.name
        dtype = self._columns[name].dtype
        ret = ret.update_types(**{name: dt.unoptionalize(dtype)})
    return ret
def split(
    self, split_expression: expr.ColumnExpression
) -> tuple[Table[TSchema], Table[TSchema]]:
    """Split a table according to `split_expression` condition.
    Args:
    split_expression: `ColumnExpression` that specifies the split condition.
    Returns:
    positive_table, negative_table: tuple of tables,
    with the same schemas as `self` and with ids that are subsets of `self.id`,
    and provably disjoint.
    Example:
    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ... 1 3
    ... 7 0
    ... ''')
    >>> positive, negative = vertices.split(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(positive, include_id=False)
    label | outdegree
    7 | 0
    >>> pw.debug.compute_and_print(negative, include_id=False)
    label | outdegree
    1 | 3
    """
    positive = self.filter(split_expression)
    negative = self.filter(~split_expression)
    # Teach the universe solver what is true by construction: the two halves
    # are disjoint and together they cover exactly the original key set.
    universes.promise_are_pairwise_disjoint(positive, negative)
    universes.promise_are_equal(
        self, Table.concat(positive, negative)
    )  # TODO: add API method for this
    return positive, negative
def _filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    """Internal filter: evaluate the predicate and wrap self in a FilterContext."""
    self._validate_expression(filter_expression)
    predicate = self._eval(filter_expression)
    assert self._universe == predicate.universe
    return self._table_with_context(
        clmn.FilterContext(predicate, self._id_column)
    )
def _gradual_broadcast(
    self,
    threshold_table,
    lower_column,
    value_column,
    upper_column,
) -> Table:
    """Return self extended with the `apx_value` column produced by the
    gradual-broadcast context (see `__gradual_broadcast` below)."""
    return self + self.__gradual_broadcast(
        threshold_table, lower_column, value_column, upper_column
    )
def __gradual_broadcast(
    self,
    threshold_table,
    lower_column,
    value_column,
    upper_column,
):
    # Builds a single-column table ("apx_value") over self's keys from a
    # GradualBroadcastContext fed with the three columns evaluated against
    # threshold_table.  Exact approximation semantics live in the context
    # implementation, outside this view.
    context = clmn.GradualBroadcastContext(
        self._id_column,
        threshold_table._eval(lower_column),
        threshold_table._eval(value_column),
        threshold_table._eval(upper_column),
    )
    return Table(_columns={"apx_value": context.apx_value_column}, _context=context)
def _forget(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
    mark_forgetting_records: bool,
) -> Table:
    """Wrap self in a ForgetContext built from the evaluated threshold and
    time columns; `mark_forgetting_records` is forwarded verbatim.  The
    forgetting semantics themselves are defined by the context class."""
    context = clmn.ForgetContext(
        self._id_column,
        self._eval(threshold_column),
        self._eval(time_column),
        mark_forgetting_records,
    )
    return self._table_with_context(context)
def _forget_immediately(self) -> Table:
    """Wrap self in a ForgetImmediatelyContext keyed by this table's ids."""
    return self._table_with_context(
        clmn.ForgetImmediatelyContext(self._id_column)
    )
def _filter_out_results_of_forgetting(self) -> Table:
    """Wrap self in a FilterOutForgettingContext.

    The output universe is a superset of the input universe because
    forgetting entries are filtered out: at any point in time the set of
    keys with a +1 diff can be larger than in the input table.
    """
    return self._table_with_context(
        clmn.FilterOutForgettingContext(self._id_column)
    )
def _freeze(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    """Wrap self in a FreezeContext built from the evaluated threshold and
    time columns; freezing semantics are defined by the context class."""
    context = clmn.FreezeContext(
        self._id_column,
        self._eval(threshold_column),
        self._eval(time_column),
    )
    return self._table_with_context(context)
def _buffer(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    """Wrap self in a BufferContext built from the evaluated threshold and
    time columns; buffering semantics are defined by the context class."""
    context = clmn.BufferContext(
        self._id_column,
        self._eval(threshold_column),
        self._eval(time_column),
    )
    return self._table_with_context(context)
def difference(self, other: Table) -> Table[TSchema]:
    r"""Restrict self universe to keys not appearing in the other table.
    Args:
    other: table with ids to remove from self.
    Returns:
    Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age  | owner  | pet
    ... 1 | 10   | Alice  | 1
    ... 2 | 9    | Bob    | 1
    ... 3 | 8    | Alice  | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.difference(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    10 | Alice | 1
    """
    # Key-set subtraction only; column values are untouched.
    context = clmn.DifferenceContext(
        left=self._id_column,
        right=other._id_column,
    )
    return self._table_with_context(context)
def intersect(self, *tables: Table) -> Table[TSchema]:
    """Restrict self universe to keys appearing in all of the tables.
    Args:
    tables: tables keys of which are used to restrict universe.
    Returns:
    Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age  | owner  | pet
    ... 1 | 10   | Alice  | 1
    ... 2 | 9    | Bob    | 1
    ... 3 | 8    | Alice  | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.intersect(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    """
    intersecting_universes = (
        self._universe,
        *tuple(table._universe for table in tables),
    )
    universe = G.universe_solver.get_intersection(*intersecting_universes)
    # Fast path: if the solver proves the intersection equals one of the
    # input universes, a cheaper restrict suffices; otherwise a genuine
    # key-intersection context is built.
    if universe in intersecting_universes:
        context: clmn.Context = clmn.RestrictContext(self._id_column, universe)
    else:
        intersecting_ids = (
            self._id_column,
            *tuple(table._id_column for table in tables),
        )
        context = clmn.IntersectContext(
            intersecting_ids=intersecting_ids,
        )
    return self._table_with_context(context)
def restrict(self, other: TableLike) -> Table[TSchema]:
    """Restrict self universe to keys appearing in other.
    Args:
    other: table which universe is used to restrict universe of self.
    Returns:
    Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...   | age  | owner  | pet
    ... 1 | 10   | Alice  | 1
    ... 2 | 9    | Bob    | 1
    ... 3 | 8    | Alice  | 2
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... '''
    ... )
    >>> t2.promise_universe_is_subset_of(t1)
    <pathway.Table schema={'cost': <class 'int'>}>
    >>> t3 = t1.restrict(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    """
    if not G.universe_solver.query_is_subset(other._universe, self._universe):
        # Fix: join the two sentences with "\n" (previously they were
        # concatenated directly, producing "...self universe.Consider..."),
        # matching the style of every sibling error message in this class.
        raise ValueError(
            "Table.restrict(): other universe has to be a subset of self universe.\n"
            + "Consider using Table.promise_universe_is_subset_of() to assert it."
        )
    context = clmn.RestrictContext(self._id_column, other._universe)
    # Re-wrap every column of self in the restricting context; names and
    # order are preserved.
    columns = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
    }
    return Table(
        _columns=columns,
        _context=context,
    )
def copy(self) -> Table[TSchema]:
    """Returns a copy of a table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.copy()
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | owner | pet
    7 | Bob | dog
    8 | Alice | cat
    9 | Bob | dog
    10 | Alice | dog
    >>> t1 is t2
    False
    """
    # Preserve the dynamic subclass of self in the copy.
    cls = type(self)
    return self._copy_as(cls)
def _copy_as(self, table_type: type[TTable], /, **kwargs) -> TTable:
    """Rebuild self's columns in its rowwise context and construct a
    *table_type* instance from them, forwarding extra constructor kwargs."""
    ctx = self._rowwise_context
    wrapped = {}
    for name, column in self._columns.items():
        wrapped[name] = self._wrap_column_in_context(ctx, column, name)
    return table_type(_columns=wrapped, _context=ctx, **kwargs)
def groupby(
    self,
    *args: expr.ColumnReference,
    id: expr.ColumnReference | None = None,
    sort_by: expr.ColumnReference | None = None,
    _filter_out_results_of_forgetting: bool = False,
    instance: expr.ColumnReference | None = None,
) -> groupbys.GroupedTable:
    """Groups table by columns from args.
    Note:
    Usually followed by `.reduce()` that aggregates the result and returns a table.
    Args:
    args: columns to group by.
    id: if provided, is the column used to set id's of the rows of the result
    sort_by: if provided, column values are used as sorting keys for particular reducers
    instance: optional argument describing partitioning of the data into separate instances
    Returns:
    GroupedTable: Groupby object.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.groupby(t1.pet, t1.owner).reduce(t1.owner, t1.pet, ageagg=pw.reducers.sum(t1.age))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | pet | ageagg
    Alice | cat | 8
    Alice | dog | 10
    Bob | dog | 16
    """
    # `instance` is folded in as an extra grouping column.
    # NOTE(review): it is not forwarded separately to GroupedTable.create —
    # presumably no distinct marker is needed there; confirm.
    if instance is not None:
        args = (*args, instance)
    if id is not None:
        # `id` is only compatible with grouping by zero columns (then it is
        # the sole key) or by exactly that same single column.
        if len(args) == 0:
            args = (id,)
        elif len(args) > 1:
            raise ValueError(
                "Table.groupby() cannot have id argument when grouping by multiple columns."
            )
        elif args[0]._column != id._column:
            raise ValueError(
                "Table.groupby() received id argument and is grouped by a single column,"
                + " but the arguments are not equal.\n"
                + "Consider using <table>.groupby(id=...), skipping the positional argument."
            )
    for arg in args:
        if not isinstance(arg, expr.ColumnReference):
            # A plain string is the most likely user mistake, so it gets a
            # dedicated, more helpful message.
            if isinstance(arg, str):
                raise ValueError(
                    f"Expected a ColumnReference, found a string. Did you mean <table>.{arg}"
                    + f" instead of {repr(arg)}?"
                )
            else:
                raise ValueError(
                    "All Table.groupby() arguments have to be a ColumnReference."
                )
    return groupbys.GroupedTable.create(
        table=self,
        grouping_columns=args,
        set_id=id is not None,
        sort_by=sort_by,
        _filter_out_results_of_forgetting=_filter_out_results_of_forgetting,
    )
def reduce(
    self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
) -> Table:
    """Reduce a table to a single row.
    Equivalent to `self.groupby().reduce(*args, **kwargs)`.
    Args:
    args: reducer to reduce the table with
    kwargs: reducer to reduce the table with. Its key is the new name of a column.
    Returns:
    Table: Reduced table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.reduce(ageagg=pw.reducers.argmin(t1.age))
    >>> pw.debug.compute_and_print(t2, include_id=False) # doctest: +ELLIPSIS
    ageagg
    ^...
    >>> t3 = t2.select(t1.ix(t2.ageagg).age, t1.ix(t2.ageagg).pet)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | pet
    7 | dog
    """
    # A groupby with no grouping columns collapses everything into one group.
    return self.groupby().reduce(*args, **kwargs)
def deduplicate(
    self,
    *,
    value: expr.ColumnExpression,
    instance: expr.ColumnExpression | None = None,
    acceptor: Callable[[T, T], bool],
    persistent_id: str | None = None,
) -> Table:
    """Deduplicates rows in `self` on `value` column using acceptor function.
    It keeps rows which where accepted by the acceptor function.
    Acceptor operates on two arguments - current value and the previously accepted value.
    Args:
    value: column expression used for deduplication.
    instance: Grouping column. For rows with different
    values in this column, deduplication will be performed separately.
    Defaults to None.
    acceptor: callback telling whether two values are different.
    persistent_id: (unstable) An identifier, under which the state of the table
    will be persisted or ``None``, if there is no need to persist the state of this table.
    When a program restarts, it restores the state for all input tables according to what
    was saved for their ``persistent_id``. This way it's possible to configure the start of
    computations from the moment they were terminated last time.
    Returns:
    Table: the result of deduplication.
    Example:
    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ... val | __time__
    ...  1  |     2
    ...  2  |     4
    ...  3  |     6
    ...  4  |     8
    ... '''
    ... )
    >>>
    >>> def acceptor(new_value, old_value) -> bool:
    ...     return new_value >= old_value + 2
    ...
    >>>
    >>> result = table.deduplicate(value=pw.this.val, acceptor=acceptor)
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    val | __time__ | __diff__
    1 | 2 | 1
    1 | 6 | -1
    3 | 6 | 1
    >>>
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ... val | instance | __time__
    ...  1  |    1     |     2
    ...  2  |    1     |     4
    ...  3  |    2     |     6
    ...  4  |    1     |     8
    ...  4  |    2     |     8
    ...  5  |    1     |    10
    ... '''
    ... )
    >>>
    >>> def acceptor(new_value, old_value) -> bool:
    ...     return new_value >= old_value + 2
    ...
    >>>
    >>> result = table.deduplicate(
    ...     value=pw.this.val, instance=pw.this.instance, acceptor=acceptor
    ... )
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    val | instance | __time__ | __diff__
    1 | 1 | 2 | 1
    3 | 2 | 6 | 1
    1 | 1 | 8 | -1
    4 | 1 | 8 | 1
    """
    # No instance means a single global deduplication group: a constant-None
    # column keys every row identically.
    if instance is None:
        instance = expr.ColumnConstExpression(None)
    self._validate_expression(value)
    self._validate_expression(instance)
    value_col = self._eval(value)
    instance_col = self._eval(instance)
    context = clmn.DeduplicateContext(
        value_col,
        (instance_col,),  # context expects a tuple of instance columns
        acceptor,
        self._id_column,
        persistent_id,
    )
    return self._table_with_context(context)
def ix(
    self, expression: expr.ColumnExpression, *, optional: bool = False, context=None
) -> Table:
    """Reindexes the table using expression values as keys. Uses keys from context, or tries to infer
    proper context from the expression.
    If optional is True, then None in expression values result in None values in the result columns.
    Missing values in table keys result in RuntimeError.
    Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
    (latter results in returning a delayed operation, and should be only used when using `ix` inside
    join().select() or groupby().reduce() sequence).
    Returns:
    Reindexed table with the same set of columns.
    Example:
    >>> import pathway as pw
    >>> t_animals = pw.debug.table_from_markdown('''
    ...   | epithet    | genus
    ... 1 | upupa      | epops
    ... 2 | acherontia | atropos
    ... 3 | bubo       | scandiacus
    ... 4 | dynastes   | hercules
    ... ''')
    >>> t_birds = pw.debug.table_from_markdown('''
    ...   | desc
    ... 2 | hoopoe
    ... 4 | owl
    ... ''')
    >>> ret = t_birds.select(t_birds.desc, latin=t_animals.ix(t_birds.id).genus)
    >>> pw.debug.compute_and_print(ret, include_id=False)
    desc | latin
    hoopoe | atropos
    owl | hercules
    """
    # Step 1: infer a context when none was given — pw.this for constant
    # expressions, or the (single) table the expression refers to.
    if context is None:
        all_tables = collect_tables(expression)
        if len(all_tables) == 0:
            context = thisclass.this
        elif all(tab == all_tables[0] for tab in all_tables):
            context = all_tables[0]
    # Step 2: the expression mixes several tables — validate them and pick
    # the first, asserting all universes are equal.
    if context is None:
        for tab in all_tables:
            if not isinstance(tab, Table):
                raise ValueError("Table expected here.")
        if len(all_tables) == 0:
            # NOTE(review): this branch looks unreachable — an empty
            # all_tables already set context = thisclass.this above; confirm.
            raise ValueError("Const value provided.")
        context = all_tables[0]
        for tab in all_tables:
            assert context._universe.is_equal_to(tab._universe)
    if isinstance(context, groupbys.GroupedJoinable):
        context = thisclass.this
    # pw.this context: defer the whole ix until the real table is known.
    if isinstance(context, thisclass.ThisMetaclass):
        return context._delayed_op(
            lambda table, expression: self.ix(
                expression=expression, optional=optional, context=table
            ),
            expression=expression,
            qualname=f"{self}.ix(...)",
            name="ix",
        )
    # Materialize the key expression as a column of the context table.
    restrict_universe = RestrictUniverseDesugaring(context)
    expression = restrict_universe.eval_expression(expression)
    key_col = context.select(tmp=expression).tmp
    key_dtype = self.eval_type(key_col)
    if (
        optional and not dt.dtype_issubclass(key_dtype, dt.Optional(dt.POINTER))
    ) or (not optional and not isinstance(key_dtype, dt.Pointer)):
        raise TypeError(
            f"Pathway supports indexing with Pointer type only. The type used was {key_dtype}."
        )
    # Optional keys may miss, so every result column becomes Optional.
    if optional and isinstance(key_dtype, dt.Optional):
        self_ = self.update_types(
            **{name: dt.Optional(self.typehints()[name]) for name in self.keys()}
        )
    else:
        self_ = self
    return self_._ix(key_col, optional)
def _ix(self, key_expression: expr.ColumnReference, optional: bool) -> Table:
    """Internal reindex: look up rows of self by the pointer values held in
    *key_expression*, via an IxContext."""
    context = clmn.IxContext(
        key_expression._column, self._id_column, optional
    )
    return self._table_with_context(context)
def __lshift__(self, other: Table) -> Table:
    """Alias to update_cells method.
    Updates cells of `self`, breaking ties in favor of the values in `other`.
    Semantics:
    - result.columns == self.columns
    - result.id == self.id
    - conflicts are resolved preferring other's values
    Requires:
    - other.columns ⊆ self.columns
    - other.id ⊆ self.id
    Args:
    other: the other table.
    Returns:
    Table: `self` updated with cells form `other`.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 30
    ... ''')
    >>> pw.universes.promise_is_subset_of(t2, t1)
    >>> t3 = t1 << t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    """
    # stacklevel is bumped so any warning points at the `<<` call site,
    # not at this forwarding frame.
    return self.update_cells(other, _stacklevel=2)
def concat(self, *others: Table[TSchema]) -> Table[TSchema]:
    """Concats `self` with every `other` ∊ `others`.
    Semantics:
    - result.columns == self.columns == other.columns
    - result.id == self.id ∪ other.id
    if self.id and other.id collide, throws an exception.
    Requires:
    - other.columns == self.columns
    - self.id disjoint with other.id
    Args:
    others: the other tables.
    Returns:
    Table: The concatenated table. Id's of rows from original tables are preserved.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ... 11 | 11  | Alice | 30
    ... 12 | 12  | Tom   | 40
    ... ''')
    >>> pw.universes.promise_are_pairwise_disjoint(t1, t2)
    >>> t3 = t1.concat(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 1
    11 | Alice | 30
    12 | Tom | 40
    """
    for other in others:
        if other.keys() != self.keys():
            raise ValueError(
                "columns do not match in the argument of Table.concat()"
            )
    # Per column, fold all inputs' dtypes into their least common ancestor,
    # then cast every table to that common schema before the raw concat.
    schema = {
        key: functools.reduce(
            dt.types_lca,
            [other.schema._dtypes()[key] for other in others],
            self.schema._dtypes()[key],
        )
        for key in self.keys()
    }
    return Table._concat(
        self.cast_to_types(**schema),
        *[other.cast_to_types(**schema) for other in others],
    )
def _concat(self, *others: Table[TSchema]) -> Table[TSchema]:
    """Internal concat: assumes identical schemas; verifies disjoint key sets."""
    union_ids = (self._id_column, *(other._id_column for other in others))
    if not G.universe_solver.query_are_disjoint(*(c.universe for c in union_ids)):
        raise ValueError(
            "Universes of the arguments of Table.concat() have to be disjoint.\n"
            + "Consider using Table.promise_universes_are_disjoint() to assert it.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    context = clmn.ConcatUnsafeContext(
        union_ids=union_ids,
        updates=tuple(
            {col_name: other._columns[col_name] for col_name in self.keys()}
            for other in others
        ),
    )
    return self._table_with_context(context)
def update_cells(self, other: Table, _stacklevel: int = 1) -> Table:
    """Updates cells of `self`, breaking ties in favor of the values in `other`.
    Semantics:
    - result.columns == self.columns
    - result.id == self.id
    - conflicts are resolved preferring other's values
    Requires:
    - other.columns ⊆ self.columns
    - other.id ⊆ self.id
    Args:
    other: the other table.
    Returns:
    Table: `self` updated with cells form `other`.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 1 | 10  | Alice | 30
    ... ''')
    >>> pw.universes.promise_is_subset_of(t2, t1)
    >>> t3 = t1.update_cells(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    """
    if names := (set(other.keys()) - set(self.keys())):
        raise ValueError(
            f"Columns of the argument in Table.update_cells() not present in the updated table: {list(names)}."
        )
    # Same key set on both sides means a plain column overwrite suffices.
    if self._universe == other._universe:
        warnings.warn(
            "Key sets of self and other in update_cells are the same."
            + " Using with_columns instead of update_cells.",
            # NOTE(review): the +4 offset presumably accounts for decorator
            # wrapper frames outside this view — confirm it targets user code.
            stacklevel=_stacklevel + 4,
        )
        return self.with_columns(*(other[name] for name in other))
    # Unify dtypes of updated columns before merging.
    schema = {
        key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
        for key in other.keys()
    }
    return Table._update_cells(
        self.cast_to_types(**schema), other.cast_to_types(**schema)
    )
def _update_cells(self, other: Table) -> Table:
    """Internal cell update: other's keys must be a (proven) subset of self's."""
    if not other._universe.is_subset_of(self._universe):
        raise ValueError(
            "Universe of the argument of Table.update_cells() needs to be "
            + "a subset of the universe of the updated table.\n"
            + "Consider using Table.promise_is_subset_of() to assert this.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    context = clmn.UpdateCellsContext(
        left=self._id_column,
        right=other._id_column,
        updates={name: other._columns[name] for name in other.keys()},
    )
    return self._table_with_context(context)
def update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
    """Updates rows of `self`, breaking ties in favor for the rows in `other`.
    Semantics:
    - result.columns == self.columns == other.columns
    - result.id == self.id ∪ other.id
    Requires:
    - other.columns == self.columns
    Args:
    other: the other table.
    Returns:
    Table: `self` updated with rows form `other`.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ... 1  | 10  | Alice | 30
    ... 12 | 12  | Tom   | 40
    ... ''')
    >>> t3 = t1.update_rows(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    12 | Tom | 40
    """
    if other.keys() != self.keys():
        raise ValueError(
            "Columns do not match between argument of Table.update_rows() and the updated table."
        )
    # If other's keys cover all of self's, every row of self is replaced.
    if self._universe.is_subset_of(other._universe):
        warnings.warn(
            "Universe of self is a subset of universe of other in update_rows. Returning other.",
            stacklevel=5,
        )
        return other
    # Unify dtypes per column before merging.
    schema = {
        key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
        for key in self.keys()
    }
    union_universes = (self._universe, other._universe)
    universe = G.universe_solver.get_union(*union_universes)
    # If the union adds no new keys, the cheaper cell-update path applies.
    if universe == self._universe:
        return Table._update_cells(
            self.cast_to_types(**schema), other.cast_to_types(**schema)
        )
    else:
        return Table._update_rows(
            self.cast_to_types(**schema), other.cast_to_types(**schema)
        )
def _update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
    """Internal row update: union of both key sets, other's values winning
    on overlapping keys."""
    replacement_columns = {name: other._columns[name] for name in self.keys()}
    context = clmn.UpdateRowsContext(
        updates=replacement_columns,
        union_ids=(self._id_column, other._id_column),
    )
    return self._table_with_context(context)
def with_columns(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Updates columns of `self`, according to args and kwargs.
    See `table.select` specification for evaluation of args and kwargs.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | owner | pet | size
    ... 1 | Tom   | 1   | 10
    ... 2 | Bob   | 1   | 9
    ... 3 | Tom   | 2   | 8
    ... ''')
    >>> t3 = t1.with_columns(*t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet | size
    8 | Tom | 2 | 8
    9 | Bob | 1 | 9
    10 | Tom | 1 | 10
    """
    # Evaluate the requested columns, then merge them over self's columns —
    # the fresh definitions win on name clashes.
    fresh = self.select(*args, **kwargs)
    merged = {**dict(self), **dict(fresh)}
    return self.select(**merged)
def with_id(self, new_index: expr.ColumnReference) -> Table:
    """Set new ids based on another column containing id-typed values.
    To generate ids based on arbitrary valued columns, use `with_id_from`.
    Values assigned must be row-wise unique.
    Args:
    new_index: column to be used as the new index.
    Returns:
    Table with updated ids.
    Example:
    >>> import pytest; pytest.xfail("with_id is hard to test")
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | new_id
    ... 1 | 2
    ... 2 | 3
    ... 3 | 4
    ... ''')
    >>> t3 = t1.promise_universe_is_subset_of(t2).with_id(t2.new_id)
    >>> pw.debug.compute_and_print(t3)
    age owner pet
    ^2 10 Alice 1
    ^3 9 Bob 1
    ^4 8 Alice 2
    """
    return self._with_new_index(new_index)
def with_id_from(
    self,
    *args: expr.ColumnExpression | Value,
    instance: expr.ColumnReference | None = None,
) -> Table:
    """Compute new ids based on values in columns.
    Ids computed from the given columns must be row-wise unique.
    Args:
    args: column expressions (or constants) to be used as primary keys.
    instance: optional column used to partition id generation.
    Returns:
    Table: `self` updated with recomputed ids.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = t1 + t1.select(old_id=t1.id)
    >>> t3 = t2.with_id_from(t2.age)
    >>> pw.debug.compute_and_print(t3) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    | age | owner | pet | old_id
    ^... | 8 | Alice | 2 | ^...
    ^... | 9 | Bob | 1 | ^...
    ^... | 10 | Alice | 1 | ^...
    >>> t4 = t3.select(t3.age, t3.owner, t3.pet, same_as_old=(t3.id == t3.old_id),
    ...     same_as_new=(t3.id == t3.pointer_from(t3.age)))
    >>> pw.debug.compute_and_print(t4) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    | age | owner | pet | same_as_old | same_as_new
    ^... | 8 | Alice | 2 | False | True
    ^... | 9 | Bob | 1 | False | True
    ^... | 10 | Alice | 1 | False | True
    """
    # new_index should be a column, so a little workaround
    new_index = self.select(
        ref_column=self.pointer_from(*args, instance=instance)
    ).ref_column
    return self._with_new_index(
        new_index=new_index,
    )
def _with_new_index(
    self,
    new_index: expr.ColumnExpression,
) -> Table:
    """Internal reindex: replace self's ids with the (Pointer-typed) values
    of *new_index*, which must be row-wise unique."""
    self._validate_expression(new_index)
    index_type = self.eval_type(new_index)
    if not isinstance(index_type, dt.Pointer):
        raise TypeError(
            f"Pathway supports reindexing Tables with Pointer type only. The type used was {index_type}."
        )
    reindex_column = self._eval(new_index)
    assert self._universe == reindex_column.universe
    context = clmn.ReindexContext(reindex_column)
    return self._table_with_context(context)
def rename_columns(self, **kwargs: str | expr.ColumnReference) -> Table:
"""Rename columns according to kwargs.
Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.
Args:
kwargs: mapping from old column names to new names.
Returns:
Table: `self` with columns renamed.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = t1.rename_columns(years_old=t1.age, animal=t1.pet)
>>> pw.debug.compute_and_print(t2, include_id=False)
owner | years_old | animal
Alice | 8 | 2
Alice | 10 | 1
Bob | 9 | 1
"""
mapping: dict[str, str] = {}
for new_name, old_name_col in kwargs.items():
if isinstance(old_name_col, expr.ColumnReference):
old_name = old_name_col.name
else:
old_name = old_name_col
if old_name not in self._columns:
raise ValueError(f"Column {old_name} does not exist in a given table.")
mapping[new_name] = old_name
renamed_columns = self._columns.copy()
for new_name, old_name in mapping.items():
renamed_columns.pop(old_name)
for new_name, old_name in mapping.items():
renamed_columns[new_name] = self._columns[old_name]
columns_wrapped = {
name: self._wrap_column_in_context(
self._rowwise_context,
column,
mapping[name] if name in mapping else name,
)
for name, column in renamed_columns.items()
}
return self._with_same_universe(*columns_wrapped.items())
def rename_by_dict(
self, names_mapping: dict[str | expr.ColumnReference, str]
) -> Table:
"""Rename columns according to a dictionary.
Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.
Args:
names_mapping: mapping from old column names to new names.
Returns:
Table: `self` with columns renamed.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = t1.rename_by_dict({"age": "years_old", t1.pet: "animal"})
>>> pw.debug.compute_and_print(t2, include_id=False)
owner | years_old | animal
Alice | 8 | 2
Alice | 10 | 1
Bob | 9 | 1
"""
return self.rename_columns(
**{new_name: self[old_name] for old_name, new_name in names_mapping.items()}
)
def with_prefix(self, prefix: str) -> Table:
"""Rename columns by adding prefix to each name of column.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = t1.with_prefix("u_")
>>> pw.debug.compute_and_print(t2, include_id=False)
u_age | u_owner | u_pet
8 | Alice | 2
9 | Bob | 1
10 | Alice | 1
"""
return self.rename_by_dict({name: prefix + name for name in self.keys()})
def with_suffix(self, suffix: str) -> Table:
"""Rename columns by adding suffix to each name of column.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = t1.with_suffix("_current")
>>> pw.debug.compute_and_print(t2, include_id=False)
age_current | owner_current | pet_current
8 | Alice | 2
9 | Bob | 1
10 | Alice | 1
"""
return self.rename_by_dict({name: name + suffix for name in self.keys()})
def rename(
self,
names_mapping: dict[str | expr.ColumnReference, str] | None = None,
**kwargs: expr.ColumnExpression,
) -> Table:
"""Rename columns according either a dictionary or kwargs.
If a mapping is provided using a dictionary, ``rename_by_dict`` will be used.
Otherwise, ``rename_columns`` will be used with kwargs.
Columns not in keys(kwargs) are not changed. New name of a column must not be ``id``.
Args:
names_mapping: mapping from old column names to new names.
kwargs: mapping from old column names to new names.
Returns:
Table: `self` with columns renamed.
"""
if names_mapping is not None:
return self.rename_by_dict(names_mapping=names_mapping)
return self.rename_columns(**kwargs)
def without(self, *columns: str | expr.ColumnReference) -> Table:
"""Selects all columns without named column references.
Args:
columns: columns to be dropped provided by `table.column_name` notation.
Returns:
Table: `self` without specified columns.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = t1.without(t1.age, pw.this.pet)
>>> pw.debug.compute_and_print(t2, include_id=False)
owner
Alice
Alice
Bob
"""
new_columns = self._columns.copy()
for col in columns:
if isinstance(col, expr.ColumnReference):
new_columns.pop(col.name)
else:
assert isinstance(col, str)
new_columns.pop(col)
columns_wrapped = {
name: self._wrap_column_in_context(self._rowwise_context, column, name)
for name, column in new_columns.items()
}
return self._with_same_universe(*columns_wrapped.items())
def having(self, *indexers: expr.ColumnReference) -> Table[TSchema]:
"""Removes rows so that indexed.ix(indexer) is possible when some rows are missing,
for each indexer in indexers"""
rets: list[Table] = []
for indexer in indexers:
rets.append(self._having(indexer))
if len(rets) == 0:
return self
elif len(rets) == 1:
[ret] = rets
return ret
else:
return rets[0].intersect(*rets[1:])
def update_types(self, **kwargs: Any) -> Table:
"""Updates types in schema. Has no effect on the runtime."""
for name in kwargs.keys():
if name not in self.keys():
raise ValueError(
"Table.update_types() argument name has to be an existing table column name."
)
from pathway.internals.common import declare_type
return self.with_columns(
**{key: declare_type(val, self[key]) for key, val in kwargs.items()}
)
def cast_to_types(self, **kwargs: Any) -> Table:
"""Casts columns to types."""
for name in kwargs.keys():
if name not in self.keys():
raise ValueError(
"Table.cast_to_types() argument name has to be an existing table column name."
)
from pathway.internals.common import cast
return self.with_columns(
**{key: cast(val, self[key]) for key, val in kwargs.items()}
)
def _having(self, indexer: expr.ColumnReference) -> Table[TSchema]:
context = clmn.HavingContext(
orig_id_column=self._id_column, key_column=indexer._column
)
return self._table_with_context(context)
    def with_universe_of(self, other: TableLike) -> Table:
        """Returns a copy of self with exactly the same universe as others.

        Semantics: Required precondition self.universe == other.universe
        Used in situations where Pathway cannot deduce equality of universes, but
        those are equal as verified during runtime.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...   | pet
        ... 1 | Dog
        ... 7 | Cat
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...   | age
        ... 1 | 10
        ... 7 | 3
        ... 8 | 100
        ... ''')
        >>> t3 = t2.filter(pw.this.age < 30).with_universe_of(t1)
        >>> t4 = t1 + t3
        >>> pw.debug.compute_and_print(t4, include_id=False)
        pet | age
        Cat | 3
        Dog | 10
        """
        # Fast path: universes already known to be equal — no promise needed.
        if self._universe == other._universe:
            return self.copy()
        # Record the (runtime-verified) equality promise before rewrapping.
        universes.promise_are_equal(self, other)
        return self._unsafe_promise_universe(other)
def flatten(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
"""Performs a flatmap operation on a column or expression given as a first
argument. Datatype of this column or expression has to be iterable or Json array.
Other columns specified in the method arguments are duplicated
as many times as the length of the iterable.
It is possible to get ids of source rows by using `table.id` column, e.g.
`table.flatten(table.column_to_be_flattened, original_id = table.id)`.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | pet | age
... 1 | Dog | 2
... 7 | Cat | 5
... ''')
>>> t2 = t1.flatten(t1.pet)
>>> pw.debug.compute_and_print(t2, include_id=False)
pet
C
D
a
g
o
t
>>> t3 = t1.flatten(t1.pet, t1.age)
>>> pw.debug.compute_and_print(t3, include_id=False)
pet | age
C | 5
D | 2
a | 5
g | 2
o | 2
t | 5
"""
intermediate_table = self.select(*args, **kwargs)
all_args = combine_args_kwargs(args, kwargs)
if not all_args:
raise ValueError("Table.flatten() cannot have empty arguments list.")
all_names_iter = iter(all_args.keys())
flatten_name = next(all_names_iter)
return intermediate_table._flatten(flatten_name)
    def _flatten(
        self,
        flatten_name: str,
    ) -> Table:
        """Build the flattened table: the column ``flatten_name`` is expanded
        element-wise, and every other column is repeated per produced element."""
        flatten_column = self._columns[flatten_name]
        # Only expression-backed columns can be flattened.
        assert isinstance(flatten_column, clmn.ColumnWithExpression)
        context = clmn.FlattenContext(
            orig_universe=self._universe,
            flatten_column=flatten_column,
        )
        # Rewrap all remaining columns into the new (expanded) universe.
        columns = {
            name: self._wrap_column_in_context(context, column, name)
            for name, column in self._columns.items()
            if name != flatten_name
        }
        # The flattened column comes first to preserve its original position.
        return Table(
            _columns={
                flatten_name: context.flatten_result_column,
                **columns,
            },
            _context=context,
        )
    def sort(
        self,
        key: expr.ColumnExpression,
        instance: expr.ColumnExpression | None = None,
    ) -> Table:
        """
        Sorts a table by the specified keys.

        Args:
            table : pw.Table
                The table to be sorted.
            key (ColumnExpression[int | float | datetime | str | bytes]):
                An expression to sort by.
            instance : ColumnReference or None
                An expression with instance. Rows are sorted within an instance.
                ``prev`` and ``next`` columns will only point to rows that have the same instance.

        Returns:
            pw.Table: The sorted table. Contains two columns: ``prev`` and ``next``, containing the pointers
            to the previous and next rows.

        Example:

        >>> import pathway as pw
        >>> table = pw.debug.table_from_markdown('''
        ... name     | age | score
        ... Alice    | 25  | 80
        ... Bob      | 20  | 90
        ... Charlie  | 30  | 80
        ... ''')
        >>> table = table.with_id_from(pw.this.name)
        >>> table += table.sort(key=pw.this.age)
        >>> pw.debug.compute_and_print(table, include_id=True)
                    | name    | age | score | prev        | next
        ^GBSDEEW... | Alice   | 25  | 80    | ^EDPSSB1... | ^DS9AT95...
        ^EDPSSB1... | Bob     | 20  | 90    |             | ^GBSDEEW...
        ^DS9AT95... | Charlie | 30  | 80    | ^GBSDEEW... |
        >>> table = pw.debug.table_from_markdown('''
        ... name     | age | score
        ... Alice    | 25  | 80
        ... Bob      | 20  | 90
        ... Charlie  | 30  | 80
        ... David    | 35  | 90
        ... Eve      | 15  | 80
        ... ''')
        >>> table = table.with_id_from(pw.this.name)
        >>> table += table.sort(key=pw.this.age, instance=pw.this.score)
        >>> pw.debug.compute_and_print(table, include_id=True)
                    | name    | age | score | prev        | next
        ^GBSDEEW... | Alice   | 25  | 80    | ^T0B95XH... | ^DS9AT95...
        ^EDPSSB1... | Bob     | 20  | 90    |             | ^RT0AZWX...
        ^DS9AT95... | Charlie | 30  | 80    | ^GBSDEEW... |
        ^RT0AZWX... | David   | 35  | 90    | ^EDPSSB1... |
        ^T0B95XH... | Eve     | 15  | 80    |             | ^GBSDEEW...
        """
        # Wrap a possibly-None instance into a proper expression (None becomes
        # a constant, i.e. a single instance for the whole table).
        instance = clmn.ColumnExpression._wrap(instance)
        context = clmn.SortingContext(
            self._eval(key),
            self._eval(instance),
        )
        # The result carries only the prev/next linkage columns.
        return Table(
            _columns={
                "prev": context.prev_column,
                "next": context.next_column,
            },
            _context=context,
        )
def _set_source(self, source: OutputHandle):
self._source = source
if not hasattr(self._id_column, "lineage"):
self._id_column.lineage = clmn.ColumnLineage(name="id", source=source)
for name, column in self._columns.items():
if not hasattr(column, "lineage"):
column.lineage = clmn.ColumnLineage(name=name, source=source)
universe = self._universe
if not hasattr(universe, "lineage"):
universe.lineage = clmn.Lineage(source=source)
def _unsafe_promise_universe(self, other: TableLike) -> Table:
context = clmn.PromiseSameUniverseContext(self._id_column, other._id_column)
return self._table_with_context(context)
    def _validate_expression(self, expression: expr.ColumnExpression):
        """Check that every column the expression depends on (above any reducer)
        lives in this table's universe; raise ValueError otherwise."""
        for dep in expression._dependencies_above_reducer():
            if self._universe != dep._column.universe:
                raise ValueError(
                    f"You cannot use {dep.to_column_expression()} in this context."
                    + " Its universe is different than the universe of the table the method"
                    + " was called on. You can use <table1>.with_universe_of(<table2>)"
                    + " to assign universe of <table2> to <table1> if you're sure their"
                    + " sets of keys are equal."
                )
def _wrap_column_in_context(
self,
context: clmn.Context,
column: clmn.Column,
name: str,
lineage: clmn.Lineage | None = None,
) -> clmn.Column:
"""Contextualize column by wrapping it in expression."""
expression = expr.ColumnReference(_table=self, _column=column, _name=name)
return expression._column_with_expression_cls(
context=context,
universe=context.universe,
expression=expression,
lineage=lineage,
)
def _table_with_context(self, context: clmn.Context) -> Table:
columns = {
name: self._wrap_column_in_context(context, column, name)
for name, column in self._columns.items()
}
return Table(
_columns=columns,
_context=context,
)
    def _table_restricted_context(self) -> clmn.TableRestrictedRowwiseContext:
        # Row-wise context restricted to this table only.
        return clmn.TableRestrictedRowwiseContext(self._id_column, self)
def _eval(
self, expression: expr.ColumnExpression, context: clmn.Context | None = None
) -> clmn.ColumnWithExpression:
"""Desugar expression and wrap it in given context."""
if context is None:
context = self._rowwise_context
column = expression._column_with_expression_cls(
context=context,
universe=context.universe,
expression=expression,
)
return column
    def _from_schema(cls: type[TTable], schema: type[Schema]) -> TTable:
        # NOTE(review): first parameter is ``cls`` — presumably decorated with
        # @classmethod at the (not visible) definition site; confirm upstream.
        # Builds an empty materialized table whose columns follow ``schema``.
        universe = Universe()
        context = clmn.MaterializedContext(universe, schema.universe_properties)
        columns = {
            name: clmn.MaterializedColumn(
                universe,
                schema.column_properties(name),
            )
            for name in schema.column_names()
        }
        return cls(_columns=columns, _schema=schema, _context=context)
def __repr__(self) -> str:
return f"<pathway.Table schema={dict(self.typehints())}>"
    def _with_same_universe(
        self,
        *columns: tuple[str, clmn.Column],
        schema: type[Schema] | None = None,
    ) -> Table:
        # Build a sibling table over the same universe (row-wise context reused).
        return Table(
            _columns=dict(columns),
            _schema=schema,
            _context=self._rowwise_context,
        )
    def _sort_columns_by_other(self, other: Table):
        # In-place: reorder this table's column dict to match other's key order.
        assert self.keys() == other.keys()
        self._columns = {name: self._columns[name] for name in other.keys()}
def _operator_dependencies(self) -> StableSet[Table]:
return StableSet([self])
    def debug(self, name: str):
        """Register a debug operator observing this table under ``name``;
        returns ``self`` so calls can be chained."""
        G.add_operator(
            lambda id: DebugOperator(name, id),
            lambda operator: operator(self),
        )
        return self
    def to(self, sink: DataSink) -> None:
        """Write this table to the given data sink."""
        # Imported locally to avoid a circular import at module load time.
        from pathway.internals import table_io

        table_io.table_to_datasink(self, sink)
def _materialize(self, universe: Universe):
context = clmn.MaterializedContext(universe)
columns = {
name: clmn.MaterializedColumn(universe, column.properties)
for (name, column) in self._columns.items()
}
return Table(
_columns=columns,
_schema=self.schema,
_context=context,
)
def pointer_from(
self, *args: Any, optional=False, instance: expr.ColumnReference | None = None
):
"""Pseudo-random hash of its argument. Produces pointer types. Applied column-wise.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age owner pet
... 1 10 Alice dog
... 2 9 Bob dog
... 3 8 Alice cat
... 4 7 Bob dog''')
>>> g = t1.groupby(t1.owner).reduce(refcol = t1.pointer_from(t1.owner)) # g.id == g.refcol
>>> pw.debug.compute_and_print(g.select(test = (g.id == g.refcol)), include_id=False)
test
True
True
"""
if instance is not None:
args = (*args, instance)
# XXX verify types for the table primary_keys
return expr.PointerExpression(self, *args, optional=optional)
def ix_ref(
self,
*args: expr.ColumnExpression | Value,
optional: bool = False,
context=None,
instance: expr.ColumnReference | None = None,
):
"""Reindexes the table using expressions as primary keys.
Uses keys from context, or tries to infer proper context from the expression.
If optional is True, then None in expression values result in None values in the result columns.
Missing values in table keys result in RuntimeError.
Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
(latter results in returning a delayed operation, and should be only used when using `ix` inside
join().select() or groupby().reduce() sequence).
Args:
args: Column references.
Returns:
Row: indexed row.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... name | pet
... Alice | dog
... Bob | cat
... Carole | cat
... David | dog
... ''')
>>> t2 = t1.with_id_from(pw.this.name)
>>> t2 = t2.select(*pw.this, new_value=pw.this.ix_ref("Alice").pet)
>>> pw.debug.compute_and_print(t2, include_id=False)
name | pet | new_value
Alice | dog | dog
Bob | cat | dog
Carole | cat | dog
David | dog | dog
Tables obtained by a groupby/reduce scheme always have primary keys:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... name | pet
... Alice | dog
... Bob | cat
... Carole | cat
... David | cat
... ''')
>>> t2 = t1.groupby(pw.this.pet).reduce(pw.this.pet, count=pw.reducers.count())
>>> t3 = t1.select(*pw.this, new_value=t2.ix_ref(t1.pet).count)
>>> pw.debug.compute_and_print(t3, include_id=False)
name | pet | new_value
Alice | dog | 1
Bob | cat | 3
Carole | cat | 3
David | cat | 3
Single-row tables can be accessed via `ix_ref()`:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... name | pet
... Alice | dog
... Bob | cat
... Carole | cat
... David | cat
... ''')
>>> t2 = t1.reduce(count=pw.reducers.count())
>>> t3 = t1.select(*pw.this, new_value=t2.ix_ref(context=t1).count)
>>> pw.debug.compute_and_print(t3, include_id=False)
name | pet | new_value
Alice | dog | 4
Bob | cat | 4
Carole | cat | 4
David | cat | 4
"""
return self.ix(
self.pointer_from(*args, optional=optional, instance=instance),
optional=optional,
context=context,
)
def _subtables(self) -> StableSet[Table]:
return StableSet([self])
    def _substitutions(
        self,
    ) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
        # A concrete table needs no column substitutions.
        return self, {}
    def typehints(self) -> Mapping[str, Any]:
        """
        Return the types of the columns as a dictionary.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10  | Alice | dog
        ... 9   | Bob   | dog
        ... 8   | Alice | cat
        ... 7   | Bob   | dog
        ... ''')
        >>> t1.typehints()
        mappingproxy({'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>})
        """
        # Delegates to the schema, which owns the authoritative type info.
        return self.schema.typehints()
def eval_type(self, expression: expr.ColumnExpression) -> dt.DType:
return (
self._rowwise_context._get_type_interpreter()
.eval_expression(expression, state=TypeInterpreterState())
._dtype
)
def _auto_live(self) -> Table:
"""Make self automatically live in interactive mode"""
from pathway.internals.interactive import is_interactive_mode_enabled
if is_interactive_mode_enabled():
return self.live()
else:
return self
    def live(self) -> LiveTable[TSchema]:
        """Return a live (continuously updated) view of this table.

        Experimental — emits a warning on every call.
        """
        # Imported locally to avoid a circular import at module load time.
        from pathway.internals.interactive import LiveTable

        warnings.warn("live tables are an experimental feature", stacklevel=2)
        return LiveTable._create(self)
The provided code snippet includes necessary dependencies for implementing the `interval_join_left` function. Write a Python function `def interval_join_left( self: pw.Table, other: pw.Table, self_time: pw.ColumnExpression, other_time: pw.ColumnExpression, interval: Interval[int] | Interval[float] | Interval[datetime.timedelta], *on: pw.ColumnExpression, behavior: CommonBehavior | None = None, left_instance: pw.ColumnReference | None = None, right_instance: pw.ColumnReference | None = None, ) -> IntervalJoinResult` to solve the following problem:
Performs an interval left join of self with other using a time difference and join expressions. If `self_time + lower_bound <= other_time <= self_time + upper_bound` and conditions in `on` are satisfied, the rows are joined. Rows from the left side that haven't been matched with the right side are returned with missing values on the right side replaced with `None`. Args: other: the right side of the join. self_time: time expression in self. other_time: time expression in other. lower_bound: a lower bound on time difference between other_time and self_time. upper_bound: an upper bound on time difference between other_time and self_time. on: a list of column expressions. Each must have == as the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. behavior: defines temporal behavior of a join - features like delaying entries or ignoring late entries. left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Returns: IntervalJoinResult: a result of the interval join. A method `.select()` can be called on it to extract relevant columns from the result of a join. Examples: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 3 ... 2 | 4 ... 3 | 5 ... 4 | 11 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 0 ... 2 | 1 ... 3 | 4 ... 4 | 7 ... ''' ... ) >>> t3 = t1.interval_join_left(t2, t1.t, t2.t, pw.temporal.interval(-2, 1)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t 3 | 1 3 | 4 4 | 4 5 | 4 11 | >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 3 ... 2 | 1 | 4 ... 3 | 1 | 5 ... 4 | 1 | 11 ... 5 | 2 | 2 ... 6 | 2 | 3 ... 7 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 0 ... 2 | 1 | 1 ... 3 | 1 | 4 ... 4 | 1 | 7 ... 5 | 2 | 0 ... 6 | 2 | 2 ... 7 | 4 | 2 ... ''' ... 
) >>> t3 = t1.interval_join_left( ... t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b ... ).select(t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) a | left_t | right_t 1 | 3 | 1 1 | 3 | 4 1 | 4 | 4 1 | 5 | 4 1 | 11 | 2 | 2 | 0 2 | 2 | 2 2 | 3 | 2 3 | 4 | Setting `behavior` allows to control temporal behavior of an interval join. Then, each side of the interval join keeps track of the maximal already seen time (`self_time` and `other_time`). The arguments of `behavior` mean in the context of an interval join what follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, interval join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Example without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 1 | 1 | 0 | 2 ... 2 | 2 | 2 | 4 ... 3 | 1 | 4 | 4 ... 4 | 2 | 8 | 8 ... 5 | 1 | 0 | 10 ... 6 | 1 | 4 | 10 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 42 | 1 | 2 | 2 ... 8 | 2 | 10 | 14 ... 10 | 2 | 4 | 30 ... ''' ... ) >>> result_without_cutoff = t1.interval_join_left( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... 
) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 2 | | 2 | 2 | | 4 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 4 | | 2 | 8 | | 8 | 1 5 | 42 | 1 | 0 | 2 | 10 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | | 2 | 8 | | 14 | -1 4 | 8 | 2 | 8 | 10 | 14 | 1 2 | | 2 | 2 | | 30 | -1 2 | 10 | 2 | 2 | 4 | 30 | 1 >>> result_with_cutoff = t1.interval_join_left( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... behavior=pw.temporal.common_behavior(cutoff=6), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 2 | | 2 | 2 | | 4 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 4 | | 2 | 8 | | 8 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | | 2 | 8 | | 14 | -1 4 | 8 | 2 | 8 | 10 | 14 | 1 The record with ``value=5`` from table ``t1`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``0 <= 8-6``). The record with ``value=10`` from table ``t2`` was not joined because its ``event_time`` was equal to the maximal already seen time minus ``cutoff`` (``4 <= 10-6``). Notice also the entries with ``__diff__=-1``. They're deletion entries caused by the arrival of matching entries on the right side of the join. The matches caused the removal of entries without values in the fields from the right side and insertion of entries with values in these fields.
The function implementation follows:
def interval_join_left(
    self: pw.Table,
    other: pw.Table,
    self_time: pw.ColumnExpression,
    other_time: pw.ColumnExpression,
    interval: Interval[int] | Interval[float] | Interval[datetime.timedelta],
    *on: pw.ColumnExpression,
    behavior: CommonBehavior | None = None,
    left_instance: pw.ColumnReference | None = None,
    right_instance: pw.ColumnReference | None = None,
) -> IntervalJoinResult:
    """Performs an interval left join of self with other using a time difference
    and join expressions. If `self_time + lower_bound <=
    other_time <= self_time + upper_bound`
    and conditions in `on` are satisfied, the rows are joined. Rows from the left
    side that haven't been matched with the right side are returned with missing
    values on the right side replaced with `None`.

    Args:
        other: the right side of the join.
        self_time: time expression in self.
        other_time: time expression in other.
        lower_bound: a lower bound on time difference between other_time
            and self_time.
        upper_bound: an upper bound on time difference between other_time
            and self_time.
        on: a list of column expressions. Each must have == as the top level
            operation and be of the form LHS: ColumnReference == RHS: ColumnReference.
        behavior: defines temporal behavior of a join - features like delaying entries
            or ignoring late entries.
        left_instance/right_instance: optional arguments describing partitioning of the data into separate instances

    Returns:
        IntervalJoinResult: a result of the interval join. A method `.select()`
        can be called on it to extract relevant columns from the result of a join.

    Examples:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 3
    ...     2 | 4
    ...     3 | 5
    ...     4 | 11
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 0
    ...     2 | 1
    ...     3 | 4
    ...     4 | 7
    ...     '''
    ... )
    >>> t3 = t1.interval_join_left(t2, t1.t, t2.t, pw.temporal.interval(-2, 1)).select(
    ...     left_t=t1.t, right_t=t2.t
    ... )
    >>> pw.debug.compute_and_print(t3, include_id=False)
    left_t | right_t
    3      | 1
    3      | 4
    4      | 4
    5      | 4
    11     |
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | a | t
    ...     1 | 1 | 3
    ...     2 | 1 | 4
    ...     3 | 1 | 5
    ...     4 | 1 | 11
    ...     5 | 2 | 2
    ...     6 | 2 | 3
    ...     7 | 3 | 4
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | b | t
    ...     1 | 1 | 0
    ...     2 | 1 | 1
    ...     3 | 1 | 4
    ...     4 | 1 | 7
    ...     5 | 2 | 0
    ...     6 | 2 | 2
    ...     7 | 4 | 2
    ...     '''
    ... )
    >>> t3 = t1.interval_join_left(
    ...     t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b
    ... ).select(t1.a, left_t=t1.t, right_t=t2.t)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    a | left_t | right_t
    1 | 3      | 1
    1 | 3      | 4
    1 | 4      | 4
    1 | 5      | 4
    1 | 11     |
    2 | 2      | 0
    2 | 2      | 2
    2 | 3      | 2
    3 | 4      |

    Setting `behavior` allows to control temporal behavior of an interval join. Then, each side of
    the interval join keeps track of the maximal already seen time (`self_time` and `other_time`).
    The arguments of `behavior` mean in the context of an interval join what follows:

    - **delay** - buffers results until the maximal already seen time is greater than \
        or equal to their time plus `delay`.
    - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \
        it is also used to garbage collect records that have times lower or equal to the above threshold. \
        When `cutoff` is not set, interval join will remember all records from both sides.
    - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \
        keeps only results that are newer than the maximal seen time minus `cutoff`.

    Example without and with forgetting:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...     value | instance | event_time | __time__
    ...       1   |    1     |      0     |     2
    ...       2   |    2     |      2     |     4
    ...       3   |    1     |      4     |     4
    ...       4   |    2     |      8     |     8
    ...       5   |    1     |      0     |    10
    ...       6   |    1     |      4     |    10
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...     value | instance | event_time | __time__
    ...      42   |    1     |      2     |     2
    ...       8   |    2     |     10     |    14
    ...      10   |    2     |      4     |    30
    ...     '''
    ... )
    >>> result_without_cutoff = t1.interval_join_left(
    ...     t2,
    ...     t1.event_time,
    ...     t2.event_time,
    ...     pw.temporal.interval(-2, 2),
    ...     t1.instance == t2.instance,
    ... ).select(
    ...     left_value=t1.value,
    ...     right_value=t2.value,
    ...     instance=t1.instance,
    ...     left_time=t1.event_time,
    ...     right_time=t2.event_time,
    ... )
    >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False)
    left_value | right_value | instance | left_time | right_time | __time__ | __diff__
    1          | 42          | 1        | 0         | 2          | 2        | 1
    2          |             | 2        | 2         |            | 4        | 1
    3          | 42          | 1        | 4         | 2          | 4        | 1
    4          |             | 2        | 8         |            | 8        | 1
    5          | 42          | 1        | 0         | 2          | 10       | 1
    6          | 42          | 1        | 4         | 2          | 10       | 1
    4          |             | 2        | 8         |            | 14       | -1
    4          | 8           | 2        | 8         | 10         | 14       | 1
    2          |             | 2        | 2         |            | 30       | -1
    2          | 10          | 2        | 2         | 4          | 30       | 1
    >>> result_with_cutoff = t1.interval_join_left(
    ...     t2,
    ...     t1.event_time,
    ...     t2.event_time,
    ...     pw.temporal.interval(-2, 2),
    ...     t1.instance == t2.instance,
    ...     behavior=pw.temporal.common_behavior(cutoff=6),
    ... ).select(
    ...     left_value=t1.value,
    ...     right_value=t2.value,
    ...     instance=t1.instance,
    ...     left_time=t1.event_time,
    ...     right_time=t2.event_time,
    ... )
    >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False)
    left_value | right_value | instance | left_time | right_time | __time__ | __diff__
    1          | 42          | 1        | 0         | 2          | 2        | 1
    2          |             | 2        | 2         |            | 4        | 1
    3          | 42          | 1        | 4         | 2          | 4        | 1
    4          |             | 2        | 8         |            | 8        | 1
    6          | 42          | 1        | 4         | 2          | 10       | 1
    4          |             | 2        | 8         |            | 14       | -1
    4          | 8           | 2        | 8         | 10         | 14       | 1

    The record with ``value=5`` from table ``t1`` was not joined because its ``event_time``
    was less than the maximal already seen time minus ``cutoff`` (``0 <= 8-6``).
    The record with ``value=10`` from table ``t2`` was not joined because its ``event_time``
    was equal to the maximal already seen time minus ``cutoff`` (``4 <= 10-6``).

    Notice also the entries with ``__diff__=-1``. They're deletion entries caused by the arrival
    of matching entries on the right side of the join. The matches caused the removal of entries
    without values in the fields from the right side and insertion of entries with values
    in these fields.
    """
    # Thin wrapper: all the work happens in IntervalJoinResult._interval_join,
    # parameterized here with LEFT join mode.
    return IntervalJoinResult._interval_join(
        self,
        other,
        self_time,
        other_time,
        interval,
        *on,
        behavior=behavior,
        mode=pw.JoinMode.LEFT,
        left_instance=left_instance,
        right_instance=right_instance,
    )
) >>> t3 = t1.interval_join_left( ... t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b ... ).select(t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) a | left_t | right_t 1 | 3 | 1 1 | 3 | 4 1 | 4 | 4 1 | 5 | 4 1 | 11 | 2 | 2 | 0 2 | 2 | 2 2 | 3 | 2 3 | 4 | Setting `behavior` allows to control temporal behavior of an interval join. Then, each side of the interval join keeps track of the maximal already seen time (`self_time` and `other_time`). The arguments of `behavior` mean in the context of an interval join what follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, interval join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Example without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 1 | 1 | 0 | 2 ... 2 | 2 | 2 | 4 ... 3 | 1 | 4 | 4 ... 4 | 2 | 8 | 8 ... 5 | 1 | 0 | 10 ... 6 | 1 | 4 | 10 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 42 | 1 | 2 | 2 ... 8 | 2 | 10 | 14 ... 10 | 2 | 4 | 30 ... ''' ... ) >>> result_without_cutoff = t1.interval_join_left( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... 
) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 2 | | 2 | 2 | | 4 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 4 | | 2 | 8 | | 8 | 1 5 | 42 | 1 | 0 | 2 | 10 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | | 2 | 8 | | 14 | -1 4 | 8 | 2 | 8 | 10 | 14 | 1 2 | | 2 | 2 | | 30 | -1 2 | 10 | 2 | 2 | 4 | 30 | 1 >>> result_with_cutoff = t1.interval_join_left( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... behavior=pw.temporal.common_behavior(cutoff=6), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 2 | | 2 | 2 | | 4 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 4 | | 2 | 8 | | 8 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | | 2 | 8 | | 14 | -1 4 | 8 | 2 | 8 | 10 | 14 | 1 The record with ``value=5`` from table ``t1`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``0 <= 8-6``). The record with ``value=10`` from table ``t2`` was not joined because its ``event_time`` was equal to the maximal already seen time minus ``cutoff`` (``4 <= 10-6``). Notice also the entries with ``__diff__=-1``. They're deletion entries caused by the arrival of matching entries on the right side of the join. The matches caused the removal of entries without values in the fields from the right side and insertion of entries with values in these fields. |
from __future__ import annotations
import datetime
from abc import abstractmethod
from dataclasses import dataclass
from typing import Any, Generic, TypeVar, overload
import pathway.internals as pw
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableReplacementWithNoneDesugaring,
TableSubstitutionDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import eval_type
from .temporal_behavior import CommonBehavior, apply_temporal_behavior
from .utils import IntervalType, TimeEventType, check_joint_types, get_default_origin
class Interval(Generic[T]):
    """A closed time interval ``[lower_bound, upper_bound]`` used by interval joins.

    NOTE(review): plain annotated attributes with no ``__init__`` visible — this
    is presumably a dataclass whose decorator is not visible in this chunk;
    confirm against the original module.
    """

    # Bounds are generic over the time type (int, float or datetime.timedelta,
    # per the Interval type unions used in _interval_join below).
    lower_bound: T
    upper_bound: T
class IntervalJoinResult(DesugaringContext):
    """
    Result of an interval join between tables.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 1 | 3
    ... 2 | 4
    ... 3 | 5
    ... 4 | 11
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | t
    ... 1 | 0
    ... 2 | 1
    ... 3 | 4
    ... 4 | 7
    ... '''
    ... )
    >>> join_result = t1.interval_join_inner(t2, t1.t, t2.t, pw.temporal.interval(-2, 1))
    >>> isinstance(join_result, pw.temporal.IntervalJoinResult)
    True
    >>> pw.debug.compute_and_print(
    ... join_result.select(left_t=t1.t, right_t=t2.t), include_id=False
    ... )
    left_t | right_t
    3 | 1
    3 | 4
    4 | 4
    5 | 4
    """

    # Maps original user-visible tables to their internal (possibly
    # behavior-wrapped) substitutes used when desugaring select() expressions.
    _table_substitution: dict[pw.TableLike, pw.Table]
    # When True, select() results should have forgetting (-1 diff) entries
    # filtered out before being returned to the user.
    _filter_out_results_of_forgetting: bool

    def __init__(
        self,
        left: pw.Table,
        right: pw.Table,
        table_substitution: dict[pw.TableLike, pw.Table],
        _filter_out_results_of_forgetting: bool,
    ):
        # pw.left / pw.right / pw.this placeholders resolve to these tables
        # during desugaring of the join result.
        self._substitution = {
            pw.left: left,
            pw.right: right,
            pw.this: pw.this,  # type: ignore[dict-item]
        }
        self._table_substitution = table_substitution
        self._filter_out_results_of_forgetting = _filter_out_results_of_forgetting

    # NOTE(review): defined without ``self`` — presumably a @staticmethod whose
    # decorator is not visible in this chunk.
    def _should_filter_out_results_of_forgetting(
        behavior: CommonBehavior | None,
    ) -> bool:
        # True exactly when a cutoff is configured but the user asked to keep
        # all results: old results must then be filtered back in, not dropped.
        return (
            behavior is not None
            and behavior.cutoff is not None
            and behavior.keep_results
        )

    # NOTE(review): defined without ``self``/``cls`` — presumably a
    # @staticmethod; decorator not visible in this chunk.
    def _interval_join(
        left: pw.Table,
        right: pw.Table,
        left_time_expression: pw.ColumnExpression,
        right_time_expression: pw.ColumnExpression,
        interval: Interval[int] | Interval[float] | Interval[datetime.timedelta],
        *on: pw.ColumnExpression,
        behavior: CommonBehavior | None = None,
        mode: pw.JoinMode,
        left_instance: pw.ColumnReference | None = None,
        right_instance: pw.ColumnReference | None = None,
    ) -> IntervalJoinResult:
        """Creates an IntervalJoinResult. To perform an interval join uses it uses two
        tumbling windows of size `lower_bound` + `upper_bound` and then filters the result.
        """
        # Validate that the time expressions and both bounds use a consistent
        # time/interval type combination.
        check_joint_types(
            {
                "self_time_expression": (left_time_expression, TimeEventType),
                "other_time_expression": (right_time_expression, TimeEventType),
                "lower_bound": (interval.lower_bound, IntervalType),
                "upper_bound": (interval.upper_bound, IntervalType),
            }
        )
        if left == right:
            raise ValueError(
                "Cannot join table with itself. Use <table>.copy() as one of the arguments of the join."
            )
        if interval.lower_bound > interval.upper_bound:  # type: ignore[operator]
            raise ValueError(
                "lower_bound has to be less than or equal to the upper_bound in the Table.interval_join()."
            )
        # Degenerate interval (single point) gets a specialized implementation;
        # dispatch to the subclass's own _interval_join.
        if interval.lower_bound == interval.upper_bound:
            cls: type[IntervalJoinResult] = _ZeroDifferenceIntervalJoinResult
        else:
            cls = _NonZeroDifferenceIntervalJoinResult
        return cls._interval_join(
            left,
            right,
            left_time_expression,
            right_time_expression,
            interval,
            *on,
            behavior=behavior,
            mode=mode,
            left_instance=left_instance,
            right_instance=right_instance,
        )

    # NOTE(review): presumably a @property implementing DesugaringContext's
    # interface — decorator not visible in this chunk.
    def _desugaring(self) -> TableSubstitutionDesugaring:
        # Substitutes user tables with their internal counterparts in expressions.
        return TableSubstitutionDesugaring(self._table_substitution)

    # NOTE(review): body is ``...`` and ``abstractmethod`` is imported at the
    # top of this module — presumably decorated @abstractmethod; subclasses
    # provide the actual implementation.
    def select(self, *args: pw.ColumnReference, **kwargs: Any) -> pw.Table:
        """
        Computes a result of an interval join.
        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.
        Returns:
            Table: Created table.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown(
        ... '''
        ... | a | t
        ... 1 | 1 | 3
        ... 2 | 1 | 4
        ... 3 | 1 | 5
        ... 4 | 1 | 11
        ... 5 | 2 | 2
        ... 6 | 2 | 3
        ... 7 | 3 | 4
        ... '''
        ... )
        >>> t2 = pw.debug.table_from_markdown(
        ... '''
        ... | b | t
        ... 1 | 1 | 0
        ... 2 | 1 | 1
        ... 3 | 1 | 4
        ... 4 | 1 | 7
        ... 5 | 2 | 0
        ... 6 | 2 | 2
        ... 7 | 4 | 2
        ... '''
        ... )
        >>> t3 = t1.interval_join_inner(
        ... t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b
        ... ).select(t1.a, left_t=t1.t, right_t=t2.t)
        >>> pw.debug.compute_and_print(t3, include_id=False)
        a | left_t | right_t
        1 | 3 | 1
        1 | 3 | 4
        1 | 4 | 4
        1 | 5 | 4
        2 | 2 | 0
        2 | 2 | 2
        2 | 3 | 2
        """
        ...
class CommonBehavior(Behavior):
    """Defines temporal behavior of windows and temporal joins."""

    # Buffer results until the maximal already-seen time is at least their
    # time plus `delay`; None means no buffering.
    delay: IntervalType | None
    # Ignore/garbage-collect records whose time is <= max-seen-time - cutoff;
    # None means remember all records.
    cutoff: IntervalType | None
    # If True, keep all produced results; if False, drop results older than
    # max-seen-time - cutoff.
    keep_results: bool
class Table(
    Joinable,
    OperatorInput,
    Generic[TSchema],
):
    """Collection of named columns over identical universes.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> isinstance(t1, pw.Table)
    True
    """

    # Methods implemented in stdlib submodules are attached to Table at
    # runtime; importing them under TYPE_CHECKING only makes them visible to
    # type checkers without creating import cycles.
    if TYPE_CHECKING:
        from pathway.stdlib.ordered import diff  # type: ignore[misc]
        from pathway.stdlib.statistical import interpolate  # type: ignore[misc]
        from pathway.stdlib.temporal import (  # type: ignore[misc]
            asof_join,
            asof_join_left,
            asof_join_outer,
            asof_join_right,
            asof_now_join,
            asof_now_join_inner,
            asof_now_join_left,
            interval_join,
            interval_join_inner,
            interval_join_left,
            interval_join_outer,
            interval_join_right,
            window_join,
            window_join_inner,
            window_join_left,
            window_join_outer,
            window_join_right,
            windowby,
        )
        from pathway.stdlib.viz import (  # type: ignore[misc]
            _repr_mimebundle_,
            plot,
            show,
        )

    # Mapping from column name to its internal column object.
    _columns: dict[str, clmn.Column]
    # Declared schema of the table (column names -> types).
    _schema: type[Schema]
    # Pseudocolumn holding row ids.
    _id_column: clmn.IdColumn
    # Context used to evaluate row-wise expressions over this table.
    _rowwise_context: clmn.RowwiseContext
    _source: SetOnceProperty[OutputHandle] = SetOnceProperty()
    """Lateinit by operator."""
def __init__(
self,
_columns: Mapping[str, clmn.Column],
_context: clmn.Context,
_schema: type[Schema] | None = None,
):
if _schema is None:
_schema = schema_from_columns(_columns)
super().__init__(_context)
self._columns = dict(_columns)
self._schema = _schema
self._id_column = _context.id_column
self._substitution = {thisclass.this: self}
self._rowwise_context = clmn.RowwiseContext(self._id_column)
    def id(self) -> expr.ColumnReference:
        """Get reference to pseudocolumn containing id's of a table.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10 | Alice | dog
        ... 9 | Bob | dog
        ... 8 | Alice | cat
        ... 7 | Bob | dog
        ... ''')
        >>> t2 = t1.select(ids = t1.id)
        >>> t2.typehints()['ids']
        <class 'pathway.engine.Pointer'>
        >>> pw.debug.compute_and_print(t2.select(test=t2.id == t2.ids), include_id=False)
        test
        True
        True
        True
        True
        """
        # NOTE(review): the doctest accesses ``t1.id`` without parentheses, so
        # this is presumably a @property in the original source — the decorator
        # is not visible in this chunk.
        return expr.ColumnReference(_table=self, _column=self._id_column, _name="id")
    def column_names(self):
        """Return the names of all columns (alias of ``keys()``)."""
        return self.keys()
    def keys(self):
        """Return a view over the column names of this table."""
        return self._columns.keys()
    def _get_column(self, name: str) -> clmn.Column:
        """Return the internal column object for ``name`` (raises KeyError if absent)."""
        return self._columns[name]
def _ipython_key_completions_(self):
return list(self.column_names())
def __dir__(self):
return list(super().__dir__()) + list(self.column_names())
    def _C(self) -> TSchema:
        """Schema-typed accessor over columns; at runtime simply ``self.C``."""
        # The cast to TSchema only serves static type checking.
        return self.C  # type: ignore
    def schema(self) -> type[Schema]:
        """Get schema of the table.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10 | Alice | dog
        ... 9 | Bob | dog
        ... 8 | Alice | cat
        ... 7 | Bob | dog
        ... ''')
        >>> t1.schema
        <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
        >>> t1.typehints()['age']
        <class 'int'>
        """
        # NOTE(review): the doctest accesses ``t1.schema`` without parentheses,
        # so this is presumably a @property — decorator not visible here.
        return self._schema
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference:
name = self._column_deprecation_rename(name)
if name == "id":
return self.id
if name not in self.keys():
raise exception_type(f"Table has no column with name {name}.")
return expr.ColumnReference(
_table=self, _column=self._get_column(name), _name=name
)
    # NOTE(review): the two stub signatures below have ``...`` bodies and are
    # presumably decorated with @typing.overload in the original source — the
    # decorators are not visible in this chunk.
    def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...
    def __getitem__(self, args: list[str | expr.ColumnReference]) -> Table: ...
    def __getitem__(
        self, args: str | expr.ColumnReference | list[str | expr.ColumnReference]
    ) -> expr.ColumnReference | Table:
        """Get columns by name.
        Warning:
        - Does not allow repetitions of columns.
        - Fails if tries to access nonexistent column.
        Args:
            names: a singe column name or list of columns names to be extracted from `self`.
        Returns:
            Table with specified columns, or column expression (if single argument given).
            Instead of column names, column references are valid here.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10 | Alice | dog
        ... 9 | Bob | dog
        ... 8 | Alice | cat
        ... 7 | Bob | dog
        ... ''')
        >>> t2 = t1[["age", "pet"]]
        >>> t2 = t1[["age", t1.pet]]
        >>> pw.debug.compute_and_print(t2, include_id=False)
        age | pet
        7 | dog
        8 | cat
        9 | dog
        10 | dog
        """
        if isinstance(args, expr.ColumnReference):
            # Only references to this table or to pw.this placeholders are accepted.
            if (args.table is not self) and not isinstance(
                args.table, thisclass.ThisMetaclass
            ):
                raise ValueError(
                    "Table.__getitem__ argument has to be a ColumnReference to the same table or pw.this, or a string "
                    + "(or a list of those)."
                )
            return self._get_colref_by_name(args.name, KeyError)
        elif isinstance(args, str):
            return self._get_colref_by_name(args, KeyError)
        else:
            # A list of names/references selects a sub-table with those columns.
            return self.select(*[self[name] for name in args])
    # NOTE(review): defined without ``self`` — presumably a @staticmethod whose
    # decorator is not visible in this chunk.
    def from_columns(
        *args: expr.ColumnReference, **kwargs: expr.ColumnReference
    ) -> Table:
        """Build a table from columns.
        All columns must have the same ids. Columns' names must be pairwise distinct.
        Args:
            args: List of columns.
            kwargs: Columns with their new names.
        Returns:
            Table: Created table.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.Table.empty(age=float, pet=float)
        >>> t2 = pw.Table.empty(foo=float, bar=float).with_universe_of(t1)
        >>> t3 = pw.Table.from_columns(t1.pet, qux=t2.foo)
        >>> pw.debug.compute_and_print(t3, include_id=False)
        pet | qux
        """
        all_args = cast(
            dict[str, expr.ColumnReference], combine_args_kwargs(args, kwargs)
        )
        if not all_args:
            raise ValueError("Table.from_columns() cannot have empty arguments list")
        else:
            # Use the first argument's table as the anchor; all other columns
            # must live over an equal universe.
            arg = next(iter(all_args.values()))
            table: Table = arg.table
            for arg in all_args.values():
                if not G.universe_solver.query_are_equal(
                    table._universe, arg.table._universe
                ):
                    raise ValueError(
                        "Universes of all arguments of Table.from_columns() have to be equal.\n"
                        + "Consider using Table.promise_universes_are_equal() to assert it.\n"
                        + "(However, untrue assertion might result in runtime errors.)"
                    )
            return table.select(*args, **kwargs)
def concat_reindex(self, *tables: Table) -> Table:
"""Concatenate contents of several tables.
This is similar to PySpark union. All tables must have the same schema. Each row is reindexed.
Args:
tables: List of tables to concatenate. All tables must have the same schema.
Returns:
Table: The concatenated table. It will have new, synthetic ids.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | pet
... 1 | Dog
... 7 | Cat
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | pet
... 1 | Manul
... 8 | Octopus
... ''')
>>> t3 = t1.concat_reindex(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
pet
Cat
Dog
Manul
Octopus
"""
reindexed = [
table.with_id_from(table.id, i) for i, table in enumerate([self, *tables])
]
universes.promise_are_pairwise_disjoint(*reindexed)
return Table.concat(*reindexed)
    # NOTE(review): defined without ``self`` — presumably a @staticmethod whose
    # decorator is not visible in this chunk.
    def empty(**kwargs: dt.DType) -> Table:
        """Creates an empty table with a schema specified by kwargs.
        Args:
            kwargs: Dict whose keys are column names and values are column types.
        Returns:
            Table: Created empty table.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.Table.empty(age=float, pet=float)
        >>> pw.debug.compute_and_print(t1, include_id=False)
        age | pet
        """
        # Local import — presumably to avoid a circular dependency; confirm.
        from pathway.internals import table_io
        ret = table_io.empty_from_schema(schema_from_types(None, **kwargs))
        # Tell the universe solver this universe is provably empty.
        G.universe_solver.register_as_empty(ret._universe)
        return ret
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
"""Build a new table with columns specified by kwargs.
Output columns' names are keys(kwargs). values(kwargs) can be raw values, boxed
values, columns. Assigning to id reindexes the table.
Args:
args: Column references.
kwargs: Column expressions with their new assigned names.
Returns:
Table: Created table.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... pet
... Dog
... Cat
... ''')
>>> t2 = t1.select(animal=t1.pet, desc="fluffy")
>>> pw.debug.compute_and_print(t2, include_id=False)
animal | desc
Cat | fluffy
Dog | fluffy
"""
new_columns = []
all_args = combine_args_kwargs(args, kwargs)
for new_name, expression in all_args.items():
self._validate_expression(expression)
column = self._eval(expression)
new_columns.append((new_name, column))
return self._with_same_universe(*new_columns)
def __add__(self, other: Table) -> Table:
"""Build a union of `self` with `other`.
Semantics: Returns a table C, such that
- C.columns == self.columns + other.columns
- C.id == self.id == other.id
Args:
other: The other table. `self.id` must be equal `other.id` and
`self.columns` and `other.columns` must be disjoint (or overlapping names
are THE SAME COLUMN)
Returns:
Table: Created table.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... pet
... 1 Dog
... 7 Cat
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... age
... 1 10
... 7 3
... ''')
>>> t3 = t1 + t2
>>> pw.debug.compute_and_print(t3, include_id=False)
pet | age
Cat | 3
Dog | 10
"""
if not G.universe_solver.query_are_equal(self._universe, other._universe):
raise ValueError(
"Universes of all arguments of Table.__add__() have to be equal.\n"
+ "Consider using Table.promise_universes_are_equal() to assert it.\n"
+ "(However, untrue assertion might result in runtime errors.)"
)
return self.select(*self, *other)
    def slice(self) -> TableSlice:
        """Creates a collection of references to self columns.
        Supports basic column manipulation methods.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10 | Alice | dog
        ... 9 | Bob | dog
        ... 8 | Alice | cat
        ... 7 | Bob | dog
        ... ''')
        >>> t1.slice.without("age")
        TableSlice({'owner': <table1>.owner, 'pet': <table1>.pet})
        """
        # NOTE(review): the doctest accesses ``t1.slice`` without parentheses,
        # so this is presumably a @property — decorator not visible here.
        return TableSlice(dict(**self), self)
def filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
"""Filter a table according to `filter_expression` condition.
Args:
filter_expression: `ColumnExpression` that specifies the filtering condition.
Returns:
Table: Result has the same schema as `self` and its ids are subset of `self.id`.
Example:
>>> import pathway as pw
>>> vertices = pw.debug.table_from_markdown('''
... label outdegree
... 1 3
... 7 0
... ''')
>>> filtered = vertices.filter(vertices.outdegree == 0)
>>> pw.debug.compute_and_print(filtered, include_id=False)
label | outdegree
7 | 0
"""
filter_type = self.eval_type(filter_expression)
if filter_type != dt.BOOL:
raise TypeError(
f"Filter argument of Table.filter() has to be bool, found {filter_type}."
)
ret = self._filter(filter_expression)
if (
filter_col := expr.get_column_filtered_by_is_none(filter_expression)
) is not None and filter_col.table == self:
name = filter_col.name
dtype = self._columns[name].dtype
ret = ret.update_types(**{name: dt.unoptionalize(dtype)})
return ret
def split(
self, split_expression: expr.ColumnExpression
) -> tuple[Table[TSchema], Table[TSchema]]:
"""Split a table according to `split_expression` condition.
Args:
split_expression: `ColumnExpression` that specifies the split condition.
Returns:
positive_table, negative_table: tuple of tables,
with the same schemas as `self` and with ids that are subsets of `self.id`,
and provably disjoint.
Example:
>>> import pathway as pw
>>> vertices = pw.debug.table_from_markdown('''
... label outdegree
... 1 3
... 7 0
... ''')
>>> positive, negative = vertices.split(vertices.outdegree == 0)
>>> pw.debug.compute_and_print(positive, include_id=False)
label | outdegree
7 | 0
>>> pw.debug.compute_and_print(negative, include_id=False)
label | outdegree
1 | 3
"""
positive = self.filter(split_expression)
negative = self.filter(~split_expression)
universes.promise_are_pairwise_disjoint(positive, negative)
universes.promise_are_equal(
self, Table.concat(positive, negative)
) # TODO: add API method for this
return positive, negative
def _filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
self._validate_expression(filter_expression)
filtering_column = self._eval(filter_expression)
assert self._universe == filtering_column.universe
context = clmn.FilterContext(filtering_column, self._id_column)
return self._table_with_context(context)
    def _gradual_broadcast(
        self,
        threshold_table,
        lower_column,
        value_column,
        upper_column,
    ) -> Table:
        """Return self extended with the ``apx_value`` column computed by
        ``__gradual_broadcast`` from the threshold table's bound columns."""
        return self + self.__gradual_broadcast(
            threshold_table, lower_column, value_column, upper_column
        )
    def __gradual_broadcast(
        self,
        threshold_table,
        lower_column,
        value_column,
        upper_column,
    ):
        """Build a single-column (``apx_value``) table from a GradualBroadcastContext.

        The lower/value/upper columns are evaluated against ``threshold_table``,
        not against ``self``.
        """
        context = clmn.GradualBroadcastContext(
            self._id_column,
            threshold_table._eval(lower_column),
            threshold_table._eval(value_column),
            threshold_table._eval(upper_column),
        )
        return Table(_columns={"apx_value": context.apx_value_column}, _context=context)
def _forget(
self,
threshold_column: expr.ColumnExpression,
time_column: expr.ColumnExpression,
mark_forgetting_records: bool,
) -> Table:
context = clmn.ForgetContext(
self._id_column,
self._eval(threshold_column),
self._eval(time_column),
mark_forgetting_records,
)
return self._table_with_context(context)
    def _forget_immediately(
        self,
    ) -> Table:
        """Wrap the table in a ForgetImmediatelyContext.

        NOTE(review): presumably used to implement the ``asof_now`` family of
        joins — confirm against callers.
        """
        context = clmn.ForgetImmediatelyContext(self._id_column)
        return self._table_with_context(context)
    def _filter_out_results_of_forgetting(
        self,
    ) -> Table:
        """Drop forgetting (retraction) entries produced by upstream forgetting."""
        # The output universe is a superset of input universe because forgetting entries
        # are filtered out. At each point in time, the set of keys with +1 diff can be
        # bigger than a set of keys with +1 diff in an input table.
        context = clmn.FilterOutForgettingContext(self._id_column)
        return self._table_with_context(context)
def _freeze(
self,
threshold_column: expr.ColumnExpression,
time_column: expr.ColumnExpression,
) -> Table:
context = clmn.FreezeContext(
self._id_column,
self._eval(threshold_column),
self._eval(time_column),
)
return self._table_with_context(context)
def _buffer(
self,
threshold_column: expr.ColumnExpression,
time_column: expr.ColumnExpression,
) -> Table:
context = clmn.BufferContext(
self._id_column,
self._eval(threshold_column),
self._eval(time_column),
)
return self._table_with_context(context)
def difference(self, other: Table) -> Table[TSchema]:
r"""Restrict self universe to keys not appearing in the other table.
Args:
other: table with ids to remove from self.
Returns:
Table: table with restricted universe, with the same set of columns
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | cost
... 2 | 100
... 3 | 200
... 4 | 300
... ''')
>>> t3 = t1.difference(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet
10 | Alice | 1
"""
context = clmn.DifferenceContext(
left=self._id_column,
right=other._id_column,
)
return self._table_with_context(context)
def intersect(self, *tables: Table) -> Table[TSchema]:
"""Restrict self universe to keys appearing in all of the tables.
Args:
tables: tables keys of which are used to restrict universe.
Returns:
Table: table with restricted universe, with the same set of columns
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | cost
... 2 | 100
... 3 | 200
... 4 | 300
... ''')
>>> t3 = t1.intersect(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet
8 | Alice | 2
9 | Bob | 1
"""
intersecting_universes = (
self._universe,
*tuple(table._universe for table in tables),
)
universe = G.universe_solver.get_intersection(*intersecting_universes)
if universe in intersecting_universes:
context: clmn.Context = clmn.RestrictContext(self._id_column, universe)
else:
intersecting_ids = (
self._id_column,
*tuple(table._id_column for table in tables),
)
context = clmn.IntersectContext(
intersecting_ids=intersecting_ids,
)
return self._table_with_context(context)
def restrict(self, other: TableLike) -> Table[TSchema]:
"""Restrict self universe to keys appearing in other.
Args:
other: table which universe is used to restrict universe of self.
Returns:
Table: table with restricted universe, with the same set of columns
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown(
... '''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... '''
... )
>>> t2 = pw.debug.table_from_markdown(
... '''
... | cost
... 2 | 100
... 3 | 200
... '''
... )
>>> t2.promise_universe_is_subset_of(t1)
<pathway.Table schema={'cost': <class 'int'>}>
>>> t3 = t1.restrict(t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet
8 | Alice | 2
9 | Bob | 1
"""
if not G.universe_solver.query_is_subset(other._universe, self._universe):
raise ValueError(
"Table.restrict(): other universe has to be a subset of self universe."
+ "Consider using Table.promise_universe_is_subset_of() to assert it."
)
context = clmn.RestrictContext(self._id_column, other._universe)
columns = {
name: self._wrap_column_in_context(context, column, name)
for name, column in self._columns.items()
}
return Table(
_columns=columns,
_context=context,
)
    def copy(self) -> Table[TSchema]:
        """Returns a copy of a table.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10 | Alice | dog
        ... 9 | Bob | dog
        ... 8 | Alice | cat
        ... 7 | Bob | dog
        ... ''')
        >>> t2 = t1.copy()
        >>> pw.debug.compute_and_print(t2, include_id=False)
        age | owner | pet
        7 | Bob | dog
        8 | Alice | cat
        9 | Bob | dog
        10 | Alice | dog
        >>> t1 is t2
        False
        """
        # Delegates to _copy_as with the dynamic type so subclasses copy as themselves.
        return self._copy_as(type(self))
def _copy_as(self, table_type: type[TTable], /, **kwargs) -> TTable:
columns = {
name: self._wrap_column_in_context(self._rowwise_context, column, name)
for name, column in self._columns.items()
}
return table_type(_columns=columns, _context=self._rowwise_context, **kwargs)
    def groupby(
        self,
        *args: expr.ColumnReference,
        id: expr.ColumnReference | None = None,
        sort_by: expr.ColumnReference | None = None,
        _filter_out_results_of_forgetting: bool = False,
        instance: expr.ColumnReference | None = None,
    ) -> groupbys.GroupedTable:
        """Groups table by columns from args.
        Note:
            Usually followed by `.reduce()` that aggregates the result and returns a table.
        Args:
            args: columns to group by.
            id: if provided, is the column used to set id's of the rows of the result
            sort_by: if provided, column values are used as sorting keys for particular reducers
            instance: optional argument describing partitioning of the data into separate instances
        Returns:
            GroupedTable: Groupby object.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10 | Alice | dog
        ... 9 | Bob | dog
        ... 8 | Alice | cat
        ... 7 | Bob | dog
        ... ''')
        >>> t2 = t1.groupby(t1.pet, t1.owner).reduce(t1.owner, t1.pet, ageagg=pw.reducers.sum(t1.age))
        >>> pw.debug.compute_and_print(t2, include_id=False)
        owner | pet | ageagg
        Alice | cat | 8
        Alice | dog | 10
        Bob | dog | 16
        """
        # `instance` is treated as just one more grouping column.
        if instance is not None:
            args = (*args, instance)
        # `id` may only be combined with zero grouping columns (it becomes the
        # single one) or with itself as the single grouping column.
        if id is not None:
            if len(args) == 0:
                args = (id,)
            elif len(args) > 1:
                raise ValueError(
                    "Table.groupby() cannot have id argument when grouping by multiple columns."
                )
            elif args[0]._column != id._column:
                raise ValueError(
                    "Table.groupby() received id argument and is grouped by a single column,"
                    + " but the arguments are not equal.\n"
                    + "Consider using <table>.groupby(id=...), skipping the positional argument."
                )
        # Only column references are accepted; give a targeted hint for strings.
        for arg in args:
            if not isinstance(arg, expr.ColumnReference):
                if isinstance(arg, str):
                    raise ValueError(
                        f"Expected a ColumnReference, found a string. Did you mean <table>.{arg}"
                        + f" instead of {repr(arg)}?"
                    )
                else:
                    raise ValueError(
                        "All Table.groupby() arguments have to be a ColumnReference."
                    )
        return groupbys.GroupedTable.create(
            table=self,
            grouping_columns=args,
            set_id=id is not None,
            sort_by=sort_by,
            _filter_out_results_of_forgetting=_filter_out_results_of_forgetting,
        )
    def reduce(
        self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
    ) -> Table:
        """Reduce a table to a single row.
        Equivalent to `self.groupby().reduce(*args, **kwargs)`.
        Args:
            args: reducer to reduce the table with
            kwargs: reducer to reduce the table with. Its key is the new name of a column.
        Returns:
            Table: Reduced table.
        Example:
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10 | Alice | dog
        ... 9 | Bob | dog
        ... 8 | Alice | cat
        ... 7 | Bob | dog
        ... ''')
        >>> t2 = t1.reduce(ageagg=pw.reducers.argmin(t1.age))
        >>> pw.debug.compute_and_print(t2, include_id=False) # doctest: +ELLIPSIS
        ageagg
        ^...
        >>> t3 = t2.select(t1.ix(t2.ageagg).age, t1.ix(t2.ageagg).pet)
        >>> pw.debug.compute_and_print(t3, include_id=False)
        age | pet
        7 | dog
        """
        # Grouping by nothing puts every row into a single group.
        return self.groupby().reduce(*args, **kwargs)
def deduplicate(
self,
*,
value: expr.ColumnExpression,
instance: expr.ColumnExpression | None = None,
acceptor: Callable[[T, T], bool],
persistent_id: str | None = None,
) -> Table:
"""Deduplicates rows in `self` on `value` column using acceptor function.
It keeps rows which where accepted by the acceptor function.
Acceptor operates on two arguments - current value and the previously accepted value.
Args:
value: column expression used for deduplication.
instance: Grouping column. For rows with different
values in this column, deduplication will be performed separately.
Defaults to None.
acceptor: callback telling whether two values are different.
persistent_id: (unstable) An identifier, under which the state of the table
will be persisted or ``None``, if there is no need to persist the state of this table.
When a program restarts, it restores the state for all input tables according to what
was saved for their ``persistent_id``. This way it's possible to configure the start of
computations from the moment they were terminated last time.
Returns:
Table: the result of deduplication.
Example:
>>> import pathway as pw
>>> table = pw.debug.table_from_markdown(
... '''
... val | __time__
... 1 | 2
... 2 | 4
... 3 | 6
... 4 | 8
... '''
... )
>>>
>>> def acceptor(new_value, old_value) -> bool:
... return new_value >= old_value + 2
...
>>>
>>> result = table.deduplicate(value=pw.this.val, acceptor=acceptor)
>>> pw.debug.compute_and_print_update_stream(result, include_id=False)
val | __time__ | __diff__
1 | 2 | 1
1 | 6 | -1
3 | 6 | 1
>>>
>>> table = pw.debug.table_from_markdown(
... '''
... val | instance | __time__
... 1 | 1 | 2
... 2 | 1 | 4
... 3 | 2 | 6
... 4 | 1 | 8
... 4 | 2 | 8
... 5 | 1 | 10
... '''
... )
>>>
>>> def acceptor(new_value, old_value) -> bool:
... return new_value >= old_value + 2
...
>>>
>>> result = table.deduplicate(
... value=pw.this.val, instance=pw.this.instance, acceptor=acceptor
... )
>>> pw.debug.compute_and_print_update_stream(result, include_id=False)
val | instance | __time__ | __diff__
1 | 1 | 2 | 1
3 | 2 | 6 | 1
1 | 1 | 8 | -1
4 | 1 | 8 | 1
"""
if instance is None:
instance = expr.ColumnConstExpression(None)
self._validate_expression(value)
self._validate_expression(instance)
value_col = self._eval(value)
instance_col = self._eval(instance)
context = clmn.DeduplicateContext(
value_col,
(instance_col,),
acceptor,
self._id_column,
persistent_id,
)
return self._table_with_context(context)
def ix(
    self, expression: expr.ColumnExpression, *, optional: bool = False, context=None
) -> Table:
    """Reindexes the table using expression values as keys. Uses keys from context, or tries to infer
    proper context from the expression.

    If optional is True, then None in expression values result in None values in the result columns.
    Missing values in table keys result in RuntimeError.

    Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
    (latter results in returning a delayed operation, and should be only used when using `ix` inside
    join().select() or groupby().reduce() sequence).

    Returns:
        Reindexed table with the same set of columns.

    Example:

    >>> import pathway as pw
    >>> t_animals = pw.debug.table_from_markdown('''
    ...   | epithet    | genus
    ... 1 | upupa      | epops
    ... 2 | acherontia | atropos
    ... 3 | bubo       | scandiacus
    ... 4 | dynastes   | hercules
    ... ''')
    >>> t_birds = pw.debug.table_from_markdown('''
    ...   | desc
    ... 2 | hoopoe
    ... 4 | owl
    ... ''')
    >>> ret = t_birds.select(t_birds.desc, latin=t_animals.ix(t_birds.id).genus)
    >>> pw.debug.compute_and_print(ret, include_id=False)
    desc   | latin
    hoopoe | atropos
    owl    | hercules
    """
    # Context inference: if the caller did not supply one, derive it from the
    # tables referenced inside the expression.
    if context is None:
        all_tables = collect_tables(expression)
        if len(all_tables) == 0:
            # No table references at all -> defer via pw.this.
            context = thisclass.this
        elif all(tab == all_tables[0] for tab in all_tables):
            # All references point at one table -> that table is the context.
            context = all_tables[0]
    if context is None:
        # Mixed tables: they must all be real tables sharing one universe.
        for tab in all_tables:
            if not isinstance(tab, Table):
                raise ValueError("Table expected here.")
        if len(all_tables) == 0:
            raise ValueError("Const value provided.")
        context = all_tables[0]
        for tab in all_tables:
            assert context._universe.is_equal_to(tab._universe)
    if isinstance(context, groupbys.GroupedJoinable):
        # Inside a groupby/join chain, fall back to the delayed pw.this path.
        context = thisclass.this
    if isinstance(context, thisclass.ThisMetaclass):
        # Delayed operation: re-invoke ix() once the real table is known.
        return context._delayed_op(
            lambda table, expression: self.ix(
                expression=expression, optional=optional, context=table
            ),
            expression=expression,
            qualname=f"{self}.ix(...)",
            name="ix",
        )
    restrict_universe = RestrictUniverseDesugaring(context)
    expression = restrict_universe.eval_expression(expression)
    # Materialize the key expression as a real column of the context table.
    key_col = context.select(tmp=expression).tmp
    key_dtype = self.eval_type(key_col)
    # Keys must be pointers (optionally Optional[Pointer] when optional=True).
    if (
        optional and not dt.dtype_issubclass(key_dtype, dt.Optional(dt.POINTER))
    ) or (not optional and not isinstance(key_dtype, dt.Pointer)):
        raise TypeError(
            f"Pathway supports indexing with Pointer type only. The type used was {key_dtype}."
        )
    if optional and isinstance(key_dtype, dt.Optional):
        # Missing keys yield None, so every result column becomes Optional.
        self_ = self.update_types(
            **{name: dt.Optional(self.typehints()[name]) for name in self.keys()}
        )
    else:
        self_ = self
    return self_._ix(key_col, optional)
def _ix(
    self,
    key_expression: expr.ColumnReference,
    optional: bool,
) -> Table:
    """Low-level reindex: wrap this table in an ``IxContext`` keyed by an
    already-validated pointer column (see :py:meth:`ix`)."""
    ix_context = clmn.IxContext(
        key_expression._column, self._id_column, optional
    )
    return self._table_with_context(ix_context)
def __lshift__(self, other: Table) -> Table:
    """Alias of :py:meth:`update_cells`: ``self << other``.

    Returns ``self`` with cells overwritten by the matching cells of
    ``other`` (ties resolved in favor of ``other``). Requires
    ``other.columns ⊆ self.columns`` and ``other.id ⊆ self.id``; the result
    keeps the columns and ids of ``self``.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 30
    ... ''')
    >>> pw.universes.promise_is_subset_of(t2, t1)
    >>> t3 = t1 << t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8   | Alice | 2
    9   | Bob   | 1
    10  | Alice | 30
    """
    return self.update_cells(other, _stacklevel=2)
def concat(self, *others: Table[TSchema]) -> Table[TSchema]:
    """Concats `self` with every `other` ∊ `others`.

    Semantics:
    - result.columns == self.columns == other.columns
    - result.id == self.id ∪ other.id

    if self.id and other.id collide, throws an exception.

    Requires:
    - other.columns == self.columns
    - self.id disjoint with other.id

    Args:
        other: the other table.

    Returns:
        Table: The concatenated table. Id's of rows from original tables are preserved.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ... 11 | 11  | Alice | 30
    ... 12 | 12  | Tom   | 40
    ... ''')
    >>> pw.universes.promise_are_pairwise_disjoint(t1, t2)
    >>> t3 = t1.concat(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8   | Alice | 2
    9   | Bob   | 1
    10  | Alice | 1
    11  | Alice | 30
    12  | Tom   | 40
    """
    for other in others:
        if other.keys() != self.keys():
            # Name the differing columns so the user can see what to fix.
            mismatched = sorted(set(self.keys()) ^ set(other.keys()))
            raise ValueError(
                "columns do not match in the argument of Table.concat():"
                f" {mismatched}"
            )
    # Per-column least common ancestor of the types across all tables, so
    # every argument can be cast to one common schema before the union.
    schema = {
        key: functools.reduce(
            dt.types_lca,
            [other.schema._dtypes()[key] for other in others],
            self.schema._dtypes()[key],
        )
        for key in self.keys()
    }
    return Table._concat(
        self.cast_to_types(**schema),
        *[other.cast_to_types(**schema) for other in others],
    )
def _concat(self, *others: Table[TSchema]) -> Table[TSchema]:
    # Unsafe core of concat(): assumes column sets already match and the
    # types were reconciled by the caller.
    union_ids = (self._id_column, *(other._id_column for other in others))
    # The id sets must be provably disjoint — otherwise colliding rows would
    # make the union ambiguous at runtime.
    if not G.universe_solver.query_are_disjoint(*(c.universe for c in union_ids)):
        raise ValueError(
            "Universes of the arguments of Table.concat() have to be disjoint.\n"
            + "Consider using Table.promise_universes_are_disjoint() to assert it.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    context = clmn.ConcatUnsafeContext(
        union_ids=union_ids,
        # One column-update dict per extra table, keyed by self's column names.
        updates=tuple(
            {col_name: other._columns[col_name] for col_name in self.keys()}
            for other in others
        ),
    )
    return self._table_with_context(context)
def update_cells(self, other: Table, _stacklevel: int = 1) -> Table:
    """Updates cells of `self`, breaking ties in favor of the values in `other`.

    Semantics:
    - result.columns == self.columns
    - result.id == self.id
    - conflicts are resolved preferring other's values

    Requires:
    - other.columns ⊆ self.columns
    - other.id ⊆ self.id

    Args:
        other: the other table.

    Returns:
        Table: `self` updated with cells form `other`.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | 30
    ... ''')
    >>> pw.universes.promise_is_subset_of(t2, t1)
    >>> t3 = t1.update_cells(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8   | Alice | 2
    9   | Bob   | 1
    10  | Alice | 30
    """
    names = set(other.keys()) - set(self.keys())
    if names:
        raise ValueError(
            f"Columns of the argument in Table.update_cells() not present in the updated table: {list(names)}."
        )
    if self._universe == other._universe:
        # Same key set: a plain with_columns is equivalent and cheaper.
        warnings.warn(
            "Key sets of self and other in update_cells are the same."
            " Using with_columns instead of update_cells.",
            stacklevel=_stacklevel + 4,
        )
        return self.with_columns(*(other[name] for name in other))
    # Reconcile column types before merging.
    common_types = {
        name: dt.types_lca(
            self.schema.__dtypes__[name], other.schema.__dtypes__[name]
        )
        for name in other.keys()
    }
    return Table._update_cells(
        self.cast_to_types(**common_types), other.cast_to_types(**common_types)
    )
def _update_cells(self, other: Table) -> Table:
    # Unsafe core of update_cells(): assumes columns and types were already
    # reconciled by the caller.
    # other's keys must be a (provable) subset of self's keys.
    if not other._universe.is_subset_of(self._universe):
        raise ValueError(
            "Universe of the argument of Table.update_cells() needs to be "
            + "a subset of the universe of the updated table.\n"
            + "Consider using Table.promise_is_subset_of() to assert this.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    context = clmn.UpdateCellsContext(
        left=self._id_column,
        right=other._id_column,
        updates={name: other._columns[name] for name in other.keys()},
    )
    return self._table_with_context(context)
def update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
    """Updates rows of `self`, breaking ties in favor for the rows in `other`.

    Semantics:
    - result.columns == self.columns == other.columns
    - result.id == self.id ∪ other.id

    Requires:
    - other.columns == self.columns

    Args:
        other: the other table.

    Returns:
        Table: `self` updated with rows form `other`.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ... 1  | 10  | Alice | 30
    ... 12 | 12  | Tom   | 40
    ... ''')
    >>> t3 = t1.update_rows(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8   | Alice | 2
    9   | Bob   | 1
    10  | Alice | 30
    12  | Tom   | 40
    """
    if self.keys() != other.keys():
        raise ValueError(
            "Columns do not match between argument of Table.update_rows() and the updated table."
        )
    if self._universe.is_subset_of(other._universe):
        # Every row of self would be overwritten anyway.
        warnings.warn(
            "Universe of self is a subset of universe of other in update_rows. Returning other.",
            stacklevel=5,
        )
        return other
    # Reconcile column types before merging.
    common_types = {
        name: dt.types_lca(
            self.schema.__dtypes__[name], other.schema.__dtypes__[name]
        )
        for name in self.keys()
    }
    lhs = self.cast_to_types(**common_types)
    rhs = other.cast_to_types(**common_types)
    combined = G.universe_solver.get_union(self._universe, other._universe)
    if combined == self._universe:
        # other adds no new ids: a cell update suffices.
        return Table._update_cells(lhs, rhs)
    return Table._update_rows(lhs, rhs)
def _update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
    """Unsafe core of :py:meth:`update_rows`: assumes matching columns and
    already-reconciled types."""
    context = clmn.UpdateRowsContext(
        updates={name: other._columns[name] for name in self.keys()},
        union_ids=(self._id_column, other._id_column),
    )
    return self._table_with_context(context)
def with_columns(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Updates columns of `self`, according to args and kwargs.

    Arguments are evaluated exactly as in :py:meth:`select`; columns computed
    there replace same-named columns of `self`, all remaining columns are kept.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | owner | pet | size
    ... 1 | Tom   | 1   | 10
    ... 2 | Bob   | 1   | 9
    ... 3 | Tom   | 2   | 8
    ... ''')
    >>> t3 = t1.with_columns(*t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet | size
    8   | Tom   | 2   | 8
    9   | Bob   | 1   | 9
    10  | Tom   | 1   | 10
    """
    computed = self.select(*args, **kwargs)
    merged = {**dict(self), **dict(computed)}
    return self.select(**merged)
def with_id(self, new_index: expr.ColumnReference) -> Table:
    """Set new ids based on another column containing id-typed values.

    To generate ids based on arbitrary valued columns, use `with_id_from`.
    Values assigned must be row-wise unique.

    Args:
        new_index: column to be used as the new index.

    Returns:
        Table with updated ids.

    Example:

    >>> import pytest; pytest.xfail("with_id is hard to test")
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | new_id
    ... 1 | 2
    ... 2 | 3
    ... 3 | 4
    ... ''')
    >>> t3 = t1.promise_universe_is_subset_of(t2).with_id(t2.new_id)
    >>> pw.debug.compute_and_print(t3)
        age  owner  pet
    ^2   10  Alice    1
    ^3    9    Bob    1
    ^4    8  Alice    2
    """
    return self._with_new_index(new_index)
def with_id_from(
    self,
    *args: expr.ColumnExpression | Value,
    instance: expr.ColumnReference | None = None,
) -> Table:
    """Compute new ids based on values in columns.

    Ids are derived by hashing the given column values (see
    :py:meth:`pointer_from`) and must be row-wise unique.

    Args:
        args: columns to be used as primary keys.
        instance: optional grouping column mixed into the hash.

    Returns:
        Table: `self` updated with recomputed ids.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = t1 + t1.select(old_id=t1.id)
    >>> t3 = t2.with_id_from(t2.age)
    >>> pw.debug.compute_and_print(t3)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        | age | owner | pet | old_id
    ^... | 8   | Alice | 2   | ^...
    ^... | 9   | Bob   | 1   | ^...
    ^... | 10  | Alice | 1   | ^...
    """
    # _with_new_index needs a real column, so materialize the pointer
    # expression through a select first.
    keys_table = self.select(
        ref_column=self.pointer_from(*args, instance=instance)
    )
    return self._with_new_index(
        new_index=keys_table.ref_column,
    )
def _with_new_index(
    self,
    new_index: expr.ColumnExpression,
) -> Table:
    # Core of with_id()/with_id_from(): reindex the table by the values of
    # new_index, which must be a pointer-typed column over self's universe.
    self._validate_expression(new_index)
    index_type = self.eval_type(new_index)
    if not isinstance(index_type, dt.Pointer):
        raise TypeError(
            f"Pathway supports reindexing Tables with Pointer type only. The type used was {index_type}."
        )
    reindex_column = self._eval(new_index)
    # The new index must be defined for exactly the rows of this table.
    assert self._universe == reindex_column.universe
    context = clmn.ReindexContext(reindex_column)
    return self._table_with_context(context)
def rename_columns(self, **kwargs: str | expr.ColumnReference) -> Table:
    """Rename columns according to kwargs.

    Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.

    Args:
        kwargs: mapping from new column names to old names (given as strings
            or column references).

    Returns:
        Table: `self` with columns renamed.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | 1
    ... 9   | Bob   | 1
    ... 8   | Alice | 2
    ... ''')
    >>> t2 = t1.rename_columns(years_old=t1.age, animal=t1.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | years_old | animal
    Alice | 8         | 2
    Alice | 10        | 1
    Bob   | 9         | 1
    """
    # mapping: new name -> old name
    mapping: dict[str, str] = {}
    for new_name, old_name_col in kwargs.items():
        old_name = (
            old_name_col.name
            if isinstance(old_name_col, expr.ColumnReference)
            else old_name_col
        )
        if old_name not in self._columns:
            raise ValueError(f"Column {old_name} does not exist in a given table.")
        mapping[new_name] = old_name
    renamed_columns = self._columns.copy()
    # Drop every renamed source column first; pop() with a default so that
    # renaming one source column to several new names does not raise
    # KeyError on the second removal.
    for old_name in mapping.values():
        renamed_columns.pop(old_name, None)
    for new_name, old_name in mapping.items():
        renamed_columns[new_name] = self._columns[old_name]
    columns_wrapped = {
        name: self._wrap_column_in_context(
            self._rowwise_context,
            column,
            # lineage keeps the original (pre-rename) column name
            mapping.get(name, name),
        )
        for name, column in renamed_columns.items()
    }
    return self._with_same_universe(*columns_wrapped.items())
def rename_by_dict(
    self, names_mapping: dict[str | expr.ColumnReference, str]
) -> Table:
    """Rename columns according to a dictionary.

    Columns not mentioned in the mapping are unchanged. New name of a column
    must not be `id`.

    Args:
        names_mapping: mapping from old column names to new names.

    Returns:
        Table: `self` with columns renamed.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | 1
    ... 9   | Bob   | 1
    ... 8   | Alice | 2
    ... ''')
    >>> t2 = t1.rename_by_dict({"age": "years_old", t1.pet: "animal"})
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | years_old | animal
    Alice | 8         | 2
    Alice | 10        | 1
    Bob   | 9         | 1
    """
    renames = {
        new_name: self[old_name] for old_name, new_name in names_mapping.items()
    }
    return self.rename_columns(**renames)
def with_prefix(self, prefix: str) -> Table:
    """Rename columns by adding prefix to each name of column.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | 1
    ... 9   | Bob   | 1
    ... 8   | Alice | 2
    ... ''')
    >>> t2 = t1.with_prefix("u_")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    u_age | u_owner | u_pet
    8     | Alice   | 2
    9     | Bob     | 1
    10    | Alice   | 1
    """
    renames = {column: prefix + column for column in self.keys()}
    return self.rename_by_dict(renames)
def with_suffix(self, suffix: str) -> Table:
    """Rename columns by adding suffix to each name of column.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | 1
    ... 9   | Bob   | 1
    ... 8   | Alice | 2
    ... ''')
    >>> t2 = t1.with_suffix("_current")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age_current | owner_current | pet_current
    8           | Alice         | 2
    9           | Bob           | 1
    10          | Alice         | 1
    """
    renames = {column: column + suffix for column in self.keys()}
    return self.rename_by_dict(renames)
def rename(
    self,
    names_mapping: dict[str | expr.ColumnReference, str] | None = None,
    **kwargs: expr.ColumnExpression,
) -> Table:
    """Rename columns according either a dictionary or kwargs.

    If a mapping is provided using a dictionary, ``rename_by_dict`` will be used.
    Otherwise, ``rename_columns`` will be used with kwargs.

    Columns not in keys(kwargs) are not changed. New name of a column must not be ``id``.

    Args:
        names_mapping: mapping from old column names to new names.
        kwargs: mapping from new column names to old names (used only when
            ``names_mapping`` is not given).

    Returns:
        Table: `self` with columns renamed.
    """
    if names_mapping is None:
        return self.rename_columns(**kwargs)
    return self.rename_by_dict(names_mapping=names_mapping)
def without(self, *columns: str | expr.ColumnReference) -> Table:
    """Selects all columns without named column references.

    Args:
        columns: columns to be dropped provided by `table.column_name` notation.

    Returns:
        Table: `self` without specified columns.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | 1
    ... 9   | Bob   | 1
    ... 8   | Alice | 2
    ... ''')
    >>> t2 = t1.without(t1.age, pw.this.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner
    Alice
    Alice
    Bob
    """
    kept = self._columns.copy()
    for column in columns:
        if isinstance(column, expr.ColumnReference):
            kept.pop(column.name)
        else:
            assert isinstance(column, str)
            kept.pop(column)
    wrapped = {
        name: self._wrap_column_in_context(self._rowwise_context, col, name)
        for name, col in kept.items()
    }
    return self._with_same_universe(*wrapped.items())
def having(self, *indexers: expr.ColumnReference) -> Table[TSchema]:
    """Removes rows so that indexed.ix(indexer) is possible when some rows are missing,
    for each indexer in indexers"""
    if not indexers:
        return self
    filtered = [self._having(indexer) for indexer in indexers]
    first, *rest = filtered
    if not rest:
        return first
    # Rows must survive every indexer, hence the intersection.
    return first.intersect(*rest)
def update_types(self, **kwargs: Any) -> Table:
    """Updates types in schema. Has no effect on the runtime.

    Args:
        kwargs: mapping from existing column names to their new declared types.

    Raises:
        ValueError: if a keyword does not name an existing column.
    """
    for name in kwargs.keys():
        if name not in self.keys():
            # Name the offending argument so the user can spot the typo.
            raise ValueError(
                "Table.update_types() argument name has to be an existing table column name,"
                f" but {name!r} is not."
            )
    from pathway.internals.common import declare_type

    return self.with_columns(
        **{key: declare_type(val, self[key]) for key, val in kwargs.items()}
    )
def cast_to_types(self, **kwargs: Any) -> Table:
    """Casts columns to types.

    Args:
        kwargs: mapping from existing column names to target types.

    Raises:
        ValueError: if a keyword does not name an existing column.
    """
    for name in kwargs.keys():
        if name not in self.keys():
            # Name the offending argument so the user can spot the typo.
            raise ValueError(
                "Table.cast_to_types() argument name has to be an existing table column name,"
                f" but {name!r} is not."
            )
    from pathway.internals.common import cast

    return self.with_columns(
        **{key: cast(val, self[key]) for key, val in kwargs.items()}
    )
def _having(self, indexer: expr.ColumnReference) -> Table[TSchema]:
    """Single-indexer core of :py:meth:`having`."""
    return self._table_with_context(
        clmn.HavingContext(
            orig_id_column=self._id_column, key_column=indexer._column
        )
    )
def with_universe_of(self, other: TableLike) -> Table:
    """Returns a copy of self with exactly the same universe as others.

    Semantics: Required precondition self.universe == other.universe
    Used in situations where Pathway cannot deduce equality of universes, but
    those are equal as verified during runtime.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | pet
    ... 1 | Dog
    ... 7 | Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | age
    ... 1 | 10
    ... 7 | 3
    ... 8 | 100
    ... ''')
    >>> t3 = t2.filter(pw.this.age < 30).with_universe_of(t1)
    >>> t4 = t1 + t3
    >>> pw.debug.compute_and_print(t4, include_id=False)
    pet | age
    Cat | 3
    Dog | 10
    """
    if self._universe != other._universe:
        # Not provably equal: promise equality and let the runtime verify.
        universes.promise_are_equal(self, other)
        return self._unsafe_promise_universe(other)
    return self.copy()
def flatten(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Performs a flatmap operation on a column or expression given as a first
    argument. Datatype of this column or expression has to be iterable or Json array.
    Other columns specified in the method arguments are duplicated
    as many times as the length of the iterable.

    It is possible to get ids of source rows by using `table.id` column, e.g.
    `table.flatten(table.column_to_be_flattened, original_id = table.id)`.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | pet | age
    ... 1 | Dog | 2
    ... 7 | Cat | 5
    ... ''')
    >>> t2 = t1.flatten(t1.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    pet
    C
    D
    a
    g
    o
    t
    >>> t3 = t1.flatten(t1.pet, t1.age)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | age
    C   | 5
    D   | 2
    a   | 5
    g   | 2
    o   | 2
    t   | 5
    """
    intermediate_table = self.select(*args, **kwargs)
    all_args = combine_args_kwargs(args, kwargs)
    if not all_args:
        raise ValueError("Table.flatten() cannot have empty arguments list.")
    # The first named argument is the column to flatten; the rest are carried
    # along unchanged.
    flatten_name = next(iter(all_args))
    return intermediate_table._flatten(flatten_name)
def _flatten(
    self,
    flatten_name: str,
) -> Table:
    # Core of flatten(): expand the named column, duplicating all other
    # columns once per produced element.
    flatten_column = self._columns[flatten_name]
    assert isinstance(flatten_column, clmn.ColumnWithExpression)
    context = clmn.FlattenContext(
        orig_universe=self._universe,
        flatten_column=flatten_column,
    )
    # Re-wrap every non-flattened column in the new (expanded) context.
    columns = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
        if name != flatten_name
    }
    return Table(
        _columns={
            # flattened column first, preserving the original column order
            flatten_name: context.flatten_result_column,
            **columns,
        },
        _context=context,
    )
def sort(
    self,
    key: expr.ColumnExpression,
    instance: expr.ColumnExpression | None = None,
) -> Table:
    """Sorts a table by the specified keys.

    Args:
        key (ColumnExpression[int | float | datetime | str | bytes]):
            An expression to sort by.
        instance: An optional expression with instance. Rows are sorted within
            an instance; ``prev`` and ``next`` columns will only point to rows
            that have the same instance.

    Returns:
        pw.Table: The sorted table. Contains two columns: ``prev`` and
        ``next``, containing the pointers to the previous and next rows
        in sort order (missing at the ends).

    Example:

    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown('''
    ... name     | age | score
    ... Alice    | 25  | 80
    ... Bob      | 20  | 90
    ... Charlie  | 30  | 80
    ... ''')
    >>> table = table.with_id_from(pw.this.name)
    >>> table += table.sort(key=pw.this.age)
    >>> pw.debug.compute_and_print(table, include_id=True)
                | name    | age | score | prev        | next
    ^GBSDEEW... | Alice   | 25  | 80    | ^EDPSSB1... | ^DS9AT95...
    ^EDPSSB1... | Bob     | 20  | 90    |             | ^GBSDEEW...
    ^DS9AT95... | Charlie | 30  | 80    | ^GBSDEEW... |
    """
    sorting_context = clmn.SortingContext(
        self._eval(key),
        # a None instance is wrapped into a constant expression
        self._eval(clmn.ColumnExpression._wrap(instance)),
    )
    return Table(
        _columns={
            "prev": sorting_context.prev_column,
            "next": sorting_context.next_column,
        },
        _context=sorting_context,
    )
def _set_source(self, source: OutputHandle):
    """Attach lineage pointing at ``source`` to this table, its id column,
    its data columns and its universe (only where not already set)."""
    self._source = source
    if not hasattr(self._id_column, "lineage"):
        self._id_column.lineage = clmn.ColumnLineage(name="id", source=source)
    for column_name, column in self._columns.items():
        if not hasattr(column, "lineage"):
            column.lineage = clmn.ColumnLineage(name=column_name, source=source)
    if not hasattr(self._universe, "lineage"):
        self._universe.lineage = clmn.Lineage(source=source)
def _unsafe_promise_universe(self, other: TableLike) -> Table:
    """Coerce this table onto ``other``'s universe without any static check."""
    return self._table_with_context(
        clmn.PromiseSameUniverseContext(self._id_column, other._id_column)
    )
def _validate_expression(self, expression: expr.ColumnExpression):
    # Reject expressions that reference columns from a different universe
    # than this table's — evaluating them rowwise here would be undefined.
    for dep in expression._dependencies_above_reducer():
        if self._universe != dep._column.universe:
            raise ValueError(
                f"You cannot use {dep.to_column_expression()} in this context."
                + " Its universe is different than the universe of the table the method"
                + " was called on. You can use <table1>.with_universe_of(<table2>)"
                + " to assign universe of <table2> to <table1> if you're sure their"
                + " sets of keys are equal."
            )
def _wrap_column_in_context(
    self,
    context: clmn.Context,
    column: clmn.Column,
    name: str,
    lineage: clmn.Lineage | None = None,
) -> clmn.Column:
    """Contextualize column by wrapping it in expression."""
    # Build a reference expression for the column, then materialize it as a
    # new column evaluated in the given context (and its universe).
    expression = expr.ColumnReference(_table=self, _column=column, _name=name)
    return expression._column_with_expression_cls(
        context=context,
        universe=context.universe,
        expression=expression,
        lineage=lineage,
    )
def _table_with_context(self, context: clmn.Context) -> Table:
    """Rebuild this table with every column re-wrapped in ``context``."""
    wrapped = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
    }
    return Table(_columns=wrapped, _context=context)
def _table_restricted_context(self) -> clmn.TableRestrictedRowwiseContext:
    """Rowwise context restricted to the columns of this table only."""
    return clmn.TableRestrictedRowwiseContext(self._id_column, self)
def _eval(
    self, expression: expr.ColumnExpression, context: clmn.Context | None = None
) -> clmn.ColumnWithExpression:
    """Desugar expression and wrap it in given context (rowwise by default)."""
    evaluation_context = self._rowwise_context if context is None else context
    return expression._column_with_expression_cls(
        context=evaluation_context,
        universe=evaluation_context.universe,
        expression=expression,
    )
def _from_schema(cls: type[TTable], schema: type[Schema]) -> TTable:
    # Build an empty materialized table over a fresh universe, with one
    # materialized column per schema field (properties taken from the schema).
    universe = Universe()
    context = clmn.MaterializedContext(universe, schema.universe_properties)
    columns = {
        name: clmn.MaterializedColumn(
            universe,
            schema.column_properties(name),
        )
        for name in schema.column_names()
    }
    return cls(_columns=columns, _schema=schema, _context=context)
def __repr__(self) -> str:
    """Short, schema-only description of the table."""
    schema_hints = dict(self.typehints())
    return f"<pathway.Table schema={schema_hints}>"
def _with_same_universe(
    self,
    *columns: tuple[str, clmn.Column],
    schema: type[Schema] | None = None,
) -> Table:
    """Build a sibling table over the same universe from (name, column) pairs."""
    column_dict = dict(columns)
    return Table(
        _columns=column_dict,
        _schema=schema,
        _context=self._rowwise_context,
    )
def _sort_columns_by_other(self, other: Table):
    """Reorder ``self._columns`` in place to match the column order of ``other``."""
    assert self.keys() == other.keys()
    reordered = {name: self._columns[name] for name in other.keys()}
    self._columns = reordered
def _operator_dependencies(self) -> StableSet[Table]:
    """A plain table depends only on itself."""
    return StableSet((self,))
def debug(self, name: str):
    """Register a debug operator for this table under ``name``; returns self
    so the call can be chained."""
    G.add_operator(
        lambda id: DebugOperator(name, id),
        lambda operator: operator(self),
    )
    return self
def to(self, sink: DataSink) -> None:
    """Write this table to the given data sink."""
    # local import to avoid a circular dependency with table_io
    from pathway.internals import table_io

    table_io.table_to_datasink(self, sink)
def _materialize(self, universe: Universe):
    """Clone this table as materialized columns over ``universe``,
    keeping the schema and per-column properties."""
    materialized = {
        name: clmn.MaterializedColumn(universe, column.properties)
        for name, column in self._columns.items()
    }
    return Table(
        _columns=materialized,
        _schema=self.schema,
        _context=clmn.MaterializedContext(universe),
    )
def pointer_from(
    self, *args: Any, optional=False, instance: expr.ColumnReference | None = None
):
    """Pseudo-random hash of its argument. Produces pointer types. Applied column-wise.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age  owner  pet
    ... 1   10  Alice  dog
    ... 2    9    Bob  dog
    ... 3    8  Alice  cat
    ... 4    7    Bob  dog''')
    >>> g = t1.groupby(t1.owner).reduce(refcol = t1.pointer_from(t1.owner)) # g.id == g.refcol
    >>> pw.debug.compute_and_print(g.select(test = (g.id == g.refcol)), include_id=False)
    test
    True
    True
    """
    # The instance column participates in the hash like a trailing argument.
    if instance is not None:
        args = (*args, instance)
    # XXX verify types for the table primary_keys
    return expr.PointerExpression(self, *args, optional=optional)
def ix_ref(
    self,
    *args: expr.ColumnExpression | Value,
    optional: bool = False,
    context=None,
    instance: expr.ColumnReference | None = None,
):
    """Reindexes the table using expressions as primary keys.

    Equivalent to ``self.ix(self.pointer_from(*args, ...))``: the given
    expressions are hashed into pointers which are then looked up in this
    table. Uses keys from context, or tries to infer proper context from the
    expression. If optional is True, then None in expression values result in
    None values in the result columns. Missing values in table keys result in
    RuntimeError.

    Args:
        args: Column references used as primary-key values.
        optional: whether None keys should yield None results.
        context: anything that allows for `select` or `reduce`, or a
            `pathway.this` construct (latter results in a delayed operation).
        instance: optional grouping column mixed into the key hash.

    Returns:
        Row: indexed row.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... name   | pet
    ... Alice  | dog
    ... Bob    | cat
    ... Carole | cat
    ... David  | dog
    ... ''')
    >>> t2 = t1.with_id_from(pw.this.name)
    >>> t2 = t2.select(*pw.this, new_value=pw.this.ix_ref("Alice").pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    name   | pet | new_value
    Alice  | dog | dog
    Bob    | cat | dog
    Carole | cat | dog
    David  | dog | dog

    Single-row tables (e.g. results of a plain ``reduce``) can be accessed
    via ``ix_ref()`` with no arguments, supplying only ``context``.
    """
    key = self.pointer_from(*args, optional=optional, instance=instance)
    return self.ix(key, optional=optional, context=context)
def _subtables(self) -> StableSet[Table]:
    """The only subtable of a plain table is the table itself."""
    return StableSet((self,))
def _substitutions(
    self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
    """A plain table substitutes to itself with no column rewrites."""
    return (self, {})
def typehints(self) -> Mapping[str, Any]:
    """Return the types of the columns as a dictionary.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t1.typehints()
    mappingproxy({'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>})
    """
    # delegated to the schema, which owns the type information
    return self.schema.typehints()
def eval_type(self, expression: expr.ColumnExpression) -> dt.DType:
    """Infer the dtype of ``expression`` in this table's rowwise context."""
    interpreter = self._rowwise_context._get_type_interpreter()
    typed = interpreter.eval_expression(expression, state=TypeInterpreterState())
    return typed._dtype
def _auto_live(self) -> Table:
    """Make self automatically live in interactive mode"""
    from pathway.internals.interactive import is_interactive_mode_enabled

    return self.live() if is_interactive_mode_enabled() else self
def live(self) -> LiveTable[TSchema]:
    """Wrap this table in an experimental ``LiveTable``."""
    # local import to avoid a circular dependency with the interactive module
    from pathway.internals.interactive import LiveTable

    warnings.warn("live tables are an experimental feature", stacklevel=2)
    return LiveTable._create(self)
The provided code snippet includes necessary dependencies for implementing the `interval_join_right` function. Write a Python function `def interval_join_right( self: pw.Table, other: pw.Table, self_time: pw.ColumnExpression, other_time: pw.ColumnExpression, interval: Interval[int] | Interval[float] | Interval[datetime.timedelta], *on: pw.ColumnExpression, behavior: CommonBehavior | None = None, left_instance: pw.ColumnReference | None = None, right_instance: pw.ColumnReference | None = None, ) -> IntervalJoinResult` to solve the following problem:
Performs an interval right join of self with other using a time difference and join expressions. If `self_time + lower_bound <= other_time <= self_time + upper_bound` and conditions in `on` are satisfied, the rows are joined. Rows from the right side that haven't been matched with the left side are returned with missing values on the left side replaced with `None`. Args: other: the right side of the join. self_time: time expression in self. other_time: time expression in other. lower_bound: a lower bound on time difference between other_time and self_time. upper_bound: an upper bound on time difference between other_time and self_time. on: a list of column expressions. Each must have == as the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. behavior: defines temporal behavior of a join - features like delaying entries or ignoring late entries. left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Returns: IntervalJoinResult: a result of the interval join. A method `.select()` can be called on it to extract relevant columns from the result of a join. Examples: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 3 ... 2 | 4 ... 3 | 5 ... 4 | 11 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 0 ... 2 | 1 ... 3 | 4 ... 4 | 7 ... ''' ... ) >>> t3 = t1.interval_join_right(t2, t1.t, t2.t, pw.temporal.interval(-2, 1)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t | 0 | 7 3 | 1 3 | 4 4 | 4 5 | 4 >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 3 ... 2 | 1 | 4 ... 3 | 1 | 5 ... 4 | 1 | 11 ... 5 | 2 | 2 ... 6 | 2 | 3 ... 7 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 0 ... 2 | 1 | 1 ... 3 | 1 | 4 ... 4 | 1 | 7 ... 5 | 2 | 0 ... 6 | 2 | 2 ... 7 | 4 | 2 ... ''' ... 
) >>> t3 = t1.interval_join_right( ... t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b ... ).select(t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) a | left_t | right_t | | 0 | | 2 | | 7 1 | 3 | 1 1 | 3 | 4 1 | 4 | 4 1 | 5 | 4 2 | 2 | 0 2 | 2 | 2 2 | 3 | 2 Setting `behavior` allows to control temporal behavior of an interval join. Then, each side of the interval join keeps track of the maximal already seen time (`self_time` and `other_time`). The arguments of `behavior` mean in the context of an interval join what follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, interval join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Example without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 1 | 1 | 0 | 2 ... 2 | 2 | 2 | 4 ... 3 | 1 | 4 | 4 ... 4 | 2 | 8 | 8 ... 5 | 1 | 0 | 10 ... 6 | 1 | 4 | 10 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 42 | 1 | 2 | 2 ... 8 | 2 | 10 | 14 ... 10 | 2 | 4 | 30 ... ''' ... ) >>> result_without_cutoff = t1.interval_join_right( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... 
) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 5 | 42 | 1 | 0 | 2 | 10 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | 8 | 2 | 8 | 10 | 14 | 1 2 | 10 | 2 | 2 | 4 | 30 | 1 >>> result_with_cutoff = t1.interval_join_right( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... behavior=pw.temporal.common_behavior(cutoff=6), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | 8 | 2 | 8 | 10 | 14 | 1 The record with ``value=5`` from table ``t1`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``0 <= 8-6``). The record with ``value=10`` from table ``t2`` was not joined because its ``event_time`` was equal to the maximal already seen time minus ``cutoff`` (``4 <= 10-6``).
Here is the function:
def interval_join_right(
    self: pw.Table,
    other: pw.Table,
    self_time: pw.ColumnExpression,
    other_time: pw.ColumnExpression,
    interval: Interval[int] | Interval[float] | Interval[datetime.timedelta],
    *on: pw.ColumnExpression,
    behavior: CommonBehavior | None = None,
    left_instance: pw.ColumnReference | None = None,
    right_instance: pw.ColumnReference | None = None,
) -> IntervalJoinResult:
    """Performs an interval right join of self with other using a time difference
    and join expressions. If `self_time + lower_bound <=
    other_time <= self_time + upper_bound`
    and conditions in `on` are satisfied, the rows are joined. Rows from the right
    side that haven't been matched with the left side are returned with missing
    values on the left side replaced with `None`.

    Args:
        other: the right side of the join.
        self_time: time expression in self.
        other_time: time expression in other.
        interval: bounds on the time difference between other_time and
            self_time; rows join when
            ``interval.lower_bound <= other_time - self_time <= interval.upper_bound``.
        on: a list of column expressions. Each must have == as the top level
            operation and be of the form
            LHS: ColumnReference == RHS: ColumnReference.
        behavior: defines temporal behavior of a join - features like delaying
            entries or ignoring late entries.
        left_instance/right_instance: optional arguments describing partitioning
            of the data into separate instances.

    Returns:
        IntervalJoinResult: a result of the interval join. A method `.select()`
        can be called on it to extract relevant columns from the result of a join.

    Examples:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 3
    ...     2 | 4
    ...     3 | 5
    ...     4 | 11
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 0
    ...     2 | 1
    ...     3 | 4
    ...     4 | 7
    ...     '''
    ... )
    >>> t3 = t1.interval_join_right(t2, t1.t, t2.t, pw.temporal.interval(-2, 1)).select(
    ...     left_t=t1.t, right_t=t2.t
    ... )
    >>> pw.debug.compute_and_print(t3, include_id=False)
    left_t | right_t
           | 0
           | 7
    3      | 1
    3      | 4
    4      | 4
    5      | 4
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | a | t
    ...     1 | 1 | 3
    ...     2 | 1 | 4
    ...     3 | 1 | 5
    ...     4 | 1 | 11
    ...     5 | 2 | 2
    ...     6 | 2 | 3
    ...     7 | 3 | 4
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | b | t
    ...     1 | 1 | 0
    ...     2 | 1 | 1
    ...     3 | 1 | 4
    ...     4 | 1 | 7
    ...     5 | 2 | 0
    ...     6 | 2 | 2
    ...     7 | 4 | 2
    ...     '''
    ... )
    >>> t3 = t1.interval_join_right(
    ...     t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b
    ... ).select(t1.a, left_t=t1.t, right_t=t2.t)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    a | left_t | right_t
      |        | 0
      |        | 2
      |        | 7
    1 | 3      | 1
    1 | 3      | 4
    1 | 4      | 4
    1 | 5      | 4
    2 | 2      | 0
    2 | 2      | 2
    2 | 3      | 2

    Setting `behavior` allows to control temporal behavior of an interval join.
    Then, each side of the interval join keeps track of the maximal already seen
    time (`self_time` and `other_time`). The arguments of `behavior` mean in the
    context of an interval join what follows:
    - **delay** - buffers results until the maximal already seen time is greater
      than or equal to their time plus `delay`.
    - **cutoff** - ignores records with times less or equal to the maximal
      already seen time minus `cutoff`; it is also used to garbage collect
      records that have times lower or equal to the above threshold. When
      `cutoff` is not set, interval join will remember all records from both
      sides.
    - **keep_results** - if set to `True`, keeps all results of the operator.
      If set to `False`, keeps only results that are newer than the maximal
      seen time minus `cutoff`.

    Example without and with forgetting:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...     value | instance | event_time | __time__
    ...     1     | 1        | 0          | 2
    ...     2     | 2        | 2          | 4
    ...     3     | 1        | 4          | 4
    ...     4     | 2        | 8          | 8
    ...     5     | 1        | 0          | 10
    ...     6     | 1        | 4          | 10
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...     value | instance | event_time | __time__
    ...     42    | 1        | 2          | 2
    ...     8     | 2        | 10         | 14
    ...     10    | 2        | 4          | 30
    ...     '''
    ... )
    >>> result_without_cutoff = t1.interval_join_right(
    ...     t2,
    ...     t1.event_time,
    ...     t2.event_time,
    ...     pw.temporal.interval(-2, 2),
    ...     t1.instance == t2.instance,
    ... ).select(
    ...     left_value=t1.value,
    ...     right_value=t2.value,
    ...     instance=t1.instance,
    ...     left_time=t1.event_time,
    ...     right_time=t2.event_time,
    ... )
    >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False)
    left_value | right_value | instance | left_time | right_time | __time__ | __diff__
    1          | 42          | 1        | 0         | 2          | 2        | 1
    3          | 42          | 1        | 4         | 2          | 4        | 1
    5          | 42          | 1        | 0         | 2          | 10       | 1
    6          | 42          | 1        | 4         | 2          | 10       | 1
    4          | 8           | 2        | 8         | 10         | 14       | 1
    2          | 10          | 2        | 2         | 4          | 30       | 1
    >>> result_with_cutoff = t1.interval_join_right(
    ...     t2,
    ...     t1.event_time,
    ...     t2.event_time,
    ...     pw.temporal.interval(-2, 2),
    ...     t1.instance == t2.instance,
    ...     behavior=pw.temporal.common_behavior(cutoff=6),
    ... ).select(
    ...     left_value=t1.value,
    ...     right_value=t2.value,
    ...     instance=t1.instance,
    ...     left_time=t1.event_time,
    ...     right_time=t2.event_time,
    ... )
    >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False)
    left_value | right_value | instance | left_time | right_time | __time__ | __diff__
    1          | 42          | 1        | 0         | 2          | 2        | 1
    3          | 42          | 1        | 4         | 2          | 4        | 1
    6          | 42          | 1        | 4         | 2          | 10       | 1
    4          | 8           | 2        | 8         | 10         | 14       | 1

    The record with ``value=5`` from table ``t1`` was not joined because its
    ``event_time`` was less than the maximal already seen time minus ``cutoff``
    (``0 <= 8-6``). The record with ``value=10`` from table ``t2`` was not
    joined because its ``event_time`` was equal to the maximal already seen time
    minus ``cutoff`` (``4 <= 10-6``).
    """
    # Thin wrapper: all validation and construction lives in
    # IntervalJoinResult._interval_join; JoinMode.RIGHT keeps unmatched rows
    # from `other` (left-side columns become None in the result).
    return IntervalJoinResult._interval_join(
        self,
        other,
        self_time,
        other_time,
        interval,
        *on,
        behavior=behavior,
        mode=pw.JoinMode.RIGHT,
        left_instance=left_instance,
        right_instance=right_instance,
    )
) >>> t3 = t1.interval_join_right( ... t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b ... ).select(t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) a | left_t | right_t | | 0 | | 2 | | 7 1 | 3 | 1 1 | 3 | 4 1 | 4 | 4 1 | 5 | 4 2 | 2 | 0 2 | 2 | 2 2 | 3 | 2 Setting `behavior` allows to control temporal behavior of an interval join. Then, each side of the interval join keeps track of the maximal already seen time (`self_time` and `other_time`). The arguments of `behavior` mean in the context of an interval join what follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, interval join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Example without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 1 | 1 | 0 | 2 ... 2 | 2 | 2 | 4 ... 3 | 1 | 4 | 4 ... 4 | 2 | 8 | 8 ... 5 | 1 | 0 | 10 ... 6 | 1 | 4 | 10 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 42 | 1 | 2 | 2 ... 8 | 2 | 10 | 14 ... 10 | 2 | 4 | 30 ... ''' ... ) >>> result_without_cutoff = t1.interval_join_right( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... 
) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 5 | 42 | 1 | 0 | 2 | 10 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | 8 | 2 | 8 | 10 | 14 | 1 2 | 10 | 2 | 2 | 4 | 30 | 1 >>> result_with_cutoff = t1.interval_join_right( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... behavior=pw.temporal.common_behavior(cutoff=6), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | 8 | 2 | 8 | 10 | 14 | 1 The record with ``value=5`` from table ``t1`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``0 <= 8-6``). The record with ``value=10`` from table ``t2`` was not joined because its ``event_time`` was equal to the maximal already seen time minus ``cutoff`` (``4 <= 10-6``). |
from __future__ import annotations
import datetime
from abc import abstractmethod
from dataclasses import dataclass
from typing import Any, Generic, TypeVar, overload
import pathway.internals as pw
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
TableReplacementWithNoneDesugaring,
TableSubstitutionDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.type_interpreter import eval_type
from .temporal_behavior import CommonBehavior, apply_temporal_behavior
from .utils import IntervalType, TimeEventType, check_joint_types, get_default_origin
class Interval(Generic[T]):
    """Bounds on the allowed time difference in temporal joins.

    A row pair joins when
    ``lower_bound <= other_time - self_time <= upper_bound``
    (see the interval_join family of methods).
    """

    lower_bound: T
    upper_bound: T
class IntervalJoinResult(DesugaringContext):
    """
    Result of an interval join between tables.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 3
    ...     2 | 4
    ...     3 | 5
    ...     4 | 11
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...       | t
    ...     1 | 0
    ...     2 | 1
    ...     3 | 4
    ...     4 | 7
    ...     '''
    ... )
    >>> join_result = t1.interval_join_inner(t2, t1.t, t2.t, pw.temporal.interval(-2, 1))
    >>> isinstance(join_result, pw.temporal.IntervalJoinResult)
    True
    >>> pw.debug.compute_and_print(
    ...     join_result.select(left_t=t1.t, right_t=t2.t), include_id=False
    ... )
    left_t | right_t
    3      | 1
    3      | 4
    4      | 4
    5      | 4
    """

    # Maps each user-visible table to its internal replacement; consumed by
    # _desugaring() when resolving pw.left/pw.right references in select().
    _table_substitution: dict[pw.TableLike, pw.Table]
    _filter_out_results_of_forgetting: bool

    def __init__(
        self,
        left: pw.Table,
        right: pw.Table,
        table_substitution: dict[pw.TableLike, pw.Table],
        _filter_out_results_of_forgetting: bool,
    ):
        # pw.left / pw.right in user expressions resolve to the join sides;
        # pw.this stays as-is here (resolved later), hence the type: ignore.
        self._substitution = {
            pw.left: left,
            pw.right: right,
            pw.this: pw.this,  # type: ignore[dict-item]
        }
        self._table_substitution = table_substitution
        self._filter_out_results_of_forgetting = _filter_out_results_of_forgetting

    def _should_filter_out_results_of_forgetting(
        behavior: CommonBehavior | None,
    ) -> bool:
        # Forgetting (cutoff) produces retraction entries; when the user asked
        # to keep_results, those retractions must be filtered out of the output.
        # NOTE(review): takes no `self` — presumably a @staticmethod in the
        # original source; the decorator is not visible in this extract.
        return (
            behavior is not None
            and behavior.cutoff is not None
            and behavior.keep_results
        )

    def _interval_join(
        left: pw.Table,
        right: pw.Table,
        left_time_expression: pw.ColumnExpression,
        right_time_expression: pw.ColumnExpression,
        interval: Interval[int] | Interval[float] | Interval[datetime.timedelta],
        *on: pw.ColumnExpression,
        behavior: CommonBehavior | None = None,
        mode: pw.JoinMode,
        left_instance: pw.ColumnReference | None = None,
        right_instance: pw.ColumnReference | None = None,
    ) -> IntervalJoinResult:
        """Creates an IntervalJoinResult. To perform an interval join, it uses two
        tumbling windows of size `lower_bound` + `upper_bound` and then filters
        the result.
        """
        # Time expressions and interval bounds must share a consistent type
        # (int/float/datetime+timedelta).
        check_joint_types(
            {
                "self_time_expression": (left_time_expression, TimeEventType),
                "other_time_expression": (right_time_expression, TimeEventType),
                "lower_bound": (interval.lower_bound, IntervalType),
                "upper_bound": (interval.upper_bound, IntervalType),
            }
        )
        if left == right:
            raise ValueError(
                "Cannot join table with itself. Use <table>.copy() as one of the arguments of the join."
            )
        if interval.lower_bound > interval.upper_bound:  # type: ignore[operator]
            raise ValueError(
                "lower_bound has to be less than or equal to the upper_bound in the Table.interval_join()."
            )
        # A degenerate interval (lower == upper) reduces to an equi-join on
        # shifted time and has a dedicated, cheaper implementation.
        if interval.lower_bound == interval.upper_bound:
            cls: type[IntervalJoinResult] = _ZeroDifferenceIntervalJoinResult
        else:
            cls = _NonZeroDifferenceIntervalJoinResult
        return cls._interval_join(
            left,
            right,
            left_time_expression,
            right_time_expression,
            interval,
            *on,
            behavior=behavior,
            mode=mode,
            left_instance=left_instance,
            right_instance=right_instance,
        )

    def _desugaring(self) -> TableSubstitutionDesugaring:
        # Used by the desugaring machinery to rewrite user expressions over
        # original tables into expressions over the join's internal tables.
        return TableSubstitutionDesugaring(self._table_substitution)

    def select(self, *args: pw.ColumnReference, **kwargs: Any) -> pw.Table:
        """
        Computes a result of an interval join.

        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.

        Returns:
            Table: Created table.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown(
        ...     '''
        ...       | a | t
        ...     1 | 1 | 3
        ...     2 | 1 | 4
        ...     3 | 1 | 5
        ...     4 | 1 | 11
        ...     5 | 2 | 2
        ...     6 | 2 | 3
        ...     7 | 3 | 4
        ...     '''
        ... )
        >>> t2 = pw.debug.table_from_markdown(
        ...     '''
        ...       | b | t
        ...     1 | 1 | 0
        ...     2 | 1 | 1
        ...     3 | 1 | 4
        ...     4 | 1 | 7
        ...     5 | 2 | 0
        ...     6 | 2 | 2
        ...     7 | 4 | 2
        ...     '''
        ... )
        >>> t3 = t1.interval_join_inner(
        ...     t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b
        ... ).select(t1.a, left_t=t1.t, right_t=t2.t)
        >>> pw.debug.compute_and_print(t3, include_id=False)
        a | left_t | right_t
        1 | 3      | 1
        1 | 3      | 4
        1 | 4      | 4
        1 | 5      | 4
        2 | 2      | 0
        2 | 2      | 2
        2 | 3      | 2
        """
        # Abstract here; concrete join-result subclasses provide the
        # implementation.
        ...
class CommonBehavior(Behavior):
    """Defines temporal behavior of windows and temporal joins."""

    # Buffer entries until the maximal seen time reaches entry time + delay.
    delay: IntervalType | None
    # Ignore / garbage-collect entries older than maximal seen time - cutoff.
    cutoff: IntervalType | None
    # If True, results older than the cutoff threshold stay in the output.
    keep_results: bool
class Table(
    Joinable,
    OperatorInput,
    Generic[TSchema],
):
    """Collection of named columns over identical universes.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> isinstance(t1, pw.Table)
    True
    """

    if TYPE_CHECKING:
        # These methods are implemented in pathway.stdlib modules and attached
        # to Table at runtime; the imports exist only so static type checkers
        # see them as members.
        from pathway.stdlib.ordered import diff  # type: ignore[misc]
        from pathway.stdlib.statistical import interpolate  # type: ignore[misc]
        from pathway.stdlib.temporal import (  # type: ignore[misc]
            asof_join,
            asof_join_left,
            asof_join_outer,
            asof_join_right,
            asof_now_join,
            asof_now_join_inner,
            asof_now_join_left,
            interval_join,
            interval_join_inner,
            interval_join_left,
            interval_join_outer,
            interval_join_right,
            window_join,
            window_join_inner,
            window_join_left,
            window_join_outer,
            window_join_right,
            windowby,
        )
        from pathway.stdlib.viz import (  # type: ignore[misc]
            _repr_mimebundle_,
            plot,
            show,
        )

    # Evaluated columns of the table, keyed by name.
    _columns: dict[str, clmn.Column]
    # Static schema (column names and dtypes).
    _schema: type[Schema]
    # Column of row identifiers (pointers).
    _id_column: clmn.IdColumn
    # Context used to evaluate row-wise expressions over this table.
    _rowwise_context: clmn.RowwiseContext
    _source: SetOnceProperty[OutputHandle] = SetOnceProperty()
    """Lateinit by operator."""
def __init__(
    self,
    _columns: Mapping[str, clmn.Column],
    _context: clmn.Context,
    _schema: type[Schema] | None = None,
):
    """Build a table from already-evaluated columns and their context.

    If no schema is given, one is derived from the columns' dtypes.
    """
    if _schema is None:
        _schema = schema_from_columns(_columns)
    super().__init__(_context)
    # Copy to detach from the caller's mapping.
    self._columns = dict(_columns)
    self._schema = _schema
    self._id_column = _context.id_column
    # `pw.this` used in expressions on this table resolves to the table itself.
    self._substitution = {thisclass.this: self}
    self._rowwise_context = clmn.RowwiseContext(self._id_column)
def id(self) -> expr.ColumnReference:
    """Get reference to pseudocolumn containing id's of a table.

    NOTE(review): accessed as ``t.id`` elsewhere in this file — presumably
    decorated with @property in the original source; the decorator is not
    visible in this extract.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1.select(ids = t1.id)
    >>> t2.typehints()['ids']
    <class 'pathway.engine.Pointer'>
    >>> pw.debug.compute_and_print(t2.select(test=t2.id == t2.ids), include_id=False)
    test
    True
    True
    True
    True
    """
    return expr.ColumnReference(_table=self, _column=self._id_column, _name="id")
def column_names(self):
    """Return the names of the table's columns (alias of ``keys``)."""
    return self.keys()
def keys(self):
    """Return a view of the table's column names."""
    return self._columns.keys()
def _get_column(self, name: str) -> clmn.Column:
    """Return the internal column object for ``name`` (KeyError if absent)."""
    return self._columns[name]
def _ipython_key_completions_(self):
    # IPython hook: enables tab-completion inside t["<col>"] lookups.
    return list(self.column_names())
def __dir__(self):
    # Expose column names alongside regular attributes for interactive
    # discovery (dir(t), editor autocompletion).
    return list(super().__dir__()) + list(self.column_names())
def _C(self) -> TSchema:
    """Typed alias of the ``C`` column accessor.

    NOTE(review): presumably a @property in the original source (decorator not
    visible in this extract).
    """
    return self.C  # type: ignore
def schema(self) -> type[Schema]:
    """Get schema of the table.

    NOTE(review): used as ``t1.schema`` (no call) in the doctest below —
    presumably a @property in the original source.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t1.schema
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
    >>> t1.typehints()['age']
    <class 'int'>
    """
    return self._schema
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference:
    """Resolve *name* to a column reference on this table.

    ``"id"`` resolves to the id pseudocolumn; unknown names raise
    *exception_type*.
    """
    name = self._column_deprecation_rename(name)
    if name == "id":
        return self.id
    if name in self.keys():
        return expr.ColumnReference(
            _table=self, _column=self._get_column(name), _name=name
        )
    raise exception_type(f"Table has no column with name {name}.")
# NOTE(review): the two stub defs below are typing overloads — presumably
# decorated with @overload in the original source (decorators not visible in
# this extract).
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...

def __getitem__(self, args: list[str | expr.ColumnReference]) -> Table: ...

def __getitem__(
    self, args: str | expr.ColumnReference | list[str | expr.ColumnReference]
) -> expr.ColumnReference | Table:
    """Get columns by name.

    Warning:
        - Does not allow repetitions of columns.
        - Fails if tries to access nonexistent column.

    Args:
        names: a single column name or list of columns names to be extracted
            from `self`.

    Returns:
        Table with specified columns, or column expression (if single argument
        given). Instead of column names, column references are valid here.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1[["age", "pet"]]
    >>> t2 = t1[["age", t1.pet]]
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | pet
    7   | dog
    8   | cat
    9   | dog
    10  | dog
    """
    if isinstance(args, expr.ColumnReference):
        # A reference must point at this table (or at pw.this, which is
        # resolved by name below); references to other tables are rejected.
        if (args.table is not self) and not isinstance(
            args.table, thisclass.ThisMetaclass
        ):
            raise ValueError(
                "Table.__getitem__ argument has to be a ColumnReference to the same table or pw.this, or a string "
                + "(or a list of those)."
            )
        return self._get_colref_by_name(args.name, KeyError)
    elif isinstance(args, str):
        return self._get_colref_by_name(args, KeyError)
    else:
        # List of names/references: build a sub-table via select.
        return self.select(*[self[name] for name in args])
def from_columns(
    *args: expr.ColumnReference, **kwargs: expr.ColumnReference
) -> Table:
    """Build a table from columns.

    All columns must have the same ids. Columns' names must be pairwise
    distinct.

    NOTE(review): takes no ``self``/``cls`` — presumably a @staticmethod in the
    original source (decorator not visible in this extract).

    Args:
        args: List of columns.
        kwargs: Columns with their new names.

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> t2 = pw.Table.empty(foo=float, bar=float).with_universe_of(t1)
    >>> t3 = pw.Table.from_columns(t1.pet, qux=t2.foo)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | qux
    """
    all_args = cast(
        dict[str, expr.ColumnReference], combine_args_kwargs(args, kwargs)
    )
    if not all_args:
        raise ValueError("Table.from_columns() cannot have empty arguments list")
    else:
        # Use the first column's table as the reference universe and require
        # every other column to provably share it.
        arg = next(iter(all_args.values()))
        table: Table = arg.table
        for arg in all_args.values():
            if not G.universe_solver.query_are_equal(
                table._universe, arg.table._universe
            ):
                raise ValueError(
                    "Universes of all arguments of Table.from_columns() have to be equal.\n"
                    + "Consider using Table.promise_universes_are_equal() to assert it.\n"
                    + "(However, untrue assertion might result in runtime errors.)"
                )
        return table.select(*args, **kwargs)
def concat_reindex(self, *tables: Table) -> Table:
    """Concatenate contents of several tables.

    This is similar to PySpark union. All tables must have the same schema.
    Each row is reindexed.

    Args:
        tables: List of tables to concatenate. All tables must have the same
            schema.

    Returns:
        Table: The concatenated table. It will have new, synthetic ids.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | pet
    ... 1 | Dog
    ... 7 | Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | pet
    ... 1 | Manul
    ... 8 | Octopus
    ... ''')
    >>> t3 = t1.concat_reindex(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet
    Cat
    Dog
    Manul
    Octopus
    """
    # Reindex each input with a distinct salt so the resulting key spaces
    # cannot collide, then concatenate.
    reindexed = []
    for salt, part in enumerate([self, *tables]):
        reindexed.append(part.with_id_from(part.id, salt))
    universes.promise_are_pairwise_disjoint(*reindexed)
    return Table.concat(*reindexed)
def empty(**kwargs: dt.DType) -> Table:
    """Creates an empty table with a schema specified by kwargs.

    NOTE(review): takes no ``self``/``cls`` — presumably a @staticmethod in the
    original source (decorator not visible in this extract).

    Args:
        kwargs: Dict whose keys are column names and values are column types.

    Returns:
        Table: Created empty table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> pw.debug.compute_and_print(t1, include_id=False)
    age | pet
    """
    # Local import — presumably avoids a circular import at module load time;
    # confirm against the package layout.
    from pathway.internals import table_io

    ret = table_io.empty_from_schema(schema_from_types(None, **kwargs))
    # Tell the universe solver this universe is provably empty.
    G.universe_solver.register_as_empty(ret._universe)
    return ret
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Build a new table with columns specified by kwargs.

    Output columns' names are keys(kwargs). values(kwargs) can be raw values,
    boxed values, columns. Assigning to id reindexes the table.

    Args:
        args: Column references.
        kwargs: Column expressions with their new assigned names.

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... pet
    ... Dog
    ... Cat
    ... ''')
    >>> t2 = t1.select(animal=t1.pet, desc="fluffy")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    animal | desc
    Cat    | fluffy
    Dog    | fluffy
    """
    # Merge positional references and keyword expressions into one mapping,
    # then validate and evaluate each expression against this table.
    combined = combine_args_kwargs(args, kwargs)
    evaluated = []
    for out_name, expression in combined.items():
        self._validate_expression(expression)
        evaluated.append((out_name, self._eval(expression)))
    return self._with_same_universe(*evaluated)
def __add__(self, other: Table) -> Table:
    """Build a union of `self` with `other`.

    Semantics: Returns a table C, such that
        - C.columns == self.columns + other.columns
        - C.id == self.id == other.id

    Args:
        other: The other table. `self.id` must be equal `other.id` and
            `self.columns` and `other.columns` must be disjoint (or overlapping
            names are THE SAME COLUMN)

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   pet
    ... 1 Dog
    ... 7 Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   age
    ... 1 10
    ... 7 3
    ... ''')
    >>> t3 = t1 + t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | age
    Cat | 3
    Dog | 10
    """
    # Both operands must provably share a universe; otherwise combining their
    # columns row-by-row is meaningless.
    if G.universe_solver.query_are_equal(self._universe, other._universe):
        return self.select(*self, *other)
    raise ValueError(
        "Universes of all arguments of Table.__add__() have to be equal.\n"
        + "Consider using Table.promise_universes_are_equal() to assert it.\n"
        + "(However, untrue assertion might result in runtime errors.)"
    )
def slice(self) -> TableSlice:
    """Creates a collection of references to self columns.

    Supports basic column manipulation methods.

    NOTE(review): used as ``t1.slice.without(...)`` (no call) in the doctest —
    presumably a @property in the original source.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t1.slice.without("age")
    TableSlice({'owner': <table1>.owner, 'pet': <table1>.pet})
    """
    return TableSlice(dict(**self), self)
def filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    """Filter a table according to `filter_expression` condition.

    Args:
        filter_expression: `ColumnExpression` that specifies the filtering
            condition.

    Returns:
        Table: Result has the same schema as `self` and its ids are subset of
        `self.id`.

    Example:

    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ... 1     3
    ... 7     0
    ... ''')
    >>> filtered = vertices.filter(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(filtered, include_id=False)
    label | outdegree
    7     | 0
    """
    # The predicate must be statically typed as bool.
    filter_type = self.eval_type(filter_expression)
    if filter_type != dt.BOOL:
        raise TypeError(
            f"Filter argument of Table.filter() has to be bool, found {filter_type}."
        )
    ret = self._filter(filter_expression)
    # If the predicate is an `is not None`-style check on one of this table's
    # own columns, the surviving rows cannot hold None in that column, so its
    # dtype can be narrowed from Optional[T] to T.
    if (
        filter_col := expr.get_column_filtered_by_is_none(filter_expression)
    ) is not None and filter_col.table == self:
        name = filter_col.name
        dtype = self._columns[name].dtype
        ret = ret.update_types(**{name: dt.unoptionalize(dtype)})
    return ret
def split(
    self, split_expression: expr.ColumnExpression
) -> tuple[Table[TSchema], Table[TSchema]]:
    """Split a table according to `split_expression` condition.

    Args:
        split_expression: `ColumnExpression` that specifies the split condition.

    Returns:
        positive_table, negative_table: tuple of tables, with the same schemas
        as `self` and with ids that are subsets of `self.id`, and provably
        disjoint.

    Example:

    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ... 1     3
    ... 7     0
    ... ''')
    >>> positive, negative = vertices.split(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(positive, include_id=False)
    label | outdegree
    7     | 0
    >>> pw.debug.compute_and_print(negative, include_id=False)
    label | outdegree
    1     | 3
    """
    positive = self.filter(split_expression)
    negative = self.filter(~split_expression)
    # Record for the universe solver that the two halves partition self:
    # they are disjoint and together cover the original universe.
    universes.promise_are_pairwise_disjoint(positive, negative)
    universes.promise_are_equal(
        self, Table.concat(positive, negative)
    )  # TODO: add API method for this
    return positive, negative
def _filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    """Evaluate the predicate and restrict the universe to rows where it holds."""
    self._validate_expression(filter_expression)
    predicate = self._eval(filter_expression)
    assert self._universe == predicate.universe
    return self._table_with_context(
        clmn.FilterContext(predicate, self._id_column)
    )
def _gradual_broadcast(
    self,
    threshold_table,
    lower_column,
    value_column,
    upper_column,
) -> Table:
    # Attach the broadcast `apx_value` column (computed by
    # __gradual_broadcast) alongside this table's existing columns.
    return self + self.__gradual_broadcast(
        threshold_table, lower_column, value_column, upper_column
    )
def __gradual_broadcast(
    self,
    threshold_table,
    lower_column,
    value_column,
    upper_column,
):
    # Build a single-column table (`apx_value`) from a GradualBroadcastContext
    # over the evaluated lower/value/upper columns of threshold_table.
    context = clmn.GradualBroadcastContext(
        self._id_column,
        threshold_table._eval(lower_column),
        threshold_table._eval(value_column),
        threshold_table._eval(upper_column),
    )
    return Table(_columns={"apx_value": context.apx_value_column}, _context=context)
def _forget(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
    mark_forgetting_records: bool,
) -> Table:
    """Wrap the table in a ForgetContext built from the evaluated
    threshold/time columns."""
    threshold = self._eval(threshold_column)
    time = self._eval(time_column)
    return self._table_with_context(
        clmn.ForgetContext(
            self._id_column,
            threshold,
            time,
            mark_forgetting_records,
        )
    )
def _forget_immediately(
    self,
) -> Table:
    """Wrap the table in a ForgetImmediatelyContext."""
    context = clmn.ForgetImmediatelyContext(self._id_column)
    return self._table_with_context(context)
def _filter_out_results_of_forgetting(
    self,
) -> Table:
    # Drops the retraction entries produced by forgetting.
    # The output universe is a superset of the input universe because
    # forgetting entries are filtered out: at each point in time, the set of
    # keys with a +1 diff can be bigger than in the input table.
    return self._table_with_context(
        clmn.FilterOutForgettingContext(self._id_column)
    )
def _freeze(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    # Wrap self in a FreezeContext driven by the evaluated threshold/time columns.
    threshold = self._eval(threshold_column)
    time = self._eval(time_column)
    return self._table_with_context(
        clmn.FreezeContext(self._id_column, threshold, time)
    )
def _buffer(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    # Wrap self in a BufferContext driven by the evaluated threshold/time columns.
    threshold = self._eval(threshold_column)
    time = self._eval(time_column)
    return self._table_with_context(
        clmn.BufferContext(self._id_column, threshold, time)
    )
def difference(self, other: Table) -> Table[TSchema]:
    r"""Restrict self universe to keys not appearing in the other table.

    Args:
        other: table with ids to remove from self.

    Returns:
        Table: table with restricted universe, with the same set of columns

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.difference(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    10 | Alice | 1
    """
    # Set difference on key sets; columns are carried over unchanged.
    return self._table_with_context(
        clmn.DifferenceContext(left=self._id_column, right=other._id_column)
    )
def intersect(self, *tables: Table) -> Table[TSchema]:
    """Restrict self universe to keys appearing in all of the tables.

    Args:
        tables: tables keys of which are used to restrict universe.

    Returns:
        Table: table with restricted universe, with the same set of columns

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.intersect(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    """
    all_universes = (
        self._universe,
        *tuple(tab._universe for tab in tables),
    )
    common = G.universe_solver.get_intersection(*all_universes)
    context: clmn.Context
    if common in all_universes:
        # One of the inputs already equals the intersection, so a plain
        # restriction of self to that universe is enough.
        context = clmn.RestrictContext(self._id_column, common)
    else:
        context = clmn.IntersectContext(
            intersecting_ids=(
                self._id_column,
                *tuple(tab._id_column for tab in tables),
            ),
        )
    return self._table_with_context(context)
def restrict(self, other: TableLike) -> Table[TSchema]:
    """Restrict self universe to keys appearing in other.

    Args:
        other: table which universe is used to restrict universe of self.

    Returns:
        Table: table with restricted universe, with the same set of columns

    Raises:
        ValueError: if other's universe is not known to be a subset of self's.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... '''
    ... )
    >>> t2.promise_universe_is_subset_of(t1)
    <pathway.Table schema={'cost': <class 'int'>}>
    >>> t3 = t1.restrict(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    """
    if not G.universe_solver.query_is_subset(other._universe, self._universe):
        # Fix: the two message parts used to be concatenated without a
        # separator ("...self universe.Consider using..."); use ".\n" like
        # the analogous messages in _concat()/_update_cells().
        raise ValueError(
            "Table.restrict(): other universe has to be a subset of self universe.\n"
            + "Consider using Table.promise_universe_is_subset_of() to assert it."
        )
    context = clmn.RestrictContext(self._id_column, other._universe)
    # Re-wrap every column of self in the restricting context.
    columns = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
    }
    return Table(
        _columns=columns,
        _context=context,
    )
def copy(self) -> Table[TSchema]:
    """Returns a copy of a table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.copy()
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | owner | pet
    7 | Bob | dog
    8 | Alice | cat
    9 | Bob | dog
    10 | Alice | dog
    >>> t1 is t2
    False
    """
    # Copy as the same (possibly subclassed) table type.
    cls = type(self)
    return self._copy_as(cls)
def _copy_as(self, table_type: type[TTable], /, **kwargs) -> TTable:
    # Re-wrap every column in the rowwise context so the result is a fresh
    # table object over the same universe.
    ctx = self._rowwise_context
    wrapped = {
        name: self._wrap_column_in_context(ctx, column, name)
        for name, column in self._columns.items()
    }
    return table_type(_columns=wrapped, _context=ctx, **kwargs)
def groupby(
    self,
    *args: expr.ColumnReference,
    id: expr.ColumnReference | None = None,
    sort_by: expr.ColumnReference | None = None,
    _filter_out_results_of_forgetting: bool = False,
    instance: expr.ColumnReference | None = None,
) -> groupbys.GroupedTable:
    """Groups table by columns from args.

    Note:
        Usually followed by `.reduce()` that aggregates the result and returns a table.

    Args:
        args: columns to group by.
        id: if provided, is the column used to set id's of the rows of the result
        sort_by: if provided, column values are used as sorting keys for particular reducers
        instance: optional argument describing partitioning of the data into separate instances

    Returns:
        GroupedTable: Groupby object.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.groupby(t1.pet, t1.owner).reduce(t1.owner, t1.pet, ageagg=pw.reducers.sum(t1.age))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | pet | ageagg
    Alice | cat | 8
    Alice | dog | 10
    Bob | dog | 16
    """
    # `instance` is implemented as an extra trailing grouping column.
    if instance is not None:
        args = (*args, instance)
    if id is not None:
        if len(args) == 0:
            # Grouping solely by the id column.
            args = (id,)
        elif len(args) > 1:
            raise ValueError(
                "Table.groupby() cannot have id argument when grouping by multiple columns."
            )
        elif args[0]._column != id._column:
            # A single positional grouping column is allowed alongside `id`
            # only when both refer to the same underlying column.
            raise ValueError(
                "Table.groupby() received id argument and is grouped by a single column,"
                + " but the arguments are not equal.\n"
                + "Consider using <table>.groupby(id=...), skipping the positional argument."
            )
    # Reject anything that is not a ColumnReference, with a friendlier
    # message for plain strings (a common mistake).
    for arg in args:
        if not isinstance(arg, expr.ColumnReference):
            if isinstance(arg, str):
                raise ValueError(
                    f"Expected a ColumnReference, found a string. Did you mean <table>.{arg}"
                    + f" instead of {repr(arg)}?"
                )
            else:
                raise ValueError(
                    "All Table.groupby() arguments have to be a ColumnReference."
                )
    return groupbys.GroupedTable.create(
        table=self,
        grouping_columns=args,
        set_id=id is not None,
        sort_by=sort_by,
        _filter_out_results_of_forgetting=_filter_out_results_of_forgetting,
    )
def reduce(
    self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
) -> Table:
    """Reduce a table to a single row.

    Equivalent to `self.groupby().reduce(*args, **kwargs)`.

    Args:
        args: reducer to reduce the table with
        kwargs: reducer to reduce the table with. Its key is the new name of a column.

    Returns:
        Table: Reduced table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.reduce(ageagg=pw.reducers.argmin(t1.age))
    >>> pw.debug.compute_and_print(t2, include_id=False) # doctest: +ELLIPSIS
    ageagg
    ^...
    >>> t3 = t2.select(t1.ix(t2.ageagg).age, t1.ix(t2.ageagg).pet)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | pet
    7 | dog
    """
    # Grouping by no columns collapses the whole table into one group.
    grouped = self.groupby()
    return grouped.reduce(*args, **kwargs)
def deduplicate(
    self,
    *,
    value: expr.ColumnExpression,
    instance: expr.ColumnExpression | None = None,
    acceptor: Callable[[T, T], bool],
    persistent_id: str | None = None,
) -> Table:
    """Deduplicates rows in `self` on `value` column using acceptor function.

    It keeps rows which where accepted by the acceptor function.
    Acceptor operates on two arguments - current value and the previously accepted value.

    Args:
        value: column expression used for deduplication.
        instance: Grouping column. For rows with different
            values in this column, deduplication will be performed separately.
            Defaults to None.
        acceptor: callback telling whether two values are different.
        persistent_id: (unstable) An identifier, under which the state of the table
            will be persisted or ``None``, if there is no need to persist the state of this table.
            When a program restarts, it restores the state for all input tables according to what
            was saved for their ``persistent_id``. This way it's possible to configure the start of
            computations from the moment they were terminated last time.

    Returns:
        Table: the result of deduplication.

    Example:

    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ... val | __time__
    ...  1  |     2
    ...  2  |     4
    ...  3  |     6
    ...  4  |     8
    ... '''
    ... )
    >>>
    >>> def acceptor(new_value, old_value) -> bool:
    ...     return new_value >= old_value + 2
    ...
    >>>
    >>> result = table.deduplicate(value=pw.this.val, acceptor=acceptor)
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    val | __time__ | __diff__
    1 | 2 | 1
    1 | 6 | -1
    3 | 6 | 1
    >>>
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ... val | instance | __time__
    ...  1  |    1     |     2
    ...  2  |    1     |     4
    ...  3  |    2     |     6
    ...  4  |    1     |     8
    ...  4  |    2     |     8
    ...  5  |    1     |    10
    ... '''
    ... )
    >>>
    >>> def acceptor(new_value, old_value) -> bool:
    ...     return new_value >= old_value + 2
    ...
    >>>
    >>> result = table.deduplicate(
    ...     value=pw.this.val, instance=pw.this.instance, acceptor=acceptor
    ... )
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    val | instance | __time__ | __diff__
    1 | 1 | 2 | 1
    3 | 2 | 6 | 1
    1 | 1 | 8 | -1
    4 | 1 | 8 | 1
    """
    # No instance given: deduplicate the whole table as a single group.
    if instance is None:
        instance = expr.ColumnConstExpression(None)
    for expression in (value, instance):
        self._validate_expression(expression)
    context = clmn.DeduplicateContext(
        self._eval(value),
        (self._eval(instance),),
        acceptor,
        self._id_column,
        persistent_id,
    )
    return self._table_with_context(context)
def ix(
    self, expression: expr.ColumnExpression, *, optional: bool = False, context=None
) -> Table:
    """Reindexes the table using expression values as keys. Uses keys from context, or tries to infer
    proper context from the expression.
    If optional is True, then None in expression values result in None values in the result columns.
    Missing values in table keys result in RuntimeError.

    Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
    (latter results in returning a delayed operation, and should be only used when using `ix` inside
    join().select() or groupby().reduce() sequence).

    Returns:
        Reindexed table with the same set of columns.

    Example:

    >>> import pathway as pw
    >>> t_animals = pw.debug.table_from_markdown('''
    ...   | epithet    | genus
    ... 1 | upupa      | epops
    ... 2 | acherontia | atropos
    ... 3 | bubo       | scandiacus
    ... 4 | dynastes   | hercules
    ... ''')
    >>> t_birds = pw.debug.table_from_markdown('''
    ...   | desc
    ... 2 | hoopoe
    ... 4 | owl
    ... ''')
    >>> ret = t_birds.select(t_birds.desc, latin=t_animals.ix(t_birds.id).genus)
    >>> pw.debug.compute_and_print(ret, include_id=False)
    desc | latin
    hoopoe | atropos
    owl | hercules
    """
    if context is None:
        # Try to infer the context from the tables referenced by the expression.
        all_tables = collect_tables(expression)
        if len(all_tables) == 0:
            # Constant expression: fall back to a delayed pw.this context.
            context = thisclass.this
        elif all(tab == all_tables[0] for tab in all_tables):
            # All references point at a single table: use it as the context.
            context = all_tables[0]
    if context is None:
        # Mixed tables: require that they all share the same universe and
        # pick the first one as the context.
        for tab in all_tables:
            if not isinstance(tab, Table):
                raise ValueError("Table expected here.")
        if len(all_tables) == 0:
            raise ValueError("Const value provided.")
        context = all_tables[0]
        for tab in all_tables:
            assert context._universe.is_equal_to(tab._universe)
    if isinstance(context, groupbys.GroupedJoinable):
        context = thisclass.this
    if isinstance(context, thisclass.ThisMetaclass):
        # Delayed operation: re-run ix once the real table is known
        # (e.g. inside join().select() / groupby().reduce()).
        return context._delayed_op(
            lambda table, expression: self.ix(
                expression=expression, optional=optional, context=table
            ),
            expression=expression,
            qualname=f"{self}.ix(...)",
            name="ix",
        )
    restrict_universe = RestrictUniverseDesugaring(context)
    expression = restrict_universe.eval_expression(expression)
    # Materialize the key expression as a column of the context.
    key_col = context.select(tmp=expression).tmp
    key_dtype = self.eval_type(key_col)
    # Keys must be pointers (optionally Optional[Pointer] when optional=True).
    if (
        optional and not dt.dtype_issubclass(key_dtype, dt.Optional(dt.POINTER))
    ) or (not optional and not isinstance(key_dtype, dt.Pointer)):
        raise TypeError(
            f"Pathway supports indexing with Pointer type only. The type used was {key_dtype}."
        )
    if optional and isinstance(key_dtype, dt.Optional):
        # Optional keys may produce missing rows, so every result column
        # becomes Optional as well.
        self_ = self.update_types(
            **{name: dt.Optional(self.typehints()[name]) for name in self.keys()}
        )
    else:
        self_ = self
    return self_._ix(key_col, optional)
def _ix(
    self,
    key_expression: expr.ColumnReference,
    optional: bool,
) -> Table:
    # Reindex self by the pointer values stored in the key column.
    return self._table_with_context(
        clmn.IxContext(key_expression._column, self._id_column, optional)
    )
def __lshift__(self, other: Table) -> Table:
    """Alias to update_cells method.

    Updates cells of `self`, breaking ties in favor of the values in `other`.

    Semantics:
        - result.columns == self.columns
        - result.id == self.id
        - conflicts are resolved preferring other's values

    Requires:
        - other.columns ⊆ self.columns
        - other.id ⊆ self.id

    Args:
        other: the other table.

    Returns:
        Table: `self` updated with cells form `other`.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 30
    ... ''')
    >>> pw.universes.promise_is_subset_of(t2, t1)
    >>> t3 = t1 << t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    """
    # Extra stacklevel so warnings point at the `<<` call site.
    updated = self.update_cells(other, _stacklevel=2)
    return updated
def concat(self, *others: Table[TSchema]) -> Table[TSchema]:
    """Concats `self` with every `other` ∊ `others`.

    Semantics:
        - result.columns == self.columns == other.columns
        - result.id == self.id ∪ other.id

    if self.id and other.id collide, throws an exception.

    Requires:
        - other.columns == self.columns
        - self.id disjoint with other.id

    Args:
        other: the other table.

    Returns:
        Table: The concatenated table. Id's of rows from original tables are preserved.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ... 11 | 11 | Alice | 30
    ... 12 | 12 | Tom | 40
    ... ''')
    >>> pw.universes.promise_are_pairwise_disjoint(t1, t2)
    >>> t3 = t1.concat(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 1
    11 | Alice | 30
    12 | Tom | 40
    """
    # All tables must expose exactly the same column names.
    for other in others:
        if self.keys() != other.keys():
            raise ValueError(
                "columns do not match in the argument of Table.concat()"
            )
    # Per-column least common ancestor of all participating dtypes.
    schema = {}
    for key in self.keys():
        schema[key] = functools.reduce(
            dt.types_lca,
            [other.schema._dtypes()[key] for other in others],
            self.schema._dtypes()[key],
        )
    casted = [tab.cast_to_types(**schema) for tab in (self, *others)]
    return Table._concat(*casted)
def _concat(self, *others: Table[TSchema]) -> Table[TSchema]:
    # Unsafe concat: universes must be provably disjoint.
    ids = (self._id_column, *(other._id_column for other in others))
    if not G.universe_solver.query_are_disjoint(*(col.universe for col in ids)):
        raise ValueError(
            "Universes of the arguments of Table.concat() have to be disjoint.\n"
            + "Consider using Table.promise_universes_are_disjoint() to assert it.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    updates = tuple(
        {col_name: other._columns[col_name] for col_name in self.keys()}
        for other in others
    )
    return self._table_with_context(
        clmn.ConcatUnsafeContext(union_ids=ids, updates=updates)
    )
def update_cells(self, other: Table, _stacklevel: int = 1) -> Table:
    """Updates cells of `self`, breaking ties in favor of the values in `other`.

    Semantics:
        - result.columns == self.columns
        - result.id == self.id
        - conflicts are resolved preferring other's values

    Requires:
        - other.columns ⊆ self.columns
        - other.id ⊆ self.id

    Args:
        other: the other table.

    Returns:
        Table: `self` updated with cells form `other`.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 1 | 10 | Alice | 30
    ... ''')
    >>> pw.universes.promise_is_subset_of(t2, t1)
    >>> t3 = t1.update_cells(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    """
    unknown = set(other.keys()) - set(self.keys())
    if unknown:
        raise ValueError(
            f"Columns of the argument in Table.update_cells() not present in the updated table: {list(unknown)}."
        )
    if self._universe == other._universe:
        # Same key set: a plain column overwrite is both cheaper and clearer.
        warnings.warn(
            "Key sets of self and other in update_cells are the same."
            + " Using with_columns instead of update_cells.",
            stacklevel=_stacklevel + 4,
        )
        return self.with_columns(*(other[name] for name in other))
    # Unify dtypes of the overlapping columns before merging.
    schema = {
        key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
        for key in other.keys()
    }
    return Table._update_cells(
        self.cast_to_types(**schema), other.cast_to_types(**schema)
    )
def _update_cells(self, other: Table) -> Table:
    # Unsafe cell update: other's universe must be a proven subset of self's.
    if not other._universe.is_subset_of(self._universe):
        raise ValueError(
            "Universe of the argument of Table.update_cells() needs to be "
            + "a subset of the universe of the updated table.\n"
            + "Consider using Table.promise_is_subset_of() to assert this.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    updates = {name: other._columns[name] for name in other.keys()}
    return self._table_with_context(
        clmn.UpdateCellsContext(
            left=self._id_column,
            right=other._id_column,
            updates=updates,
        )
    )
def update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
    """Updates rows of `self`, breaking ties in favor for the rows in `other`.

    Semantics:
        - result.columns == self.columns == other.columns
        - result.id == self.id ∪ other.id

    Requires:
        - other.columns == self.columns

    Args:
        other: the other table.

    Returns:
        Table: `self` updated with rows form `other`.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ... 1  | 10 | Alice | 30
    ... 12 | 12 | Tom | 40
    ... ''')
    >>> t3 = t1.update_rows(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    12 | Tom | 40
    """
    if self.keys() != other.keys():
        raise ValueError(
            "Columns do not match between argument of Table.update_rows() and the updated table."
        )
    if self._universe.is_subset_of(other._universe):
        # Every row of self would be overwritten anyway.
        warnings.warn(
            "Universe of self is a subset of universe of other in update_rows. Returning other.",
            stacklevel=5,
        )
        return other
    # Unify dtypes column-wise before merging.
    schema = {
        key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
        for key in self.keys()
    }
    lhs = self.cast_to_types(**schema)
    rhs = other.cast_to_types(**schema)
    union = G.universe_solver.get_union(self._universe, other._universe)
    if union == self._universe:
        # other adds no new keys, so a cell update is sufficient.
        return Table._update_cells(lhs, rhs)
    else:
        return Table._update_rows(lhs, rhs)
def _update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
    # Unsafe row update over the union of the two universes.
    updates = {col_name: other._columns[col_name] for col_name in self.keys()}
    return self._table_with_context(
        clmn.UpdateRowsContext(
            updates=updates,
            union_ids=(self._id_column, other._id_column),
        )
    )
def with_columns(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Updates columns of `self`, according to args and kwargs.

    See `table.select` specification for evaluation of args and kwargs.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | owner | pet | size
    ... 1 | Tom | 1 | 10
    ... 2 | Bob | 1 | 9
    ... 3 | Tom | 2 | 8
    ... ''')
    >>> t3 = t1.with_columns(*t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet | size
    8 | Tom | 2 | 8
    9 | Bob | 1 | 9
    10 | Tom | 1 | 10
    """
    # New/updated columns win over existing ones with the same name.
    fresh = self.select(*args, **kwargs)
    merged = {**dict(self), **dict(fresh)}
    return self.select(**merged)
def with_id(self, new_index: expr.ColumnReference) -> Table:
    """Set new ids based on another column containing id-typed values.

    To generate ids based on arbitrary valued columns, use `with_id_from`.

    Values assigned must be row-wise unique.

    Args:
        new_index: column to be used as the new index.

    Returns:
        Table with updated ids.

    Example:

    >>> import pytest; pytest.xfail("with_id is hard to test")
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | new_id
    ... 1 | 2
    ... 2 | 3
    ... 3 | 4
    ... ''')
    >>> t3 = t1.promise_universe_is_subset_of(t2).with_id(t2.new_id)
    >>> pw.debug.compute_and_print(t3)
    age owner pet
    ^2 10 Alice 1
    ^3 9 Bob 1
    ^4 8 Alice 2
    """
    return self._with_new_index(new_index)
def with_id_from(
    self,
    *args: expr.ColumnExpression | Value,
    instance: expr.ColumnReference | None = None,
) -> Table:
    """Compute new ids based on values in columns.

    Ids computed from the given expressions must be row-wise unique.

    Args:
        args: expressions to be used as primary keys.
        instance: optional expression separating the id space into instances.

    Returns:
        Table: `self` updated with recomputed ids.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = t1 + t1.select(old_id=t1.id)
    >>> t3 = t2.with_id_from(t2.age)
    >>> pw.debug.compute_and_print(t3) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    | age | owner | pet | old_id
    ^... | 8 | Alice | 2 | ^...
    ^... | 9 | Bob | 1 | ^...
    ^... | 10 | Alice | 1 | ^...
    >>> t4 = t3.select(t3.age, t3.owner, t3.pet, same_as_old=(t3.id == t3.old_id),
    ...     same_as_new=(t3.id == t3.pointer_from(t3.age)))
    >>> pw.debug.compute_and_print(t4) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    | age | owner | pet | same_as_old | same_as_new
    ^... | 8 | Alice | 2 | False | True
    ^... | 9 | Bob | 1 | False | True
    ^... | 10 | Alice | 1 | False | True
    """
    # pointer_from yields an expression; reindexing needs a materialized
    # column, so select it first.
    keys = self.select(ref_column=self.pointer_from(*args, instance=instance))
    return self._with_new_index(new_index=keys.ref_column)
def _with_new_index(
    self,
    new_index: expr.ColumnExpression,
) -> Table:
    # Reindexing is only defined for pointer-valued expressions.
    self._validate_expression(new_index)
    dtype = self.eval_type(new_index)
    if not isinstance(dtype, dt.Pointer):
        raise TypeError(
            f"Pathway supports reindexing Tables with Pointer type only. The type used was {dtype}."
        )
    reindex_column = self._eval(new_index)
    assert reindex_column.universe == self._universe
    return self._table_with_context(clmn.ReindexContext(reindex_column))
def rename_columns(self, **kwargs: str | expr.ColumnReference) -> Table:
    """Rename columns according to kwargs.

    Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.

    Args:
        kwargs: mapping from old column names to new names.

    Returns:
        Table: `self` with columns renamed.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.rename_columns(years_old=t1.age, animal=t1.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | years_old | animal
    Alice | 8 | 2
    Alice | 10 | 1
    Bob | 9 | 1
    """
    # Resolve every target to an existing old column name.
    mapping: dict[str, str] = {}
    for new_name, old in kwargs.items():
        old_name = old.name if isinstance(old, expr.ColumnReference) else old
        if old_name not in self._columns:
            raise ValueError(f"Column {old_name} does not exist in a given table.")
        mapping[new_name] = old_name
    # Drop all renamed sources first so that swaps (a->b, b->a) work.
    renamed = self._columns.copy()
    for old_name in mapping.values():
        renamed.pop(old_name)
    for new_name, old_name in mapping.items():
        renamed[new_name] = self._columns[old_name]
    columns_wrapped = {
        name: self._wrap_column_in_context(
            self._rowwise_context,
            column,
            mapping.get(name, name),
        )
        for name, column in renamed.items()
    }
    return self._with_same_universe(*columns_wrapped.items())
def rename_by_dict(
    self, names_mapping: dict[str | expr.ColumnReference, str]
) -> Table:
    """Rename columns according to a dictionary.

    Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.

    Args:
        names_mapping: mapping from old column names to new names.

    Returns:
        Table: `self` with columns renamed.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.rename_by_dict({"age": "years_old", t1.pet: "animal"})
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | years_old | animal
    Alice | 8 | 2
    Alice | 10 | 1
    Bob | 9 | 1
    """
    # Invert the mapping into rename_columns' kwargs form.
    renames = {
        new_name: self[old_name]
        for old_name, new_name in names_mapping.items()
    }
    return self.rename_columns(**renames)
def with_prefix(self, prefix: str) -> Table:
    """Rename columns by adding prefix to each name of column.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.with_prefix("u_")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    u_age | u_owner | u_pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 1
    """
    return self.rename_by_dict({name: f"{prefix}{name}" for name in self.keys()})
def with_suffix(self, suffix: str) -> Table:
    """Rename columns by adding suffix to each name of column.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.with_suffix("_current")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age_current | owner_current | pet_current
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 1
    """
    return self.rename_by_dict({name: f"{name}{suffix}" for name in self.keys()})
def rename(
    self,
    names_mapping: dict[str | expr.ColumnReference, str] | None = None,
    **kwargs: expr.ColumnExpression,
) -> Table:
    """Rename columns according either a dictionary or kwargs.

    If a mapping is provided using a dictionary, ``rename_by_dict`` will be used.
    Otherwise, ``rename_columns`` will be used with kwargs.

    Columns not in keys(kwargs) are not changed. New name of a column must not be ``id``.

    Args:
        names_mapping: mapping from old column names to new names.
        kwargs: mapping from old column names to new names.

    Returns:
        Table: `self` with columns renamed.
    """
    # A dict takes precedence over kwargs.
    if names_mapping is None:
        return self.rename_columns(**kwargs)
    return self.rename_by_dict(names_mapping=names_mapping)
def without(self, *columns: str | expr.ColumnReference) -> Table:
    """Selects all columns without named column references.

    Args:
        columns: columns to be dropped provided by `table.column_name` notation.

    Returns:
        Table: `self` without specified columns.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.without(t1.age, pw.this.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner
    Alice
    Alice
    Bob
    """
    remaining = self._columns.copy()
    for col in columns:
        # Accept either a ColumnReference or a plain column name.
        name = col.name if isinstance(col, expr.ColumnReference) else col
        assert isinstance(name, str)
        remaining.pop(name)
    wrapped = {
        name: self._wrap_column_in_context(self._rowwise_context, column, name)
        for name, column in remaining.items()
    }
    return self._with_same_universe(*wrapped.items())
def having(self, *indexers: expr.ColumnReference) -> Table[TSchema]:
    """Removes rows so that indexed.ix(indexer) is possible when some rows are missing,
    for each indexer in indexers"""
    # One restricted table per indexer; their intersection satisfies all.
    filtered: list[Table] = [self._having(indexer) for indexer in indexers]
    if not filtered:
        return self
    first, *rest = filtered
    if not rest:
        return first
    return first.intersect(*rest)
def update_types(self, **kwargs: Any) -> Table:
    """Updates types in schema. Has no effect on the runtime."""
    for name in kwargs:
        if name not in self.keys():
            raise ValueError(
                "Table.update_types() argument name has to be an existing table column name."
            )
    from pathway.internals.common import declare_type

    updated = {key: declare_type(val, self[key]) for key, val in kwargs.items()}
    return self.with_columns(**updated)
def cast_to_types(self, **kwargs: Any) -> Table:
    """Casts columns to types."""
    for name in kwargs:
        if name not in self.keys():
            raise ValueError(
                "Table.cast_to_types() argument name has to be an existing table column name."
            )
    from pathway.internals.common import cast

    casted = {key: cast(val, self[key]) for key, val in kwargs.items()}
    return self.with_columns(**casted)
def _having(self, indexer: expr.ColumnReference) -> Table[TSchema]:
    # Keep only rows of self whose id occurs among the indexer's values.
    return self._table_with_context(
        clmn.HavingContext(
            orig_id_column=self._id_column, key_column=indexer._column
        )
    )
def with_universe_of(self, other: TableLike) -> Table:
    """Returns a copy of self with exactly the same universe as others.

    Semantics: Required precondition self.universe == other.universe
    Used in situations where Pathway cannot deduce equality of universes, but
    those are equal as verified during runtime.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | pet
    ... 1 | Dog
    ... 7 | Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | age
    ... 1 | 10
    ... 7 | 3
    ... 8 | 100
    ... ''')
    >>> t3 = t2.filter(pw.this.age < 30).with_universe_of(t1)
    >>> t4 = t1 + t3
    >>> pw.debug.compute_and_print(t4, include_id=False)
    pet | age
    Cat | 3
    Dog | 10
    """
    if self._universe != other._universe:
        # Not provably equal: register the promise and rebase the universe.
        universes.promise_are_equal(self, other)
        return self._unsafe_promise_universe(other)
    return self.copy()
def flatten(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Performs a flatmap operation on a column or expression given as a first
    argument. Datatype of this column or expression has to be iterable or Json array.
    Other columns specified in the method arguments are duplicated
    as many times as the length of the iterable.

    It is possible to get ids of source rows by using `table.id` column, e.g.
    `table.flatten(table.column_to_be_flattened, original_id = table.id)`.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | pet | age
    ... 1 | Dog | 2
    ... 7 | Cat | 5
    ... ''')
    >>> t2 = t1.flatten(t1.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    pet
    C
    D
    a
    g
    o
    t
    >>> t3 = t1.flatten(t1.pet, t1.age)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | age
    C | 5
    D | 2
    a | 5
    g | 2
    o | 2
    t | 5
    """
    intermediate = self.select(*args, **kwargs)
    named = combine_args_kwargs(args, kwargs)
    if not named:
        raise ValueError("Table.flatten() cannot have empty arguments list.")
    # The first named argument is the column being flattened.
    flatten_name = next(iter(named.keys()))
    return intermediate._flatten(flatten_name)
def _flatten(
    self,
    flatten_name: str,
) -> Table:
    # Build a flattening context around the target column and carry the
    # remaining columns over into the new (expanded) universe.
    target = self._columns[flatten_name]
    assert isinstance(target, clmn.ColumnWithExpression)
    context = clmn.FlattenContext(
        orig_universe=self._universe,
        flatten_column=target,
    )
    passthrough = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
        if name != flatten_name
    }
    return Table(
        _columns={flatten_name: context.flatten_result_column, **passthrough},
        _context=context,
    )
def sort(
    self,
    key: expr.ColumnExpression,
    instance: expr.ColumnExpression | None = None,
) -> Table:
    """Sort the table by ``key``, optionally within ``instance`` partitions.

    Args:
        key: expression (int/float/datetime/str/bytes) used to order rows.
        instance: optional partitioning expression; ``prev``/``next`` only
            link rows that share the same instance.

    Returns:
        Table: two pointer columns, ``prev`` and ``next``, linking every row
        to its neighbours in sort order (``None`` at the ends).
    """
    wrapped_instance = clmn.ColumnExpression._wrap(instance)
    sorting_context = clmn.SortingContext(
        self._eval(key),
        self._eval(wrapped_instance),
    )
    return Table(
        _columns={
            "prev": sorting_context.prev_column,
            "next": sorting_context.next_column,
        },
        _context=sorting_context,
    )
def _set_source(self, source: OutputHandle):
    """Attach operator output ``source`` as the lineage of this table.

    Sets lineage on the id column, every data column, and the universe,
    but never overwrites lineage that is already present.
    """
    self._source = source
    if not hasattr(self._id_column, "lineage"):
        self._id_column.lineage = clmn.ColumnLineage(name="id", source=source)
    for name, column in self._columns.items():
        if not hasattr(column, "lineage"):
            column.lineage = clmn.ColumnLineage(name=name, source=source)
    universe = self._universe
    if not hasattr(universe, "lineage"):
        universe.lineage = clmn.Lineage(source=source)
def _unsafe_promise_universe(self, other: TableLike) -> Table:
    """Rewrap this table in a context equating its universe with ``other``'s.

    No key-set check is performed — callers are responsible (hence "unsafe").
    """
    same_universe_ctx = clmn.PromiseSameUniverseContext(
        self._id_column, other._id_column
    )
    return self._table_with_context(same_universe_ctx)
def _validate_expression(self, expression: expr.ColumnExpression):
    """Check that every column used by ``expression`` lives in this table's universe.

    Raises:
        ValueError: if any dependency (above the reducer level) belongs to a
            different universe than this table.
    """
    for dep in expression._dependencies_above_reducer():
        if self._universe != dep._column.universe:
            raise ValueError(
                f"You cannot use {dep.to_column_expression()} in this context."
                + " Its universe is different than the universe of the table the method"
                + " was called on. You can use <table1>.with_universe_of(<table2>)"
                + " to assign universe of <table2> to <table1> if you're sure their"
                + " sets of keys are equal."
            )
def _wrap_column_in_context(
    self,
    context: clmn.Context,
    column: clmn.Column,
    name: str,
    lineage: clmn.Lineage | None = None,
) -> clmn.Column:
    """Contextualize column by wrapping it in expression.

    Builds a reference to ``column`` and re-evaluates it in ``context``, so the
    resulting column lives in ``context.universe``. An explicit ``lineage``
    may be carried over.
    """
    expression = expr.ColumnReference(_table=self, _column=column, _name=name)
    return expression._column_with_expression_cls(
        context=context,
        universe=context.universe,
        expression=expression,
        lineage=lineage,
    )
def _table_with_context(self, context: clmn.Context) -> Table:
    """Return a new table with every column of ``self`` re-wrapped in ``context``."""
    columns = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
    }
    return Table(
        _columns=columns,
        _context=context,
    )
def _table_restricted_context(self) -> clmn.TableRestrictedRowwiseContext:
    """Build a row-wise context restricted to this table."""
    restricted = clmn.TableRestrictedRowwiseContext(self._id_column, self)
    return restricted
def _eval(
    self, expression: expr.ColumnExpression, context: clmn.Context | None = None
) -> clmn.ColumnWithExpression:
    """Desugar expression and wrap it in given context.

    Defaults to this table's row-wise context when ``context`` is omitted.
    """
    if context is None:
        context = self._rowwise_context
    column = expression._column_with_expression_cls(
        context=context,
        universe=context.universe,
        expression=expression,
    )
    return column
def _from_schema(cls: type[TTable], schema: type[Schema]) -> TTable:
    """Create a materialized table with columns described by ``schema``.

    A fresh universe is allocated; each schema column becomes a
    ``MaterializedColumn`` carrying the schema-declared properties.
    """
    universe = Universe()
    context = clmn.MaterializedContext(universe, schema.universe_properties)
    columns = {
        name: clmn.MaterializedColumn(
            universe,
            schema.column_properties(name),
        )
        for name in schema.column_names()
    }
    return cls(_columns=columns, _schema=schema, _context=context)
def __repr__(self) -> str:
    """Terse debugging representation showing the table's schema."""
    schema_dict = dict(self.typehints())
    return f"<pathway.Table schema={schema_dict}>"
def _with_same_universe(
    self,
    *columns: tuple[str, clmn.Column],
    schema: type[Schema] | None = None,
) -> Table:
    """Build a table over this table's row-wise context from (name, column) pairs."""
    column_dict = {name: column for name, column in columns}
    return Table(
        _columns=column_dict,
        _schema=schema,
        _context=self._rowwise_context,
    )
def _sort_columns_by_other(self, other: Table):
    """Reorder ``self._columns`` in place to follow ``other``'s column order."""
    assert self.keys() == other.keys()
    reordered = {}
    for name in other.keys():
        reordered[name] = self._columns[name]
    self._columns = reordered
def _operator_dependencies(self) -> StableSet[Table]:
    """A concrete table's only operator dependency is itself."""
    return StableSet((self,))
def debug(self, name: str):
    """Register a debug operator over this table under ``name``; returns ``self``.

    The operator is added to the global graph ``G``; the table itself is
    returned unchanged so calls can be chained.
    """
    G.add_operator(
        lambda id: DebugOperator(name, id),
        lambda operator: operator(self),
    )
    return self
def to(self, sink: DataSink) -> None:
    """Write this table to ``sink``."""
    # Imported lazily to avoid a module-level import cycle.
    from pathway.internals import table_io as _table_io

    _table_io.table_to_datasink(self, sink)
def _materialize(self, universe: Universe):
    """Return a copy of this table with all columns materialized over ``universe``.

    Column properties and the schema are preserved.
    """
    context = clmn.MaterializedContext(universe)
    columns = {
        name: clmn.MaterializedColumn(universe, column.properties)
        for (name, column) in self._columns.items()
    }
    return Table(
        _columns=columns,
        _schema=self.schema,
        _context=context,
    )
def pointer_from(
    self, *args: Any, optional=False, instance: expr.ColumnReference | None = None
):
    """Column-wise pseudo-random hash of the arguments, producing pointer values.

    Args:
        args: expressions/values hashed into the pointer.
        optional: when True, the resulting pointer expression is optional.
        instance: optional extra expression appended to the hashed key.
    """
    key_parts = args if instance is None else (*args, instance)
    # XXX verify types for the table primary_keys
    return expr.PointerExpression(self, *key_parts, optional=optional)
def ix_ref(
    self,
    *args: expr.ColumnExpression | Value,
    optional: bool = False,
    context=None,
    instance: expr.ColumnReference | None = None,
):
    """Reindex the table using the given expressions as a primary key.

    Hashes ``args`` (plus the optional ``instance``) into a pointer and looks
    the row up with ``ix``. With ``optional=True``, ``None`` key parts yield
    ``None`` result values; keys missing from the table raise ``RuntimeError``
    at runtime. ``context`` may supply the evaluation context — anything that
    supports ``select``/``reduce``, or a ``pathway.this`` construct (the
    latter yields a delayed operation for use inside ``join().select()`` /
    ``groupby().reduce()`` chains).

    Args:
        args: column references/values forming the key.

    Returns:
        Row: the indexed row.
    """
    lookup_key = self.pointer_from(*args, optional=optional, instance=instance)
    return self.ix(lookup_key, optional=optional, context=context)
def _subtables(self) -> StableSet[Table]:
    """Leaf case: a concrete table's subtable set contains only itself."""
    return StableSet((self,))
def _substitutions(
    self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
    """Leaf case: a concrete table needs no column substitutions."""
    empty_mapping: dict = {}
    return self, empty_mapping
def typehints(self) -> Mapping[str, Any]:
    """
    Return the types of the columns as a dictionary.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... ''')
    >>> t1.typehints()
    mappingproxy({'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>})
    """
    hints = self.schema.typehints()
    return hints
def eval_type(self, expression: expr.ColumnExpression) -> dt.DType:
    """Infer the dtype of ``expression`` evaluated row-wise on this table."""
    interpreter = self._rowwise_context._get_type_interpreter()
    evaluated = interpreter.eval_expression(expression, state=TypeInterpreterState())
    return evaluated._dtype
def _auto_live(self) -> Table:
    """Make self automatically live in interactive mode"""
    # Imported lazily to avoid a module-level import cycle.
    from pathway.internals.interactive import is_interactive_mode_enabled

    return self.live() if is_interactive_mode_enabled() else self
def live(self) -> LiveTable[TSchema]:
    """Return a live view of this table.

    NOTE: experimental feature — a warning is emitted on every call.
    """
    from pathway.internals.interactive import LiveTable

    warnings.warn("live tables are an experimental feature", stacklevel=2)
    return LiveTable._create(self)
The provided code snippet includes necessary dependencies for implementing the `interval_join_outer` function. Write a Python function `def interval_join_outer( self: pw.Table, other: pw.Table, self_time: pw.ColumnExpression, other_time: pw.ColumnExpression, interval: Interval[int] | Interval[float] | Interval[datetime.timedelta], *on: pw.ColumnExpression, behavior: CommonBehavior | None = None, left_instance: pw.ColumnReference | None = None, right_instance: pw.ColumnReference | None = None, ) -> IntervalJoinResult` to solve the following problem:
Performs an interval outer join of self with other using a time difference and join expressions. If `self_time + lower_bound <= other_time <= self_time + upper_bound` and conditions in `on` are satisfied, the rows are joined. Rows that haven't been matched with the other side are returned with missing values on the other side replaced with `None`. Args: other: the right side of the join. self_time: time expression in self. other_time: time expression in other. lower_bound: a lower bound on time difference between other_time and self_time. upper_bound: an upper bound on time difference between other_time and self_time. on: a list of column expressions. Each must have == as the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. behavior: defines temporal behavior of a join - features like delaying entries or ignoring late entries. left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Returns: IntervalJoinResult: a result of the interval join. A method `.select()` can be called on it to extract relevant columns from the result of a join. Examples: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 3 ... 2 | 4 ... 3 | 5 ... 4 | 11 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | t ... 1 | 0 ... 2 | 1 ... 3 | 4 ... 4 | 7 ... ''' ... ) >>> t3 = t1.interval_join_outer(t2, t1.t, t2.t, pw.temporal.interval(-2, 1)).select( ... left_t=t1.t, right_t=t2.t ... ) >>> pw.debug.compute_and_print(t3, include_id=False) left_t | right_t | 0 | 7 3 | 1 3 | 4 4 | 4 5 | 4 11 | >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | t ... 1 | 1 | 3 ... 2 | 1 | 4 ... 3 | 1 | 5 ... 4 | 1 | 11 ... 5 | 2 | 2 ... 6 | 2 | 3 ... 7 | 3 | 4 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | b | t ... 1 | 1 | 0 ... 2 | 1 | 1 ... 3 | 1 | 4 ... 4 | 1 | 7 ... 5 | 2 | 0 ... 6 | 2 | 2 ... 7 | 4 | 2 ... ''' ... 
) >>> t3 = t1.interval_join_outer( ... t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b ... ).select(t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) a | left_t | right_t | | 0 | | 2 | | 7 1 | 3 | 1 1 | 3 | 4 1 | 4 | 4 1 | 5 | 4 1 | 11 | 2 | 2 | 0 2 | 2 | 2 2 | 3 | 2 3 | 4 | Setting `behavior` allows to control temporal behavior of an interval join. Then, each side of the interval join keeps track of the maximal already seen time (`self_time` and `other_time`). The arguments of `behavior` mean in the context of an interval join what follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, interval join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Example without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 1 | 1 | 0 | 2 ... 2 | 2 | 2 | 4 ... 3 | 1 | 4 | 4 ... 4 | 2 | 8 | 8 ... 5 | 1 | 0 | 10 ... 6 | 1 | 4 | 10 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 42 | 1 | 2 | 2 ... 8 | 2 | 10 | 14 ... 10 | 2 | 4 | 30 ... ''' ... ) >>> result_without_cutoff = t1.interval_join_outer( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... 
) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 2 | | 2 | 2 | | 4 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 4 | | 2 | 8 | | 8 | 1 5 | 42 | 1 | 0 | 2 | 10 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | | 2 | 8 | | 14 | -1 4 | 8 | 2 | 8 | 10 | 14 | 1 2 | | 2 | 2 | | 30 | -1 2 | 10 | 2 | 2 | 4 | 30 | 1 >>> result_with_cutoff = t1.interval_join_outer( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... behavior=pw.temporal.common_behavior(cutoff=6), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 2 | | 2 | 2 | | 4 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 4 | | 2 | 8 | | 8 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | | 2 | 8 | | 14 | -1 4 | 8 | 2 | 8 | 10 | 14 | 1 The record with ``value=5`` from table ``t1`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``0 <= 8-6``). The record with ``value=10`` from table ``t2`` was not joined because its ``event_time`` was equal to the maximal already seen time minus ``cutoff`` (``4 <= 10-6``). Notice also the entries with ``__diff__=-1``. They're deletion entries caused by the arrival of matching entries on the right side of the join. The matches caused the removal of entries without values in the fields from the right side and insertion of entries with values in these fields.
Here is the function:
def interval_join_outer(
    self: pw.Table,
    other: pw.Table,
    self_time: pw.ColumnExpression,
    other_time: pw.ColumnExpression,
    interval: Interval[int] | Interval[float] | Interval[datetime.timedelta],
    *on: pw.ColumnExpression,
    behavior: CommonBehavior | None = None,
    left_instance: pw.ColumnReference | None = None,
    right_instance: pw.ColumnReference | None = None,
) -> IntervalJoinResult:
    """Perform an interval OUTER join of ``self`` with ``other``.

    Rows are matched when ``self_time + interval.lower_bound <= other_time
    <= self_time + interval.upper_bound`` and all equality conditions in
    ``on`` hold. Rows from either side that found no match are emitted with
    ``None`` substituted for the other side's values.

    Args:
        other: the right side of the join.
        self_time: time expression in self.
        other_time: time expression in other.
        interval: allowed time difference between ``other_time`` and
            ``self_time``.
        on: equality conditions of the form
            ``LHS: ColumnReference == RHS: ColumnReference``.
        behavior: temporal behavior of the join — ``delay`` buffers results,
            ``cutoff`` ignores/garbage-collects entries older than the maximal
            already seen time minus ``cutoff``, and ``keep_results`` controls
            whether results older than that threshold are kept.
        left_instance/right_instance: optional arguments describing
            partitioning of the data into separate instances.

    Returns:
        IntervalJoinResult: a result of the interval join. A method
        ``.select()`` can be called on it to extract relevant columns.
    """
    # Thin wrapper: the shared implementation does all the work, in OUTER mode.
    return IntervalJoinResult._interval_join(
        self,
        other,
        self_time,
        other_time,
        interval,
        *on,
        behavior=behavior,
        mode=pw.JoinMode.OUTER,
        left_instance=left_instance,
        right_instance=right_instance,
    )
) >>> t3 = t1.interval_join_outer( ... t2, t1.t, t2.t, pw.temporal.interval(-2, 1), t1.a == t2.b ... ).select(t1.a, left_t=t1.t, right_t=t2.t) >>> pw.debug.compute_and_print(t3, include_id=False) a | left_t | right_t | | 0 | | 2 | | 7 1 | 3 | 1 1 | 3 | 4 1 | 4 | 4 1 | 5 | 4 1 | 11 | 2 | 2 | 0 2 | 2 | 2 2 | 3 | 2 3 | 4 | Setting `behavior` allows to control temporal behavior of an interval join. Then, each side of the interval join keeps track of the maximal already seen time (`self_time` and `other_time`). The arguments of `behavior` mean in the context of an interval join what follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, interval join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Example without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 1 | 1 | 0 | 2 ... 2 | 2 | 2 | 4 ... 3 | 1 | 4 | 4 ... 4 | 2 | 8 | 8 ... 5 | 1 | 0 | 10 ... 6 | 1 | 4 | 10 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | instance | event_time | __time__ ... 42 | 1 | 2 | 2 ... 8 | 2 | 10 | 14 ... 10 | 2 | 4 | 30 ... ''' ... ) >>> result_without_cutoff = t1.interval_join_outer( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... 
) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 2 | | 2 | 2 | | 4 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 4 | | 2 | 8 | | 8 | 1 5 | 42 | 1 | 0 | 2 | 10 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | | 2 | 8 | | 14 | -1 4 | 8 | 2 | 8 | 10 | 14 | 1 2 | | 2 | 2 | | 30 | -1 2 | 10 | 2 | 2 | 4 | 30 | 1 >>> result_with_cutoff = t1.interval_join_outer( ... t2, ... t1.event_time, ... t2.event_time, ... pw.temporal.interval(-2, 2), ... t1.instance == t2.instance, ... behavior=pw.temporal.common_behavior(cutoff=6), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... instance=t1.instance, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False) left_value | right_value | instance | left_time | right_time | __time__ | __diff__ 1 | 42 | 1 | 0 | 2 | 2 | 1 2 | | 2 | 2 | | 4 | 1 3 | 42 | 1 | 4 | 2 | 4 | 1 4 | | 2 | 8 | | 8 | 1 6 | 42 | 1 | 4 | 2 | 10 | 1 4 | | 2 | 8 | | 14 | -1 4 | 8 | 2 | 8 | 10 | 14 | 1 The record with ``value=5`` from table ``t1`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``0 <= 8-6``). The record with ``value=10`` from table ``t2`` was not joined because its ``event_time`` was equal to the maximal already seen time minus ``cutoff`` (``4 <= 10-6``). Notice also the entries with ``__diff__=-1``. They're deletion entries caused by the arrival of matching entries on the right side of the join. The matches caused the removal of entries without values in the fields from the right side and insertion of entries with values in these fields. |
166,652 | from __future__ import annotations
import dataclasses
import enum
from typing import Any
import pathway.internals as pw
import pathway.internals.expression as expr
import pathway.stdlib.indexing
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
SubstitutionDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.joins import validate_join_condition
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.stdlib.temporal.temporal_behavior import (
CommonBehavior,
apply_temporal_behavior,
)
from .utils import TimeEventType, check_joint_types
The provided code snippet includes necessary dependencies for implementing the `_build_groups` function. Write a Python function `def _build_groups(t: pw.Table, dir_next: bool) -> pw.Table` to solve the following problem:
Inputs: - t: ordered table - key: tuple where the last element indicate the group - next/prev pointers - dir_next: boolean Outputs a table with the same number elements with: - peer: next if dir_next else prev - peer_key: t(peer).key - peer_same: id of next/prev element in the table with the same group - peer_diff: id of next/prev element in the table with a different group
Here is the function:
def _build_groups(t: pw.Table, dir_next: bool) -> pw.Table:
    """
    Inputs:
    - t: ordered table
    - key: tuple where the last element indicate the group
    - next/prev pointers
    - dir_next: boolean
    Outputs a table with the same number elements with:
    - peer: next if dir_next else prev
    - peer_key: t(peer).key
    - peer_same: id of next/prev element in the table with the same group
    - peer_diff: id of next/prev element in the table with a different group
    """

    # Group representative step: keep own id when there is no peer or the peer
    # differs at key position 1; otherwise adopt the peer's id.
    def proc(cur_id, cur, peer_id, peer) -> pw.Pointer:
        if peer is None:
            return cur_id
        if cur[1] != peer[1]:  # check if the same side of a join
            return cur_id
        return peer_id

    # Direction-dependent neighbour: `next` when walking forward, else `prev`.
    succ_table = t.select(
        orig_id=t.orig_id,
        key=t.key,
        peer=t.next if dir_next else t.prev,
    )
    # Fetch the neighbour's key (None-safe via optional=True at the boundary).
    succ_table += succ_table.select(
        peer_key=succ_table.ix(succ_table.peer, optional=True).key,
    )
    succ_table += succ_table.select(
        group_repr=pw.apply(
            proc,
            succ_table.id,
            succ_table.key,
            succ_table.peer,
            succ_table.peer_key,
        )
    )

    # Pointer doubling: each iteration makes group_repr jump to its
    # representative's representative, converging on the group's last element.
    def merge_ccs(data):
        data = data.with_columns(data.ix(data.group_repr).group_repr)
        return data

    group_table = pw.iterate(merge_ccs, data=succ_table)
    # At the end of the iterative merge_ccs, we have:
    # group_repr = last element of each consecutive group with the same `key`
    # We want to compute two things:
    # - `next_same`: the next element with the same key
    # - `next_diff`: the next element with a different key
    # To do so,
    # let reprs = elements which are the last elements of each consecutive group
    # next_diff(x) = group_repr(x).peer
    # next_same(x) = is_repr ? next_diff(x).peer : x.peer
    #
    reprs = group_table.filter(group_table.id == group_table.group_repr)
    group_table += group_table.select(
        peer_diff=group_table.ix(group_table.group_repr, optional=True).peer
    )
    group_table += group_table.select(peer_same=group_table.peer)
    # Representatives' own peer crosses a group boundary, so for them
    # peer_same is overwritten with the peer's peer.
    group_table <<= reprs.select(peer_same=group_table.ix(reprs.id, optional=True).peer)
    return group_table
166,653 | from __future__ import annotations
import dataclasses
import enum
from typing import Any
import pathway.internals as pw
import pathway.internals.expression as expr
import pathway.stdlib.indexing
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
SubstitutionDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.joins import validate_join_condition
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.stdlib.temporal.temporal_behavior import (
CommonBehavior,
apply_temporal_behavior,
)
from .utils import TimeEventType, check_joint_types
class Direction(enum.Enum):
    """Accepted values for the ``direction`` argument of the ASOF joins."""

    BACKWARD = 0  # match the latest record with time <= the probe's time (per asof_join doctests)
    FORWARD = 1  # NOTE(review): presumably the earliest record with time >= probe's time -- confirm
    NEAREST = 2  # NOTE(review): presumably the record with the closest time either way -- confirm
def _asof_join(
    self: pw.Table,
    other: pw.Table,
    t_left: pw.ColumnExpression,
    t_right: pw.ColumnExpression,
    *on: pw.ColumnExpression,
    behavior: CommonBehavior | None,
    how: pw.JoinMode,
    defaults: dict[pw.ColumnReference, Any],
    direction: Direction,
    left_instance: expr.ColumnReference | None,
    right_instance: expr.ColumnReference | None,
):
    """Common implementation behind the public ``asof_join*`` wrappers.

    Materializes the time expressions as a ``_pw_time`` column on each side,
    applies the temporal behavior, collects the equality join conditions and
    wraps everything in an ``AsofJoinResult``.
    """
    check_joint_types(
        {"t_left": (t_left, TimeEventType), "t_right": (t_right, TimeEventType)}
    )
    left_timed = self.with_columns(_pw_time=t_left)
    right_timed = other.with_columns(_pw_time=t_right)
    left_timed = apply_temporal_behavior(left_timed, behavior)
    right_timed = apply_temporal_behavior(right_timed, behavior)
    left_side = _SideData(
        side=False,
        original_table=self,
        table=left_timed,
        conds=[],
        t=left_timed._pw_time,
    )
    right_side = _SideData(
        side=True,
        original_table=other,
        table=right_timed,
        conds=[],
        t=right_timed._pw_time,
    )
    # Instance columns, when given, are just one more equality condition;
    # they must be provided for both sides or for neither.
    if left_instance is None or right_instance is None:
        assert left_instance is None and right_instance is None
    else:
        on = (*on, left_instance == right_instance)
    for cond in on:
        lhs, rhs, _ = validate_join_condition(cond, self, other)
        left_side.conds.append(left_timed[lhs.name])
        right_side.conds.append(right_timed[rhs.name])
    return AsofJoinResult(
        side_data={False: left_side, True: right_side},
        mode=how,
        defaults={c._to_internal(): v for c, v in defaults.items()},
        direction=direction,
        _filter_out_results_of_forgetting=behavior is None or behavior.keep_results,
    )
class CommonBehavior(Behavior):
    """Defines temporal behavior of windows and temporal joins."""
    # Buffer entries until the maximal already-seen time is >= their time plus `delay`.
    delay: IntervalType | None
    # Ignore/garbage-collect entries with times <= (maximal already-seen time - `cutoff`);
    # when None, all records from both sides are remembered.
    cutoff: IntervalType | None
    # If False, results older than (maximal seen time - `cutoff`) are forgotten as well.
    keep_results: bool
The provided code snippet includes necessary dependencies for implementing the `asof_join` function. Write a Python function `def asof_join( self: pw.Table, other: pw.Table, self_time: pw.ColumnExpression, other_time: pw.ColumnExpression, *on: pw.ColumnExpression, how: pw.JoinMode, behavior: CommonBehavior | None = None, defaults: dict[pw.ColumnReference, Any] = {}, direction: Direction = Direction.BACKWARD, left_instance: expr.ColumnReference | None = None, right_instance: expr.ColumnReference | None = None, )` to solve the following problem:
Perform an ASOF join of two tables. Args: other: Table to join with self, both must contain a column `val` self_time, other_time: time-like column expression to do the join against on: a list of column expressions. Each must have == as the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. behavior: defines the temporal behavior of a join - features like delaying entries or ignoring late entries. how: mode of the join (LEFT, RIGHT, FULL) defaults: dictionary column-> default value. Entries in the resulting table that not have a predecessor in the join will be set to this default value. If no default is provided, None will be used. direction: direction of the join, accepted values: Direction.BACKWARD, Direction.FORWARD, Direction.NEAREST left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | K | val | t ... 1 | 0 | 1 | 1 ... 2 | 0 | 2 | 4 ... 3 | 0 | 3 | 5 ... 4 | 0 | 4 | 6 ... 5 | 0 | 5 | 7 ... 6 | 0 | 6 | 11 ... 7 | 0 | 7 | 12 ... 8 | 1 | 8 | 5 ... 9 | 1 | 9 | 7 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | K | val | t ... 21 | 1 | 7 | 2 ... 22 | 1 | 3 | 8 ... 23 | 0 | 0 | 2 ... 24 | 0 | 6 | 3 ... 25 | 0 | 2 | 7 ... 26 | 0 | 3 | 8 ... 27 | 0 | 9 | 9 ... 28 | 0 | 7 | 13 ... 29 | 0 | 4 | 14 ... ''' ... ) >>> res = t1.asof_join( ... t2, ... t1.t, ... t2.t, ... t1.K == t2.K, ... how=pw.JoinMode.LEFT, ... defaults={t2.val: -1}, ... ).select( ... pw.this.instance, ... pw.this.t, ... val_left=t1.val, ... val_right=t2.val, ... sum=t1.val + t2.val, ... 
) >>> pw.debug.compute_and_print(res, include_id=False) instance | t | val_left | val_right | sum 0 | 1 | 1 | -1 | 0 0 | 4 | 2 | 6 | 8 0 | 5 | 3 | 6 | 9 0 | 6 | 4 | 6 | 10 0 | 7 | 5 | 2 | 7 0 | 11 | 6 | 9 | 15 0 | 12 | 7 | 9 | 16 1 | 5 | 8 | 7 | 15 1 | 7 | 9 | 7 | 16 Setting `behavior` allows to control temporal behavior of an asof join. Then, each side of the asof join keeps track of the maximal already seen time (`self_time` and `other_time`). In the context of `asof_join` the arguments of `behavior` are defined as follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, the asof join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Examples without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | event_time | __time__ ... 2 | 2 | 4 ... 3 | 5 | 6 ... 4 | 1 | 8 ... 5 | 7 | 14 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | event_time | __time__ ... 42 | 1 | 2 ... 8 | 4 | 10 ... ''' ... ) >>> result_without_cutoff = t1.asof_join( ... t2, t1.event_time, t2.event_time, how=pw.JoinMode.LEFT ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... left_time=t1.event_time, ... right_time=t2.event_time, ... 
) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | left_time | right_time | __time__ | __diff__ 2 | 42 | 2 | 1 | 4 | 1 3 | 42 | 5 | 1 | 6 | 1 4 | 42 | 1 | 1 | 8 | 1 3 | 42 | 5 | 1 | 10 | -1 3 | 8 | 5 | 4 | 10 | 1 5 | 8 | 7 | 4 | 14 | 1 >>> >>> result_without_cutoff = t1.asof_join( ... t2, ... t1.event_time, ... t2.event_time, ... how=pw.JoinMode.LEFT, ... behavior=pw.temporal.common_behavior(cutoff=2), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | left_time | right_time | __time__ | __diff__ 2 | 42 | 2 | 1 | 4 | 1 3 | 42 | 5 | 1 | 6 | 1 3 | 42 | 5 | 1 | 10 | -1 3 | 8 | 5 | 4 | 10 | 1 5 | 8 | 7 | 4 | 14 | 1 The record with ``value=4`` from table ``t1`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``1 <= 5-2``).
Here is the function:
def asof_join(
    self: pw.Table,
    other: pw.Table,
    self_time: pw.ColumnExpression,
    other_time: pw.ColumnExpression,
    *on: pw.ColumnExpression,
    how: pw.JoinMode,
    behavior: CommonBehavior | None = None,
    defaults: dict[pw.ColumnReference, Any] | None = None,
    direction: Direction = Direction.BACKWARD,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
):
    """Perform an ASOF join of two tables.

    Args:
        other: Table to join with self, both must contain a column `val`
        self_time, other_time: time-like column expression to do the join against
        on: a list of column expressions. Each must have == as the top level operation
            and be of the form LHS: ColumnReference == RHS: ColumnReference.
        behavior: defines the temporal behavior of a join - features like delaying entries
            or ignoring late entries.
        how: mode of the join (LEFT, RIGHT, FULL)
        defaults: dictionary column-> default value. Entries in the resulting table that do
            not have a predecessor in the join will be set to this default value. If no
            default is provided, None will be used.
        direction: direction of the join, accepted values: Direction.BACKWARD,
            Direction.FORWARD, Direction.NEAREST
        left_instance/right_instance: optional arguments describing partitioning of the data into
            separate instances

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...     | K | val | t
    ...     1 | 0 | 1 | 1
    ...     2 | 0 | 2 | 4
    ...     3 | 0 | 3 | 5
    ...     4 | 0 | 4 | 6
    ...     5 | 0 | 5 | 7
    ...     6 | 0 | 6 | 11
    ...     7 | 0 | 7 | 12
    ...     8 | 1 | 8 | 5
    ...     9 | 1 | 9 | 7
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...      | K | val | t
    ...     21 | 1 | 7 | 2
    ...     22 | 1 | 3 | 8
    ...     23 | 0 | 0 | 2
    ...     24 | 0 | 6 | 3
    ...     25 | 0 | 2 | 7
    ...     26 | 0 | 3 | 8
    ...     27 | 0 | 9 | 9
    ...     28 | 0 | 7 | 13
    ...     29 | 0 | 4 | 14
    ...     '''
    ... )
    >>> res = t1.asof_join(
    ...     t2,
    ...     t1.t,
    ...     t2.t,
    ...     t1.K == t2.K,
    ...     how=pw.JoinMode.LEFT,
    ...     defaults={t2.val: -1},
    ... ).select(
    ...     pw.this.instance,
    ...     pw.this.t,
    ...     val_left=t1.val,
    ...     val_right=t2.val,
    ...     sum=t1.val + t2.val,
    ... )
    >>> pw.debug.compute_and_print(res, include_id=False)
    instance | t | val_left | val_right | sum
    0 | 1 | 1 | -1 | 0
    0 | 4 | 2 | 6 | 8
    0 | 5 | 3 | 6 | 9
    0 | 6 | 4 | 6 | 10
    0 | 7 | 5 | 2 | 7
    0 | 11 | 6 | 9 | 15
    0 | 12 | 7 | 9 | 16
    1 | 5 | 8 | 7 | 15
    1 | 7 | 9 | 7 | 16

    Setting `behavior` allows to control temporal behavior of an asof join. Then, each side of
    the asof join keeps track of the maximal already seen time (`self_time` and `other_time`).
    In the context of `asof_join` the arguments of `behavior` are defined as follows:

    - **delay** - buffers results until the maximal already seen time is greater than \
        or equal to their time plus `delay`.
    - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \
        it is also used to garbage collect records that have times lower or equal to the above threshold. \
        When `cutoff` is not set, the asof join will remember all records from both sides.
    - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \
        keeps only results that are newer than the maximal seen time minus `cutoff`.

    Examples without and with forgetting:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...     value | event_time | __time__
    ...     2     | 2          | 4
    ...     3     | 5          | 6
    ...     4     | 1          | 8
    ...     5     | 7          | 14
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...     value | event_time | __time__
    ...     42    | 1          | 2
    ...     8     | 4          | 10
    ...     '''
    ... )
    >>> result_without_cutoff = t1.asof_join(
    ...     t2, t1.event_time, t2.event_time, how=pw.JoinMode.LEFT
    ... ).select(
    ...     left_value=t1.value,
    ...     right_value=t2.value,
    ...     left_time=t1.event_time,
    ...     right_time=t2.event_time,
    ... )
    >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False)
    left_value | right_value | left_time | right_time | __time__ | __diff__
    2 | 42 | 2 | 1 | 4 | 1
    3 | 42 | 5 | 1 | 6 | 1
    4 | 42 | 1 | 1 | 8 | 1
    3 | 42 | 5 | 1 | 10 | -1
    3 | 8 | 5 | 4 | 10 | 1
    5 | 8 | 7 | 4 | 14 | 1
    >>>
    >>> result_without_cutoff = t1.asof_join(
    ...     t2,
    ...     t1.event_time,
    ...     t2.event_time,
    ...     how=pw.JoinMode.LEFT,
    ...     behavior=pw.temporal.common_behavior(cutoff=2),
    ... ).select(
    ...     left_value=t1.value,
    ...     right_value=t2.value,
    ...     left_time=t1.event_time,
    ...     right_time=t2.event_time,
    ... )
    >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False)
    left_value | right_value | left_time | right_time | __time__ | __diff__
    2 | 42 | 2 | 1 | 4 | 1
    3 | 42 | 5 | 1 | 6 | 1
    3 | 42 | 5 | 1 | 10 | -1
    3 | 8 | 5 | 4 | 10 | 1
    5 | 8 | 7 | 4 | 14 | 1

    The record with ``value=4`` from table ``t1`` was not joined because its ``event_time``
    was less than the maximal already seen time minus ``cutoff`` (``1 <= 5-2``).
    """
    # `defaults` used to default to a shared mutable `{}`; accept None and
    # normalize per call to avoid the mutable-default-argument pitfall.
    return _asof_join(
        self,
        other,
        self_time,
        other_time,
        *on,
        behavior=behavior,
        how=how,
        defaults={} if defaults is None else defaults,
        direction=direction,
        left_instance=left_instance,
        right_instance=right_instance,
    )
) >>> pw.debug.compute_and_print(res, include_id=False) instance | t | val_left | val_right | sum 0 | 1 | 1 | -1 | 0 0 | 4 | 2 | 6 | 8 0 | 5 | 3 | 6 | 9 0 | 6 | 4 | 6 | 10 0 | 7 | 5 | 2 | 7 0 | 11 | 6 | 9 | 15 0 | 12 | 7 | 9 | 16 1 | 5 | 8 | 7 | 15 1 | 7 | 9 | 7 | 16 Setting `behavior` allows to control temporal behavior of an asof join. Then, each side of the asof join keeps track of the maximal already seen time (`self_time` and `other_time`). In the context of `asof_join` the arguments of `behavior` are defined as follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, the asof join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Examples without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | event_time | __time__ ... 2 | 2 | 4 ... 3 | 5 | 6 ... 4 | 1 | 8 ... 5 | 7 | 14 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | event_time | __time__ ... 42 | 1 | 2 ... 8 | 4 | 10 ... ''' ... ) >>> result_without_cutoff = t1.asof_join( ... t2, t1.event_time, t2.event_time, how=pw.JoinMode.LEFT ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... left_time=t1.event_time, ... right_time=t2.event_time, ... 
) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | left_time | right_time | __time__ | __diff__ 2 | 42 | 2 | 1 | 4 | 1 3 | 42 | 5 | 1 | 6 | 1 4 | 42 | 1 | 1 | 8 | 1 3 | 42 | 5 | 1 | 10 | -1 3 | 8 | 5 | 4 | 10 | 1 5 | 8 | 7 | 4 | 14 | 1 >>> >>> result_without_cutoff = t1.asof_join( ... t2, ... t1.event_time, ... t2.event_time, ... how=pw.JoinMode.LEFT, ... behavior=pw.temporal.common_behavior(cutoff=2), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | left_time | right_time | __time__ | __diff__ 2 | 42 | 2 | 1 | 4 | 1 3 | 42 | 5 | 1 | 6 | 1 3 | 42 | 5 | 1 | 10 | -1 3 | 8 | 5 | 4 | 10 | 1 5 | 8 | 7 | 4 | 14 | 1 The record with ``value=4`` from table ``t1`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``1 <= 5-2``). |
166,654 | from __future__ import annotations
import dataclasses
import enum
from typing import Any
import pathway.internals as pw
import pathway.internals.expression as expr
import pathway.stdlib.indexing
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
SubstitutionDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.joins import validate_join_condition
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.stdlib.temporal.temporal_behavior import (
CommonBehavior,
apply_temporal_behavior,
)
from .utils import TimeEventType, check_joint_types
class Direction(enum.Enum):
    """Accepted values for the ``direction`` argument of the ASOF joins."""

    BACKWARD = 0  # match the latest record with time <= the probe's time (per asof_join doctests)
    FORWARD = 1  # NOTE(review): presumably the earliest record with time >= probe's time -- confirm
    NEAREST = 2  # NOTE(review): presumably the record with the closest time either way -- confirm
def _asof_join(
    self: pw.Table,
    other: pw.Table,
    t_left: pw.ColumnExpression,
    t_right: pw.ColumnExpression,
    *on: pw.ColumnExpression,
    behavior: CommonBehavior | None,
    how: pw.JoinMode,
    defaults: dict[pw.ColumnReference, Any],
    direction: Direction,
    left_instance: expr.ColumnReference | None,
    right_instance: expr.ColumnReference | None,
):
    """Common implementation behind the public ``asof_join*`` wrappers.

    Materializes the time expressions as a ``_pw_time`` column on each side,
    applies the temporal behavior, collects the equality join conditions and
    wraps everything in an ``AsofJoinResult``.
    """
    check_joint_types(
        {"t_left": (t_left, TimeEventType), "t_right": (t_right, TimeEventType)}
    )
    left_timed = self.with_columns(_pw_time=t_left)
    right_timed = other.with_columns(_pw_time=t_right)
    left_timed = apply_temporal_behavior(left_timed, behavior)
    right_timed = apply_temporal_behavior(right_timed, behavior)
    left_side = _SideData(
        side=False,
        original_table=self,
        table=left_timed,
        conds=[],
        t=left_timed._pw_time,
    )
    right_side = _SideData(
        side=True,
        original_table=other,
        table=right_timed,
        conds=[],
        t=right_timed._pw_time,
    )
    # Instance columns, when given, are just one more equality condition;
    # they must be provided for both sides or for neither.
    if left_instance is None or right_instance is None:
        assert left_instance is None and right_instance is None
    else:
        on = (*on, left_instance == right_instance)
    for cond in on:
        lhs, rhs, _ = validate_join_condition(cond, self, other)
        left_side.conds.append(left_timed[lhs.name])
        right_side.conds.append(right_timed[rhs.name])
    return AsofJoinResult(
        side_data={False: left_side, True: right_side},
        mode=how,
        defaults={c._to_internal(): v for c, v in defaults.items()},
        direction=direction,
        _filter_out_results_of_forgetting=behavior is None or behavior.keep_results,
    )
class CommonBehavior(Behavior):
    """Defines temporal behavior of windows and temporal joins."""
    # Buffer entries until the maximal already-seen time is >= their time plus `delay`.
    delay: IntervalType | None
    # Ignore/garbage-collect entries with times <= (maximal already-seen time - `cutoff`);
    # when None, all records from both sides are remembered.
    cutoff: IntervalType | None
    # If False, results older than (maximal seen time - `cutoff`) are forgotten as well.
    keep_results: bool
The provided code snippet includes necessary dependencies for implementing the `asof_join_left` function. Write a Python function `def asof_join_left( self: pw.Table, other: pw.Table, self_time: pw.ColumnExpression, other_time: pw.ColumnExpression, *on: pw.ColumnExpression, behavior: CommonBehavior | None = None, defaults: dict[pw.ColumnReference, Any] = {}, direction: Direction = Direction.BACKWARD, left_instance: expr.ColumnReference | None = None, right_instance: expr.ColumnReference | None = None, )` to solve the following problem:
Perform a left ASOF join of two tables. Args: other: Table to join with self, both must contain a column `val` self_time, other_time: time-like column expression to do the join against on: a list of column expressions. Each must have == as the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. behavior: defines the temporal behavior of a join - features like delaying entries or ignoring late entries. defaults: dictionary column-> default value. Entries in the resulting table that not have a predecessor in the join will be set to this default value. If no default is provided, None will be used. direction: direction of the join, accepted values: Direction.BACKWARD, Direction.FORWARD, Direction.NEAREST left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | K | val | t ... 1 | 0 | 1 | 1 ... 2 | 0 | 2 | 4 ... 3 | 0 | 3 | 5 ... 4 | 0 | 4 | 6 ... 5 | 0 | 5 | 7 ... 6 | 0 | 6 | 11 ... 7 | 0 | 7 | 12 ... 8 | 1 | 8 | 5 ... 9 | 1 | 9 | 7 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | K | val | t ... 21 | 1 | 7 | 2 ... 22 | 1 | 3 | 8 ... 23 | 0 | 0 | 2 ... 24 | 0 | 6 | 3 ... 25 | 0 | 2 | 7 ... 26 | 0 | 3 | 8 ... 27 | 0 | 9 | 9 ... 28 | 0 | 7 | 13 ... 29 | 0 | 4 | 14 ... ''' ... ) >>> res = t1.asof_join_left( ... t2, ... t1.t, ... t2.t, ... t1.K == t2.K, ... defaults={t2.val: -1}, ... ).select( ... pw.this.instance, ... pw.this.t, ... val_left=t1.val, ... val_right=t2.val, ... sum=t1.val + t2.val, ... ) >>> pw.debug.compute_and_print(res, include_id=False) instance | t | val_left | val_right | sum 0 | 1 | 1 | -1 | 0 0 | 4 | 2 | 6 | 8 0 | 5 | 3 | 6 | 9 0 | 6 | 4 | 6 | 10 0 | 7 | 5 | 2 | 7 0 | 11 | 6 | 9 | 15 0 | 12 | 7 | 9 | 16 1 | 5 | 8 | 7 | 15 1 | 7 | 9 | 7 | 16 Setting `behavior` allows to control temporal behavior of an asof join. 
Then, each side of the asof join keeps track of the maximal already seen time (`self_time` and `other_time`). In the context of `asof_join` the arguments of `behavior` are defined as follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, the asof join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Examples without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | event_time | __time__ ... 2 | 2 | 4 ... 3 | 5 | 6 ... 4 | 1 | 8 ... 5 | 7 | 14 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | event_time | __time__ ... 42 | 1 | 2 ... 8 | 4 | 10 ... ''' ... ) >>> result_without_cutoff = t1.asof_join_left(t2, t1.event_time, t2.event_time).select( ... left_value=t1.value, ... right_value=t2.value, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | left_time | right_time | __time__ | __diff__ 2 | 42 | 2 | 1 | 4 | 1 3 | 42 | 5 | 1 | 6 | 1 4 | 42 | 1 | 1 | 8 | 1 3 | 42 | 5 | 1 | 10 | -1 3 | 8 | 5 | 4 | 10 | 1 5 | 8 | 7 | 4 | 14 | 1 >>> >>> result_without_cutoff = t1.asof_join_left( ... t2, ... t1.event_time, ... t2.event_time, ... behavior=pw.temporal.common_behavior(cutoff=2), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... left_time=t1.event_time, ... right_time=t2.event_time, ... 
) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | left_time | right_time | __time__ | __diff__ 2 | 42 | 2 | 1 | 4 | 1 3 | 42 | 5 | 1 | 6 | 1 3 | 42 | 5 | 1 | 10 | -1 3 | 8 | 5 | 4 | 10 | 1 5 | 8 | 7 | 4 | 14 | 1 The record with ``value=4`` from table ``t1`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``1 <= 5-2``).
Here is the function:
def asof_join_left(
    self: pw.Table,
    other: pw.Table,
    self_time: pw.ColumnExpression,
    other_time: pw.ColumnExpression,
    *on: pw.ColumnExpression,
    behavior: CommonBehavior | None = None,
    defaults: dict[pw.ColumnReference, Any] | None = None,
    direction: Direction = Direction.BACKWARD,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
):
    """Perform a left ASOF join of two tables.

    Args:
        other: Table to join with self, both must contain a column `val`
        self_time, other_time: time-like column expression to do the join against
        on: a list of column expressions. Each must have == as the top level operation
            and be of the form LHS: ColumnReference == RHS: ColumnReference.
        behavior: defines the temporal behavior of a join - features like delaying entries
            or ignoring late entries.
        defaults: dictionary column-> default value. Entries in the resulting table that do
            not have a predecessor in the join will be set to this default value. If no
            default is provided, None will be used.
        direction: direction of the join, accepted values: Direction.BACKWARD,
            Direction.FORWARD, Direction.NEAREST
        left_instance/right_instance: optional arguments describing partitioning of the data into
            separate instances

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...     | K | val | t
    ...     1 | 0 | 1 | 1
    ...     2 | 0 | 2 | 4
    ...     3 | 0 | 3 | 5
    ...     4 | 0 | 4 | 6
    ...     5 | 0 | 5 | 7
    ...     6 | 0 | 6 | 11
    ...     7 | 0 | 7 | 12
    ...     8 | 1 | 8 | 5
    ...     9 | 1 | 9 | 7
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...      | K | val | t
    ...     21 | 1 | 7 | 2
    ...     22 | 1 | 3 | 8
    ...     23 | 0 | 0 | 2
    ...     24 | 0 | 6 | 3
    ...     25 | 0 | 2 | 7
    ...     26 | 0 | 3 | 8
    ...     27 | 0 | 9 | 9
    ...     28 | 0 | 7 | 13
    ...     29 | 0 | 4 | 14
    ...     '''
    ... )
    >>> res = t1.asof_join_left(
    ...     t2,
    ...     t1.t,
    ...     t2.t,
    ...     t1.K == t2.K,
    ...     defaults={t2.val: -1},
    ... ).select(
    ...     pw.this.instance,
    ...     pw.this.t,
    ...     val_left=t1.val,
    ...     val_right=t2.val,
    ...     sum=t1.val + t2.val,
    ... )
    >>> pw.debug.compute_and_print(res, include_id=False)
    instance | t | val_left | val_right | sum
    0 | 1 | 1 | -1 | 0
    0 | 4 | 2 | 6 | 8
    0 | 5 | 3 | 6 | 9
    0 | 6 | 4 | 6 | 10
    0 | 7 | 5 | 2 | 7
    0 | 11 | 6 | 9 | 15
    0 | 12 | 7 | 9 | 16
    1 | 5 | 8 | 7 | 15
    1 | 7 | 9 | 7 | 16

    Setting `behavior` allows to control temporal behavior of an asof join. Then, each side of
    the asof join keeps track of the maximal already seen time (`self_time` and `other_time`).
    In the context of `asof_join` the arguments of `behavior` are defined as follows:

    - **delay** - buffers results until the maximal already seen time is greater than \
        or equal to their time plus `delay`.
    - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \
        it is also used to garbage collect records that have times lower or equal to the above threshold. \
        When `cutoff` is not set, the asof join will remember all records from both sides.
    - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \
        keeps only results that are newer than the maximal seen time minus `cutoff`.

    Examples without and with forgetting:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...     value | event_time | __time__
    ...     2     | 2          | 4
    ...     3     | 5          | 6
    ...     4     | 1          | 8
    ...     5     | 7          | 14
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...     value | event_time | __time__
    ...     42    | 1          | 2
    ...     8     | 4          | 10
    ...     '''
    ... )
    >>> result_without_cutoff = t1.asof_join_left(t2, t1.event_time, t2.event_time).select(
    ...     left_value=t1.value,
    ...     right_value=t2.value,
    ...     left_time=t1.event_time,
    ...     right_time=t2.event_time,
    ... )
    >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False)
    left_value | right_value | left_time | right_time | __time__ | __diff__
    2 | 42 | 2 | 1 | 4 | 1
    3 | 42 | 5 | 1 | 6 | 1
    4 | 42 | 1 | 1 | 8 | 1
    3 | 42 | 5 | 1 | 10 | -1
    3 | 8 | 5 | 4 | 10 | 1
    5 | 8 | 7 | 4 | 14 | 1
    >>>
    >>> result_without_cutoff = t1.asof_join_left(
    ...     t2,
    ...     t1.event_time,
    ...     t2.event_time,
    ...     behavior=pw.temporal.common_behavior(cutoff=2),
    ... ).select(
    ...     left_value=t1.value,
    ...     right_value=t2.value,
    ...     left_time=t1.event_time,
    ...     right_time=t2.event_time,
    ... )
    >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False)
    left_value | right_value | left_time | right_time | __time__ | __diff__
    2 | 42 | 2 | 1 | 4 | 1
    3 | 42 | 5 | 1 | 6 | 1
    3 | 42 | 5 | 1 | 10 | -1
    3 | 8 | 5 | 4 | 10 | 1
    5 | 8 | 7 | 4 | 14 | 1

    The record with ``value=4`` from table ``t1`` was not joined because its ``event_time``
    was less than the maximal already seen time minus ``cutoff`` (``1 <= 5-2``).
    """
    # `defaults` used to default to a shared mutable `{}`; accept None and
    # normalize per call to avoid the mutable-default-argument pitfall.
    return _asof_join(
        self,
        other,
        self_time,
        other_time,
        *on,
        behavior=behavior,
        how=pw.JoinMode.LEFT,
        defaults={} if defaults is None else defaults,
        direction=direction,
        left_instance=left_instance,
        right_instance=right_instance,
    )
Then, each side of the asof join keeps track of the maximal already seen time (`self_time` and `other_time`). In the context of `asof_join` the arguments of `behavior` are defined as follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, the asof join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Examples without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | event_time | __time__ ... 2 | 2 | 4 ... 3 | 5 | 6 ... 4 | 1 | 8 ... 5 | 7 | 14 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | event_time | __time__ ... 42 | 1 | 2 ... 8 | 4 | 10 ... ''' ... ) >>> result_without_cutoff = t1.asof_join_left(t2, t1.event_time, t2.event_time).select( ... left_value=t1.value, ... right_value=t2.value, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | left_time | right_time | __time__ | __diff__ 2 | 42 | 2 | 1 | 4 | 1 3 | 42 | 5 | 1 | 6 | 1 4 | 42 | 1 | 1 | 8 | 1 3 | 42 | 5 | 1 | 10 | -1 3 | 8 | 5 | 4 | 10 | 1 5 | 8 | 7 | 4 | 14 | 1 >>> >>> result_without_cutoff = t1.asof_join_left( ... t2, ... t1.event_time, ... t2.event_time, ... behavior=pw.temporal.common_behavior(cutoff=2), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... left_time=t1.event_time, ... right_time=t2.event_time, ... 
) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | left_time | right_time | __time__ | __diff__ 2 | 42 | 2 | 1 | 4 | 1 3 | 42 | 5 | 1 | 6 | 1 3 | 42 | 5 | 1 | 10 | -1 3 | 8 | 5 | 4 | 10 | 1 5 | 8 | 7 | 4 | 14 | 1 The record with ``value=4`` from table ``t1`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``1 <= 5-2``). |
166,655 | from __future__ import annotations
import dataclasses
import enum
from typing import Any
import pathway.internals as pw
import pathway.internals.expression as expr
import pathway.stdlib.indexing
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
SubstitutionDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.joins import validate_join_condition
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.stdlib.temporal.temporal_behavior import (
CommonBehavior,
apply_temporal_behavior,
)
from .utils import TimeEventType, check_joint_types
class Direction(enum.Enum):
    """Search direction used by the ``asof_join*`` functions when looking for
    the matching record on the other side of the join.

    NOTE(review): the per-member comments below are inferred from the member
    names and the ``asof_join*`` docstrings — confirm against AsofJoinResult.
    """

    BACKWARD = 0  # match the closest record with an earlier time
    FORWARD = 1  # match the closest record with a later time
    NEAREST = 2  # match whichever record is closest in time
def _asof_join(
    self: pw.Table,
    other: pw.Table,
    t_left: pw.ColumnExpression,
    t_right: pw.ColumnExpression,
    *on: pw.ColumnExpression,
    behavior: CommonBehavior | None,
    how: pw.JoinMode,
    defaults: dict[pw.ColumnReference, Any],
    direction: Direction,
    left_instance: expr.ColumnReference | None,
    right_instance: expr.ColumnReference | None,
):
    """Shared implementation behind the public ``asof_join*`` variants.

    Augments both tables with a ``_pw_time`` column taken from the given time
    expressions, applies the temporal ``behavior`` to each side, collects the
    equality-join conditions from ``on`` (plus the optional instance columns),
    and wraps everything in an ``AsofJoinResult``.
    """
    check_joint_types(
        {"t_left": (t_left, TimeEventType), "t_right": (t_right, TimeEventType)}
    )
    # Each side gets its event time materialized as `_pw_time`, then the
    # temporal behavior (delay / cutoff) is applied on top of it.
    left_augmented = apply_temporal_behavior(
        self.with_columns(_pw_time=t_left), behavior
    )
    right_augmented = apply_temporal_behavior(
        other.with_columns(_pw_time=t_right), behavior
    )
    side_data: dict[bool, _SideData] = {}
    for is_right, original, augmented in (
        (False, self, left_augmented),
        (True, other, right_augmented),
    ):
        side_data[is_right] = _SideData(
            side=is_right,
            original_table=original,
            table=augmented,
            conds=[],
            t=augmented._pw_time,
        )
    # Instance columns must be provided for both sides or for neither.
    if left_instance is None or right_instance is None:
        assert left_instance is None and right_instance is None
    else:
        on = (*on, left_instance == right_instance)
    for cond in on:
        cond_left, cond_right, _ = validate_join_condition(cond, self, other)
        side_data[False].conds.append(left_augmented[cond_left.name])
        side_data[True].conds.append(right_augmented[cond_right.name])
    filter_out_flag = behavior is None or behavior.keep_results
    return AsofJoinResult(
        side_data=side_data,
        mode=how,
        defaults={c._to_internal(): v for c, v in defaults.items()},
        direction=direction,
        _filter_out_results_of_forgetting=filter_out_flag,
    )
class CommonBehavior(Behavior):
    """Defines temporal behavior of windows and temporal joins."""

    # Buffer results until the maximal already seen time is greater than or
    # equal to their time plus `delay`; None disables buffering.
    delay: IntervalType | None
    # Ignore (and garbage-collect) records with times less than or equal to
    # the maximal already seen time minus `cutoff`; None keeps all records.
    cutoff: IntervalType | None
    # If True, keep all results; if False, retract results older than the
    # maximal seen time minus `cutoff`.
    keep_results: bool
The provided code snippet includes necessary dependencies for implementing the `asof_join_right` function. Write a Python function `def asof_join_right( self: pw.Table, other: pw.Table, self_time: pw.ColumnExpression, other_time: pw.ColumnExpression, *on: pw.ColumnExpression, behavior: CommonBehavior | None = None, defaults: dict[pw.ColumnReference, Any] = {}, direction: Direction = Direction.BACKWARD, left_instance: expr.ColumnReference | None = None, right_instance: expr.ColumnReference | None = None, )` to solve the following problem:
Perform a right ASOF join of two tables. Args: other: Table to join with self, both must contain a column `val` self_time, other_time: time-like column expression to do the join against on: a list of column expressions. Each must have == as the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. behavior: defines the temporal behavior of a join - features like delaying entries or ignoring late entries. defaults: dictionary column-> default value. Entries in the resulting table that not have a predecessor in the join will be set to this default value. If no default is provided, None will be used. direction: direction of the join, accepted values: Direction.BACKWARD, Direction.FORWARD, Direction.NEAREST left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | K | val | t ... 1 | 0 | 1 | 1 ... 2 | 0 | 2 | 4 ... 3 | 0 | 3 | 5 ... 4 | 0 | 4 | 6 ... 5 | 0 | 5 | 7 ... 6 | 0 | 6 | 11 ... 7 | 0 | 7 | 12 ... 8 | 1 | 8 | 5 ... 9 | 1 | 9 | 7 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | K | val | t ... 21 | 1 | 7 | 2 ... 22 | 1 | 3 | 8 ... 23 | 0 | 0 | 2 ... 24 | 0 | 6 | 3 ... 25 | 0 | 2 | 7 ... 26 | 0 | 3 | 8 ... 27 | 0 | 9 | 9 ... 28 | 0 | 7 | 13 ... 29 | 0 | 4 | 14 ... ''' ... ) >>> res = t1.asof_join_right( ... t2, ... t1.t, ... t2.t, ... t1.K == t2.K, ... defaults={t1.val: -1}, ... ).select( ... pw.this.instance, ... pw.this.t, ... val_left=t1.val, ... val_right=t2.val, ... sum=t1.val + t2.val, ... ) >>> pw.debug.compute_and_print(res, include_id=False) instance | t | val_left | val_right | sum 0 | 2 | 1 | 0 | 1 0 | 3 | 1 | 6 | 7 0 | 7 | 5 | 2 | 7 0 | 8 | 5 | 3 | 8 0 | 9 | 5 | 9 | 14 0 | 13 | 7 | 7 | 14 0 | 14 | 7 | 4 | 11 1 | 2 | -1 | 7 | 6 1 | 8 | 9 | 3 | 12 Setting `behavior` allows to control temporal behavior of an asof join. 
Then, each side of the asof join keeps track of the maximal already seen time (`self_time` and `other_time`). In the context of `asof_join` the arguments of `behavior` are defined as follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, the asof join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Examples without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | event_time | __time__ ... 42 | 1 | 2 ... 8 | 4 | 10 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | event_time | __time__ ... 2 | 2 | 4 ... 3 | 5 | 6 ... 4 | 1 | 8 ... 5 | 7 | 14 ... ''' ... ) >>> result_without_cutoff = t1.asof_join_right(t2, t1.event_time, t2.event_time).select( ... left_value=t1.value, ... right_value=t2.value, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | left_time | right_time | __time__ | __diff__ 42 | 2 | 1 | 2 | 4 | 1 42 | 3 | 1 | 5 | 6 | 1 42 | 4 | 1 | 1 | 8 | 1 42 | 3 | 1 | 5 | 10 | -1 8 | 3 | 4 | 5 | 10 | 1 8 | 5 | 4 | 7 | 14 | 1 >>> result_without_cutoff = t1.asof_join_right( ... t2, ... t1.event_time, ... t2.event_time, ... behavior=pw.temporal.common_behavior(cutoff=2), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... left_time=t1.event_time, ... right_time=t2.event_time, ... 
) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | left_time | right_time | __time__ | __diff__ 42 | 2 | 1 | 2 | 4 | 1 42 | 3 | 1 | 5 | 6 | 1 42 | 3 | 1 | 5 | 10 | -1 8 | 3 | 4 | 5 | 10 | 1 8 | 5 | 4 | 7 | 14 | 1 The record with ``value=4`` from table ``t2`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``1 <= 5-2``).
Here is the function:
def asof_join_right(
    self: pw.Table,
    other: pw.Table,
    self_time: pw.ColumnExpression,
    other_time: pw.ColumnExpression,
    *on: pw.ColumnExpression,
    behavior: CommonBehavior | None = None,
    defaults: dict[pw.ColumnReference, Any] = {},  # NOTE(review): mutable default is safe — _asof_join only reads it
    direction: Direction = Direction.BACKWARD,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
):
    """Perform a right ASOF join of two tables.

    Args:
        other: Table to join with self.
        self_time, other_time: time-like column expression to do the join against.
        on: a list of column expressions. Each must have == as the top level operation
            and be of the form LHS: ColumnReference == RHS: ColumnReference.
        behavior: defines the temporal behavior of a join - features like delaying entries
            or ignoring late entries.
        defaults: dictionary column -> default value. Entries in the resulting table that
            do not have a predecessor in the join will be set to this default value. If no
            default is provided, None will be used.
        direction: direction of the join, accepted values: Direction.BACKWARD,
            Direction.FORWARD, Direction.NEAREST
        left_instance/right_instance: optional arguments describing partitioning of the data into
            separate instances

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | K | val |  t
    ...     1 | 0 | 1   |  1
    ...     2 | 0 | 2   |  4
    ...     3 | 0 | 3   |  5
    ...     4 | 0 | 4   |  6
    ...     5 | 0 | 5   |  7
    ...     6 | 0 | 6   | 11
    ...     7 | 0 | 7   | 12
    ...     8 | 1 | 8   |  5
    ...     9 | 1 | 9   |  7
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...        | K | val |  t
    ...     21 | 1 | 7   |  2
    ...     22 | 1 | 3   |  8
    ...     23 | 0 | 0   |  2
    ...     24 | 0 | 6   |  3
    ...     25 | 0 | 2   |  7
    ...     26 | 0 | 3   |  8
    ...     27 | 0 | 9   |  9
    ...     28 | 0 | 7   | 13
    ...     29 | 0 | 4   | 14
    ... '''
    ... )
    >>> res = t1.asof_join_right(
    ...     t2,
    ...     t1.t,
    ...     t2.t,
    ...     t1.K == t2.K,
    ...     defaults={t1.val: -1},
    ... ).select(
    ...     pw.this.instance,
    ...     pw.this.t,
    ...     val_left=t1.val,
    ...     val_right=t2.val,
    ...     sum=t1.val + t2.val,
    ... )
    >>> pw.debug.compute_and_print(res, include_id=False)
    instance | t | val_left | val_right | sum
    0 | 2 | 1 | 0 | 1
    0 | 3 | 1 | 6 | 7
    0 | 7 | 5 | 2 | 7
    0 | 8 | 5 | 3 | 8
    0 | 9 | 5 | 9 | 14
    0 | 13 | 7 | 7 | 14
    0 | 14 | 7 | 4 | 11
    1 | 2 | -1 | 7 | 6
    1 | 8 | 9 | 3 | 12

    Setting `behavior` allows you to control the temporal behavior of an asof join. Then,
    each side of the asof join keeps track of the maximal already seen time
    (`self_time` and `other_time`). In the context of `asof_join` the arguments
    of `behavior` are defined as follows:

    - **delay** - buffers results until the maximal already seen time is greater than \
        or equal to their time plus `delay`.
    - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \
        it is also used to garbage collect records that have times lower or equal to the above threshold. \
        When `cutoff` is not set, the asof join will remember all records from both sides.
    - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \
        keeps only results that are newer than the maximal seen time minus `cutoff`.

    Examples without and with forgetting:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...     value | event_time | __time__
    ...      42   |     1      |    2
    ...       8   |     4      |   10
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...     value | event_time | __time__
    ...       2   |     2      |    4
    ...       3   |     5      |    6
    ...       4   |     1      |    8
    ...       5   |     7      |   14
    ... '''
    ... )
    >>> result_without_cutoff = t1.asof_join_right(t2, t1.event_time, t2.event_time).select(
    ...     left_value=t1.value,
    ...     right_value=t2.value,
    ...     left_time=t1.event_time,
    ...     right_time=t2.event_time,
    ... )
    >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False)
    left_value | right_value | left_time | right_time | __time__ | __diff__
    42 | 2 | 1 | 2 | 4 | 1
    42 | 3 | 1 | 5 | 6 | 1
    42 | 4 | 1 | 1 | 8 | 1
    42 | 3 | 1 | 5 | 10 | -1
    8 | 3 | 4 | 5 | 10 | 1
    8 | 5 | 4 | 7 | 14 | 1
    >>> result_with_cutoff = t1.asof_join_right(
    ...     t2,
    ...     t1.event_time,
    ...     t2.event_time,
    ...     behavior=pw.temporal.common_behavior(cutoff=2),
    ... ).select(
    ...     left_value=t1.value,
    ...     right_value=t2.value,
    ...     left_time=t1.event_time,
    ...     right_time=t2.event_time,
    ... )
    >>> pw.debug.compute_and_print_update_stream(result_with_cutoff, include_id=False)
    left_value | right_value | left_time | right_time | __time__ | __diff__
    42 | 2 | 1 | 2 | 4 | 1
    42 | 3 | 1 | 5 | 6 | 1
    42 | 3 | 1 | 5 | 10 | -1
    8 | 3 | 4 | 5 | 10 | 1
    8 | 5 | 4 | 7 | 14 | 1

    The record with ``value=4`` from table ``t2`` was not joined because its ``event_time``
    was less than the maximal already seen time minus ``cutoff`` (``1 <= 5-2``).
    """
    return _asof_join(
        self,
        other,
        self_time,
        other_time,
        *on,
        behavior=behavior,
        how=pw.JoinMode.RIGHT,
        defaults=defaults,
        direction=direction,
        left_instance=left_instance,
        right_instance=right_instance,
    )
Then, each side of the asof join keeps track of the maximal already seen time (`self_time` and `other_time`). In the context of `asof_join` the arguments of `behavior` are defined as follows: - **delay** - buffers results until the maximal already seen time is greater than \ or equal to their time plus `delay`. - **cutoff** - ignores records with times less or equal to the maximal already seen time minus `cutoff`; \ it is also used to garbage collect records that have times lower or equal to the above threshold. \ When `cutoff` is not set, the asof join will remember all records from both sides. - **keep_results** - if set to `True`, keeps all results of the operator. If set to `False`, \ keeps only results that are newer than the maximal seen time minus `cutoff`. Examples without and with forgetting: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... value | event_time | __time__ ... 42 | 1 | 2 ... 8 | 4 | 10 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... value | event_time | __time__ ... 2 | 2 | 4 ... 3 | 5 | 6 ... 4 | 1 | 8 ... 5 | 7 | 14 ... ''' ... ) >>> result_without_cutoff = t1.asof_join_right(t2, t1.event_time, t2.event_time).select( ... left_value=t1.value, ... right_value=t2.value, ... left_time=t1.event_time, ... right_time=t2.event_time, ... ) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | left_time | right_time | __time__ | __diff__ 42 | 2 | 1 | 2 | 4 | 1 42 | 3 | 1 | 5 | 6 | 1 42 | 4 | 1 | 1 | 8 | 1 42 | 3 | 1 | 5 | 10 | -1 8 | 3 | 4 | 5 | 10 | 1 8 | 5 | 4 | 7 | 14 | 1 >>> result_without_cutoff = t1.asof_join_right( ... t2, ... t1.event_time, ... t2.event_time, ... behavior=pw.temporal.common_behavior(cutoff=2), ... ).select( ... left_value=t1.value, ... right_value=t2.value, ... left_time=t1.event_time, ... right_time=t2.event_time, ... 
) >>> pw.debug.compute_and_print_update_stream(result_without_cutoff, include_id=False) left_value | right_value | left_time | right_time | __time__ | __diff__ 42 | 2 | 1 | 2 | 4 | 1 42 | 3 | 1 | 5 | 6 | 1 42 | 3 | 1 | 5 | 10 | -1 8 | 3 | 4 | 5 | 10 | 1 8 | 5 | 4 | 7 | 14 | 1 The record with ``value=4`` from table ``t2`` was not joined because its ``event_time`` was less than the maximal already seen time minus ``cutoff`` (``1 <= 5-2``). |
166,656 | from __future__ import annotations
import dataclasses
import enum
from typing import Any
import pathway.internals as pw
import pathway.internals.expression as expr
import pathway.stdlib.indexing
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
select_args_handler,
)
from pathway.internals.desugaring import (
DesugaringContext,
SubstitutionDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.joins import validate_join_condition
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.stdlib.temporal.temporal_behavior import (
CommonBehavior,
apply_temporal_behavior,
)
from .utils import TimeEventType, check_joint_types
class Direction(enum.Enum):
    """Search direction used by the ``asof_join*`` functions when looking for
    the matching record on the other side of the join.

    NOTE(review): the per-member comments below are inferred from the member
    names and the ``asof_join*`` docstrings — confirm against AsofJoinResult.
    """

    BACKWARD = 0  # match the closest record with an earlier time
    FORWARD = 1  # match the closest record with a later time
    NEAREST = 2  # match whichever record is closest in time
def _asof_join(
    self: pw.Table,
    other: pw.Table,
    t_left: pw.ColumnExpression,
    t_right: pw.ColumnExpression,
    *on: pw.ColumnExpression,
    behavior: CommonBehavior | None,
    how: pw.JoinMode,
    defaults: dict[pw.ColumnReference, Any],
    direction: Direction,
    left_instance: expr.ColumnReference | None,
    right_instance: expr.ColumnReference | None,
):
    """Shared implementation behind the public ``asof_join*`` variants.

    Augments both tables with a ``_pw_time`` column taken from the given time
    expressions, applies the temporal ``behavior`` to each side, collects the
    equality-join conditions from ``on`` (plus the optional instance columns),
    and wraps everything in an ``AsofJoinResult``.
    """
    check_joint_types(
        {"t_left": (t_left, TimeEventType), "t_right": (t_right, TimeEventType)}
    )
    # Each side gets its event time materialized as `_pw_time`, then the
    # temporal behavior (delay / cutoff) is applied on top of it.
    left_augmented = apply_temporal_behavior(
        self.with_columns(_pw_time=t_left), behavior
    )
    right_augmented = apply_temporal_behavior(
        other.with_columns(_pw_time=t_right), behavior
    )
    side_data: dict[bool, _SideData] = {}
    for is_right, original, augmented in (
        (False, self, left_augmented),
        (True, other, right_augmented),
    ):
        side_data[is_right] = _SideData(
            side=is_right,
            original_table=original,
            table=augmented,
            conds=[],
            t=augmented._pw_time,
        )
    # Instance columns must be provided for both sides or for neither.
    if left_instance is None or right_instance is None:
        assert left_instance is None and right_instance is None
    else:
        on = (*on, left_instance == right_instance)
    for cond in on:
        cond_left, cond_right, _ = validate_join_condition(cond, self, other)
        side_data[False].conds.append(left_augmented[cond_left.name])
        side_data[True].conds.append(right_augmented[cond_right.name])
    filter_out_flag = behavior is None or behavior.keep_results
    return AsofJoinResult(
        side_data=side_data,
        mode=how,
        defaults={c._to_internal(): v for c, v in defaults.items()},
        direction=direction,
        _filter_out_results_of_forgetting=filter_out_flag,
    )
)
class CommonBehavior(Behavior):
    """Defines temporal behavior of windows and temporal joins."""

    # Buffer results until the maximal already seen time is greater than or
    # equal to their time plus `delay`; None disables buffering.
    delay: IntervalType | None
    # Ignore (and garbage-collect) records with times less than or equal to
    # the maximal already seen time minus `cutoff`; None keeps all records.
    cutoff: IntervalType | None
    # If True, keep all results; if False, retract results older than the
    # maximal seen time minus `cutoff`.
    keep_results: bool
The provided code snippet includes necessary dependencies for implementing the `asof_join_outer` function. Write a Python function `def asof_join_outer( self: pw.Table, other: pw.Table, self_time: pw.ColumnExpression, other_time: pw.ColumnExpression, *on: pw.ColumnExpression, behavior: CommonBehavior | None = None, defaults: dict[pw.ColumnReference, Any] = {}, direction: Direction = Direction.BACKWARD, left_instance: expr.ColumnReference | None = None, right_instance: expr.ColumnReference | None = None, )` to solve the following problem:
Perform an outer ASOF join of two tables. Args: other: Table to join with self, both must contain a column `val` self_time, other_time: time-like column expression to do the join against on: a list of column expressions. Each must have == as the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. behavior: defines the temporal behavior of a join - features like delaying entries or ignoring late entries. defaults: dictionary column-> default value. Entries in the resulting table that not have a predecessor in the join will be set to this default value. If no default is provided, None will be used. direction: direction of the join, accepted values: Direction.BACKWARD, Direction.FORWARD, Direction.NEAREST left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | K | val | t ... 1 | 0 | 1 | 1 ... 2 | 0 | 2 | 4 ... 3 | 0 | 3 | 5 ... 4 | 0 | 4 | 6 ... 5 | 0 | 5 | 7 ... 6 | 0 | 6 | 11 ... 7 | 0 | 7 | 12 ... 8 | 1 | 8 | 5 ... 9 | 1 | 9 | 7 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | K | val | t ... 21 | 1 | 7 | 2 ... 22 | 1 | 3 | 8 ... 23 | 0 | 0 | 2 ... 24 | 0 | 6 | 3 ... 25 | 0 | 2 | 7 ... 26 | 0 | 3 | 8 ... 27 | 0 | 9 | 9 ... 28 | 0 | 7 | 13 ... 29 | 0 | 4 | 14 ... ''' ... ) >>> res = t1.asof_join_outer( ... t2, ... t1.t, ... t2.t, ... t1.K == t2.K, ... defaults={t1.val: -1, t2.val: -1}, ... ).select( ... pw.this.instance, ... pw.this.t, ... val_left=t1.val, ... val_right=t2.val, ... sum=t1.val + t2.val, ... 
) >>> pw.debug.compute_and_print(res, include_id=False) instance | t | val_left | val_right | sum 0 | 1 | 1 | -1 | 0 0 | 2 | 1 | 0 | 1 0 | 3 | 1 | 6 | 7 0 | 4 | 2 | 6 | 8 0 | 5 | 3 | 6 | 9 0 | 6 | 4 | 6 | 10 0 | 7 | 5 | 2 | 7 0 | 7 | 5 | 6 | 11 0 | 8 | 5 | 3 | 8 0 | 9 | 5 | 9 | 14 0 | 11 | 6 | 9 | 15 0 | 12 | 7 | 9 | 16 0 | 13 | 7 | 7 | 14 0 | 14 | 7 | 4 | 11 1 | 2 | -1 | 7 | 6 1 | 5 | 8 | 7 | 15 1 | 7 | 9 | 7 | 16 1 | 8 | 9 | 3 | 12
Here is the function:
def asof_join_outer(
    self: pw.Table,
    other: pw.Table,
    self_time: pw.ColumnExpression,
    other_time: pw.ColumnExpression,
    *on: pw.ColumnExpression,
    behavior: CommonBehavior | None = None,
    defaults: dict[pw.ColumnReference, Any] = {},  # NOTE(review): mutable default is safe — _asof_join only reads it
    direction: Direction = Direction.BACKWARD,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
):
    """Perform an outer ASOF join of two tables.

    Args:
        other: Table to join with self.
        self_time, other_time: time-like column expression to do the join against.
        on: a list of column expressions. Each must have == as the top level operation
            and be of the form LHS: ColumnReference == RHS: ColumnReference.
        behavior: defines the temporal behavior of a join - features like delaying entries
            or ignoring late entries.
        defaults: dictionary column -> default value. Entries in the resulting table that
            do not have a predecessor in the join will be set to this default value. If no
            default is provided, None will be used.
        direction: direction of the join, accepted values: Direction.BACKWARD,
            Direction.FORWARD, Direction.NEAREST
        left_instance/right_instance: optional arguments describing partitioning of the data into
            separate instances

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...       | K | val |  t
    ...     1 | 0 | 1   |  1
    ...     2 | 0 | 2   |  4
    ...     3 | 0 | 3   |  5
    ...     4 | 0 | 4   |  6
    ...     5 | 0 | 5   |  7
    ...     6 | 0 | 6   | 11
    ...     7 | 0 | 7   | 12
    ...     8 | 1 | 8   |  5
    ...     9 | 1 | 9   |  7
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...        | K | val |  t
    ...     21 | 1 | 7   |  2
    ...     22 | 1 | 3   |  8
    ...     23 | 0 | 0   |  2
    ...     24 | 0 | 6   |  3
    ...     25 | 0 | 2   |  7
    ...     26 | 0 | 3   |  8
    ...     27 | 0 | 9   |  9
    ...     28 | 0 | 7   | 13
    ...     29 | 0 | 4   | 14
    ... '''
    ... )
    >>> res = t1.asof_join_outer(
    ...     t2,
    ...     t1.t,
    ...     t2.t,
    ...     t1.K == t2.K,
    ...     defaults={t1.val: -1, t2.val: -1},
    ... ).select(
    ...     pw.this.instance,
    ...     pw.this.t,
    ...     val_left=t1.val,
    ...     val_right=t2.val,
    ...     sum=t1.val + t2.val,
    ... )
    >>> pw.debug.compute_and_print(res, include_id=False)
    instance | t | val_left | val_right | sum
    0 | 1 | 1 | -1 | 0
    0 | 2 | 1 | 0 | 1
    0 | 3 | 1 | 6 | 7
    0 | 4 | 2 | 6 | 8
    0 | 5 | 3 | 6 | 9
    0 | 6 | 4 | 6 | 10
    0 | 7 | 5 | 2 | 7
    0 | 7 | 5 | 6 | 11
    0 | 8 | 5 | 3 | 8
    0 | 9 | 5 | 9 | 14
    0 | 11 | 6 | 9 | 15
    0 | 12 | 7 | 9 | 16
    0 | 13 | 7 | 7 | 14
    0 | 14 | 7 | 4 | 11
    1 | 2 | -1 | 7 | 6
    1 | 5 | 8 | 7 | 15
    1 | 7 | 9 | 7 | 16
    1 | 8 | 9 | 3 | 12
    """
    return _asof_join(
        self,
        other,
        self_time,
        other_time,
        *on,
        behavior=behavior,
        how=pw.JoinMode.OUTER,
        defaults=defaults,
        direction=direction,
        left_instance=left_instance,
        right_instance=right_instance,
    )
) >>> pw.debug.compute_and_print(res, include_id=False) instance | t | val_left | val_right | sum 0 | 1 | 1 | -1 | 0 0 | 2 | 1 | 0 | 1 0 | 3 | 1 | 6 | 7 0 | 4 | 2 | 6 | 8 0 | 5 | 3 | 6 | 9 0 | 6 | 4 | 6 | 10 0 | 7 | 5 | 2 | 7 0 | 7 | 5 | 6 | 11 0 | 8 | 5 | 3 | 8 0 | 9 | 5 | 9 | 14 0 | 11 | 6 | 9 | 15 0 | 12 | 7 | 9 | 16 0 | 13 | 7 | 7 | 14 0 | 14 | 7 | 4 | 11 1 | 2 | -1 | 7 | 6 1 | 5 | 8 | 7 | 15 1 | 7 | 9 | 7 | 16 1 | 8 | 9 | 3 | 12 |
166,657 | import pathway as pw
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
The provided code snippet includes necessary dependencies for implementing the `diff` function. Write a Python function `def diff( self: pw.Table, timestamp: pw.ColumnReference, *values: pw.ColumnReference, ) -> pw.Table` to solve the following problem:
Compute the difference between the values in the ``values`` columns and the previous values according to the order defined by the column ``timestamp``. Args: - timestamp (pw.ColumnReference[int | float | datetime | str | bytes]): The column reference to the ``timestamp`` column on which the order is computed. - *values (pw.ColumnReference[int | float | datetime]): Variable-length argument representing the column references to the ``values`` columns. Returns: ``Table``: A new table where each column is replaced with a new column containing the difference and whose name is the concatenation of `diff_` and the former name. Raises: ValueError: If the columns are not ColumnReference. Note: - The value of the "first" value (the row with the lower value \ in the ``timestamp`` column) is ``None``. Example: >>> import pathway as pw >>> table = pw.debug.table_from_markdown(''' ... timestamp | values ... 1 | 1 ... 2 | 2 ... 3 | 4 ... 4 | 7 ... 5 | 11 ... 6 | 16 ... ''') >>> table += table.diff(pw.this.timestamp, pw.this.values) >>> pw.debug.compute_and_print(table, include_id=False) timestamp | values | diff_values 1 | 1 | 2 | 2 | 1 3 | 4 | 2 4 | 7 | 3 5 | 11 | 4 6 | 16 | 5
Here is the function:
def diff(
    self: pw.Table,
    timestamp: pw.ColumnReference,
    *values: pw.ColumnReference,
) -> pw.Table:
    """
    Compute the difference between the values in the ``values`` columns and the previous values
    according to the order defined by the column ``timestamp``.

    Args:
        timestamp (pw.ColumnReference[int | float | datetime | str | bytes]):
            The column reference to the ``timestamp`` column on which the order is computed.
        *values (pw.ColumnReference[int | float | datetime]):
            Variable-length argument representing the column references to the ``values`` columns.

    Returns:
        ``Table``: A new table where each column is replaced with a new column containing
        the difference and whose name is the concatenation of `diff_` and the former name.

    Raises:
        ValueError: If the columns are not ColumnReference.

    Note:
        The value of the "first" row (the row with the lowest value \
        in the ``timestamp`` column) is ``None``.

    Example:

    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown('''
    ... timestamp | values
    ... 1         | 1
    ... 2         | 2
    ... 3         | 4
    ... 4         | 7
    ... 5         | 11
    ... 6         | 16
    ... ''')
    >>> table += table.diff(pw.this.timestamp, pw.this.values)
    >>> pw.debug.compute_and_print(table, include_id=False)
    timestamp | values | diff_values
    1 | 1 |
    2 | 2 | 1
    3 | 4 | 2
    4 | 7 | 3
    5 | 11 | 4
    6 | 16 | 5
    """

    def _resolve(column, param_name: str) -> pw.ColumnReference:
        # Re-anchor the reference on ``self`` and reject anything that is not
        # a ColumnReference, with a hint when a plain string was passed.
        # (Shared by ``timestamp`` and each element of ``values``; previously
        # this validation was duplicated inline.)
        if isinstance(column, pw.ColumnReference):
            return self[column]
        if isinstance(column, str):
            raise ValueError(
                f"statistical.diff(): Invalid column reference for the parameter {param_name},"
                + f" found a string. Did you mean this.{column} instead of {repr(column)}?"
            )
        raise ValueError(
            f"statistical.diff(): Invalid column reference for the parameter {param_name}."
        )

    timestamp = _resolve(timestamp, "timestamp")
    # Sorting provides `prev`/`next` pointer columns used to reach the
    # previous row in timestamp order.
    ordered_table = self.sort(key=timestamp)
    for value in values:
        value = _resolve(value, "value")
        # The difference exists only for rows with a predecessor; pw.require
        # yields None for the first row (no `prev` pointer).
        ordered_table += ordered_table.select(
            diff=pw.require(
                value
                - pw.unwrap(self.ix(ordered_table.prev, optional=True)[value._name]),
                value,
                ordered_table.prev,
            )
        )
        ordered_table = ordered_table.rename({"diff": "diff_" + value.name})
    return ordered_table.without(ordered_table.prev, ordered_table.next)
166,658 | from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING
from pathway.internals.expression import ColumnReference
from pathway.internals.trace import trace_user_frame
def _compute_interpolate(table_with_prev_next: Table) -> Table:
    """Compute an ``interpolated_value`` for every row of a time-ordered table.

    Each row of ``table_with_prev_next`` must provide ``timestamp``, ``value``
    (possibly None) and ``prev_value``/``next_value``, which are used here as
    row references into the same table (or None when no such row exists).
    NOTE(review): the exact prev/next semantics (closest rows with a non-None
    value) are inferred from usage — confirm against the caller that builds
    this table.
    """
    import pathway.internals as pw

    class computing_interpolate:
        class ordered_ts(pw.ClassArg):
            timestamp = pw.input_attribute()
            value = pw.input_attribute()
            prev_value = pw.input_attribute()
            next_value = pw.input_attribute()

            def interpolated_value(self) -> float | None:
                # Rows that already have a value are returned unchanged.
                if self.value is not None:
                    return self.value
                t = self.timestamp
                # No reference row on either side: nothing to interpolate.
                if self.prev_value is None and self.next_value is None:
                    return None
                # Only one side available: extend that neighbour's value.
                if self.prev_value is None:
                    return self.transformer.ordered_ts[self.next_value].value
                if self.next_value is None:
                    return self.transformer.ordered_ts[self.prev_value].value
                # Both neighbours available: linear interpolation at time t.
                t_prev = self.transformer.ordered_ts[self.prev_value].timestamp
                value_prev = self.transformer.ordered_ts[self.prev_value].value
                t_next = self.transformer.ordered_ts[self.next_value].timestamp
                value_next = self.transformer.ordered_ts[self.next_value].value
                return _linear_interpolate(t, t_prev, value_prev, t_next, value_next)

    return computing_interpolate(
        ordered_ts=table_with_prev_next
    ).ordered_ts  # type: ignore
class InterpolateMode(Enum):
    """Supported interpolation strategies; only linear interpolation for now."""

    LINEAR = 0
class ColumnReference(ColumnExpression):
    """Reference to the column.

    Inherits from ColumnExpression.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age owner pet
    ... 1 10 Alice dog
    ... 2 9 Bob dog
    ... 3 8 Alice cat
    ... 4 7 Bob dog''')
    >>> isinstance(t1.age, pw.ColumnReference)
    True
    >>> isinstance(t1["owner"], pw.ColumnReference)
    True
    """

    _column: Column
    _table: Table
    _name: str

    def __init__(self, _column: Column, _table: Table, _name: str):
        super().__init__()
        self._column = _column
        self._table = _table
        self._name = _name

    def _deps(self) -> tuple[ColumnExpression, ...]:
        # A reference is a leaf expression: it has no sub-expressions.
        return ()

    def _to_internal(self) -> InternalColRef:
        return InternalColRef.build(type(self), self._column, self._table, self._name)

    def _to_original(self) -> ColumnReference:
        # Follow the column's lineage back to the table where it was defined.
        return self._column.lineage.table[self._column.lineage.name]

    # FIX: `table` and `name` must be properties — their own doctests access
    # them as attributes (`t1.age.table is t1`, `t1.age.name` -> 'age'); as
    # plain methods those expressions would yield bound methods.
    @property
    def table(self):
        """Table where the referred column belongs to.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age owner pet
        ... 1 10 Alice dog
        ... 2 9 Bob dog
        ... 3 8 Alice cat
        ... 4 7 Bob dog''')
        >>> t1.age.table is t1
        True
        """
        return self._table

    @property
    def name(self):
        """Name of the referred column.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age owner pet
        ... 1 10 Alice dog
        ... 2 9 Bob dog
        ... 3 8 Alice cat
        ... 4 7 Bob dog''')
        >>> t1.age.name
        'age'
        """
        return self._name

    def _dependencies(self) -> helpers.StableSet[InternalColRef]:
        return helpers.StableSet([self._to_internal()])

    def _dependencies_above_reducer(self) -> helpers.StableSet[InternalColRef]:
        return helpers.StableSet([self._to_internal()])

    def _dependencies_below_reducer(self) -> helpers.StableSet[InternalColRef]:
        return helpers.StableSet()

    def __call__(self, *args) -> ColumnExpression:
        return ColumnCallExpression(self, args)

    def _column_with_expression_cls(self) -> type[ColumnWithExpression]:
        # Imported locally to avoid a circular import with pathway.internals.column.
        from pathway.internals.column import ColumnWithReference

        return ColumnWithReference
class Table(
Joinable,
OperatorInput,
Generic[TSchema],
):
"""Collection of named columns over identical universes.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | dog
... 9 | Bob | dog
... 8 | Alice | cat
... 7 | Bob | dog
... ''')
>>> isinstance(t1, pw.Table)
True
"""
if TYPE_CHECKING:
from pathway.stdlib.ordered import diff # type: ignore[misc]
from pathway.stdlib.statistical import interpolate # type: ignore[misc]
from pathway.stdlib.temporal import ( # type: ignore[misc]
asof_join,
asof_join_left,
asof_join_outer,
asof_join_right,
asof_now_join,
asof_now_join_inner,
asof_now_join_left,
interval_join,
interval_join_inner,
interval_join_left,
interval_join_outer,
interval_join_right,
window_join,
window_join_inner,
window_join_left,
window_join_outer,
window_join_right,
windowby,
)
from pathway.stdlib.viz import ( # type: ignore[misc]
_repr_mimebundle_,
plot,
show,
)
_columns: dict[str, clmn.Column]
_schema: type[Schema]
_id_column: clmn.IdColumn
_rowwise_context: clmn.RowwiseContext
_source: SetOnceProperty[OutputHandle] = SetOnceProperty()
"""Lateinit by operator."""
def __init__(
    self,
    _columns: Mapping[str, clmn.Column],
    _context: clmn.Context,
    _schema: type[Schema] | None = None,
):
    # Internal constructor: columns and context are produced by operators,
    # not by user code.
    if _schema is None:
        # Derive a schema from the columns when none is given explicitly.
        _schema = schema_from_columns(_columns)
    super().__init__(_context)
    self._columns = dict(_columns)  # defensive copy; preserves column order
    self._schema = _schema
    self._id_column = _context.id_column
    # Lets `pw.this` resolve to this table inside expressions.
    self._substitution = {thisclass.this: self}
    # Per-row evaluation context used by select()/filter() and friends.
    self._rowwise_context = clmn.RowwiseContext(self._id_column)
# FIX: `id` must be a property — `_get_colref_by_name` returns `self.id`
# expecting a ColumnReference (not a bound method), and the doctest below
# uses `t1.id` as an attribute.
@property
def id(self) -> expr.ColumnReference:
    """Get reference to pseudocolumn containing id's of a table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1.select(ids = t1.id)
    >>> t2.typehints()['ids']
    <class 'pathway.engine.Pointer'>
    >>> pw.debug.compute_and_print(t2.select(test=t2.id == t2.ids), include_id=False)
    test
    True
    True
    True
    True
    """
    return expr.ColumnReference(_table=self, _column=self._id_column, _name="id")
def column_names(self):
    """Return the names of the table's columns (alias of ``keys()``)."""
    return self.keys()
def keys(self):
    """Return a view of the column names, in definition order."""
    return self._columns.keys()
def _get_column(self, name: str) -> clmn.Column:
    """Look up the internal column object by name; raises KeyError if absent."""
    return self._columns[name]
def _ipython_key_completions_(self):
    """Feed IPython's ``table["<TAB>"]`` completion with the column names."""
    return [*self.column_names()]
def __dir__(self):
    """Extend the default attribute listing with this table's column names."""
    return [*super().__dir__(), *self.column_names()]
def _C(self) -> TSchema:
    # Typed alias for the dynamically-resolved `.C` column-namespace accessor;
    # the ignore silences the checker since `.C` has no static definition here.
    # NOTE(review): sibling accessors in this file appear to have lost their
    # @property decorators in transit — confirm whether _C should be one too.
    return self.C  # type: ignore
# FIX: `schema` must be a property — its own doctest evaluates `t1.schema`
# without a call, and other methods in this class read `self.schema._dtypes()`
# and `self.schema.__dtypes__` as attribute accesses.
@property
def schema(self) -> type[Schema]:
    """Get schema of the table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t1.schema
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
    >>> t1.typehints()['age']
    <class 'int'>
    """
    return self._schema
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference:
    """Build a ColumnReference for ``name``.

    ``exception_type`` lets callers choose the failure flavor: KeyError for
    subscription access, AttributeError for attribute access.
    """
    # Translate deprecated column names to their current spelling first.
    name = self._column_deprecation_rename(name)
    if name == "id":
        # `id` is a pseudocolumn and is not stored in _columns.
        return self.id
    if name not in self.keys():
        raise exception_type(f"Table has no column with name {name}.")
    return expr.ColumnReference(
        _table=self, _column=self._get_column(name), _name=name
    )
# NOTE(review): these two stub signatures look like typing.overload stubs that
# lost their @overload decorators in transit — as written, each plain def is
# immediately shadowed by the next definition. Confirm and restore @overload.
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...
def __getitem__(self, args: list[str | expr.ColumnReference]) -> Table: ...
def __getitem__(
    self, args: str | expr.ColumnReference | list[str | expr.ColumnReference]
) -> expr.ColumnReference | Table:
    """Get columns by name.

    Warning:
        - Does not allow repetitions of columns.
        - Fails if tries to access nonexistent column.

    Args:
        names: a single column name or list of column names to be extracted from `self`.

    Returns:
        Table with specified columns, or column expression (if single argument given).
        Instead of column names, column references are valid here.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1[["age", "pet"]]
    >>> t2 = t1[["age", t1.pet]]
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | pet
    7   | dog
    8   | cat
    9   | dog
    10  | dog
    """
    # Single name: plain lookup.
    if isinstance(args, str):
        return self._get_colref_by_name(args, KeyError)
    # Single reference: accept only references into this table (or pw.this).
    if isinstance(args, expr.ColumnReference):
        if (args.table is not self) and not isinstance(
            args.table, thisclass.ThisMetaclass
        ):
            raise ValueError(
                "Table.__getitem__ argument has to be a ColumnReference to the same table or pw.this, or a string "
                + "(or a list of those)."
            )
        return self._get_colref_by_name(args.name, KeyError)
    # A list of names and/or references selects several columns at once.
    return self.select(*(self[name] for name in args))
# FIX: missing @staticmethod — the signature takes no `self` and the doctest
# invokes it on the class (`pw.Table.from_columns(...)`); without the
# decorator an instance call would bind the table itself as the first column.
@staticmethod
def from_columns(
    *args: expr.ColumnReference, **kwargs: expr.ColumnReference
) -> Table:
    """Build a table from columns.

    All columns must have the same ids. Columns' names must be pairwise distinct.

    Args:
        args: List of columns.
        kwargs: Columns with their new names.

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> t2 = pw.Table.empty(foo=float, bar=float).with_universe_of(t1)
    >>> t3 = pw.Table.from_columns(t1.pet, qux=t2.foo)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | qux
    """
    all_args = cast(
        dict[str, expr.ColumnReference], combine_args_kwargs(args, kwargs)
    )
    if not all_args:
        raise ValueError("Table.from_columns() cannot have empty arguments list")
    else:
        # Anchor on the first column's table and require all universes to
        # be provably equal before selecting.
        arg = next(iter(all_args.values()))
        table: Table = arg.table
        for arg in all_args.values():
            if not G.universe_solver.query_are_equal(
                table._universe, arg.table._universe
            ):
                raise ValueError(
                    "Universes of all arguments of Table.from_columns() have to be equal.\n"
                    + "Consider using Table.promise_universes_are_equal() to assert it.\n"
                    + "(However, untrue assertion might result in runtime errors.)"
                )
        return table.select(*args, **kwargs)
def concat_reindex(self, *tables: Table) -> Table:
    """Concatenate contents of several tables.

    This is similar to PySpark union. All tables must have the same schema. Each row is reindexed.

    Args:
        tables: List of tables to concatenate. All tables must have the same schema.

    Returns:
        Table: The concatenated table. It will have new, synthetic ids.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    | pet
    ...  1 | Dog
    ...  7 | Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | pet
    ...  1 | Manul
    ...  8 | Octopus
    ... ''')
    >>> t3 = t1.concat_reindex(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet
    Cat
    Dog
    Manul
    Octopus
    """
    # Re-key every table from (old id, table index) so the inputs are
    # provably pairwise disjoint before delegating to concat().
    reindexed = [
        table.with_id_from(table.id, i) for i, table in enumerate([self, *tables])
    ]
    universes.promise_are_pairwise_disjoint(*reindexed)
    return Table.concat(*reindexed)
# FIX: missing @staticmethod — the signature takes no `self` and the doctest
# invokes it on the class (`pw.Table.empty(...)`).
@staticmethod
def empty(**kwargs: dt.DType) -> Table:
    """Creates an empty table with a schema specified by kwargs.

    Args:
        kwargs: Dict whose keys are column names and values are column types.

    Returns:
        Table: Created empty table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> pw.debug.compute_and_print(t1, include_id=False)
    age | pet
    """
    # Local import avoids a circular dependency with pathway.internals.table_io.
    from pathway.internals import table_io

    ret = table_io.empty_from_schema(schema_from_types(None, **kwargs))
    # Tell the universe solver this universe is provably empty.
    G.universe_solver.register_as_empty(ret._universe)
    return ret
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Build a new table with columns specified by kwargs.

    Output columns' names are keys(kwargs). values(kwargs) can be raw values, boxed
    values, columns. Assigning to id reindexes the table.

    Args:
        args: Column references.
        kwargs: Column expressions with their new assigned names.

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... pet
    ... Dog
    ... Cat
    ... ''')
    >>> t2 = t1.select(animal=t1.pet, desc="fluffy")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    animal | desc
    Cat    | fluffy
    Dog    | fluffy
    """
    new_columns = []
    # Positional references keep their own names; kwargs name their values.
    all_args = combine_args_kwargs(args, kwargs)
    for new_name, expression in all_args.items():
        self._validate_expression(expression)
        column = self._eval(expression)
        new_columns.append((new_name, column))
    # Rowwise evaluation: the result shares this table's universe.
    return self._with_same_universe(*new_columns)
def __add__(self, other: Table) -> Table:
    """Build a union of `self` with `other`.

    Semantics: Returns a table C, such that
        - C.columns == self.columns + other.columns
        - C.id == self.id == other.id

    Args:
        other: The other table. `self.id` must be equal `other.id` and
            `self.columns` and `other.columns` must be disjoint (or overlapping names
            are THE SAME COLUMN)

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    pet
    ... 1  Dog
    ... 7  Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    age
    ... 1  10
    ... 7  3
    ... ''')
    >>> t3 = t1 + t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | age
    Cat | 3
    Dog | 10
    """
    # Columns can only be zipped side-by-side over provably equal universes.
    if not G.universe_solver.query_are_equal(self._universe, other._universe):
        raise ValueError(
            "Universes of all arguments of Table.__add__() have to be equal.\n"
            + "Consider using Table.promise_universes_are_equal() to assert it.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    return self.select(*self, *other)
# FIX: missing @property — the doctest accesses `t1.slice.without("age")`
# without calling `slice`.
@property
def slice(self) -> TableSlice:
    """Creates a collection of references to self columns.

    Supports basic column manipulation methods.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t1.slice.without("age")
    TableSlice({'owner': <table1>.owner, 'pet': <table1>.pet})
    """
    return TableSlice(dict(**self), self)
def filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    """Filter a table according to `filter_expression` condition.

    Args:
        filter_expression: `ColumnExpression` that specifies the filtering condition.

    Returns:
        Table: Result has the same schema as `self` and its ids are subset of `self.id`.

    Example:

    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ... 1     3
    ... 7     0
    ... ''')
    >>> filtered = vertices.filter(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(filtered, include_id=False)
    label | outdegree
    7     | 0
    """
    filter_type = self.eval_type(filter_expression)
    if filter_type != dt.BOOL:
        raise TypeError(
            f"Filter argument of Table.filter() has to be bool, found {filter_type}."
        )
    ret = self._filter(filter_expression)
    # If the predicate is an is-not-none test on one of our columns, the
    # surviving rows cannot hold None there, so the dtype can be narrowed.
    if (
        filter_col := expr.get_column_filtered_by_is_none(filter_expression)
    ) is not None and filter_col.table == self:
        name = filter_col.name
        dtype = self._columns[name].dtype
        ret = ret.update_types(**{name: dt.unoptionalize(dtype)})
    return ret
def split(
    self, split_expression: expr.ColumnExpression
) -> tuple[Table[TSchema], Table[TSchema]]:
    """Split a table according to `split_expression` condition.

    Args:
        split_expression: `ColumnExpression` that specifies the split condition.

    Returns:
        positive_table, negative_table: tuple of tables,
        with the same schemas as `self` and with ids that are subsets of `self.id`,
        and provably disjoint.

    Example:

    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ... 1     3
    ... 7     0
    ... ''')
    >>> positive, negative = vertices.split(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(positive, include_id=False)
    label | outdegree
    7     | 0
    >>> pw.debug.compute_and_print(negative, include_id=False)
    label | outdegree
    1     | 3
    """
    positive = self.filter(split_expression)
    negative = self.filter(~split_expression)
    # Register what is known statically: the halves are disjoint and
    # together they reconstitute self.
    universes.promise_are_pairwise_disjoint(positive, negative)
    universes.promise_are_equal(
        self, Table.concat(positive, negative)
    )  # TODO: add API method for this
    return positive, negative
def _filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    """Build the filtered table; assumes the predicate was already type-checked."""
    self._validate_expression(filter_expression)
    mask_column = self._eval(filter_expression)
    assert self._universe == mask_column.universe
    return self._table_with_context(
        clmn.FilterContext(mask_column, self._id_column)
    )
def _gradual_broadcast(
    self,
    threshold_table,
    lower_column,
    value_column,
    upper_column,
) -> Table:
    # Extend self with the `apx_value` column computed by the (name-mangled)
    # private helper below.
    return self + self.__gradual_broadcast(
        threshold_table, lower_column, value_column, upper_column
    )
def __gradual_broadcast(
    self,
    threshold_table,
    lower_column,
    value_column,
    upper_column,
):
    # Evaluate the three threshold columns in the threshold table's context
    # and expose the resulting approximate-value column as a one-column table.
    context = clmn.GradualBroadcastContext(
        self._id_column,
        threshold_table._eval(lower_column),
        threshold_table._eval(value_column),
        threshold_table._eval(upper_column),
    )
    return Table(_columns={"apx_value": context.apx_value_column}, _context=context)
def _forget(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
    mark_forgetting_records: bool,
) -> Table:
    # Wrap the table in a forgetting context driven by the evaluated
    # threshold and time columns. Presumably records whose time falls behind
    # the threshold are retracted — confirm against ForgetContext.
    context = clmn.ForgetContext(
        self._id_column,
        self._eval(threshold_column),
        self._eval(time_column),
        mark_forgetting_records,
    )
    return self._table_with_context(context)
def _forget_immediately(
    self,
) -> Table:
    """Wrap this table in a ForgetImmediatelyContext over the same id column."""
    return self._table_with_context(
        clmn.ForgetImmediatelyContext(self._id_column)
    )
def _filter_out_results_of_forgetting(
    self,
) -> Table:
    """Drop the retraction records that forgetting produced from the stream."""
    # The output universe is a superset of input universe because forgetting entries
    # are filtered out. At each point in time, the set of keys with +1 diff can be
    # bigger than a set of keys with +1 diff in an input table.
    context = clmn.FilterOutForgettingContext(self._id_column)
    return self._table_with_context(context)
def _freeze(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    """Wrap this table in a FreezeContext built from the two evaluated columns."""
    # Evaluation order matters for column registration: threshold, then time.
    threshold = self._eval(threshold_column)
    time = self._eval(time_column)
    return self._table_with_context(
        clmn.FreezeContext(self._id_column, threshold, time)
    )
def _buffer(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    """Wrap this table in a BufferContext built from the two evaluated columns."""
    # Evaluation order matters for column registration: threshold, then time.
    threshold = self._eval(threshold_column)
    time = self._eval(time_column)
    return self._table_with_context(
        clmn.BufferContext(self._id_column, threshold, time)
    )
def difference(self, other: Table) -> Table[TSchema]:
    r"""Restrict self universe to keys not appearing in the other table.

    Args:
        other: table with ids to remove from self.

    Returns:
        Table: table with restricted universe, with the same set of columns

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.difference(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    10  | Alice | 1
    """
    # Set difference over key sets; column contents are untouched.
    context = clmn.DifferenceContext(
        left=self._id_column,
        right=other._id_column,
    )
    return self._table_with_context(context)
def intersect(self, *tables: Table) -> Table[TSchema]:
    """Restrict self universe to keys appearing in all of the tables.

    Args:
        tables: tables keys of which are used to restrict universe.

    Returns:
        Table: table with restricted universe, with the same set of columns

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.intersect(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8   | Alice | 2
    9   | Bob   | 1
    """
    intersecting_universes = (
        self._universe,
        *tuple(table._universe for table in tables),
    )
    universe = G.universe_solver.get_intersection(*intersecting_universes)
    if universe in intersecting_universes:
        # The intersection is provably one of the inputs, so the cheaper
        # restrict-to-universe context suffices.
        context: clmn.Context = clmn.RestrictContext(self._id_column, universe)
    else:
        intersecting_ids = (
            self._id_column,
            *tuple(table._id_column for table in tables),
        )
        context = clmn.IntersectContext(
            intersecting_ids=intersecting_ids,
        )
    return self._table_with_context(context)
def restrict(self, other: TableLike) -> Table[TSchema]:
    """Restrict self universe to keys appearing in other.

    Args:
        other: table which universe is used to restrict universe of self.

    Returns:
        Table: table with restricted universe, with the same set of columns

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... '''
    ... )
    >>> t2.promise_universe_is_subset_of(t1)
    <pathway.Table schema={'cost': <class 'int'>}>
    >>> t3 = t1.restrict(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8   | Alice | 2
    9   | Bob   | 1
    """
    # Unlike intersect(), restrict() demands a *proven* subset relation.
    if not G.universe_solver.query_is_subset(other._universe, self._universe):
        raise ValueError(
            "Table.restrict(): other universe has to be a subset of self universe."
            + "Consider using Table.promise_universe_is_subset_of() to assert it."
        )
    context = clmn.RestrictContext(self._id_column, other._universe)
    # Re-wrap every column in the restricting context.
    columns = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
    }
    return Table(
        _columns=columns,
        _context=context,
    )
def copy(self) -> Table[TSchema]:
    """Returns a copy of a table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1.copy()
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | owner | pet
    7   | Bob   | dog
    8   | Alice | cat
    9   | Bob   | dog
    10  | Alice | dog
    >>> t1 is t2
    False
    """
    # Preserve the dynamic subclass of self (hence type(self), not Table).
    return self._copy_as(type(self))
def _copy_as(self, table_type: type[TTable], /, **kwargs) -> TTable:
    """Rebuild this table's columns in its rowwise context as a `table_type`."""
    ctx = self._rowwise_context
    wrapped: dict = {}
    for name, column in self._columns.items():
        wrapped[name] = self._wrap_column_in_context(ctx, column, name)
    return table_type(_columns=wrapped, _context=ctx, **kwargs)
def groupby(
    self,
    *args: expr.ColumnReference,
    id: expr.ColumnReference | None = None,
    sort_by: expr.ColumnReference | None = None,
    _filter_out_results_of_forgetting: bool = False,
    instance: expr.ColumnReference | None = None,
) -> groupbys.GroupedTable:
    """Groups table by columns from args.

    Note:
        Usually followed by `.reduce()` that aggregates the result and returns a table.

    Args:
        args: columns to group by.
        id: if provided, is the column used to set id's of the rows of the result
        sort_by: if provided, column values are used as sorting keys for particular reducers
        instance: optional argument describing partitioning of the data into separate instances

    Returns:
        GroupedTable: Groupby object.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1.groupby(t1.pet, t1.owner).reduce(t1.owner, t1.pet, ageagg=pw.reducers.sum(t1.age))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | pet | ageagg
    Alice | cat | 8
    Alice | dog | 10
    Bob   | dog | 16
    """
    # `instance` behaves as just another grouping column.
    if instance is not None:
        args = (*args, instance)
    if id is not None:
        # `id` must either be the sole grouping column or coincide with the
        # single positional one.
        if len(args) == 0:
            args = (id,)
        elif len(args) > 1:
            raise ValueError(
                "Table.groupby() cannot have id argument when grouping by multiple columns."
            )
        elif args[0]._column != id._column:
            raise ValueError(
                "Table.groupby() received id argument and is grouped by a single column,"
                + " but the arguments are not equal.\n"
                + "Consider using <table>.groupby(id=...), skipping the positional argument."
            )
    # Reject non-reference arguments early, with a targeted hint for strings.
    for arg in args:
        if not isinstance(arg, expr.ColumnReference):
            if isinstance(arg, str):
                raise ValueError(
                    f"Expected a ColumnReference, found a string. Did you mean <table>.{arg}"
                    + f" instead of {repr(arg)}?"
                )
            else:
                raise ValueError(
                    "All Table.groupby() arguments have to be a ColumnReference."
                )
    return groupbys.GroupedTable.create(
        table=self,
        grouping_columns=args,
        set_id=id is not None,
        sort_by=sort_by,
        _filter_out_results_of_forgetting=_filter_out_results_of_forgetting,
    )
def reduce(
    self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
) -> Table:
    """Reduce a table to a single row.

    Equivalent to `self.groupby().reduce(*args, **kwargs)`.

    Args:
        args: reducer to reduce the table with
        kwargs: reducer to reduce the table with. Its key is the new name of a column.

    Returns:
        Table: Reduced table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1.reduce(ageagg=pw.reducers.argmin(t1.age))
    >>> pw.debug.compute_and_print(t2, include_id=False) # doctest: +ELLIPSIS
    ageagg
    ^...
    >>> t3 = t2.select(t1.ix(t2.ageagg).age, t1.ix(t2.ageagg).pet)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | pet
    7   | dog
    """
    # A groupby with no grouping columns collapses everything into one group.
    return self.groupby().reduce(*args, **kwargs)
def deduplicate(
    self,
    *,
    value: expr.ColumnExpression,
    instance: expr.ColumnExpression | None = None,
    acceptor: Callable[[T, T], bool],
    persistent_id: str | None = None,
) -> Table:
    """Deduplicates rows in `self` on `value` column using acceptor function.

    It keeps rows which where accepted by the acceptor function.
    Acceptor operates on two arguments - current value and the previously accepted value.

    Args:
        value: column expression used for deduplication.
        instance: Grouping column. For rows with different
            values in this column, deduplication will be performed separately.
            Defaults to None.
        acceptor: callback telling whether two values are different.
        persistent_id: (unstable) An identifier, under which the state of the table
            will be persisted or ``None``, if there is no need to persist the state of this table.
            When a program restarts, it restores the state for all input tables according to what
            was saved for their ``persistent_id``. This way it's possible to configure the start of
            computations from the moment they were terminated last time.

    Returns:
        Table: the result of deduplication.

    Example:

    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ...     val | __time__
    ...      1  |     2
    ...      2  |     4
    ...      3  |     6
    ...      4  |     8
    ... '''
    ... )
    >>>
    >>> def acceptor(new_value, old_value) -> bool:
    ...     return new_value >= old_value + 2
    ...
    >>>
    >>> result = table.deduplicate(value=pw.this.val, acceptor=acceptor)
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    val | __time__ | __diff__
    1   | 2        | 1
    1   | 6        | -1
    3   | 6        | 1
    >>>
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ...     val | instance | __time__
    ...      1  |    1     |     2
    ...      2  |    1     |     4
    ...      3  |    2     |     6
    ...      4  |    1     |     8
    ...      4  |    2     |     8
    ...      5  |    1     |    10
    ... '''
    ... )
    >>>
    >>> def acceptor(new_value, old_value) -> bool:
    ...     return new_value >= old_value + 2
    ...
    >>>
    >>> result = table.deduplicate(
    ...     value=pw.this.val, instance=pw.this.instance, acceptor=acceptor
    ... )
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    val | instance | __time__ | __diff__
    1   | 1        | 2        | 1
    3   | 2        | 6        | 1
    1   | 1        | 8        | -1
    4   | 1        | 8        | 1
    """
    # A constant-None instance puts every row into a single dedup group.
    if instance is None:
        instance = expr.ColumnConstExpression(None)
    self._validate_expression(value)
    self._validate_expression(instance)
    value_col = self._eval(value)
    instance_col = self._eval(instance)
    context = clmn.DeduplicateContext(
        value_col,
        (instance_col,),
        acceptor,
        self._id_column,
        persistent_id,
    )
    return self._table_with_context(context)
def ix(
    self, expression: expr.ColumnExpression, *, optional: bool = False, context=None
) -> Table:
    """Reindexes the table using expression values as keys. Uses keys from context, or tries to infer
    proper context from the expression.
    If optional is True, then None in expression values result in None values in the result columns.
    Missing values in table keys result in RuntimeError.

    Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
    (latter results in returning a delayed operation, and should be only used when using `ix` inside
    join().select() or groupby().reduce() sequence).

    Returns:
        Reindexed table with the same set of columns.

    Example:

    >>> import pathway as pw
    >>> t_animals = pw.debug.table_from_markdown('''
    ...   | epithet    | genus
    ... 1 | upupa      | epops
    ... 2 | acherontia | atropos
    ... 3 | bubo       | scandiacus
    ... 4 | dynastes   | hercules
    ... ''')
    >>> t_birds = pw.debug.table_from_markdown('''
    ...   | desc
    ... 2 | hoopoe
    ... 4 | owl
    ... ''')
    >>> ret = t_birds.select(t_birds.desc, latin=t_animals.ix(t_birds.id).genus)
    >>> pw.debug.compute_and_print(ret, include_id=False)
    desc   | latin
    hoopoe | atropos
    owl    | hercules
    """
    if context is None:
        # Try to infer the evaluation context from the tables appearing in
        # the key expression.
        all_tables = collect_tables(expression)
        if len(all_tables) == 0:
            context = thisclass.this
        elif all(tab == all_tables[0] for tab in all_tables):
            context = all_tables[0]
    if context is None:
        # Several distinct tables appear: require real tables sharing one
        # universe and anchor on the first.
        for tab in all_tables:
            if not isinstance(tab, Table):
                raise ValueError("Table expected here.")
        if len(all_tables) == 0:
            raise ValueError("Const value provided.")
        context = all_tables[0]
        for tab in all_tables:
            assert context._universe.is_equal_to(tab._universe)
    if isinstance(context, groupbys.GroupedJoinable):
        context = thisclass.this
    if isinstance(context, thisclass.ThisMetaclass):
        # `pw.this` context: defer the whole ix() until the real table is
        # known (inside join().select() / groupby().reduce()).
        return context._delayed_op(
            lambda table, expression: self.ix(
                expression=expression, optional=optional, context=table
            ),
            expression=expression,
            qualname=f"{self}.ix(...)",
            name="ix",
        )
    restrict_universe = RestrictUniverseDesugaring(context)
    expression = restrict_universe.eval_expression(expression)
    # Materialize the key expression as a column of the context table.
    key_col = context.select(tmp=expression).tmp
    key_dtype = self.eval_type(key_col)
    if (
        optional and not dt.dtype_issubclass(key_dtype, dt.Optional(dt.POINTER))
    ) or (not optional and not isinstance(key_dtype, dt.Pointer)):
        raise TypeError(
            f"Pathway supports indexing with Pointer type only. The type used was {key_dtype}."
        )
    if optional and isinstance(key_dtype, dt.Optional):
        # None keys yield None results, so every output column becomes Optional.
        self_ = self.update_types(
            **{name: dt.Optional(self.typehints()[name]) for name in self.keys()}
        )
    else:
        self_ = self
    return self_._ix(key_col, optional)
def _ix(
    self,
    key_expression: expr.ColumnReference,
    optional: bool,
) -> Table:
    """Reindex this table by the (pointer-typed) column behind `key_expression`."""
    ix_context = clmn.IxContext(
        key_expression._column, self._id_column, optional
    )
    return self._table_with_context(ix_context)
def __lshift__(self, other: Table) -> Table:
    """Alias to update_cells method.

    Updates cells of `self`, breaking ties in favor of the values in `other`.

    Semantics:
        - result.columns == self.columns
        - result.id == self.id
        - conflicts are resolved preferring other's values

    Requires:
        - other.columns ⊆ self.columns
        - other.id ⊆ self.id

    Args:
        other: the other table.

    Returns:
        Table: `self` updated with cells form `other`.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 1
    ... 2 | 9   | Bob   | 1
    ... 3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10  | Alice | 30
    ... ''')
    >>> pw.universes.promise_is_subset_of(t2, t1)
    >>> t3 = t1 << t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8   | Alice | 2
    9   | Bob   | 1
    10  | Alice | 30
    """
    # _stacklevel makes the update_cells deprecation/warning point at the
    # user's `<<` expression rather than this wrapper.
    return self.update_cells(other, _stacklevel=2)
def concat(self, *others: Table[TSchema]) -> Table[TSchema]:
    """Concats `self` with every `other` ∊ `others`.

    Semantics:
        - result.columns == self.columns == other.columns
        - result.id == self.id ∪ other.id

    if self.id and other.id collide, throws an exception.

    Requires:
        - other.columns == self.columns
        - self.id disjoint with other.id

    Args:
        other: the other table.

    Returns:
        Table: The concatenated table. Id's of rows from original tables are preserved.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ...  1 | 10  | Alice | 1
    ...  2 | 9   | Bob   | 1
    ...  3 | 8   | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ... 11 | 11  | Alice | 30
    ... 12 | 12  | Tom   | 40
    ... ''')
    >>> pw.universes.promise_are_pairwise_disjoint(t1, t2)
    >>> t3 = t1.concat(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8   | Alice | 2
    9   | Bob   | 1
    10  | Alice | 1
    11  | Alice | 30
    12  | Tom   | 40
    """
    for other in others:
        if other.keys() != self.keys():
            raise ValueError(
                "columns do not match in the argument of Table.concat()"
            )
    # Per-column least-common-ancestor dtype across every input table.
    schema = {
        key: functools.reduce(
            dt.types_lca,
            [other.schema._dtypes()[key] for other in others],
            self.schema._dtypes()[key],
        )
        for key in self.keys()
    }
    # Cast everything to the common schema, then do the unchecked concat.
    return Table._concat(
        self.cast_to_types(**schema),
        *[other.cast_to_types(**schema) for other in others],
    )
def _concat(self, *others: Table[TSchema]) -> Table[TSchema]:
    # Unchecked variant of concat(): assumes schemas were already unified,
    # but still verifies that the key sets are provably disjoint.
    union_ids = (self._id_column, *(other._id_column for other in others))
    if not G.universe_solver.query_are_disjoint(*(c.universe for c in union_ids)):
        raise ValueError(
            "Universes of the arguments of Table.concat() have to be disjoint.\n"
            + "Consider using Table.promise_universes_are_disjoint() to assert it.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    context = clmn.ConcatUnsafeContext(
        union_ids=union_ids,
        updates=tuple(
            {col_name: other._columns[col_name] for col_name in self.keys()}
            for other in others
        ),
    )
    return self._table_with_context(context)
def update_cells(self, other: Table, _stacklevel: int = 1) -> Table:
    """Updates cells of `self`, breaking ties in favor of the values in `other`.
    Semantics:
    - result.columns == self.columns
    - result.id == self.id
    - conflicts are resolved preferring other's values
    Requires:
    - other.columns ⊆ self.columns
    - other.id ⊆ self.id
    Args:
        other: the other table.
        _stacklevel: internal; offset used so warnings point at user code.
    Returns:
        Table: `self` updated with cells form `other`.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    age | owner | pet
    ... 1 | 10 | Alice | 30
    ... ''')
    >>> pw.universes.promise_is_subset_of(t2, t1)
    >>> t3 = t1.update_cells(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    """
    # Reject columns of `other` that do not exist in `self`.
    if names := (set(other.keys()) - set(self.keys())):
        raise ValueError(
            f"Columns of the argument in Table.update_cells() not present in the updated table: {list(names)}."
        )
    # Identical key sets: an update degenerates to a plain overwrite,
    # so delegate to with_columns (cheaper) and warn the user.
    if self._universe == other._universe:
        warnings.warn(
            "Key sets of self and other in update_cells are the same."
            + " Using with_columns instead of update_cells.",
            stacklevel=_stacklevel + 4,  # +4 skips internal wrapper frames
        )
        return self.with_columns(*(other[name] for name in other))
    # Reconcile dtypes of the updated columns via least common ancestor.
    schema = {
        key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
        for key in other.keys()
    }
    return Table._update_cells(
        self.cast_to_types(**schema), other.cast_to_types(**schema)
    )
def _update_cells(self, other: Table) -> Table:
    """Unsafe backend for :py:meth:`update_cells`.

    Requires ``other``'s universe to be a provable subset of ``self``'s.
    """
    if not other._universe.is_subset_of(self._universe):
        raise ValueError(
            "Universe of the argument of Table.update_cells() needs to be "
            + "a subset of the universe of the updated table.\n"
            + "Consider using Table.promise_is_subset_of() to assert this.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    context = clmn.UpdateCellsContext(
        left=self._id_column,
        right=other._id_column,
        updates={name: other._columns[name] for name in other.keys()},
    )
    return self._table_with_context(context)
def update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
    """Updates rows of `self`, breaking ties in favor for the rows in `other`.
    Semantics:
    - result.columns == self.columns == other.columns
    - result.id == self.id ∪ other.id
    Requires:
    - other.columns == self.columns
    Args:
        other: the other table.
    Returns:
        Table: `self` updated with rows form `other`.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ... 1 | 10 | Alice | 30
    ... 12 | 12 | Tom | 40
    ... ''')
    >>> t3 = t1.update_rows(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    12 | Tom | 40
    """
    if other.keys() != self.keys():
        raise ValueError(
            "Columns do not match between argument of Table.update_rows() and the updated table."
        )
    # Every row of self would be overwritten — the result is just `other`.
    if self._universe.is_subset_of(other._universe):
        warnings.warn(
            "Universe of self is a subset of universe of other in update_rows. Returning other.",
            stacklevel=5,
        )
        return other
    # Reconcile dtypes column-wise via least common ancestor.
    schema = {
        key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
        for key in self.keys()
    }
    union_universes = (self._universe, other._universe)
    universe = G.universe_solver.get_union(*union_universes)
    # If the union adds no new keys, a cell update suffices; otherwise a
    # full row update (which may grow the universe) is needed.
    if universe == self._universe:
        return Table._update_cells(
            self.cast_to_types(**schema), other.cast_to_types(**schema)
        )
    else:
        return Table._update_rows(
            self.cast_to_types(**schema), other.cast_to_types(**schema)
        )
def _update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
    """Unsafe backend for :py:meth:`update_rows`; assumes columns already match."""
    union_ids = (self._id_column, other._id_column)
    context = clmn.UpdateRowsContext(
        updates={col_name: other._columns[col_name] for col_name in self.keys()},
        union_ids=union_ids,
    )
    return self._table_with_context(context)
def with_columns(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Updates columns of `self`, according to args and kwargs.
    See `table.select` specification for evaluation of args and kwargs.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | owner | pet | size
    ... 1 | Tom | 1 | 10
    ... 2 | Bob | 1 | 9
    ... 3 | Tom | 2 | 8
    ... ''')
    >>> t3 = t1.with_columns(*t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet | size
    8 | Tom | 2 | 8
    9 | Bob | 1 | 9
    10 | Tom | 1 | 10
    """
    # Evaluate the requested updates, then overlay them on self's columns;
    # entries from the update win on name clashes.
    updates = self.select(*args, **kwargs)
    merged = {**dict(self), **dict(updates)}
    return self.select(**merged)
def with_id(self, new_index: expr.ColumnReference) -> Table:
    """Set new ids based on another column containing id-typed values.
    To generate ids based on arbitrary valued columns, use `with_id_from`.
    Values assigned must be row-wise unique.
    Args:
        new_index: column to be used as the new index.
    Returns:
        Table with updated ids.
    Example:
    >>> import pytest; pytest.xfail("with_id is hard to test")
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | new_id
    ... 1 | 2
    ... 2 | 3
    ... 3 | 4
    ... ''')
    >>> t3 = t1.promise_universe_is_subset_of(t2).with_id(t2.new_id)
    >>> pw.debug.compute_and_print(t3)
    age owner pet
    ^2 10 Alice 1
    ^3 9 Bob 1
    ^4 8 Alice 2
    """
    # Thin wrapper: all validation (Pointer dtype check) happens in
    # _with_new_index.
    return self._with_new_index(new_index)
def with_id_from(
    self,
    *args: expr.ColumnExpression | Value,
    instance: expr.ColumnReference | None = None,
) -> Table:
    """Compute new ids based on values in columns.
    Ids computed from `columns` must be row-wise unique.
    Args:
        args: columns to be used as primary keys.
        instance: optional instance column mixed into the id computation.
    Returns:
        Table: `self` updated with recomputed ids.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = t1 + t1.select(old_id=t1.id)
    >>> t3 = t2.with_id_from(t2.age)
    >>> pw.debug.compute_and_print(t3) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    | age | owner | pet | old_id
    ^... | 8 | Alice | 2 | ^...
    ^... | 9 | Bob | 1 | ^...
    ^... | 10 | Alice | 1 | ^...
    >>> t4 = t3.select(t3.age, t3.owner, t3.pet, same_as_old=(t3.id == t3.old_id),
    ...     same_as_new=(t3.id == t3.pointer_from(t3.age)))
    >>> pw.debug.compute_and_print(t4) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    | age | owner | pet | same_as_old | same_as_new
    ^... | 8 | Alice | 2 | False | True
    ^... | 9 | Bob | 1 | False | True
    ^... | 10 | Alice | 1 | False | True
    """
    # new_index should be a column, so a little workaround:
    # materialize the pointer expression as a real column via select.
    new_index = self.select(
        ref_column=self.pointer_from(*args, instance=instance)
    ).ref_column
    return self._with_new_index(
        new_index=new_index,
    )
def _with_new_index(
    self,
    new_index: expr.ColumnExpression,
) -> Table:
    """Reindex `self` by evaluating ``new_index`` (must be Pointer-typed).

    Shared backend of :py:meth:`with_id` and :py:meth:`with_id_from`.
    """
    self._validate_expression(new_index)
    index_type = self.eval_type(new_index)
    if not isinstance(index_type, dt.Pointer):
        raise TypeError(
            f"Pathway supports reindexing Tables with Pointer type only. The type used was {index_type}."
        )
    reindex_column = self._eval(new_index)
    # The new index must be computed over the very universe being reindexed.
    assert self._universe == reindex_column.universe
    context = clmn.ReindexContext(reindex_column)
    return self._table_with_context(context)
def rename_columns(self, **kwargs: str | expr.ColumnReference) -> Table:
    """Rename columns according to kwargs.
    Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.
    Args:
        kwargs: mapping from old column names to new names.
    Returns:
        Table: `self` with columns renamed.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.rename_columns(years_old=t1.age, animal=t1.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | years_old | animal
    Alice | 8 | 2
    Alice | 10 | 1
    Bob | 9 | 1
    """
    # Normalize kwargs into a new-name -> old-name mapping, validating that
    # every source column exists.
    mapping: dict[str, str] = {}
    for new_name, source in kwargs.items():
        old_name = source.name if isinstance(source, expr.ColumnReference) else source
        if old_name not in self._columns:
            raise ValueError(f"Column {old_name} does not exist in a given table.")
        mapping[new_name] = old_name
    # Drop the renamed sources first, then re-insert them under their new
    # names (appended at the end, as in a plain rename-by-reinsertion).
    renamed_columns = self._columns.copy()
    for old_name in mapping.values():
        renamed_columns.pop(old_name)
    for new_name, old_name in mapping.items():
        renamed_columns[new_name] = self._columns[old_name]
    columns_wrapped = {
        name: self._wrap_column_in_context(
            self._rowwise_context,
            column,
            mapping.get(name, name),
        )
        for name, column in renamed_columns.items()
    }
    return self._with_same_universe(*columns_wrapped.items())
def rename_by_dict(
    self, names_mapping: dict[str | expr.ColumnReference, str]
) -> Table:
    """Rename columns according to a dictionary.
    Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.
    Args:
        names_mapping: mapping from old column names to new names.
    Returns:
        Table: `self` with columns renamed.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.rename_by_dict({"age": "years_old", t1.pet: "animal"})
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | years_old | animal
    Alice | 8 | 2
    Alice | 10 | 1
    Bob | 9 | 1
    """
    # Invert the mapping into rename_columns-style kwargs
    # (new name -> column reference).
    renames = {
        new_name: self[old_name] for old_name, new_name in names_mapping.items()
    }
    return self.rename_columns(**renames)
def with_prefix(self, prefix: str) -> Table:
    """Rename columns by adding prefix to each name of column.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.with_prefix("u_")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    u_age | u_owner | u_pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 1
    """
    # Delegate to rename_by_dict with every column mapped to its prefixed name.
    return self.rename_by_dict({column: f"{prefix}{column}" for column in self.keys()})
def with_suffix(self, suffix: str) -> Table:
    """Rename columns by adding suffix to each name of column.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.with_suffix("_current")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age_current | owner_current | pet_current
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 1
    """
    # Delegate to rename_by_dict with every column mapped to its suffixed name.
    return self.rename_by_dict({column: f"{column}{suffix}" for column in self.keys()})
def rename(
    self,
    names_mapping: dict[str | expr.ColumnReference, str] | None = None,
    **kwargs: expr.ColumnExpression,
) -> Table:
    """Rename columns according either a dictionary or kwargs.
    If a mapping is provided using a dictionary, ``rename_by_dict`` will be used.
    Otherwise, ``rename_columns`` will be used with kwargs.
    Columns not in keys(kwargs) are not changed. New name of a column must not be ``id``.
    Args:
        names_mapping: mapping from old column names to new names.
        kwargs: mapping from old column names to new names.
    Returns:
        Table: `self` with columns renamed.
    """
    # Dictionary form takes precedence; kwargs are only consulted otherwise.
    if names_mapping is None:
        return self.rename_columns(**kwargs)
    return self.rename_by_dict(names_mapping=names_mapping)
def without(self, *columns: str | expr.ColumnReference) -> Table:
    """Selects all columns without named column references.
    Args:
        columns: columns to be dropped provided by `table.column_name` notation.
    Returns:
        Table: `self` without specified columns.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.without(t1.age, pw.this.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner
    Alice
    Alice
    Bob
    """
    remaining = self._columns.copy()
    # Accept both column references and plain string names.
    for col in columns:
        if isinstance(col, expr.ColumnReference):
            dropped = col.name
        else:
            assert isinstance(col, str)
            dropped = col
        remaining.pop(dropped)
    wrapped = {
        name: self._wrap_column_in_context(self._rowwise_context, column, name)
        for name, column in remaining.items()
    }
    return self._with_same_universe(*wrapped.items())
def having(self, *indexers: expr.ColumnReference) -> Table[TSchema]:
    """Removes rows so that indexed.ix(indexer) is possible when some rows are missing,
    for each indexer in indexers"""
    # One filtered table per indexer; their intersection keeps only rows
    # valid for every indexer.
    filtered: list[Table] = [self._having(indexer) for indexer in indexers]
    if not filtered:
        return self
    if len(filtered) == 1:
        return filtered[0]
    first, *rest = filtered
    return first.intersect(*rest)
def update_types(self, **kwargs: Any) -> Table:
    """Updates types in schema. Has no effect on the runtime."""
    # Validate names up-front so the error is raised before any graph work.
    for name in kwargs:
        if name not in self.keys():
            raise ValueError(
                "Table.update_types() argument name has to be an existing table column name."
            )
    from pathway.internals.common import declare_type

    retyped = {
        name: declare_type(new_type, self[name]) for name, new_type in kwargs.items()
    }
    return self.with_columns(**retyped)
def cast_to_types(self, **kwargs: Any) -> Table:
    """Casts columns to types."""
    # Validate names up-front so the error is raised before any graph work.
    for name in kwargs:
        if name not in self.keys():
            raise ValueError(
                "Table.cast_to_types() argument name has to be an existing table column name."
            )
    from pathway.internals.common import cast

    converted = {
        name: cast(target_type, self[name]) for name, target_type in kwargs.items()
    }
    return self.with_columns(**converted)
def _having(self, indexer: expr.ColumnReference) -> Table[TSchema]:
    """Single-indexer backend for :py:meth:`having`."""
    context = clmn.HavingContext(
        orig_id_column=self._id_column, key_column=indexer._column
    )
    return self._table_with_context(context)
def with_universe_of(self, other: TableLike) -> Table:
    """Returns a copy of self with exactly the same universe as others.
    Semantics: Required precondition self.universe == other.universe
    Used in situations where Pathway cannot deduce equality of universes, but
    those are equal as verified during runtime.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    | pet
    ... 1 | Dog
    ... 7 | Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | age
    ... 1 | 10
    ... 7 | 3
    ... 8 | 100
    ... ''')
    >>> t3 = t2.filter(pw.this.age < 30).with_universe_of(t1)
    >>> t4 = t1 + t3
    >>> pw.debug.compute_and_print(t4, include_id=False)
    pet | age
    Cat | 3
    Dog | 10
    """
    # Already provably equal — nothing to promise, just copy.
    if self._universe == other._universe:
        return self.copy()
    # Record the (unverified-at-build-time) equality, then rewrap columns.
    universes.promise_are_equal(self, other)
    return self._unsafe_promise_universe(other)
def flatten(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Performs a flatmap operation on a column or expression given as a first
    argument. Datatype of this column or expression has to be iterable or Json array.
    Other columns specified in the method arguments are duplicated
    as many times as the length of the iterable.
    It is possible to get ids of source rows by using `table.id` column, e.g.
    `table.flatten(table.column_to_be_flattened, original_id = table.id)`.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    | pet | age
    ... 1 | Dog | 2
    ... 7 | Cat | 5
    ... ''')
    >>> t2 = t1.flatten(t1.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    pet
    C
    D
    a
    g
    o
    t
    >>> t3 = t1.flatten(t1.pet, t1.age)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | age
    C | 5
    D | 2
    a | 5
    g | 2
    o | 2
    t | 5
    """
    # Materialize all requested columns first; the first named argument is
    # the one that gets flattened.
    intermediate_table = self.select(*args, **kwargs)
    all_args = combine_args_kwargs(args, kwargs)
    if not all_args:
        raise ValueError("Table.flatten() cannot have empty arguments list.")
    flatten_name = next(iter(all_args))
    return intermediate_table._flatten(flatten_name)
def _flatten(
    self,
    flatten_name: str,
) -> Table:
    """Backend for :py:meth:`flatten`: build a FlattenContext over one column.

    The flattened column is replaced by the context's result column; every
    other column is rewrapped in the new (expanded) context.
    """
    flatten_column = self._columns[flatten_name]
    assert isinstance(flatten_column, clmn.ColumnWithExpression)
    context = clmn.FlattenContext(
        orig_universe=self._universe,
        flatten_column=flatten_column,
    )
    columns = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
        if name != flatten_name
    }
    # Keep the flattened column first to preserve the original column order.
    return Table(
        _columns={
            flatten_name: context.flatten_result_column,
            **columns,
        },
        _context=context,
    )
def sort(
    self,
    key: expr.ColumnExpression,
    instance: expr.ColumnExpression | None = None,
) -> Table:
    """
    Sorts a table by the specified keys.
    Args:
        table : pw.Table
            The table to be sorted.
        key (ColumnExpression[int | float | datetime | str | bytes]):
            An expression to sort by.
        instance : ColumnReference or None
            An expression with instance. Rows are sorted within an instance.
            ``prev`` and ``next`` columns will only point to rows that have the same instance.
    Returns:
        pw.Table: The sorted table. Contains two columns: ``prev`` and ``next``, containing the pointers
        to the previous and next rows.
    Example:
    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown('''
    ... name | age | score
    ... Alice | 25 | 80
    ... Bob | 20 | 90
    ... Charlie | 30 | 80
    ... ''')
    >>> table = table.with_id_from(pw.this.name)
    >>> table += table.sort(key=pw.this.age)
    >>> pw.debug.compute_and_print(table, include_id=True)
    | name | age | score | prev | next
    ^GBSDEEW... | Alice | 25 | 80 | ^EDPSSB1... | ^DS9AT95...
    ^EDPSSB1... | Bob | 20 | 90 | | ^GBSDEEW...
    ^DS9AT95... | Charlie | 30 | 80 | ^GBSDEEW... |
    >>> table = pw.debug.table_from_markdown('''
    ... name | age | score
    ... Alice | 25 | 80
    ... Bob | 20 | 90
    ... Charlie | 30 | 80
    ... David | 35 | 90
    ... Eve | 15 | 80
    ... ''')
    >>> table = table.with_id_from(pw.this.name)
    >>> table += table.sort(key=pw.this.age, instance=pw.this.score)
    >>> pw.debug.compute_and_print(table, include_id=True)
    | name | age | score | prev | next
    ^GBSDEEW... | Alice | 25 | 80 | ^T0B95XH... | ^DS9AT95...
    ^EDPSSB1... | Bob | 20 | 90 | | ^RT0AZWX...
    ^DS9AT95... | Charlie | 30 | 80 | ^GBSDEEW... |
    ^RT0AZWX... | David | 35 | 90 | ^EDPSSB1... |
    ^T0B95XH... | Eve | 15 | 80 | | ^GBSDEEW...
    """
    # Wrap `instance` (possibly None or a plain value) into a proper
    # column expression before evaluation.
    instance = clmn.ColumnExpression._wrap(instance)
    context = clmn.SortingContext(
        self._eval(key),
        self._eval(instance),
    )
    return Table(
        _columns={
            "prev": context.prev_column,
            "next": context.next_column,
        },
        _context=context,
    )
def _set_source(self, source: OutputHandle):
    """Attach `source` as this table's origin and backfill missing lineage.

    Lineage is only set where absent, so already-attributed columns and
    universes keep their original provenance.
    """
    self._source = source
    id_column = self._id_column
    if not hasattr(id_column, "lineage"):
        id_column.lineage = clmn.ColumnLineage(name="id", source=source)
    for name, column in self._columns.items():
        if not hasattr(column, "lineage"):
            column.lineage = clmn.ColumnLineage(name=name, source=source)
    if not hasattr(self._universe, "lineage"):
        self._universe.lineage = clmn.Lineage(source=source)
def _unsafe_promise_universe(self, other: TableLike) -> Table:
    """Rewrap columns as if self had `other`'s universe (no verification)."""
    ctx = clmn.PromiseSameUniverseContext(self._id_column, other._id_column)
    return self._table_with_context(ctx)
def _validate_expression(self, expression: expr.ColumnExpression):
    """Raise if `expression` references columns from a different universe."""
    for dep in expression._dependencies_above_reducer():
        if self._universe != dep._column.universe:
            raise ValueError(
                f"You cannot use {dep.to_column_expression()} in this context."
                + " Its universe is different than the universe of the table the method"
                + " was called on. You can use <table1>.with_universe_of(<table2>)"
                + " to assign universe of <table2> to <table1> if you're sure their"
                + " sets of keys are equal."
            )
def _wrap_column_in_context(
    self,
    context: clmn.Context,
    column: clmn.Column,
    name: str,
    lineage: clmn.Lineage | None = None,
) -> clmn.Column:
    """Contextualize column by wrapping it in expression.

    Args:
        context: target evaluation context for the produced column.
        column: the source column to wrap.
        name: the name the column is referenced under.
        lineage: optional provenance to attach to the new column.
    """
    expression = expr.ColumnReference(_table=self, _column=column, _name=name)
    return expression._column_with_expression_cls(
        context=context,
        universe=context.universe,
        expression=expression,
        lineage=lineage,
    )
def _table_with_context(self, context: clmn.Context) -> Table:
    """Build a new Table with every column of self rewrapped in `context`."""
    columns = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
    }
    return Table(
        _columns=columns,
        _context=context,
    )
def _table_restricted_context(self) -> clmn.TableRestrictedRowwiseContext:
    """Return a rowwise context restricted to this table."""
    restricted = clmn.TableRestrictedRowwiseContext(self._id_column, self)
    return restricted
def _eval(
    self, expression: expr.ColumnExpression, context: clmn.Context | None = None
) -> clmn.ColumnWithExpression:
    """Desugar expression and wrap it in given context.

    Falls back to this table's rowwise context when none is supplied.
    """
    ctx = self._rowwise_context if context is None else context
    return expression._column_with_expression_cls(
        context=ctx,
        universe=ctx.universe,
        expression=expression,
    )
def _from_schema(cls: type[TTable], schema: type[Schema]) -> TTable:
    """Create an empty materialized table with columns described by `schema`."""
    universe = Universe()
    context = clmn.MaterializedContext(universe, schema.universe_properties)
    columns = {
        name: clmn.MaterializedColumn(
            universe,
            schema.column_properties(name),
        )
        for name in schema.column_names()
    }
    return cls(_columns=columns, _schema=schema, _context=context)
def __repr__(self) -> str:
    """Concise debug representation showing the column typehints."""
    hints = dict(self.typehints())
    return f"<pathway.Table schema={hints}>"
def _with_same_universe(
    self,
    *columns: tuple[str, clmn.Column],
    schema: type[Schema] | None = None,
) -> Table:
    """Build a new Table over self's rowwise context from (name, column) pairs."""
    return Table(
        _columns=dict(columns),
        _schema=schema,
        _context=self._rowwise_context,
    )
def _sort_columns_by_other(self, other: Table):
    """Reorder self._columns in place to match `other`'s column order."""
    assert self.keys() == other.keys()
    self._columns = {name: self._columns[name] for name in other.keys()}
def _operator_dependencies(self) -> StableSet[Table]:
    """A table depends only on itself for operator scheduling."""
    return StableSet((self,))
def debug(self, name: str):
    """Register a debug operator observing this table under `name`; returns self."""

    def build(id):
        return DebugOperator(name, id)

    def configure(operator):
        return operator(self)

    G.add_operator(build, configure)
    return self
def to(self, sink: DataSink) -> None:
    """Write this table to `sink` (registers the output in the dataflow)."""
    from pathway.internals import table_io
    table_io.table_to_datasink(self, sink)
def _materialize(self, universe: Universe):
    """Rebuild this table as materialized columns over `universe`."""
    context = clmn.MaterializedContext(universe)
    columns = {
        name: clmn.MaterializedColumn(universe, column.properties)
        for (name, column) in self._columns.items()
    }
    return Table(
        _columns=columns,
        _schema=self.schema,
        _context=context,
    )
def pointer_from(
    self, *args: Any, optional=False, instance: expr.ColumnReference | None = None
):
    """Pseudo-random hash of its argument. Produces pointer types. Applied column-wise.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age owner pet
    ... 1 10 Alice dog
    ... 2 9 Bob dog
    ... 3 8 Alice cat
    ... 4 7 Bob dog''')
    >>> g = t1.groupby(t1.owner).reduce(refcol = t1.pointer_from(t1.owner)) # g.id == g.refcol
    >>> pw.debug.compute_and_print(g.select(test = (g.id == g.refcol)), include_id=False)
    test
    True
    True
    """
    # The instance column, when given, participates in the hash as a
    # trailing argument.
    key_parts = args if instance is None else (*args, instance)
    # XXX verify types for the table primary_keys
    return expr.PointerExpression(self, *key_parts, optional=optional)
def ix_ref(
    self,
    *args: expr.ColumnExpression | Value,
    optional: bool = False,
    context=None,
    instance: expr.ColumnReference | None = None,
):
    """Reindexes the table using expressions as primary keys.
    Uses keys from context, or tries to infer proper context from the expression.
    If optional is True, then None in expression values result in None values in the result columns.
    Missing values in table keys result in RuntimeError.
    Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
    (latter results in returning a delayed operation, and should be only used when using `ix` inside
    join().select() or groupby().reduce() sequence).
    Args:
        args: Column references.
    Returns:
        Row: indexed row.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... name | pet
    ... Alice | dog
    ... Bob | cat
    ... Carole | cat
    ... David | dog
    ... ''')
    >>> t2 = t1.with_id_from(pw.this.name)
    >>> t2 = t2.select(*pw.this, new_value=pw.this.ix_ref("Alice").pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    name | pet | new_value
    Alice | dog | dog
    Bob | cat | dog
    Carole | cat | dog
    David | dog | dog
    Tables obtained by a groupby/reduce scheme always have primary keys:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... name | pet
    ... Alice | dog
    ... Bob | cat
    ... Carole | cat
    ... David | cat
    ... ''')
    >>> t2 = t1.groupby(pw.this.pet).reduce(pw.this.pet, count=pw.reducers.count())
    >>> t3 = t1.select(*pw.this, new_value=t2.ix_ref(t1.pet).count)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    name | pet | new_value
    Alice | dog | 1
    Bob | cat | 3
    Carole | cat | 3
    David | cat | 3
    Single-row tables can be accessed via `ix_ref()`:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... name | pet
    ... Alice | dog
    ... Bob | cat
    ... Carole | cat
    ... David | cat
    ... ''')
    >>> t2 = t1.reduce(count=pw.reducers.count())
    >>> t3 = t1.select(*pw.this, new_value=t2.ix_ref(context=t1).count)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    name | pet | new_value
    Alice | dog | 4
    Bob | cat | 4
    Carole | cat | 4
    David | cat | 4
    """
    # Hash the key expressions into a pointer, then index with plain ix.
    return self.ix(
        self.pointer_from(*args, optional=optional, instance=instance),
        optional=optional,
        context=context,
    )
def _subtables(self) -> StableSet[Table]:
    """A plain table has no subtables besides itself."""
    return StableSet((self,))
def _substitutions(
    self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
    """A plain table substitutes nothing: itself plus an empty mapping."""
    substitutions: dict[expr.InternalColRef, expr.ColumnExpression] = {}
    return self, substitutions
def typehints(self) -> Mapping[str, Any]:
    """
    Return the types of the columns as a dictionary.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t1.typehints()
    mappingproxy({'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>})
    """
    return self.schema.typehints()
def eval_type(self, expression: expr.ColumnExpression) -> dt.DType:
    """Infer the dtype of `expression` when evaluated rowwise on this table."""
    interpreter = self._rowwise_context._get_type_interpreter()
    evaluated = interpreter.eval_expression(expression, state=TypeInterpreterState())
    return evaluated._dtype
def _auto_live(self) -> Table:
    """Make self automatically live in interactive mode"""
    from pathway.internals.interactive import is_interactive_mode_enabled

    return self.live() if is_interactive_mode_enabled() else self
def live(self) -> LiveTable[TSchema]:
    """Wrap self in a LiveTable (experimental; emits a warning)."""
    from pathway.internals.interactive import LiveTable
    warnings.warn("live tables are an experimental feature", stacklevel=2)
    return LiveTable._create(self)
def retrieve_prev_next_values(
    ordered_table: pw.Table, value: pw.ColumnReference | None = None
) -> pw.Table:
    """
    Retrieve, for each row, a pointer to the first row in the ordered_table that \
    contains a non-"None" value, based on the orders defined by the prev and next columns.
    Args:
        ordered_table (pw.Table): Table with three columns: value, prev, next.
            The prev and next columns contain pointers to other rows.
        value (Optional[pw.ColumnReference]): Column reference pointing to the column containing values.
            If not provided, assumes the column name is "value".
    Returns:
        pw.Table: Table with two columns: prev_value and next_value.
        The prev_value column contains the values of the first row, according \
        to the order defined by the column next, with a value different from None.
        The next_value column contains the values of the first row, according \
        to the order defined by the column prev, with a value different from None.
    """
    # Resolve the value column: default name, explicit reference, or error.
    if value is None:
        value = ordered_table.value
    elif isinstance(value, pw.ColumnReference):
        value = ordered_table[value]
    elif isinstance(value, str):
        raise ValueError(
            "sorting.retrieving_prev_next_values():"
            + "Invalid column reference for the parameter value"
            + f", found a string. Did you mean this.{value} instead of {repr(value)}?"
        )
    else:
        raise ValueError(
            "sorting.retrieving_prev_next_values():"
            + "Invalid column reference for the parameter value."
        )
    ordered_table = ordered_table.select(pw.this.prev, pw.this.next, value=value)
    return _retrieving_prev_next_value(
        ordered_table=ordered_table
    ).ordered_table  # type: ignore
The provided code snippet includes necessary dependencies for implementing the `interpolate` function. Write a Python function `def interpolate( self: Table, timestamp: ColumnReference, *values: ColumnReference, mode: InterpolateMode = InterpolateMode.LINEAR, )` to solve the following problem:
Interpolates missing values in a column using the previous and next values based on a timestamps column. Args: timestamp (ColumnReference): Reference to the column containing timestamps. *values (ColumnReference): References to the columns containing values to be interpolated. mode (InterpolateMode, optional): The interpolation mode. Currently,\ only InterpolateMode.LINEAR is supported. Default is InterpolateMode.LINEAR. Returns: Table: A new table with the interpolated values. Raises: ValueError: If the columns are not ColumnReference or if the interpolation mode is not supported. Note: - The interpolation is performed based on linear interpolation between the previous and next values. - If a value is missing at the beginning or end of the column, no interpolation is performed. Example: >>> import pathway as pw >>> table = pw.debug.table_from_markdown(''' ... timestamp | values_a | values_b ... 1 | 1 | 10 ... 2 | | ... 3 | 3 | ... 4 | | ... 5 | | ... 6 | 6 | 60 ... ''') >>> table = table.interpolate(pw.this.timestamp, pw.this.values_a, pw.this.values_b) >>> pw.debug.compute_and_print(table, include_id=False) timestamp | values_a | values_b 1 | 1 | 10 2 | 2.0 | 20.0 3 | 3 | 30.0 4 | 4.0 | 40.0 5 | 5.0 | 50.0 6 | 6 | 60
Here is the function:
def interpolate(
    self: Table,
    timestamp: ColumnReference,
    *values: ColumnReference,
    mode: InterpolateMode = InterpolateMode.LINEAR,
):
    """
    Interpolates missing values in a column using the previous and next values based on a timestamps column.
    Args:
        timestamp (ColumnReference): Reference to the column containing timestamps.
        *values (ColumnReference): References to the columns containing values to be interpolated.
        mode (InterpolateMode, optional): The interpolation mode. Currently,\
            only InterpolateMode.LINEAR is supported. Default is InterpolateMode.LINEAR.
    Returns:
        Table: A new table with the interpolated values.
    Raises:
        ValueError: If the columns are not ColumnReference or if the interpolation mode is not supported.
    Note:
    - The interpolation is performed based on linear interpolation between the previous and next values.
    - If a value is missing at the beginning or end of the column, no interpolation is performed.
    Example:
    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown('''
    ... timestamp | values_a | values_b
    ... 1 | 1 | 10
    ... 2 | |
    ... 3 | 3 |
    ... 4 | |
    ... 5 | |
    ... 6 | 6 | 60
    ... ''')
    >>> table = table.interpolate(pw.this.timestamp, pw.this.values_a, pw.this.values_b)
    >>> pw.debug.compute_and_print(table, include_id=False)
    timestamp | values_a | values_b
    1 | 1 | 10
    2 | 2.0 | 20.0
    3 | 3 | 30.0
    4 | 4.0 | 40.0
    5 | 5.0 | 50.0
    6 | 6 | 60
    """
    from pathway.stdlib.indexing.sorting import retrieve_prev_next_values

    if mode != InterpolateMode.LINEAR:
        raise ValueError(
            """interpolate: Invalid mode. Only Interpolate.LINEAR is currently available."""
        )
    # Validate the timestamp argument: must be a ColumnReference.
    if isinstance(timestamp, ColumnReference):
        timestamp = self[timestamp]
    elif isinstance(timestamp, str):
        raise ValueError(
            "Table.interpolate(): Invalid column reference for the parameter timestamp,"
            + f" found a string. Did you mean this.{timestamp} instead of {repr(timestamp)}?"
        )
    else:
        raise ValueError(
            "Table.interpolate(): Invalid column reference for the parameter timestamp."
        )
    # Ordering (prev/next pointers) by timestamp is shared by all value columns.
    ordered_table = self.sort(key=timestamp)
    table = self
    for value in values:
        # Validate each value column the same way as the timestamp.
        if isinstance(value, ColumnReference):
            value = self[value]
        elif isinstance(value, str):
            raise ValueError(
                "Table.interpolate(): Invalid column reference for the parameter value,"
                + f" found a string. Did you mean this.{value} instead of {repr(value)}?"
            )
        else:
            raise ValueError(
                "Table.interpolate(): Invalid column reference for the parameter value."
            )
        # Interpolating the timestamp column against itself is meaningless and
        # would clash in the select below. This used to be a bare `assert`,
        # which is stripped under `python -O` and raised AssertionError instead
        # of the ValueError documented above — raise explicitly instead.
        if timestamp.name == value.name:
            raise ValueError(
                "Table.interpolate(): The timestamp column cannot be interpolated."
            )
        # Attach this column's values to the shared ordering, look up the
        # nearest non-None neighbors, then linearly interpolate between them.
        sorted_timestamp_value = ordered_table + self.select(
            timestamp=timestamp, value=value
        )
        table_with_prev_next = retrieve_prev_next_values(sorted_timestamp_value)
        interpolated_table = _compute_interpolate(
            table_with_prev_next + sorted_timestamp_value
        )
        table = table.with_columns(
            **{value.name: interpolated_table.interpolated_value}
        )
    return table
from __future__ import annotations
import math
import pathway.internals as pw
from pathway.internals.fingerprints import fingerprint
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import udf
from pathway.stdlib.graphs.common import Clustering, Edge, Weight
from pathway.stdlib.graphs.graph import WeightedGraph
from pathway.stdlib.utils.filtering import argmax_rows
def _one_step(
    G: WeightedGraph, clustering: pw.Table[Clustering], iter
) -> pw.Table[Clustering]:
    r"""
    Perform one parallel round of cluster moves for Louvain community detection.

    First calls ``_propose_clusters`` to compute a possible new cluster for each
    vertex. Then computes an independent set of movements that can be safely
    executed in parallel (i.e., no cluster participates in two movements) and
    applies it to ``clustering``.

    In some cases this might be as slow as a sequential implementation, however
    it uses parallel movements whenever they are easily detectable.

    Args:
        G: weighted graph being clustered.
        clustering: current vertex-to-cluster assignment (one row per vertex).
        iter: round number, mixed into the random priorities so that every
            round draws fresh tie-breaking values.

    Returns:
        The updated clustering, over the same universe as the input clustering.
    """
    """
    Most of the code within this function handles the detection of parallel
    movements that can be safely executed.
    """
    # Select vertices that actually move; attach the current cluster (uc) and
    # the proposed cluster (vc) of each vertex to determine edge adjacency in
    # the cluster graph.
    proposed_clusters = _propose_clusters(G.WE, clustering)
    candidate_moves = proposed_clusters.filter(
        proposed_clusters.c != clustering.ix(proposed_clusters.id).c
    ).with_columns(
        u=pw.this.id,
        uc=clustering.ix(pw.this.id).c,
        vc=pw.this.c,
        total_weight=pw.this.total_weight,
    )
    """
    find independent set of edges in the cluster graph
    by selecting local maxima over random priority
    """
    def rand(x) -> int:
        # Deterministic pseudo-random priority derived from (id, round number).
        return fingerprint((x, iter), format="i64")
    # sample priorities
    candidate_moves += candidate_moves.select(r=rand(candidate_moves.id))
    # compute maximum priority over all incident edges
    out_priorities = candidate_moves.select(
        candidate_moves.r, c=candidate_moves.uc, total_weight=pw.this.total_weight
    )
    in_priorities = candidate_moves.select(
        candidate_moves.r, c=candidate_moves.vc, total_weight=pw.this.total_weight
    )
    all_priorities = pw.Table.concat_reindex(out_priorities, in_priorities)
    cluster_max_priority = argmax_rows(
        all_priorities, all_priorities.c, what=all_priorities.r
    ).with_id(pw.this.c)
    # take edges e with same priority as the max priorities of clusters
    # containing the endpoints of e
    delta = (
        candidate_moves.filter(
            (candidate_moves.r == cluster_max_priority.ix(candidate_moves.uc).r)
            & (candidate_moves.r == cluster_max_priority.ix(candidate_moves.vc).r)
        )
        .with_id(pw.this.u)
        .select(c=pw.this.vc, total_weight=pw.this.total_weight)
    )
    # Apply the selected movements; the universe (vertex set) is unchanged,
    # only cluster assignments are updated.
    return clustering.update_rows(delta).with_universe_of(clustering)
class Clustering(pw.Schema):
    r"""
    Class describing the cluster membership relation:
    the vertex given by the row id belongs to cluster ``c``.
    """
    c: pw.Pointer[Cluster]
class WeightedGraph(Graph):
    r"""
    Basic class representing an undirected, weighted (multi)graph.

    ``WE`` holds the weighted edge set (edges extended with a ``weight`` column).
    """
    WE: pw.Table[Edge | Weight]
    def from_vertices_and_weighted_edges(V, WE):
        # Alternate constructor: the weighted edge table doubles as the plain
        # edge table of the underlying Graph.
        return WeightedGraph(V, WE, WE)
    def contracted_to_weighted_simple_graph(
        self,
        clustering: pw.Table[Clustering],
        **reducer_expressions: pw.ReducerExpression,
    ) -> WeightedGraph:
        # Contract clusters, then merge parallel edges; ``reducer_expressions``
        # decides how the columns of duplicate edges (e.g. weights) aggregate.
        contracted_graph = self.contracted_to_multi_graph(clustering)
        contracted_graph.WE = contracted_graph.WE.groupby(
            contracted_graph.WE.u, contracted_graph.WE.v
        ).reduce(contracted_graph.WE.u, contracted_graph.WE.v, **reducer_expressions)
        return contracted_graph
    def contracted_to_multi_graph(
        self,
        clustering: pw.Table[Clustering],
    ) -> WeightedGraph:
        # Vertices without an assigned cluster become singleton clusters
        # before contraction.
        full_clustering = _extended_to_full_clustering(self.V, clustering)
        return _contract_weighted(self.WE, full_clustering)
    def without_self_loops(self) -> WeightedGraph:
        # Drop edges whose endpoints coincide.
        new_edges = self.WE.filter(self.WE.u != self.WE.v)
        return WeightedGraph.from_vertices_and_weighted_edges(self.V, new_edges)
The provided code snippet includes necessary dependencies for implementing the `_louvain_level` function. Write a Python function `def _louvain_level(G: WeightedGraph) -> pw.Table[Clustering]` to solve the following problem:
This function, given a weighted graph, finds a clustering that is a local maximum with respect to the objective function as defined by the Louvain community detection algorithm.
Here is the function:
def _louvain_level(G: WeightedGraph) -> pw.Table[Clustering]:
    r"""
    This function, given a weighted graph, finds a clustering that
    is a local maximum with respect to the objective function as defined
    by the Louvain community detection algorithm.

    It starts from singleton clusters and iterates ``_one_step`` (via
    ``pw.iterate``) until a fixed point is reached; the helper
    ``total_weight`` column is stripped from the result.
    """
    # arbitrary new ID generation;
    # without re-generating we sometimes end up in a situation in which
    # a cluster of id X does not contain a vertex of id X
    clustering = G.V.select(c=G.V.pointer_from(G.V.id), total_weight=G.V.apx_value)
    return pw.iterate(
        lambda clustering, V, WE: dict(
            clustering=_one_step(
                WeightedGraph.from_vertices_and_weighted_edges(V, WE),
                clustering,
                # TODO: the constant below needs to be replaced by the actual
                # iteration number (used as a random seed in _one_step)
                42,
            )
        ),
        V=G.V,
        WE=G.WE,
        clustering=clustering,
    ).clustering.without(pw.this.total_weight)
166,660 | from __future__ import annotations
import math
import pathway.internals as pw
from pathway.internals.fingerprints import fingerprint
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import udf
from pathway.stdlib.graphs.common import Clustering, Edge, Weight
from pathway.stdlib.graphs.graph import WeightedGraph
from pathway.stdlib.utils.filtering import argmax_rows
def _one_step(
    G: WeightedGraph, clustering: pw.Table[Clustering], iter
) -> pw.Table[Clustering]:
    r"""
    Perform one parallel round of cluster moves for Louvain community detection.

    First calls ``_propose_clusters`` to compute a possible new cluster for each
    vertex. Then computes an independent set of movements that can be safely
    executed in parallel (i.e., no cluster participates in two movements) and
    applies it to ``clustering``.

    In some cases this might be as slow as a sequential implementation, however
    it uses parallel movements whenever they are easily detectable.

    Args:
        G: weighted graph being clustered.
        clustering: current vertex-to-cluster assignment (one row per vertex).
        iter: round number, mixed into the random priorities so that every
            round draws fresh tie-breaking values.

    Returns:
        The updated clustering, over the same universe as the input clustering.
    """
    """
    Most of the code within this function handles the detection of parallel
    movements that can be safely executed.
    """
    # Select vertices that actually move; attach the current cluster (uc) and
    # the proposed cluster (vc) of each vertex to determine edge adjacency in
    # the cluster graph.
    proposed_clusters = _propose_clusters(G.WE, clustering)
    candidate_moves = proposed_clusters.filter(
        proposed_clusters.c != clustering.ix(proposed_clusters.id).c
    ).with_columns(
        u=pw.this.id,
        uc=clustering.ix(pw.this.id).c,
        vc=pw.this.c,
        total_weight=pw.this.total_weight,
    )
    """
    find independent set of edges in the cluster graph
    by selecting local maxima over random priority
    """
    def rand(x) -> int:
        # Deterministic pseudo-random priority derived from (id, round number).
        return fingerprint((x, iter), format="i64")
    # sample priorities
    candidate_moves += candidate_moves.select(r=rand(candidate_moves.id))
    # compute maximum priority over all incident edges
    out_priorities = candidate_moves.select(
        candidate_moves.r, c=candidate_moves.uc, total_weight=pw.this.total_weight
    )
    in_priorities = candidate_moves.select(
        candidate_moves.r, c=candidate_moves.vc, total_weight=pw.this.total_weight
    )
    all_priorities = pw.Table.concat_reindex(out_priorities, in_priorities)
    cluster_max_priority = argmax_rows(
        all_priorities, all_priorities.c, what=all_priorities.r
    ).with_id(pw.this.c)
    # take edges e with same priority as the max priorities of clusters
    # containing the endpoints of e
    delta = (
        candidate_moves.filter(
            (candidate_moves.r == cluster_max_priority.ix(candidate_moves.uc).r)
            & (candidate_moves.r == cluster_max_priority.ix(candidate_moves.vc).r)
        )
        .with_id(pw.this.u)
        .select(c=pw.this.vc, total_weight=pw.this.total_weight)
    )
    # Apply the selected movements; the universe (vertex set) is unchanged,
    # only cluster assignments are updated.
    return clustering.update_rows(delta).with_universe_of(clustering)
class WeightedGraph(Graph):
    r"""
    Basic class representing an undirected, weighted (multi)graph.

    ``WE`` holds the weighted edge set (edges extended with a ``weight`` column).
    """
    WE: pw.Table[Edge | Weight]
    def from_vertices_and_weighted_edges(V, WE):
        # Alternate constructor: the weighted edge table doubles as the plain
        # edge table of the underlying Graph.
        return WeightedGraph(V, WE, WE)
    def contracted_to_weighted_simple_graph(
        self,
        clustering: pw.Table[Clustering],
        **reducer_expressions: pw.ReducerExpression,
    ) -> WeightedGraph:
        # Contract clusters, then merge parallel edges; ``reducer_expressions``
        # decides how the columns of duplicate edges (e.g. weights) aggregate.
        contracted_graph = self.contracted_to_multi_graph(clustering)
        contracted_graph.WE = contracted_graph.WE.groupby(
            contracted_graph.WE.u, contracted_graph.WE.v
        ).reduce(contracted_graph.WE.u, contracted_graph.WE.v, **reducer_expressions)
        return contracted_graph
    def contracted_to_multi_graph(
        self,
        clustering: pw.Table[Clustering],
    ) -> WeightedGraph:
        # Vertices without an assigned cluster become singleton clusters
        # before contraction.
        full_clustering = _extended_to_full_clustering(self.V, clustering)
        return _contract_weighted(self.WE, full_clustering)
    def without_self_loops(self) -> WeightedGraph:
        # Drop edges whose endpoints coincide.
        new_edges = self.WE.filter(self.WE.u != self.WE.v)
        return WeightedGraph.from_vertices_and_weighted_edges(self.V, new_edges)
def _louvain_level_fixed_iterations(G: WeightedGraph, number_of_iterations):
    r"""
    Run a fixed number of ``_one_step`` rounds of the Louvain local-move phase
    on ``G``, starting from singleton clusters, and return the resulting
    clustering (without the helper ``total_weight`` column).
    """
    # arbitrary new ID generation;
    # without re-generating we sometimes end up in a situation in which
    # a cluster of id X does not contain a vertex of id X
    clustering = G.V.select(c=G.V.pointer_from(G.V.id), total_weight=G.V.apx_value)
    for iter in range(number_of_iterations):
        clustering = _one_step(G, clustering, iter)
    return clustering.without(clustering.total_weight)
166,661 | from __future__ import annotations
import math
import pathway.internals as pw
from pathway.internals.fingerprints import fingerprint
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import udf
from pathway.stdlib.graphs.common import Clustering, Edge, Weight
from pathway.stdlib.graphs.graph import WeightedGraph
from pathway.stdlib.utils.filtering import argmax_rows
class Edge(pw.Schema):
    r"""
    Basic edge class, holds pointers to the endpoint vertices.
    """
    u: pw.Pointer[Vertex]
    v: pw.Pointer[Vertex]
class Weight(pw.Schema):
    r"""
    Basic weight class. To be used as an extension of Vertex / Edge.
    """
    weight: float
def _approximate_total_weight(edges: pw.Table[Edge | Weight], epsilon):
    r"""
    Compute the total edge weight together with a (1 + epsilon)-approximation
    bracket: ``lower`` and ``upper`` are the consecutive powers of
    ``1 + epsilon`` enclosing the exact sum ``value``.
    """
    # compute total weight
    exact = edges.groupby().reduce(m=pw.reducers.sum(edges.weight))
    # return approximate total weight
    return exact.select(
        lower=pw.apply_with_type(
            lambda x: (1 + epsilon) ** math.floor(math.log(x, 1 + epsilon)),
            float,
            exact.m,
        ),
        value=exact.m,
        upper=pw.apply_with_type(
            lambda x: (1 + epsilon) ** (math.floor(math.log(x, 1 + epsilon) + 1)),
            float,
            exact.m,
        ),
    )
166,662 | from __future__ import annotations
import math
import pathway.internals as pw
from pathway.internals.fingerprints import fingerprint
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import udf
from pathway.stdlib.graphs.common import Clustering, Edge, Weight
from pathway.stdlib.graphs.graph import WeightedGraph
from pathway.stdlib.utils.filtering import argmax_rows
class Clustering(pw.Schema):
    r"""
    Class describing the cluster membership relation:
    the vertex given by the row id belongs to cluster ``c``.
    """
    c: pw.Pointer[Cluster]
class WeightedGraph(Graph):
    r"""
    Basic class representing an undirected, weighted (multi)graph.

    ``WE`` holds the weighted edge set (edges extended with a ``weight`` column).
    """
    WE: pw.Table[Edge | Weight]
    def from_vertices_and_weighted_edges(V, WE):
        # Alternate constructor: the weighted edge table doubles as the plain
        # edge table of the underlying Graph.
        return WeightedGraph(V, WE, WE)
    def contracted_to_weighted_simple_graph(
        self,
        clustering: pw.Table[Clustering],
        **reducer_expressions: pw.ReducerExpression,
    ) -> WeightedGraph:
        # Contract clusters, then merge parallel edges; ``reducer_expressions``
        # decides how the columns of duplicate edges (e.g. weights) aggregate.
        contracted_graph = self.contracted_to_multi_graph(clustering)
        contracted_graph.WE = contracted_graph.WE.groupby(
            contracted_graph.WE.u, contracted_graph.WE.v
        ).reduce(contracted_graph.WE.u, contracted_graph.WE.v, **reducer_expressions)
        return contracted_graph
    def contracted_to_multi_graph(
        self,
        clustering: pw.Table[Clustering],
    ) -> WeightedGraph:
        # Vertices without an assigned cluster become singleton clusters
        # before contraction.
        full_clustering = _extended_to_full_clustering(self.V, clustering)
        return _contract_weighted(self.WE, full_clustering)
    def without_self_loops(self) -> WeightedGraph:
        # Drop edges whose endpoints coincide.
        new_edges = self.WE.filter(self.WE.u != self.WE.v)
        return WeightedGraph.from_vertices_and_weighted_edges(self.V, new_edges)
The provided code snippet includes necessary dependencies for implementing the `exact_modularity` function. Write a Python function `def exact_modularity( G: WeightedGraph, C: pw.Table[Clustering], round_digits=16 ) -> pw.Table` to solve the following problem:
This function computes the modularity of a given weighted graph G with respect to clustering C. This implementation is meant to be used for testing / development, as computing the exact value requires knowing the exact sum of the edge weights, which creates long dependency chains and may be slow. This implementation rounds the modularity to round_digits decimal places (default is 16); for result res it returns round(res, ndigits=round_digits).
Here is the function:
def exact_modularity(
    G: WeightedGraph, C: pw.Table[Clustering], round_digits=16
) -> pw.Table:
    r"""
    This function computes modularity of a given weighted graph G with
    respect to clustering C.
    This implementation is meant to be used for testing / development,
    as computing exact value requires us to know the exact sum of the edge weights,
    which creates long dependency chains, and may be slow.
    This implementation rounds the modularity to round_digits decimal places
    (default is 16); for result res it returns round(res, ndigits=round_digits).
    """
    # One row per non-empty cluster, id-ed by the cluster pointer.
    clusters = C.groupby(id=C.c).reduce()
    # Per-cluster degree: sum of weights of edges whose u-endpoint lies in the
    # cluster; clusters with no such edges default to 0.0.
    cluster_degrees = clusters.with_columns(degree=0.0).update_rows(
        G.WE.with_columns(c=C.ix(G.WE.u).c)
        .groupby(id=pw.this.c)
        .reduce(degree=pw.reducers.sum(pw.this.weight))
    )
    # Per-cluster internal weight: sum of weights of intra-cluster edges;
    # defaults to 0.0 when the cluster has none.
    cluster_internal = clusters.with_columns(internal=0.0).update_rows(
        G.WE.with_columns(cu=C.ix(G.WE.u).c, cv=C.ix(G.WE.v).c)
        .filter(pw.this.cu == pw.this.cv)
        .groupby(id=pw.this.cu)
        .reduce(internal=pw.reducers.sum(pw.this.weight))
    )
    total_weight = G.WE.reduce(m=pw.reducers.sum(pw.this.weight))
    def cluster_modularity(internal: float, degree: float, total: float) -> float:
        # Per-cluster contribution: internal/total - (degree/total)^2,
        # written over a common denominator.
        return (internal * total - degree * degree) / (total * total)
    score = clusters.join(total_weight, id=clusters.id).select(
        modularity=pw.apply(
            cluster_modularity,
            cluster_internal.ix(pw.this.id).internal,
            cluster_degrees.ix(pw.this.id).degree,
            total_weight.m,
        )
    )
    # Total modularity is the sum of the per-cluster contributions, rounded.
    return score.reduce(
        modularity=pw.reducers.sum(score.modularity).num.round(round_digits)
    )
166,663 | from __future__ import annotations
import math
import pathway.internals as pw
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from ..common import Edge
class Vertex(pw.Schema):
    # True for the source vertices of the shortest-path computation.
    is_source: bool
class Dist(pw.Schema):
    # Edge length used during relaxation.
    dist: float
class DistFromSource(pw.Schema):
    # Current best known distance from a source vertex.
    dist_from_source: float
def _bellman_ford_step(
    vertices_dist: pw.Table[DistFromSource], edges: pw.Table[Edge | Dist]
) -> pw.Table[DistFromSource]:
    """
    One Bellman-Ford relaxation round.

    For every edge (u, v) proposes dist(u) + dist(u, v) as a distance for v,
    then keeps, per vertex, the minimum of the current and proposed distances.
    """
    relaxed_edges = edges + edges.select(
        dist_from_source=vertices_dist.ix(edges.u).dist_from_source + edges.dist
    )
    vertices_dist = vertices_dist.update_rows(
        relaxed_edges.groupby(id=relaxed_edges.v).reduce(
            dist_from_source=pw.reducers.min(relaxed_edges.dist_from_source),
        )
    )
    return vertices_dist
class Edge(pw.Schema):
    r"""
    Basic edge class, holds pointers to the endpoint vertices.
    """
    u: pw.Pointer[Vertex]
    v: pw.Pointer[Vertex]
def bellman_ford(vertices: pw.Table[Vertex], edges: pw.Table[Edge | Dist]):
    """
    Compute single-source shortest-path distances with Bellman-Ford.

    Source vertices start at distance 0.0, all other vertices at +inf;
    relaxation rounds (``_bellman_ford_step``) are iterated via ``pw.iterate``
    until a fixed point is reached.
    """
    vertices_dist: pw.Table[DistFromSource] = vertices.select(
        dist_from_source=pw.if_else(vertices.is_source, 0.0, math.inf)
    )
    return pw.iterate(
        _bellman_ford_step,
        vertices_dist=pw.iterate_universe(vertices_dist),
        edges=edges,
    )
166,664 | from __future__ import annotations
import pathway.internals as pw
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from ..common import Edge
class Result(pw.Schema):
    # PageRank score, kept as a scaled integer.
    rank: int
class Edge(pw.Schema):
    r"""
    Basic edge class, holds pointers to the endpoint vertices.
    """
    u: pw.Pointer[Vertex]
    v: pw.Pointer[Vertex]
def pagerank(edges: pw.Table[Edge], steps: int = 5) -> pw.Table[Result]:
    """
    Integer-arithmetic PageRank over the given directed edge set.

    Each step distributes ``rank * 5 // (degree * 6)`` along every out-edge
    and adds a constant 1_000 per vertex; ranks start at 6_000.
    NOTE(review): this looks like damping factor 5/6 with ranks scaled by
    6_000 — confirm against the library's documentation.

    Args:
        edges: directed edge table (u -> v).
        steps: number of power-iteration steps to run.

    Returns:
        Table with a ``rank`` column, one row per vertex that has any edge.
    """
    in_vertices: pw.Table = edges.groupby(id=edges.v).reduce(degree=0)
    out_vertices: pw.Table = edges.groupby(id=edges.u).reduce(
        degree=pw.reducers.count()
    )
    # Out-degree per vertex; sink vertices (in-edges only) keep degree 0.
    degrees: pw.Table = pw.Table.update_rows(in_vertices, out_vertices)
    # Vertices with no incoming edges always keep the base teleport rank.
    base: pw.Table = out_vertices.difference(in_vertices).select(rank=1_000)
    ranks: pw.Table = degrees.select(rank=6_000)
    for step in range(steps):
        outflow = degrees.select(
            flow=pw.if_else(
                degrees.degree == 0, 0, (ranks.rank * 5) // (degrees.degree * 6)
            ),
        )
        inflows = edges.groupby(id=edges.v).reduce(
            rank=pw.reducers.sum(outflow.ix(edges.u).flow) + 1_000
        )
        ranks = pw.Table.concat(base, inflows).with_universe_of(degrees)
    return ranks
166,665 | from __future__ import annotations
from dataclasses import dataclass
import pathway.internals as pw
from .common import Clustering, Edge, Vertex, Weight
class Graph:
    r"""
    Basic class representing undirected, unweighted (multi)graph,
    with vertex table ``V`` and edge table ``E``.
    """
    V: pw.Table[Vertex]
    E: pw.Table[Edge]
    def contracted_to_unweighted_simple_graph(
        self,
        clustering: pw.Table[Clustering],
        **reducer_expressions: pw.ReducerExpression,
    ) -> Graph:
        # Contract clusters, then deduplicate parallel edges.
        # NOTE(review): ``reducer_expressions`` is accepted but never used
        # here — confirm whether it should be forwarded to ``reduce``.
        contracted_graph = self.contracted_to_multi_graph(clustering)
        contracted_graph.E = contracted_graph.E.groupby(
            contracted_graph.E.u, contracted_graph.E.v
        ).reduce(contracted_graph.E.u, contracted_graph.E.v)
        return contracted_graph
    def contracted_to_weighted_simple_graph(
        self,
        clustering: pw.Table[Clustering],
        **reducer_expressions: pw.ReducerExpression,
    ) -> WeightedGraph:
        # Contract clusters and aggregate parallel edges into weighted ones;
        # ``reducer_expressions`` computes the aggregated columns.
        contracted_graph = self.contracted_to_multi_graph(clustering)
        WE = contracted_graph.E.groupby(
            contracted_graph.E.u, contracted_graph.E.v
        ).reduce(contracted_graph.E.u, contracted_graph.E.v, **reducer_expressions)
        return WeightedGraph.from_vertices_and_weighted_edges(contracted_graph.V, WE)
    def contracted_to_multi_graph(
        self,
        clustering: pw.Table[Clustering],
    ) -> Graph:
        # Vertices without an assigned cluster become singleton clusters
        # before contraction.
        full_clustering = _extended_to_full_clustering(self.V, clustering)
        return _contract(self.E, full_clustering)
    def without_self_loops(self) -> Graph:
        # Drop edges whose endpoints coincide.
        return Graph(self.V, self.E.filter(self.E.u != self.E.v))
class Edge(pw.Schema):
    r"""
    Basic edge class, holds pointers to the endpoint vertices.
    """
    u: pw.Pointer[Vertex]
    v: pw.Pointer[Vertex]
class Clustering(pw.Schema):
    r"""
    Class describing the cluster membership relation:
    the vertex given by the row id belongs to cluster ``c``.
    """
    c: pw.Pointer[Cluster]
The provided code snippet includes necessary dependencies for implementing the `_contract` function. Write a Python function `def _contract(edges: pw.Table[Edge], clustering: pw.Table[Clustering]) -> Graph` to solve the following problem:
This function contracts the clusters of the graph, under the assumption that it was given a full clustering, i.e., all vertices have exactly one cluster in the clustering. Returns: a graph in which: - each vertex is a cluster from the clustering, - each original edge now points to the clusters containing the original endpoints.
Here is the function:
def _contract(edges: pw.Table[Edge], clustering: pw.Table[Clustering]) -> Graph:
    r"""
    This function contracts the clusters of the graph,
    under the assumption that it was given a full clustering,
    i.e., all vertices have exactly one cluster in clustering
    Returns:
        a graph in which:
        - each vertex is a cluster from the clustering,
        - each original edge now points to clusters containing the original endpoints
    """
    # One vertex per non-empty cluster, id-ed by the cluster pointer.
    new_vertices = (
        clustering.groupby(clustering.c)
        .reduce(v=clustering.c)
        .with_id(pw.this.v)
        .select()
    )
    # Re-point every edge to the clusters of its endpoints.
    new_edges = edges.select(u=clustering.ix(edges.u).c, v=clustering.ix(edges.v).c)
    return Graph(new_vertices, new_edges)
166,666 | from __future__ import annotations
from dataclasses import dataclass
import pathway.internals as pw
from .common import Clustering, Edge, Vertex, Weight
class WeightedGraph(Graph):
    r"""
    Basic class representing an undirected, weighted (multi)graph.

    ``WE`` holds the weighted edge set (edges extended with a ``weight`` column).
    """
    WE: pw.Table[Edge | Weight]
    def from_vertices_and_weighted_edges(V, WE):
        # Alternate constructor: the weighted edge table doubles as the plain
        # edge table of the underlying Graph.
        return WeightedGraph(V, WE, WE)
    def contracted_to_weighted_simple_graph(
        self,
        clustering: pw.Table[Clustering],
        **reducer_expressions: pw.ReducerExpression,
    ) -> WeightedGraph:
        # Contract clusters, then merge parallel edges; ``reducer_expressions``
        # decides how the columns of duplicate edges (e.g. weights) aggregate.
        contracted_graph = self.contracted_to_multi_graph(clustering)
        contracted_graph.WE = contracted_graph.WE.groupby(
            contracted_graph.WE.u, contracted_graph.WE.v
        ).reduce(contracted_graph.WE.u, contracted_graph.WE.v, **reducer_expressions)
        return contracted_graph
    def contracted_to_multi_graph(
        self,
        clustering: pw.Table[Clustering],
    ) -> WeightedGraph:
        # Vertices without an assigned cluster become singleton clusters
        # before contraction.
        full_clustering = _extended_to_full_clustering(self.V, clustering)
        return _contract_weighted(self.WE, full_clustering)
    def without_self_loops(self) -> WeightedGraph:
        # Drop edges whose endpoints coincide.
        new_edges = self.WE.filter(self.WE.u != self.WE.v)
        return WeightedGraph.from_vertices_and_weighted_edges(self.V, new_edges)
class Edge(pw.Schema):
    r"""
    Basic edge class, holds pointers to the endpoint vertices.
    """
    u: pw.Pointer[Vertex]
    v: pw.Pointer[Vertex]
class Weight(pw.Schema):
    r"""
    Basic weight class. To be used as an extension of Vertex / Edge.
    """
    weight: float
class Clustering(pw.Schema):
    r"""
    Class describing the cluster membership relation:
    the vertex given by the row id belongs to cluster ``c``.
    """
    c: pw.Pointer[Cluster]
The provided code snippet includes necessary dependencies for implementing the `_contract_weighted` function. Write a Python function `def _contract_weighted( edges: pw.Table[Edge | Weight], clustering: pw.Table[Clustering] ) -> WeightedGraph` to solve the following problem:
This function contracts the clusters of the graph, under the assumption that it was given a full clustering, i.e., all vertices have exactly one cluster in the clustering. Returns: a graph in which: - each vertex is a cluster from the clustering, - each original edge now points to the clusters containing the original endpoints.
Here is the function:
def _contract_weighted(
    edges: pw.Table[Edge | Weight], clustering: pw.Table[Clustering]
) -> WeightedGraph:
    r"""
    This function contracts the clusters of the graph,
    under the assumption that it was given a full clustering,
    i.e., all vertices have exactly one cluster in clustering
    Returns:
        a graph in which:
        - each vertex is a cluster from the clustering,
        - each original edge now points to clusters containing the original endpoints
    """
    # One vertex per non-empty cluster, id-ed by the cluster pointer.
    new_vertices = (
        clustering.groupby(clustering.c)
        .reduce(v=clustering.c)
        .with_id(pw.this.v)
        .select()
    )
    # Re-point every edge to the clusters of its endpoints.
    new_edges = edges.select(u=clustering.ix(edges.u).c, v=clustering.ix(edges.v).c)
    return WeightedGraph.from_vertices_and_weighted_edges(new_vertices, new_edges)
166,667 | from __future__ import annotations
from dataclasses import dataclass
import pathway.internals as pw
from .common import Clustering, Edge, Vertex, Weight
class Vertex(pw.Schema):
    # Marker schema for graph vertices; rows carry no columns of their own.
    pass
class Clustering(pw.Schema):
    r"""
    Class describing the cluster membership relation:
    the vertex given by the row id belongs to cluster ``c``.
    """
    c: pw.Pointer[Cluster]
The provided code snippet includes necessary dependencies for implementing the `_extended_to_full_clustering` function. Write a Python function `def _extended_to_full_clustering( vertices: pw.Table[Vertex], clustering: pw.Table[Clustering] ) -> pw.Table[Clustering]` to solve the following problem:
This function, given a set of vertices and a partial clustering, i.e., a clustering in which not every vertex has an assigned cluster, creates an extended clustering in which those vertices are in singleton clusters. The id of each new singleton cluster is the same as the id of its vertex.
Here is the function:
def _extended_to_full_clustering(
    vertices: pw.Table[Vertex], clustering: pw.Table[Clustering]
) -> pw.Table[Clustering]:
    r"""
    This function, given a set of vertices and a partial clustering,
    i.e., a clustering in which not every vertex has an assigned cluster,
    creates an extended clustering in which those vertices are in singleton
    clusters. The id of each new singleton cluster is the id of its vertex.
    """
    # Default every vertex to its own singleton cluster, then overwrite with
    # the explicit assignments from ``clustering``.
    return vertices.select(c=vertices.id).update_rows(clustering)
166,668 | from __future__ import annotations
import math
from collections.abc import Callable
from typing import Optional, TypedDict
import pathway.internals as pw
from pathway.internals.arg_tuple import wrap_arg_tuple
from pathway.internals.fingerprints import fingerprint
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
class LeftRight(pw.Schema):
    # Children of a binary-tree node (None when the child is absent).
    left: pw.Pointer[Node] | None
    right: pw.Pointer[Node] | None
class Parent(pw.Schema):
    # Parent of a binary-tree node (None for the root).
    parent: pw.Pointer[Node] | None
class PrevNext(pw.Schema):
    # In-order predecessor / successor links (None at the order's extremes).
    prev: pw.Pointer[Node] | None
    next: pw.Pointer[Node] | None
class _treesort:
    class index(pw.ClassArg, input=LeftRight | Parent, output=PrevNext):
        """Transformer computing in-order predecessor/successor (prev/next)
        links for every node of a binary tree given left/right/parent."""
        parent = pw.input_attribute()
        left = pw.input_attribute()
        right = pw.input_attribute()
        def leftmost(self) -> pw.Pointer[Node]:
            """Leftmost node of this subtree (its in-order minimum)."""
            if self.left is None:
                return self.id
            else:
                return self.transformer.index[self.left].leftmost
        def rightmost(self) -> pw.Pointer[Node]:
            """Rightmost node of this subtree (its in-order maximum)."""
            if self.right is None:
                return self.id
            else:
                return self.transformer.index[self.right].rightmost
        def inverse_rightmost(self) -> pw.Pointer[Node]:
            """Lowest ancestor that is not a right son."""
            if self.parent is None:
                return self.id
            elif self.transformer.index[self.parent].right != self.id:
                return self.id
            else:
                return self.transformer.index[self.parent].inverse_rightmost
        def inverse_leftmost(self) -> pw.Pointer[Node]:
            """Lowest ancestor that is not a left son."""
            if self.parent is None:
                return self.id
            elif self.transformer.index[self.parent].left != self.id:
                return self.id
            else:
                return self.transformer.index[self.parent].inverse_leftmost
        def next(self) -> pw.Pointer[Node] | None:
            """In-order successor, or None for the maximum element."""
            if self.right is not None:
                return self.transformer.index[self.right].leftmost
            return self.transformer.index[self.inverse_rightmost].parent
        def prev(self) -> pw.Pointer[Node] | None:
            """In-order predecessor, or None for the minimum element."""
            if self.left is not None:
                return self.transformer.index[self.left].rightmost
            return self.transformer.index[self.inverse_leftmost].parent
def sort_from_index(
    index: pw.Table[LeftRight | Parent], oracle=None
) -> pw.Table[PrevNext]:
    """Materialize prev/next in-order links from a binary-tree index table.

    ``oracle`` is accepted for interface compatibility and ignored here.
    """
    return _treesort(index=index).index  # type: ignore
166,669 | from __future__ import annotations
import math
from collections.abc import Callable
from typing import Optional, TypedDict
import pathway.internals as pw
from pathway.internals.arg_tuple import wrap_arg_tuple
from pathway.internals.fingerprints import fingerprint
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
class BinsearchOracle(pw.Schema):
    # Callables returning the id of the item with the largest key <= value
    # (lowerbound) / the smallest key >= value (upperbound), or None.
    lowerbound: Callable[..., pw.Pointer | None]
    upperbound: Callable[..., pw.Pointer | None]
class _binsearch_oracle:
    class oracle(pw.ClassArg):  # indexed by Instance
        """Per-instance entry point delegating queries to the tree root."""
        root = pw.input_attribute()
        def lowerbound(self, value) -> pw.Pointer | None:
            """Returns id of item such that item.key <= value and item.key is maximal."""
            return self.transformer.index[self.root].lowerbound(value)
        def upperbound(self, value) -> pw.Pointer | None:
            """Returns id of item such that item.key >= value and item.key is minimal."""
            return self.transformer.index[self.root].upperbound(value)
    class index(pw.ClassArg, input=LeftRight | Key):
        """Recursive binary search over the tree nodes (key/left/right)."""
        key = pw.input_attribute()
        left = pw.input_attribute()
        right = pw.input_attribute()
        def lowerbound(self, value) -> pw.Pointer | None:
            """Largest key <= value within this subtree, or None."""
            if self.key <= value:
                # This node qualifies; a larger qualifying key can only be
                # in the right subtree.
                if self.right is not None:
                    right_lowerbound = self.transformer.index[self.right].lowerbound(
                        value
                    )
                    if right_lowerbound is not None:
                        return right_lowerbound
                return self.id
            elif self.left is not None:
                return self.transformer.index[self.left].lowerbound(value)
            else:
                return None
        def upperbound(self, value) -> pw.Pointer | None:
            """Smallest key >= value within this subtree, or None."""
            if self.key >= value:
                # This node qualifies; a smaller qualifying key can only be
                # in the left subtree.
                if self.left is not None:
                    left_upperbound = self.transformer.index[self.left].upperbound(
                        value
                    )
                    if left_upperbound is not None:
                        return left_upperbound
                return self.id
            elif self.right is not None:
                return self.transformer.index[self.right].upperbound(value)
            else:
                return None
def binsearch_oracle(oracle, index) -> pw.Table[BinsearchOracle]:
    """Build a binary-search oracle table from root (oracle) and node (index) tables."""
    return _binsearch_oracle(oracle=oracle, index=index).oracle  # type: ignore
166,670 | from __future__ import annotations
import math
from collections.abc import Callable
from typing import Optional, TypedDict
import pathway.internals as pw
from pathway.internals.arg_tuple import wrap_arg_tuple
from pathway.internals.fingerprints import fingerprint
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
def build_sorted_index(nodes: pw.Table[Key | Instance]) -> SortedIndex:
def filter_cmp_helper(filter_val, index, oracle=None) -> pw.Table[ComparisonRet]:
def prefix_sum_oracle(oracle, index) -> pw.Table[PrefixSumOracle]: # type: ignore
def filter_smallest_k(
    column: pw.ColumnReference, instance: pw.ColumnReference, ks: pw.Table
) -> pw.Table:
    """
    Keep, per instance, the rows holding the k smallest values of ``column``.

    Args:
        column: column whose smallest values are retained.
        instance: grouping column; each group gets its own threshold.
        ks: table with ``instance`` and ``k`` columns giving the per-group k.

    Returns:
        The filtered input table.
    """
    ks = ks.with_id_from(ks.instance)
    table = column.table
    colname = column.name
    # Sorted index over (instance, key); each node also counts as one item.
    sorted_index = build_sorted_index(nodes=table.select(instance=instance, key=column))
    sorted_index.index += table.select(val=1)
    oracle = prefix_sum_oracle(**sorted_index)
    pw.universes.promise_is_subset_of(ks, oracle)
    oracle_restricted = oracle.restrict(ks)
    # root is pked with instance, ks also
    res = ks.select(res=oracle_restricted.prefix_sum_upperbound(ks.k))
    validres = res.filter(res.res.is_not_none())
    validres = validres.select(res=getattr(table.ix(validres.res), colname))
    # Groups with no upperbound keep every row (threshold = +inf).
    res <<= res.filter(res.res.is_none()).select(res=math.inf)
    res <<= validres
    selector = filter_cmp_helper(filter_val=res.select(val=res.res), **sorted_index)
    # todo drop agg
    return table.filter(selector.comparison_ret < 0)
166,671 | from __future__ import annotations
import pandas as pd
import pathway.internals as pw
from pathway.debug import table_from_pandas
from pathway.internals import schema
from pathway.internals.api import Pointer, ref_scalar
from pathway.internals.helpers import FunctionSpec, function_spec
from pathway.stdlib.utils.col import unpack_col
def _pandas_transformer(
    *inputs: pw.Table,
    func_spec: FunctionSpec,
    output_schema: type[schema.Schema],
    output_universe: str | int | None,
) -> pw.Table:
    """
    Wrap a pandas-level function as a Pathway table transformer.

    Input tables are packed into pandas DataFrames (indexed by row id), passed
    to ``func_spec.func``, and the resulting DataFrame/Series is converted back
    into a Pathway table with ``output_schema``.

    Args:
        inputs: Pathway tables matching the function's positional arguments.
        func_spec: specification (callable + argument names) of the function.
        output_universe: name or index of the argument whose universe the
            output should share, or None when the output defines its own ids.
        output_schema: schema of the produced table.

    Returns:
        The transformed Pathway table.

    Raises:
        ValueError: when the result index does not match the indicated
            argument's universe, or is not unique.
    """
    output_universe_arg_index = _argument_index(func_spec, output_universe)
    func = func_spec.func

    def process_pandas_output(
        result: pd.DataFrame | pd.Series,
        # Immutable default instead of the previous mutable ``[]`` default.
        pandas_input: list[pd.DataFrame] | tuple[pd.DataFrame, ...] = (),
    ) -> pd.DataFrame:
        # Normalize the function's output: promote a Series to a DataFrame,
        # rename columns to the schema's, and ensure a unique Pointer index.
        if isinstance(result, pd.Series):
            result = pd.DataFrame(result)
        result.columns = output_schema.column_names()
        if output_universe_arg_index is not None and not result.index.equals(
            pandas_input[output_universe_arg_index].index
        ):
            raise ValueError(
                "resulting universe does not match the universe of the indicated argument"
            )
        else:
            if not result.index.is_unique:
                raise ValueError("index of resulting DataFrame must be unique")
            index_as_series = result.index.to_series()
            if not index_as_series.map(lambda x: isinstance(x, Pointer)).all():
                # Relabel the index in place with Pointer values.
                # Bug fix: the previous code called ``result.reindex(new_index)``
                # and discarded the return value — ``reindex`` is not in-place
                # and aligns rows by label instead of relabeling them.
                result.index = index_as_series.map(lambda x: ref_scalar(x))
        assert result.index.is_unique
        return result

    if len(func_spec.arg_names) == 0:
        # Zero-argument function: evaluate eagerly and materialize the result.
        result = process_pandas_output(func())
        output = table_from_pandas(result)
    else:
        input_table = _create_input_table(*inputs)

        def wrapper(*input_tables, inputs=inputs):
            # Rebuild pandas DataFrames (indexed by row id) from the packed
            # column tuples, run the user function, and flatten the result
            # into one tuple per row, prefixed with the row id.
            pandas_input = []
            for idx, table in enumerate(input_tables):
                df = pd.DataFrame(table)
                df.set_index(df.columns[0], inplace=True)
                df.columns = inputs[idx].schema.column_names()
                pandas_input.append(df)
            result = func(*pandas_input)
            result = process_pandas_output(result, pandas_input)
            result.insert(0, "_id", result.index)
            return tuple(result.apply(tuple, axis=1))

        applied = input_table.select(
            all_cols=pw.apply(wrapper, *_table_columns(input_table))
        )
        flattened = applied.flatten(pw.this.all_cols)
        output = unpack_col(
            flattened.all_cols, pw.this._id, *output_schema.column_names()
        ).update_types(_id=pw.Pointer)
        output = output.with_id(output._id).without(pw.this._id)
    if output_universe_arg_index is not None:
        output = output.with_universe_of(inputs[output_universe_arg_index])
    output = output.update_types(**output_schema.typehints())
    return output
) # necessary for doctests to work, see https://www.rosipov.com/blog/python-doctests-and-decorators-bug/
def function_spec(fn):
    """Build a FunctionSpec (callable, positional arg names, defaults) for *fn*.

    Decorator wrappers are unwrapped first, so the spec reflects the innermost
    function.
    """
    unwrapped = inspect.unwrap(fn)
    spec = inspect.getfullargspec(unwrapped)
    # Defaults align with the tail of the positional argument list; pairing the
    # reversed sequences matches them up (zip stops at the shorter one).
    default_map = {}
    if spec.defaults is not None:
        default_map = dict(zip(reversed(spec.args), reversed(spec.defaults)))
    positional = unwrapped.__code__.co_varnames[: unwrapped.__code__.co_argcount]
    return FunctionSpec(unwrapped, positional, default_map)
The provided code snippet includes necessary dependencies for implementing the `pandas_transformer` function. Write a Python function `def pandas_transformer( output_schema: type[schema.Schema], output_universe: str | int | None = None )` to solve the following problem:
Decorator that turns python function operating on pandas.DataFrame into pathway transformer. Input universes are converted into input DataFrame indexes. The resulting index is treated as the output universe, so it must maintain uniqueness and be of integer type. Args: output_schema: Schema of a resulting table. output_universe: Index or name of an argument whose universe will be used \ in resulting table. Defaults to `None`. Returns: Transformer that can be applied on Pathway tables. Example: >>> import pathway as pw >>> input = pw.debug.table_from_markdown( ... ''' ... | foo | bar ... 0 | 10 | 100 ... 1 | 20 | 200 ... 2 | 30 | 300 ... ''' ... ) >>> class Output(pw.Schema): ... sum: int >>> @pw.pandas_transformer(output_schema=Output) ... def sum_cols(t: pd.DataFrame) -> pd.DataFrame: ... return pd.DataFrame(t.sum(axis=1)) >>> output = sum_cols(input) >>> pw.debug.compute_and_print(output, include_id=False) sum 110 220 330
Here is the function:
def pandas_transformer(
    output_schema: type[schema.Schema], output_universe: str | int | None = None
):
    """Decorator that turns python function operating on pandas.DataFrame into pathway transformer.
    Input universes are converted into input DataFrame indexes.
    The resulting index is treated as the output universe, so it must maintain uniqueness
    and be of integer type.
    Args:
        output_schema: Schema of a resulting table.
        output_universe: Index or name of an argument whose universe will be used \
            in resulting table. Defaults to `None`.
    Returns:
        Transformer that can be applied on Pathway tables.
    Example:
    >>> import pathway as pw
    >>> input = pw.debug.table_from_markdown(
    ...     '''
    ...      | foo  | bar
    ...    0 | 10   | 100
    ...    1 | 20   | 200
    ...    2 | 30   | 300
    ...     '''
    ... )
    >>> class Output(pw.Schema):
    ...     sum: int
    >>> @pw.pandas_transformer(output_schema=Output)
    ... def sum_cols(t: pd.DataFrame) -> pd.DataFrame:
    ...     return pd.DataFrame(t.sum(axis=1))
    >>> output = sum_cols(input)
    >>> pw.debug.compute_and_print(output, include_id=False)
    sum
    110
    220
    330
    """

    def decorator(func):
        # Inspect the user function once, at decoration time.
        spec = function_spec(func)

        def wrapper(*args):
            transformer_kwargs = {
                "func_spec": spec,
                "output_schema": output_schema,
                "output_universe": output_universe,
            }
            return _pandas_transformer(*args, **transformer_kwargs)

        return wrapper

    return decorator
166,672 | import annotations
import warnings
from collections.abc import Callable, Sequence
from typing import overload
import pathway.internals as pw
from pathway.internals import dtype as dt
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
The provided code snippet includes necessary dependencies for implementing the `flatten_column` function. Write a Python function `def flatten_column( column: pw.ColumnReference, origin_id: str | pw.ColumnReference | None = "origin_id", ) -> pw.Table` to solve the following problem:
Deprecated: use pw.Table.flatten instead. Flattens a column of a table. Input: - column: Column expression of column to be flattened - origin_id: name of output column where to store id's of input rows Output: - Table with columns: colname_to_flatten and origin_id (if not None) Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... | pet | age ... 1 | Dog | 2 ... 7 | Cat | 5 ... ''') >>> t2 = pw.utils.col.flatten_column(t1.pet) >>> pw.debug.compute_and_print(t2.without(pw.this.origin_id), include_id=False) pet C D a g o t
Here is the function:
def flatten_column(
    column: pw.ColumnReference,
    origin_id: str | pw.ColumnReference | None = "origin_id",
) -> pw.Table:
    """Deprecated: use pw.Table.flatten instead.
    Flattens a column of a table.
    Input:
    - column: Column expression of column to be flattened
    - origin_id: name of output column where to store id's of input rows
    Output:
    - Table with columns: colname_to_flatten and origin_id (if not None)
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | pet  | age
    ... 1 | Dog  | 2
    ... 7 | Cat  | 5
    ... ''')
    >>> t2 = pw.utils.col.flatten_column(t1.pet)
    >>> pw.debug.compute_and_print(t2.without(pw.this.origin_id), include_id=False)
    pet
    C
    D
    a
    g
    o
    t
    """
    warnings.warn(
        "pw.stdlib.utils.col.flatten_column() is deprecated, use pw.Table.flatten() instead",
        DeprecationWarning,
        stacklevel=5,
    )
    source = column.table
    # Always flatten the requested column; optionally carry the source row id along.
    flatten_args = {column.name: column}
    if origin_id is not None:
        flatten_args[pw.this[origin_id].name] = source.id
    return source.flatten(**flatten_args)
166,673 | import annotations
import warnings
from collections.abc import Callable, Sequence
from typing import overload
import pathway.internals as pw
from pathway.internals import dtype as dt
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
The provided code snippet includes necessary dependencies for implementing the `unpack_col_dict` function. Write a Python function `def unpack_col_dict( column: pw.ColumnReference, schema: type[pw.Schema], ) -> pw.Table` to solve the following problem:
Unpacks columns from a json object Input: - column: Column expression of column containing some pw.Json with an object - schema: Schema for columns to extract Output: - Table with columns given by the schema Example: >>> import pathway as pw >>> t = pw.debug.table_from_rows( ... schema=pw.schema_from_types(data=pw.Json), ... rows=[ ... ({"field_a": 13, "field_b": "foo", "field_c": False},), ... ({"field_a": 17, "field_c": True, "field_d": 3.4},) ... ] ... ) >>> class DataSchema(pw.Schema): ... field_a: int ... field_b: str | None ... field_c: bool ... field_d: float | None >>> t2 = pw.utils.col.unpack_col_dict(t.data, schema=DataSchema) >>> pw.debug.compute_and_print(t2, include_id=False) field_a | field_b | field_c | field_d 13 | foo | False | 17 | | True | 3.4
Here is the function:
def unpack_col_dict(
    column: pw.ColumnReference,
    schema: type[pw.Schema],
) -> pw.Table:
    """Unpacks columns from a json object
    Input:
    - column: Column expression of column containing some pw.Json with an object
    - schema: Schema for columns to extract
    Output:
    - Table with columns given by the schema
    Example:
    >>> import pathway as pw
    >>> t = pw.debug.table_from_rows(
    ...     schema=pw.schema_from_types(data=pw.Json),
    ...     rows=[
    ...         ({"field_a": 13, "field_b": "foo", "field_c": False},),
    ...         ({"field_a": 17, "field_c": True, "field_d": 3.4},)
    ...     ]
    ... )
    >>> class DataSchema(pw.Schema):
    ...     field_a: int
    ...     field_b: str | None
    ...     field_c: bool
    ...     field_d: float | None
    >>> t2 = pw.utils.col.unpack_col_dict(t.data, schema=DataSchema)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    field_a | field_b | field_c | field_d
    13      | foo     | False   |
    17      |         | True    | 3.4
    """
    typehints = schema._dtypes()

    def _convert_from_json(name: str, col: pw.ColumnExpression):
        # Pick the JSON accessor (as_bool/as_float/...) matching the declared
        # dtype of this column; optionality is stripped first and re-applied
        # via pw.unwrap below.
        _type = typehints[name]
        is_optional = isinstance(_type, dt.Optional)
        _type = dt.unoptionalize(_type)
        result = col
        if _type == dt.BOOL:
            result = col.as_bool()
        elif _type == dt.FLOAT:
            result = col.as_float()
        elif _type == dt.INT:
            result = col.as_int()
        elif _type == dt.STR:
            result = col.as_str()
        elif _type == dt.JSON:
            # Nested JSON values are kept as-is.
            result = col
        else:
            raise TypeError(f"Unsupported conversion from pw.Json to {typehints[name]}")
        if not is_optional:
            # Non-optional columns must be present — unwrap removes the
            # optionality from the expression's type.
            result = pw.unwrap(result)
        return result

    colrefs = [pw.this[column_name] for column_name in schema.column_names()]
    # One extracted-and-converted expression per schema column.
    kw = {
        colref.name: _convert_from_json(colref.name, column.get(colref.name))
        for colref in colrefs
    }
    result = column.table.select(**kw).update_types(**schema)
    return result
166,674 | import annotations
import warnings
from collections.abc import Callable, Sequence
from typing import overload
import pathway.internals as pw
from pathway.internals import dtype as dt
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
def multiapply_all_rows(
    *cols: pw.ColumnReference,
    fun: Callable[..., list[Sequence]],
    result_col_names: list[str | pw.ColumnReference],
) -> pw.Table:
    """Applies a function to all the data in selected columns at once, returning multiple columns.
    This transformer is meant to be run infrequently on relatively small tables.
    Input:
    - cols: list of columns to which function will be applied
    - fun: function taking lists of columns and returning a corresponding list of outputs.
    - result_col_names: names of the output columns
    Output:
    - Table indexed with original indices with columns named by "result_col_names" argument
    containing results of the apply
    Example:
    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ...       | colA | colB
    ...     1 | 1    | 10
    ...     2 | 2    | 20
    ...     3 | 3    | 30
    ... ''')
    >>> def add_total_sum(col1, col2):
    ...     sum_all = sum(col1) + sum(col2)
    ...     return [x + sum_all for x in col1], [x + sum_all for x in col2]
    >>> result = pw.utils.col.multiapply_all_rows(
    ...     table.colA, table.colB, fun=add_total_sum, result_col_names=["res1", "res2"]
    ... )
    >>> pw.debug.compute_and_print(result, include_id=False)
    res1 | res2
    67   | 76
    68   | 86
    69   | 96
    """
    assert len(cols) > 0
    table = cols[0].table
    # All requested columns must come from the same table.
    assert all([col.table == table for col in cols[1:]])

    def zip_cols(id, *cols):
        # Pair each row's id with its column values so ids survive the reduce.
        return (id, *cols)

    tmp = table.select(id_and_cols=pw.apply(zip_cols, table.id, *cols))
    # Collapse the whole table into one row holding every (id, values) tuple.
    reduced = tmp.reduce(ids_and_cols=pw.reducers.sorted_tuple(tmp.id_and_cols))

    def fun_wrapped(ids_and_cols):
        # Transpose into per-column lists, apply `fun`, then re-attach the ids
        # to the outputs row by row.
        ids, *cols = zip(*ids_and_cols)
        res = fun(*cols)
        return tuple(zip(ids, *res))

    applied = reduced.select(ids_and_res=pw.apply(fun_wrapped, reduced.ids_and_cols))
    # Expand back to one row per original input row.
    flatted = applied.flatten(pw.this.ids_and_res)
    result = unpack_col(flatted.ids_and_res, "idd", *result_col_names).update_types(
        idd=pw.Pointer
    )
    # Restore the original row ids and universe.
    result = result.with_id(result.idd).without(pw.this.idd)
    return result.with_universe_of(table)
The provided code snippet includes necessary dependencies for implementing the `apply_all_rows` function. Write a Python function `def apply_all_rows( *cols: pw.ColumnReference, fun: Callable[..., Sequence], result_col_name: str | pw.ColumnReference, ) -> pw.Table` to solve the following problem:
Applies a function to all the data in selected columns at once, returning a single column. This transformer is meant to be run infrequently on a relativelly small tables. Input: - cols: list of columns to which function will be applied - fun: function taking lists of columns and returning a corresponding list of outputs. - result_col_name: name of the output column Output: - Table indexed with original indices with a single column named by "result_col_name" argument containing results of the apply Example: >>> import pathway as pw >>> table = pw.debug.table_from_markdown( ... ''' ... | colA | colB ... 1 | 1 | 10 ... 2 | 2 | 20 ... 3 | 3 | 30 ... ''') >>> def add_total_sum(col1, col2): ... sum_all = sum(col1) + sum(col2) ... return [x + sum_all for x in col1] >>> result = pw.utils.col.apply_all_rows( ... table.colA, table.colB, fun=add_total_sum, result_col_name="res" ... ) >>> pw.debug.compute_and_print(result, include_id=False) res 67 68 69
Here is the function:
def apply_all_rows(
    *cols: pw.ColumnReference,
    fun: Callable[..., Sequence],
    result_col_name: str | pw.ColumnReference,
) -> pw.Table:
    """Applies a function to all the data in selected columns at once, returning a single column.
    This transformer is meant to be run infrequently on relatively small tables.
    Input:
    - cols: list of columns to which function will be applied
    - fun: function taking lists of columns and returning a corresponding list of outputs.
    - result_col_name: name of the output column
    Output:
    - Table indexed with original indices with a single column named by "result_col_name" argument
    containing results of the apply
    Example:
    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ...       | colA | colB
    ...     1 | 1    | 10
    ...     2 | 2    | 20
    ...     3 | 3    | 30
    ... ''')
    >>> def add_total_sum(col1, col2):
    ...     sum_all = sum(col1) + sum(col2)
    ...     return [x + sum_all for x in col1]
    >>> result = pw.utils.col.apply_all_rows(
    ...     table.colA, table.colB, fun=add_total_sum, result_col_name="res"
    ... )
    >>> pw.debug.compute_and_print(result, include_id=False)
    res
    67
    68
    69
    """

    def single_output_fun(*columns):
        # Wrap the single output column in a list so the multi-column helper
        # can be reused unchanged.
        return [fun(*columns)]

    return multiapply_all_rows(
        *cols, fun=single_output_fun, result_col_names=[result_col_name]
    )
166,675 | from __future__ import annotations
import pathway.internals as pw
def argmin_rows(
    table: pw.Table, *on: pw.ColumnReference, what: pw.ColumnReference
) -> pw.Table:
    """For each group defined by *on*, return the row of *table* minimizing *what*."""
    grouped = table.groupby(*on)
    # One row per group, keyed by the id of the minimizing row.
    winners = grouped.reduce(argmin_id=pw.reducers.argmin(what)).with_id(
        pw.this.argmin_id
    )
    # The selected rows are by construction a subset of the original universe.
    return table.ix(winners.argmin_id).promise_universe_is_subset_of(table)
166,676 | from __future__ import annotations
import datetime
def truncate_to_minutes(time: datetime.datetime) -> datetime.datetime:
    """Round *time* down to the start of its minute (zero seconds and microseconds)."""
    return time.replace(second=0, microsecond=0)
166,677 | from __future__ import annotations
import datetime
from typing import TYPE_CHECKING, Any, Generic, Protocol, TypeAlias, TypeVar, Union
import numpy as np
import pandas as pd
from pathway.engine import *
from pathway.internals import dtype as dt, json
from pathway.internals.schema import Schema
CapturedStream = list[DataRow]
def denumpify(x, type_from_schema: dt.DType | None = None):
    """Convert a numpy/pandas scalar to a plain Python value.

    When ``type_from_schema`` is given, the value is additionally coerced to
    match the declared dtype (e.g. floats that pandas used to store optional
    ints are turned back into ints).
    """

    def denumpify_inner(x):
        # NA-like scalars (nan / NaT / pd.NA) collapse to None.
        if pd.api.types.is_scalar(x) and pd.isna(x):
            return None
        # numpy scalar types unwrap to their Python equivalents.
        if isinstance(x, np.generic):
            return x.item()
        return x

    def _is_instance_of_simple_type(x):
        # True when x is compatible with any of the simple engine dtypes.
        return (
            dt.INT.is_value_compatible(x)
            or dt.BOOL.is_value_compatible(x)
            or dt.STR.is_value_compatible(x)
            or dt.BYTES.is_value_compatible(x)
            or dt.FLOAT.is_value_compatible(x)
        )

    def fix_possibly_misassigned_type(entry, type_from_schema):
        # Sanity check: the value must fit the declared type (str is exempt,
        # see the inline note about schema_from_pandas below).
        assert (
            (type_from_schema.is_value_compatible(entry))
            # the only exception for str should be conversion to bytes; however,
            # some places use schema_from_pandas, which considers some complex types
            # as str, which means we enter here, as it looks like simple type STR even
            # though it's not, below the exception that should be here
            # or (isinstance(v, str) and type_from_schema.wrapped == bytes)
            or type_from_schema.wrapped == str
        )
        if type_from_schema == dt.STR and _is_instance_of_simple_type(entry):
            return str(entry)
        if type_from_schema == dt.FLOAT:
            return float(entry)
        if isinstance(entry, str) and type_from_schema == dt.BYTES:
            return entry.encode("utf-8")
        return entry

    v = denumpify_inner(x)
    if isinstance(type_from_schema, dt._SimpleDType):
        v = fix_possibly_misassigned_type(v, type_from_schema)
    elif (
        isinstance(type_from_schema, dt.Optional)
        and isinstance(type_from_schema.wrapped, dt._SimpleDType)
        and not dt.NONE.is_value_compatible(v)
    ):
        # pandas stores optional ints as floats
        if isinstance(v, float) and type_from_schema.wrapped == dt.INT:
            assert v.is_integer()
            v = fix_possibly_misassigned_type(int(v), type_from_schema.wrapped)
        else:
            v = fix_possibly_misassigned_type(v, type_from_schema.wrapped)
    if isinstance(v, str):
        # Re-encode to drop any undecodable/surrogate code points.
        return v.encode("utf-8", "ignore").decode("utf-8")
    else:
        return v
def ids_from_pandas(
    df: pd.DataFrame,
    connector_properties: ConnectorProperties | None,
    id_from: list[str] | None,
) -> dict[Any, Pointer]:
    """Map each DataFrame index value to the Pointer key its row will get."""
    if id_from is not None:
        # Keys derived from the indicated columns rather than the index.
        return {k: ref_scalar(*args) for (k, *args) in df[id_from].itertuples()}
    trusted = (
        connector_properties is not None and connector_properties.unsafe_trusted_ids
    )
    if trusted:
        return {k: unsafe_make_pointer(k) for k in df.index}
    return {k: ref_scalar(k) for k in df.index}
# Names of pseudo-columns carrying per-row stream metadata instead of data.
TIME_PSEUDOCOLUMN = "__time__"  # value passed as DataRow `time` (defaults to 0)
DIFF_PSEUDOCOLUMN = "__diff__"  # +1 insertion / -1 deletion (only values allowed)
SHARD_PSEUDOCOLUMN = "__shard__"  # value passed as DataRow `shard` (defaults to None)
PANDAS_PSEUDOCOLUMNS = {TIME_PSEUDOCOLUMN, DIFF_PSEUDOCOLUMN, SHARD_PSEUDOCOLUMN}
class Schema(metaclass=SchemaMetaclass):
    """Base class to inherit from when creating schemas.
    All schemas should be subclasses of this one.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age  owner  pet
    ... 1 10  Alice  dog
    ... 2 9    Bob  dog
    ... 3 8  Alice  cat
    ... 4 7    Bob  dog''')
    >>> t1.schema
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
    >>> issubclass(t1.schema, pw.Schema)
    True
    >>> class NewSchema(pw.Schema):
    ...   foo: int
    >>> SchemaSum = NewSchema | t1.schema
    >>> SchemaSum
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>, 'foo': <class 'int'>}>
    """

    # NOTE(review): `append_only` is accepted here but not forwarded or used in
    # this fragment — presumably it is consumed by SchemaMetaclass; confirm
    # upstream before relying on it.
    def __init_subclass__(cls, /, append_only: bool | None = None, **kwargs) -> None:
        super().__init_subclass__(**kwargs)
def static_table_from_pandas(
    scope,
    df: pd.DataFrame,
    connector_properties: ConnectorProperties | None = None,
    id_from: list[str] | None = None,
    schema: type[Schema] | None = None,
) -> Table:
    """Build a static engine table from a pandas DataFrame.

    Pseudo-columns (``__time__``, ``__diff__``, ``__shard__``) are stripped
    from the data and used as per-row stream metadata instead.
    """
    if schema is not None and id_from is not None:
        # When both are given they must agree on the key columns.
        assert schema.primary_key_columns() == id_from
    if id_from is None and schema is not None:
        # Fall back to the schema's primary key for row ids.
        id_from = schema.primary_key_columns()
    ids = ids_from_pandas(df, connector_properties, id_from)
    column_types: dict[str, dt.DType] | None = None
    if schema is not None:
        column_types = dict(schema.__dtypes__)
        # Pseudo-columns always carry integers.
        for column in PANDAS_PSEUDOCOLUMNS:
            column_types[column] = dt.INT
    data = {}
    for c in df.columns:
        type_from_schema = None if column_types is None else column_types[c]
        data[c] = [denumpify(v, type_from_schema) for _, v in df[c].items()]
        # df[c].items() is used because df[c].values is a numpy array
    ordinary_columns = [
        column for column in df.columns if column not in PANDAS_PSEUDOCOLUMNS
    ]
    if connector_properties is None:
        # No explicit properties: infer each column's dtype from its first
        # non-None value (all-None columns default to int).
        column_properties = []
        for c in ordinary_columns:
            dtype: type = int
            for v in data[c]:
                if v is not None:
                    dtype = type(v)
                    break
            column_properties.append(
                ColumnProperties(dtype=dt.wrap(dtype).map_to_engine())
            )
        connector_properties = ConnectorProperties(column_properties=column_properties)
    assert len(connector_properties.column_properties) == len(
        ordinary_columns
    ), "provided connector properties do not match the dataframe"
    input_data: CapturedStream = []
    for i, index in enumerate(df.index):
        key = ids[index]
        values = [data[c][i] for c in ordinary_columns]
        # Metadata defaults: time 0, diff +1 (insertion), no shard.
        time = data[TIME_PSEUDOCOLUMN][i] if TIME_PSEUDOCOLUMN in data else 0
        diff = data[DIFF_PSEUDOCOLUMN][i] if DIFF_PSEUDOCOLUMN in data else 1
        if diff not in [-1, 1]:
            raise ValueError(f"Column {DIFF_PSEUDOCOLUMN} can only contain 1 and -1.")
        shard = data[SHARD_PSEUDOCOLUMN][i] if SHARD_PSEUDOCOLUMN in data else None
        input_row = DataRow(key, values, time=time, diff=diff, shard=shard)
        input_data.append(input_row)
    return scope.static_table(input_data, connector_properties)
166,678 | from __future__ import annotations
import builtins
import logging
import sys
import warnings
from collections.abc import Callable
from dataclasses import dataclass
from threading import Event, Lock, Thread
from typing import Any, TypeVar
from pathway.internals import (
api,
datasink as datasinks,
datasource as datasources,
graph_runner as graph_runners,
monitoring,
operator as operators,
parse_graph as parse_graphs,
schema as schemas,
table as tables,
table_io,
)
def is_interactive_mode_enabled() -> bool:
    """Tell whether the global parse graph has an interactive-mode controller attached."""
    return parse_graphs.G.interactive_mode_controller is not None
166,679 | from __future__ import annotations
import builtins
import logging
import sys
import warnings
from collections.abc import Callable
from dataclasses import dataclass
from threading import Event, Lock, Thread
from typing import Any, TypeVar
from pathway.internals import (
api,
datasink as datasinks,
datasource as datasources,
graph_runner as graph_runners,
monitoring,
operator as operators,
parse_graph as parse_graphs,
schema as schemas,
table as tables,
table_io,
)
class InteractiveModeController:
    """Replaces ``sys.displayhook`` so values implementing DisplayAsStr are
    printed as plain strings in an interactive session."""

    # The hook that was installed before this controller took over; non-special
    # values are delegated to it.
    _orig_displayhook: Callable[[object], None]

    def __init__(self, _pathway_internal: bool = False) -> None:
        # Guard against direct construction from user code.
        assert _pathway_internal, "InteractiveModeController is an internal class"
        self._orig_displayhook = sys.displayhook
        sys.displayhook = self._displayhook

    def _displayhook(self, value: object) -> None:
        if isinstance(value, DisplayAsStr):
            # Mimic the builtin hook's contract: remember the value as `_`.
            builtins._ = value  # type: ignore [attr-defined]
            print(str(value))
        else:
            self._orig_displayhook(value)
def enable_interactive_mode() -> InteractiveModeController:
    """Enable the experimental interactive mode on the global parse graph.

    Idempotent: returns the already-installed controller when one exists.

    Raises:
        ValueError: when the global scope already contains operators —
            interactive mode must be enabled before any tables are built.
    """
    warnings.warn("interactive mode is experimental", stacklevel=2)
    graph = parse_graphs.G
    if graph.interactive_mode_controller is not None:
        return graph.interactive_mode_controller
    if not graph.global_scope.is_empty():
        # XXX: is this a correct test?
        raise ValueError("Cannot enable interactive mode")
    # `logging` is imported at module level; the previous local re-import here
    # was redundant and has been removed.
    logging.basicConfig(level=logging.INFO)
    graph.interactive_mode_controller = InteractiveModeController(
        _pathway_internal=True
    )
    return graph.interactive_mode_controller
166,680 | from __future__ import annotations
import datetime
import warnings
from collections.abc import Iterable
from dataclasses import dataclass
from types import EllipsisType
from typing import TYPE_CHECKING, Any, TypeVar
from pathway.internals import dtype as dt, expression as expr
from pathway.internals.expression_printer import get_expression_info
from pathway.internals.expression_visitor import IdentityTransform
from pathway.internals.json import Json
from pathway.internals.operator_mapping import (
common_dtype_in_binary_operator,
get_binary_operators_mapping,
get_binary_operators_mapping_optionals,
get_unary_operators_mapping,
tuple_handling_operators,
)
ColExprT = TypeVar("ColExprT", bound=expr.ColumnExpression)
def _wrap(expression: ColExprT, dtype: dt.DType) -> ColExprT:
assert not hasattr(expression, "_dtype")
assert isinstance(dtype, dt.DType)
expression._dtype = dtype
return expression | null |
166,681 | from __future__ import annotations
from functools import wraps
from warnings import warn
import pathway.internals.expression as expr
from pathway.internals.join_mode import JoinMode
from pathway.internals.trace import trace_user_frame
def trace_user_frame(func: Callable[P, T]) -> Callable[P, T]:
    """Wrap *func* so that any exception is re-raised with the user's frame attached."""

    # NOTE(review): the wrapper is deliberately named `_pathway_trace_marker`
    # and is not wrapped with functools.wraps — presumably the traceback
    # machinery searches the stack for this name; confirm before "fixing".
    def _pathway_trace_marker(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            _reraise_with_user_frame(e)

    return _pathway_trace_marker
def arg_handler(*, handler):
    """Decorator factory: preprocess a function's (args, kwargs) through *handler*.

    The handler itself runs under trace_user_frame so its errors point at the
    user's call site.
    """
    traced_handler = trace_user_frame(handler)

    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            new_args, new_kwargs = traced_handler(*args, **kwargs)
            return func(*new_args, **new_kwargs)

        return inner

    return wrapper
166,682 | from __future__ import annotations
from functools import wraps
from warnings import warn
import pathway.internals.expression as expr
from pathway.internals.join_mode import JoinMode
from pathway.internals.trace import trace_user_frame
def groupby_handler(
    self,
    *args,
    id=None,
    sort_by=None,
    _filter_out_results_of_forgetting=False,
    instance=None,
    **kwargs,
):
    """Normalize Table.groupby arguments, rejecting unexpected keyword arguments."""
    if kwargs:
        # Stray kwargs usually mean the caller intended groupby(...).reduce(**kwargs).
        raise ValueError(
            "Table.groupby() received extra kwargs.\n"
            + "You probably want to use Table.groupby(...).reduce(**kwargs) to compute output columns."
        )
    forwarded = {
        "id": id,
        "sort_by": sort_by,
        "_filter_out_results_of_forgetting": _filter_out_results_of_forgetting,
        "instance": instance,
    }
    return (self, *args), forwarded
166,683 | from __future__ import annotations
from functools import wraps
from warnings import warn
import pathway.internals.expression as expr
from pathway.internals.join_mode import JoinMode
from pathway.internals.trace import trace_user_frame
def windowby_handler(
    self, time_expr, *args, window, behavior=None, instance=None, **kwargs
):
    """Normalize Table.windowby arguments, rejecting extra positional or keyword args."""
    if args:
        # windowby groups on exactly one column.
        raise ValueError(
            "Table.windowby() received extra args.\n"
            + "It handles grouping only by a single column."
        )
    if kwargs:
        # Stray kwargs usually mean the caller intended windowby(...).reduce(**kwargs).
        raise ValueError(
            "Table.windowby() received extra kwargs.\n"
            + "You probably want to use Table.windowby(...).reduce(**kwargs) to compute output columns."
        )
    forwarded = {"window": window, "behavior": behavior, "instance": instance}
    return (self, time_expr), forwarded
166,684 | from __future__ import annotations
from functools import wraps
from warnings import warn
import pathway.internals.expression as expr
from pathway.internals.join_mode import JoinMode
from pathway.internals.trace import trace_user_frame
def shard_deprecation(self, *args, shard=None, instance=None, **kwargs):
    """Translate the deprecated `shard` kwarg into `instance`, warning on use."""
    if shard is not None:
        if instance is not None:
            # Both spellings at once is ambiguous — refuse.
            raise ValueError(
                "The arguments `shard` and `instance` cannot be set at the same moment.\n"
                + "Please use `instance` only, as `shard` is deprecated."
            )
        instance = shard
        warn(
            "The `shard` argument is deprecated. Please use `instance` instead.",
            DeprecationWarning,
            stacklevel=6,
        )
    return (self, *args), {"instance": instance, **kwargs}
166,685 | from __future__ import annotations
from functools import wraps
from warnings import warn
import pathway.internals.expression as expr
from pathway.internals.join_mode import JoinMode
from pathway.internals.trace import trace_user_frame
def offset_deprecation(*args, offset=None, origin=None, **kwargs):
    """Translate the deprecated `offset` kwarg into `origin`, warning on use.

    Raises:
        ValueError: when both `offset` and `origin` are provided.
    """
    if offset is not None:
        if origin is None:
            origin = offset
            warn(
                "The `offset` argument is deprecated. Please use `origin` instead.",
                DeprecationWarning,
                stacklevel=7,
            )
        else:
            # Fixed copy-paste slip (cf. shard_deprecation): the message used to
            # mention `instance` and claim `origin` (not `offset`) was deprecated.
            raise ValueError(
                "The arguments `offset` and `origin` cannot be set at the same moment.\n"
                + "Please use `origin` only, as `offset` is deprecated."
            )
    return args, {"origin": origin, **kwargs}
166,686 | from __future__ import annotations
from functools import wraps
from warnings import warn
import pathway.internals.expression as expr
from pathway.internals.join_mode import JoinMode
from pathway.internals.trace import trace_user_frame
class JoinMode(Enum):
    """Enum used for controlling type of a join when passed to a generic join function.
    Consists of values: JoinMode.INNER, JoinMode.LEFT, JoinMode.RIGHT, JoinMode.OUTER
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...        age | owner | pet
    ...        10  | Alice | 1
    ...        9   | Bob   | 1
    ...        8   | Alice | 2
    ...     ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...        age | owner | pet | size
    ...        10  | Alice | 3   | M
    ...        9   | Bob   | 1   | L
    ...        8   | Tom   | 1   | XL
    ...     ''')
    >>> inner_join = t1.join(
    ...     t2, t1.pet == t2.pet, t1.owner == t2.owner, how=pw.JoinMode.INNER
    ... ).select(age=t1.age, owner_name=t2.owner, size=t2.size)
    >>> pw.debug.compute_and_print(inner_join, include_id = False)
    age | owner_name | size
    9   | Bob        | L
    >>> outer_join = t1.join(
    ...     t2, t1.pet == t2.pet, t1.owner == t2.owner, how=pw.JoinMode.OUTER
    ... ).select(age=t1.age, owner_name=t2.owner, size=t2.size)
    >>> pw.debug.compute_and_print(outer_join, include_id = False)
    age | owner_name | size
        | Alice      | M
        | Tom        | XL
    8   |            |
    9   | Bob        | L
    10  |            |
    """

    # NOTE(review): members use explicit integer values rather than auto() —
    # presumably the numbers matter to downstream consumers; confirm before
    # renumbering.
    INNER = 0
    """Use inner join."""
    LEFT = 1
    """Use left join."""
    RIGHT = 2
    """Use right join."""
    OUTER = 3
    """Use outer join."""
def join_kwargs_handler(*, allow_how: bool, allow_id: bool):
    """Build a keyword-argument validator for the join family of methods.

    Args:
        allow_how: whether the wrapped join accepts a ``how`` argument.
        allow_id: whether the wrapped join accepts an ``id`` argument.

    Returns:
        A handler ``(self, other, *on, **kwargs)`` that moves every
        recognized keyword into ``processed_kwargs``, validating each one,
        and raises ``ValueError`` for anything unsupported or malformed.
    """

    def handler(self, other, *on, **kwargs):
        processed_kwargs = {}
        if "how" in kwargs:
            # Keep the value even when rejected so the error path below is
            # reached with `how` already popped from kwargs.
            how = kwargs.pop("how")
            processed_kwargs["how"] = how
            if not allow_how:
                raise ValueError(
                    "Received `how` argument but was not expecting any.\n"
                    + "Consider using a generic join method that handles `how` "
                    + "to decide on a type of a join to be used."
                )
            elif isinstance(how, JoinMode):
                pass
            elif isinstance(how, str):
                # Strings like "inner" are a common mistake; point users at
                # the JoinMode enum explicitly.
                raise ValueError(
                    "Received `how` argument of join that is a string.\n"
                    + "You probably want to use one of "
                    + "JoinMode.INNER, JoinMode.LEFT, JoinMode.RIGHT or JoinMode.OUTER values."
                )
            else:
                raise ValueError(
                    "How argument of join should be one of "
                    + "JoinMode.INNER, JoinMode.LEFT, JoinMode.RIGHT or JoinMode.OUTER values."
                )
        if "id" in kwargs:
            id = kwargs.pop("id")
            processed_kwargs["id"] = id
            if not allow_id:
                raise ValueError(
                    "Received `id` argument but was not expecting any.\n"
                    + "Not every join type supports `id` argument."
                )
            elif id is None:
                pass
            elif isinstance(id, str):
                raise ValueError(
                    "Received `id` argument of join that is a string.\n"
                    + f"Did you mean <table>.{id}"
                    + f" instead of {repr(id)}?"
                )
            elif not isinstance(id, expr.ColumnReference):
                raise ValueError(
                    "The id argument of a join has to be a ColumnReference."
                )
        if "defaults" in kwargs:
            processed_kwargs["defaults"] = kwargs.pop("defaults")
        # The two instance arguments only make sense as a pair.
        if "left_instance" in kwargs and "right_instance" in kwargs:
            processed_kwargs["left_instance"] = kwargs.pop("left_instance")
            processed_kwargs["right_instance"] = kwargs.pop("right_instance")
        elif "left_instance" in kwargs or "right_instance" in kwargs:
            raise ValueError(
                "`left_instance` and `right_instance` arguments to join "
                + "should always be provided simultaneously"
            )
        if "direction" in kwargs:
            direction = processed_kwargs["direction"] = kwargs.pop("direction")
            # Imported lazily to avoid a circular dependency on the temporal
            # module at import time.
            from pathway.stdlib.temporal import Direction

            if isinstance(direction, str):
                raise ValueError(
                    "Received `direction` argument of join that is a string.\n"
                    + "You probably want to use one of "
                    + "Direction.BACKWARD, Direction.FORWARD or Direction.NEAREST values."
                )
            if not isinstance(direction, Direction):
                raise ValueError(
                    "direction argument of join should be of type asof_join.Direction."
                )
        if "behavior" in kwargs:
            behavior = processed_kwargs["behavior"] = kwargs.pop("behavior")
            from pathway.stdlib.temporal import CommonBehavior

            if not isinstance(behavior, CommonBehavior):
                raise ValueError(
                    "The behavior argument of join should be of type pathway.temporal.CommonBehavior."
                )
        if "interval" in kwargs:
            from pathway.stdlib.temporal import Interval

            interval = processed_kwargs["interval"] = kwargs.pop("interval")
            if not isinstance(interval, Interval):
                raise ValueError(
                    "The interval argument of a join should be of a type pathway.temporal.Interval."
                )
        # Anything left over is an unknown keyword.
        if kwargs:
            raise ValueError(
                "Join received extra kwargs.\n"
                + "You probably want to use TableLike.join(...).select(**kwargs) to compute output columns."
            )
        return (self, other, *on), processed_kwargs

    return handler
from __future__ import annotations
from functools import wraps
from warnings import warn
import pathway.internals.expression as expr
from pathway.internals.join_mode import JoinMode
from pathway.internals.trace import trace_user_frame
def reduce_args_handler(self, *args, **kwargs):
    """Validate positional arguments of reduce() before dispatching.

    Every positional argument must carry a column name (as reported by
    ``expr.smart_name``); strings get a dedicated, more helpful error.
    """
    for argument in args:
        if expr.smart_name(argument) is not None:
            continue
        if isinstance(argument, str):
            raise ValueError(
                f"Expected a ColumnReference, found a string. Did you mean this.{argument} instead of {repr(argument)}?"
            )
        raise ValueError(
            "In reduce() all positional arguments have to be a ColumnReference."
        )
    return (self, *args), kwargs
from __future__ import annotations
from functools import wraps
from warnings import warn
import pathway.internals.expression as expr
from pathway.internals.join_mode import JoinMode
from pathway.internals.trace import trace_user_frame
def select_args_handler(self, *args, **kwargs):
    """Validate positional arguments of select() before dispatching.

    Every positional argument must be an ``expr.ColumnReference``; strings
    get a dedicated, more helpful error.
    """
    for argument in args:
        if isinstance(argument, expr.ColumnReference):
            continue
        if isinstance(argument, str):
            raise ValueError(
                f"Expected a ColumnReference, found a string. Did you mean this.{argument} instead of {repr(argument)}?"
            )
        raise ValueError(
            "In select() all positional arguments have to be a ColumnReference."
        )
    return (self, *args), kwargs
from __future__ import annotations
import operator
from operator import *
from typing import Any
def _binary_arithmetic_wrap(op, symbol: str):
def wrapped(left: float, right: float) -> float:
return op(left, right)
wrapped.__name__ = op.__name__
wrapped._symbol = symbol # type: ignore[attr-defined]
return wrapped | null |
from __future__ import annotations
import operator
from operator import *
from typing import Any
def _binary_cmp_wrap(op, symbol):
def wrapped(left: Any, right: Any) -> bool:
return op(left, right)
wrapped.__name__ = op.__name__
wrapped._symbol = symbol # type: ignore[attr-defined]
return wrapped | null |
from __future__ import annotations
import operator
from operator import *
from typing import Any
def _binary_boolean_wrap(op, symbol):
def wrapped(left: bool, right: bool) -> bool:
return op(left, right)
wrapped.__name__ = op.__name__
wrapped._symbol = symbol # type: ignore[attr-defined]
return wrapped | null |
from __future__ import annotations
import operator
from operator import *
from typing import Any
import operator
from operator import *
def neg(expr: float) -> float:  # type: ignore # we replace the other signature
    """Arithmetic negation; shadows ``operator.neg`` with a float signature."""
    return -expr
from __future__ import annotations
import operator
from operator import *
from typing import Any
def inv(expr: bool) -> bool:  # type: ignore # we overwrite the behavior
    """Logical (not bitwise) negation, shadowing ``operator.inv``."""
    return operator.not_(expr)
from __future__ import annotations
import operator
from operator import *
from typing import Any
import operator
from operator import *
def itemgetter(*items, target_type=Any):  # type: ignore # we replace the other signature
    """Like ``operator.itemgetter`` but with an annotated return type.

    Args:
        items: indices/keys to extract from the wrapped callable's argument.
        target_type: stored as the wrapper's return annotation.

    Returns:
        A callable extracting ``items`` from its single argument.
    """
    # Build the getter once instead of re-creating it on every call of
    # `wrapped` (the original rebuilt operator.itemgetter per invocation).
    getter = operator.itemgetter(*items)

    def wrapped(x):
        return getter(x)

    wrapped.__annotations__["return"] = target_type
    return wrapped
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Callable
from typing import TYPE_CHECKING, Any, TypeVar
from pathway.internals import expression as expr
class TableCollector(IdentityTransform):
    # Expression visitor that records every table referenced by the column
    # references it visits; the traversal itself comes from IdentityTransform.

    # Tables seen so far, in visit order (may contain duplicates).
    table_list: list[Table]

    def __init__(self):
        self.table_list = []

    def eval_column_val(self, expression: expr.ColumnReference, **kwargs: Any):
        # Record the owning table, then delegate to the default traversal.
        self.table_list.append(expression.table)
        return super().eval_column_val(expression, **kwargs)
class Table(
    Joinable,
    OperatorInput,
    Generic[TSchema],
):
    """Collection of named columns over identical universes.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> isinstance(t1, pw.Table)
    True
    """

    # These stdlib-style re-exports only exist for type checkers; at runtime
    # the methods are attached to Table by the respective modules.
    if TYPE_CHECKING:
        from pathway.stdlib.ordered import diff  # type: ignore[misc]
        from pathway.stdlib.statistical import interpolate  # type: ignore[misc]
        from pathway.stdlib.temporal import (  # type: ignore[misc]
            asof_join,
            asof_join_left,
            asof_join_outer,
            asof_join_right,
            asof_now_join,
            asof_now_join_inner,
            asof_now_join_left,
            interval_join,
            interval_join_inner,
            interval_join_left,
            interval_join_outer,
            interval_join_right,
            window_join,
            window_join_inner,
            window_join_left,
            window_join_outer,
            window_join_right,
            windowby,
        )
        from pathway.stdlib.viz import (  # type: ignore[misc]
            _repr_mimebundle_,
            plot,
            show,
        )

    # Mapping from column name to its internal column object.
    _columns: dict[str, clmn.Column]
    # Declared schema of the table (derived from columns if not given).
    _schema: type[Schema]
    # Column holding the row ids (the `id` pseudocolumn).
    _id_column: clmn.IdColumn
    # Context used to evaluate per-row expressions on this table.
    _rowwise_context: clmn.RowwiseContext
    _source: SetOnceProperty[OutputHandle] = SetOnceProperty()
    """Lateinit by operator."""
def __init__(
    self,
    _columns: Mapping[str, clmn.Column],
    _context: clmn.Context,
    _schema: type[Schema] | None = None,
):
    # Derive the schema from the columns when the caller did not supply one.
    if _schema is None:
        _schema = schema_from_columns(_columns)
    super().__init__(_context)
    # Defensive copy of the provided mapping.
    self._columns = dict(_columns)
    self._schema = _schema
    self._id_column = _context.id_column
    # Lets `pw.this` resolve to this table inside expressions evaluated on it.
    self._substitution = {thisclass.this: self}
    self._rowwise_context = clmn.RowwiseContext(self._id_column)
# NOTE(review): upstream this is likely decorated as @property — the
# decorator is not visible in this extract; confirm before relying on it.
def id(self) -> expr.ColumnReference:
    """Get reference to pseudocolumn containing id's of a table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1.select(ids = t1.id)
    >>> t2.typehints()['ids']
    <class 'pathway.engine.Pointer'>
    >>> pw.debug.compute_and_print(t2.select(test=t2.id == t2.ids), include_id=False)
    test
    True
    True
    True
    True
    """
    # The id pseudocolumn is exposed like any other column reference.
    return expr.ColumnReference(_table=self, _column=self._id_column, _name="id")
def column_names(self):
    # Alias of keys(); returns the names of all (non-id) columns.
    return self.keys()

def keys(self):
    # View over the column names, in insertion order.
    return self._columns.keys()

def _get_column(self, name: str) -> clmn.Column:
    # Raises KeyError for unknown names; callers wrap this when needed.
    return self._columns[name]

def _ipython_key_completions_(self):
    # IPython hook: completes table["<TAB>"] with column names.
    return list(self.column_names())

def __dir__(self):
    # Expose columns as attributes for interactive discovery (dir(), TAB).
    return list(super().__dir__()) + list(self.column_names())

def _C(self) -> TSchema:
    # Typed accessor used to make `table.C.<col>` type-check against TSchema.
    return self.C  # type: ignore
# NOTE(review): upstream this is likely decorated as @property — the
# decorator is not visible in this extract; the doctest calls it without ().
def schema(self) -> type[Schema]:
    """Get schema of the table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t1.schema
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
    >>> t1.typehints()['age']
    <class 'int'>
    """
    return self._schema
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference:
    """Resolve *name* to a column reference, raising *exception_type* if absent.

    ``exception_type`` lets callers choose KeyError (item access) or
    AttributeError-style errors while sharing the lookup logic.
    """
    # Apply any pending renames of deprecated column names first.
    name = self._column_deprecation_rename(name)
    if name == "id":
        return self.id
    if name not in self.keys():
        raise exception_type(f"Table has no column with name {name}.")
    return expr.ColumnReference(
        _table=self, _column=self._get_column(name), _name=name
    )
# NOTE(review): the two stub signatures below are presumably @overload
# declarations upstream — the decorators are not visible in this extract.
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...

def __getitem__(self, args: list[str | expr.ColumnReference]) -> Table: ...

def __getitem__(
    self, args: str | expr.ColumnReference | list[str | expr.ColumnReference]
) -> expr.ColumnReference | Table:
    """Get columns by name.
    Warning:
    - Does not allow repetitions of columns.
    - Fails if tries to access nonexistent column.
    Args:
    names: a singe column name or list of columns names to be extracted from `self`.
    Returns:
    Table with specified columns, or column expression (if single argument given).
    Instead of column names, column references are valid here.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1[["age", "pet"]]
    >>> t2 = t1[["age", t1.pet]]
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | pet
    7   | dog
    8   | cat
    9   | dog
    10  | dog
    """
    if isinstance(args, expr.ColumnReference):
        # References must come from this table (or pw.this placeholders).
        if (args.table is not self) and not isinstance(
            args.table, thisclass.ThisMetaclass
        ):
            raise ValueError(
                "Table.__getitem__ argument has to be a ColumnReference to the same table or pw.this, or a string "
                + "(or a list of those)."
            )
        return self._get_colref_by_name(args.name, KeyError)
    elif isinstance(args, str):
        return self._get_colref_by_name(args, KeyError)
    else:
        # List case: project onto the named columns via select().
        return self.select(*[self[name] for name in args])
# NOTE(review): defined without `self` — presumably a @staticmethod upstream;
# the decorator is not visible in this extract.
def from_columns(
    *args: expr.ColumnReference, **kwargs: expr.ColumnReference
) -> Table:
    """Build a table from columns.
    All columns must have the same ids. Columns' names must be pairwise distinct.
    Args:
    args: List of columns.
    kwargs: Columns with their new names.
    Returns:
    Table: Created table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> t2 = pw.Table.empty(foo=float, bar=float).with_universe_of(t1)
    >>> t3 = pw.Table.from_columns(t1.pet, qux=t2.foo)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | qux
    """
    all_args = cast(
        dict[str, expr.ColumnReference], combine_args_kwargs(args, kwargs)
    )
    if not all_args:
        raise ValueError("Table.from_columns() cannot have empty arguments list")
    else:
        # Use the first column's table as the anchor; every other column must
        # live in a provably-equal universe.
        arg = next(iter(all_args.values()))
        table: Table = arg.table
        for arg in all_args.values():
            if not G.universe_solver.query_are_equal(
                table._universe, arg.table._universe
            ):
                raise ValueError(
                    "Universes of all arguments of Table.from_columns() have to be equal.\n"
                    + "Consider using Table.promise_universes_are_equal() to assert it.\n"
                    + "(However, untrue assertion might result in runtime errors.)"
                )
        return table.select(*args, **kwargs)
def concat_reindex(self, *tables: Table) -> Table:
    """Concatenate contents of several tables.
    This is similar to PySpark union. All tables must have the same schema. Each row is reindexed.
    Args:
    tables: List of tables to concatenate. All tables must have the same schema.
    Returns:
    Table: The concatenated table. It will have new, synthetic ids.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | pet
    ... 1 | Dog
    ... 7 | Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | pet
    ... 1 | Manul
    ... 8 | Octopus
    ... ''')
    >>> t3 = t1.concat_reindex(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet
    Cat
    Dog
    Manul
    Octopus
    """
    # Reindex each input with its position mixed into the key so the
    # resulting id sets cannot collide, then concat over disjoint universes.
    reindexed = [
        table.with_id_from(table.id, i) for i, table in enumerate([self, *tables])
    ]
    universes.promise_are_pairwise_disjoint(*reindexed)
    return Table.concat(*reindexed)
# NOTE(review): defined without `self` — presumably a @staticmethod upstream;
# the decorator is not visible in this extract.
def empty(**kwargs: dt.DType) -> Table:
    """Creates an empty table with a schema specified by kwargs.
    Args:
    kwargs: Dict whose keys are column names and values are column types.
    Returns:
    Table: Created empty table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> pw.debug.compute_and_print(t1, include_id=False)
    age | pet
    """
    # Imported here to avoid a circular import at module load time.
    from pathway.internals import table_io

    ret = table_io.empty_from_schema(schema_from_types(None, **kwargs))
    # Let the universe solver know this universe is provably empty.
    G.universe_solver.register_as_empty(ret._universe)
    return ret
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Build a new table with columns specified by kwargs.
    Output columns' names are keys(kwargs). values(kwargs) can be raw values, boxed
    values, columns. Assigning to id reindexes the table.
    Args:
    args: Column references.
    kwargs: Column expressions with their new assigned names.
    Returns:
    Table: Created table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... pet
    ... Dog
    ... Cat
    ... ''')
    >>> t2 = t1.select(animal=t1.pet, desc="fluffy")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    animal | desc
    Cat    | fluffy
    Dog    | fluffy
    """
    # Merge positional references and keyword expressions into one mapping,
    # then validate and evaluate each expression against this table.
    all_args = combine_args_kwargs(args, kwargs)
    evaluated = []
    for name, expression in all_args.items():
        self._validate_expression(expression)
        evaluated.append((name, self._eval(expression)))
    return self._with_same_universe(*evaluated)
def __add__(self, other: Table) -> Table:
    """Build a union of `self` with `other`.
    Semantics: Returns a table C, such that
    - C.columns == self.columns + other.columns
    - C.id == self.id == other.id
    Args:
    other: The other table. `self.id` must be equal `other.id` and
    `self.columns` and `other.columns` must be disjoint (or overlapping names
    are THE SAME COLUMN)
    Returns:
    Table: Created table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    pet
    ... 1  Dog
    ... 7  Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    age
    ... 1   10
    ... 7    3
    ... ''')
    >>> t3 = t1 + t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | age
    Cat | 3
    Dog | 10
    """
    # Column-wise union is only sound when both tables share a universe.
    if not G.universe_solver.query_are_equal(self._universe, other._universe):
        raise ValueError(
            "Universes of all arguments of Table.__add__() have to be equal.\n"
            + "Consider using Table.promise_universes_are_equal() to assert it.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    # Iterating a table yields its column references, so this selects
    # all columns of self followed by all columns of other.
    return self.select(*self, *other)
# NOTE(review): upstream this is likely decorated as @property — the
# decorator is not visible in this extract; the doctest calls it without ().
def slice(self) -> TableSlice:
    """Creates a collection of references to self columns.
    Supports basic column manipulation methods.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t1.slice.without("age")
    TableSlice({'owner': <table1>.owner, 'pet': <table1>.pet})
    """
    # dict(**self) materializes name -> ColumnReference for all columns.
    return TableSlice(dict(**self), self)
def filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    """Filter a table according to `filter_expression` condition.
    Args:
    filter_expression: `ColumnExpression` that specifies the filtering condition.
    Returns:
    Table: Result has the same schema as `self` and its ids are subset of `self.id`.
    Example:
    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ...     1         3
    ...     7         0
    ... ''')
    >>> filtered = vertices.filter(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(filtered, include_id=False)
    label | outdegree
    7     | 0
    """
    # The mask must be strictly boolean; fail early with a clear error.
    filter_type = self.eval_type(filter_expression)
    if filter_type != dt.BOOL:
        raise TypeError(
            f"Filter argument of Table.filter() has to be bool, found {filter_type}."
        )
    ret = self._filter(filter_expression)
    # If the condition is an is-not-None test on one of our own columns,
    # surviving rows cannot hold None there — narrow that column's type.
    if (
        filter_col := expr.get_column_filtered_by_is_none(filter_expression)
    ) is not None and filter_col.table == self:
        name = filter_col.name
        dtype = self._columns[name].dtype
        ret = ret.update_types(**{name: dt.unoptionalize(dtype)})
    return ret
def split(
    self, split_expression: expr.ColumnExpression
) -> tuple[Table[TSchema], Table[TSchema]]:
    """Split a table according to `split_expression` condition.
    Args:
    split_expression: `ColumnExpression` that specifies the split condition.
    Returns:
    positive_table, negative_table: tuple of tables,
    with the same schemas as `self` and with ids that are subsets of `self.id`,
    and provably disjoint.
    Example:
    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ...     1         3
    ...     7         0
    ... ''')
    >>> positive, negative = vertices.split(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(positive, include_id=False)
    label | outdegree
    7     | 0
    >>> pw.debug.compute_and_print(negative, include_id=False)
    label | outdegree
    1     | 3
    """
    # Two complementary filters; then teach the universe solver that the
    # halves are disjoint and together cover self.
    positive = self.filter(split_expression)
    negative = self.filter(~split_expression)
    universes.promise_are_pairwise_disjoint(positive, negative)
    universes.promise_are_equal(
        self, Table.concat(positive, negative)
    )  # TODO: add API method for this
    return positive, negative
def _filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    """Core of filter(): applies the mask without type checks or narrowing."""
    self._validate_expression(filter_expression)
    mask_column = self._eval(filter_expression)
    assert self._universe == mask_column.universe
    return self._table_with_context(
        clmn.FilterContext(mask_column, self._id_column)
    )
def _gradual_broadcast(
    self,
    threshold_table,
    lower_column,
    value_column,
    upper_column,
) -> Table:
    # Returns self extended with the broadcast `apx_value` column
    # (Table.__add__ requires the universes to match).
    return self + self.__gradual_broadcast(
        threshold_table, lower_column, value_column, upper_column
    )
def __gradual_broadcast(
    self,
    threshold_table,
    lower_column,
    value_column,
    upper_column,
):
    # Builds a one-column table ("apx_value") produced by the
    # GradualBroadcastContext; the three columns are evaluated on
    # threshold_table, not on self.
    context = clmn.GradualBroadcastContext(
        self._id_column,
        threshold_table._eval(lower_column),
        threshold_table._eval(value_column),
        threshold_table._eval(upper_column),
    )
    return Table(_columns={"apx_value": context.apx_value_column}, _context=context)
def _forget(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
    mark_forgetting_records: bool,
) -> Table:
    # Wraps the table in a ForgetContext; both expressions are evaluated
    # on self. Semantics of forgetting live in the context implementation.
    context = clmn.ForgetContext(
        self._id_column,
        self._eval(threshold_column),
        self._eval(time_column),
        mark_forgetting_records,
    )
    return self._table_with_context(context)
def _forget_immediately(
    self,
) -> Table:
    """Wrap the table in a ForgetImmediatelyContext over its id column."""
    return self._table_with_context(
        clmn.ForgetImmediatelyContext(self._id_column)
    )
def _filter_out_results_of_forgetting(
    self,
) -> Table:
    """Drop the entries produced by forgetting from this table.

    The output universe is a superset of the input universe: because
    forgetting entries are filtered out, the set of keys with a +1 diff at
    any point in time can be bigger than in the input table.
    """
    return self._table_with_context(
        clmn.FilterOutForgettingContext(self._id_column)
    )
def _freeze(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    # Wraps the table in a FreezeContext; both expressions are evaluated
    # on self. Semantics of freezing live in the context implementation.
    context = clmn.FreezeContext(
        self._id_column,
        self._eval(threshold_column),
        self._eval(time_column),
    )
    return self._table_with_context(context)
def _buffer(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    # Wraps the table in a BufferContext; both expressions are evaluated
    # on self. Semantics of buffering live in the context implementation.
    context = clmn.BufferContext(
        self._id_column,
        self._eval(threshold_column),
        self._eval(time_column),
    )
    return self._table_with_context(context)
def difference(self, other: Table) -> Table[TSchema]:
    r"""Restrict self universe to keys not appearing in the other table.
    Args:
    other: table with ids to remove from self.
    Returns:
    Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age  | owner  | pet
    ... 1 | 10   | Alice  | 1
    ... 2 | 9    | Bob    | 1
    ... 3 | 8    | Alice  | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.difference(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    10  | Alice | 1
    """
    # Set difference on ids only; column values are untouched.
    context = clmn.DifferenceContext(
        left=self._id_column,
        right=other._id_column,
    )
    return self._table_with_context(context)
def intersect(self, *tables: Table) -> Table[TSchema]:
    """Restrict self universe to keys appearing in all of the tables.
    Args:
    tables: tables keys of which are used to restrict universe.
    Returns:
    Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age  | owner  | pet
    ... 1 | 10   | Alice  | 1
    ... 2 | 9    | Bob    | 1
    ... 3 | 8    | Alice  | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.intersect(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8   | Alice | 2
    9   | Bob   | 1
    """
    intersecting_universes = (
        self._universe,
        *tuple(table._universe for table in tables),
    )
    universe = G.universe_solver.get_intersection(*intersecting_universes)
    if universe in intersecting_universes:
        # Fast path: the intersection is one of the inputs' universes, so a
        # plain restriction suffices.
        context: clmn.Context = clmn.RestrictContext(self._id_column, universe)
    else:
        # General path: compute the intersection of all id columns.
        intersecting_ids = (
            self._id_column,
            *tuple(table._id_column for table in tables),
        )
        context = clmn.IntersectContext(
            intersecting_ids=intersecting_ids,
        )
    return self._table_with_context(context)
def restrict(self, other: TableLike) -> Table[TSchema]:
    """Restrict self universe to keys appearing in other.
    Args:
    other: table which universe is used to restrict universe of self.
    Returns:
    Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...   | age  | owner  | pet
    ... 1 | 10   | Alice  | 1
    ... 2 | 9    | Bob    | 1
    ... 3 | 8    | Alice  | 2
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... '''
    ... )
    >>> t2.promise_universe_is_subset_of(t1)
    <pathway.Table schema={'cost': <class 'int'>}>
    >>> t3 = t1.restrict(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8   | Alice | 2
    9   | Bob   | 1
    """
    # Unlike intersect(), restrict() demands a *proven* subset relation.
    if not G.universe_solver.query_is_subset(other._universe, self._universe):
        raise ValueError(
            "Table.restrict(): other universe has to be a subset of self universe."
            + "Consider using Table.promise_universe_is_subset_of() to assert it."
        )
    context = clmn.RestrictContext(self._id_column, other._universe)
    # Re-wrap every column in the restricting context.
    columns = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
    }
    return Table(
        _columns=columns,
        _context=context,
    )
def copy(self) -> Table[TSchema]:
    """Returns a copy of a table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1.copy()
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | owner | pet
    7   | Bob   | dog
    8   | Alice | cat
    9   | Bob   | dog
    10  | Alice | dog
    >>> t1 is t2
    False
    """
    # Delegates to _copy_as so subclasses copy into their own type.
    return self._copy_as(type(self))
def _copy_as(self, table_type: type[TTable], /, **kwargs) -> TTable:
    """Rebuild this table as *table_type* over the same rowwise context."""
    ctx = self._rowwise_context
    wrapped = {
        name: self._wrap_column_in_context(ctx, column, name)
        for name, column in self._columns.items()
    }
    return table_type(_columns=wrapped, _context=ctx, **kwargs)
def groupby(
    self,
    *args: expr.ColumnReference,
    id: expr.ColumnReference | None = None,
    sort_by: expr.ColumnReference | None = None,
    _filter_out_results_of_forgetting: bool = False,
    instance: expr.ColumnReference | None = None,
) -> groupbys.GroupedTable:
    """Groups table by columns from args.
    Note:
    Usually followed by `.reduce()` that aggregates the result and returns a table.
    Args:
    args: columns to group by.
    id: if provided, is the column used to set id's of the rows of the result
    sort_by: if provided, column values are used as sorting keys for particular reducers
    instance: optional argument describing partitioning of the data into separate instances
    Returns:
    GroupedTable: Groupby object.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1.groupby(t1.pet, t1.owner).reduce(t1.owner, t1.pet, ageagg=pw.reducers.sum(t1.age))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | pet | ageagg
    Alice | cat | 8
    Alice | dog | 10
    Bob   | dog | 16
    """
    # `instance` is just an extra grouping column.
    if instance is not None:
        args = (*args, instance)
    if id is not None:
        # `id` must either be the only grouping column or match the single
        # positional one.
        if len(args) == 0:
            args = (id,)
        elif len(args) > 1:
            raise ValueError(
                "Table.groupby() cannot have id argument when grouping by multiple columns."
            )
        elif args[0]._column != id._column:
            raise ValueError(
                "Table.groupby() received id argument and is grouped by a single column,"
                + " but the arguments are not equal.\n"
                + "Consider using <table>.groupby(id=...), skipping the positional argument."
            )
    for arg in args:
        if not isinstance(arg, expr.ColumnReference):
            if isinstance(arg, str):
                raise ValueError(
                    f"Expected a ColumnReference, found a string. Did you mean <table>.{arg}"
                    + f" instead of {repr(arg)}?"
                )
            else:
                raise ValueError(
                    "All Table.groupby() arguments have to be a ColumnReference."
                )
    return groupbys.GroupedTable.create(
        table=self,
        grouping_columns=args,
        set_id=id is not None,
        sort_by=sort_by,
        _filter_out_results_of_forgetting=_filter_out_results_of_forgetting,
    )
def reduce(
    self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
) -> Table:
    """Reduce a table to a single row.
    Equivalent to `self.groupby().reduce(*args, **kwargs)`.
    Args:
    args: reducer to reduce the table with
    kwargs: reducer to reduce the table with. Its key is the new name of a column.
    Returns:
    Table: Reduced table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10  | Alice | dog
    ... 9   | Bob   | dog
    ... 8   | Alice | cat
    ... 7   | Bob   | dog
    ... ''')
    >>> t2 = t1.reduce(ageagg=pw.reducers.argmin(t1.age))
    >>> pw.debug.compute_and_print(t2, include_id=False) # doctest: +ELLIPSIS
    ageagg
    ^...
    >>> t3 = t2.select(t1.ix(t2.ageagg).age, t1.ix(t2.ageagg).pet)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | pet
    7   | dog
    """
    # Empty groupby puts every row into one group.
    return self.groupby().reduce(*args, **kwargs)
def deduplicate(
    self,
    *,
    value: expr.ColumnExpression,
    instance: expr.ColumnExpression | None = None,
    acceptor: Callable[[T, T], bool],
    persistent_id: str | None = None,
) -> Table:
    """Deduplicates rows in `self` on `value` column using acceptor function.
    It keeps rows which where accepted by the acceptor function.
    Acceptor operates on two arguments - current value and the previously accepted value.
    Args:
    value: column expression used for deduplication.
    instance: Grouping column. For rows with different
    values in this column, deduplication will be performed separately.
    Defaults to None.
    acceptor: callback telling whether two values are different.
    persistent_id: (unstable) An identifier, under which the state of the table
    will be persisted or ``None``, if there is no need to persist the state of this table.
    When a program restarts, it restores the state for all input tables according to what
    was saved for their ``persistent_id``. This way it's possible to configure the start of
    computations from the moment they were terminated last time.
    Returns:
    Table: the result of deduplication.
    Example:
    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ...     val | __time__
    ...      1  |     2
    ...      2  |     4
    ...      3  |     6
    ...      4  |     8
    ... '''
    ... )
    >>>
    >>> def acceptor(new_value, old_value) -> bool:
    ...     return new_value >= old_value + 2
    ...
    >>>
    >>> result = table.deduplicate(value=pw.this.val, acceptor=acceptor)
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    val | __time__ | __diff__
    1   | 2        | 1
    1   | 6        | -1
    3   | 6        | 1
    >>>
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ...     val | instance | __time__
    ...      1  |     1    |     2
    ...      2  |     1    |     4
    ...      3  |     2    |     6
    ...      4  |     1    |     8
    ...      4  |     2    |     8
    ...      5  |     1    |    10
    ... '''
    ... )
    >>>
    >>> def acceptor(new_value, old_value) -> bool:
    ...     return new_value >= old_value + 2
    ...
    >>>
    >>> result = table.deduplicate(
    ...     value=pw.this.val, instance=pw.this.instance, acceptor=acceptor
    ... )
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    val | instance | __time__ | __diff__
    1   | 1        | 2        | 1
    3   | 2        | 6        | 1
    1   | 1        | 8        | -1
    4   | 1        | 8        | 1
    """
    # With no instance column, use a constant so all rows share one group.
    if instance is None:
        instance = expr.ColumnConstExpression(None)
    self._validate_expression(value)
    self._validate_expression(instance)
    value_col = self._eval(value)
    instance_col = self._eval(instance)
    context = clmn.DeduplicateContext(
        value_col,
        (instance_col,),
        acceptor,
        self._id_column,
        persistent_id,
    )
    return self._table_with_context(context)
def ix(
    self, expression: expr.ColumnExpression, *, optional: bool = False, context=None
) -> Table:
    """Reindexes the table using expression values as keys. Uses keys from context, or tries to infer
    proper context from the expression.
    If optional is True, then None in expression values result in None values in the result columns.
    Missing values in table keys result in RuntimeError.
    Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
    (latter results in returning a delayed operation, and should be only used when using `ix` inside
    join().select() or groupby().reduce() sequence).
    Returns:
    Reindexed table with the same set of columns.
    Example:
    >>> import pathway as pw
    >>> t_animals = pw.debug.table_from_markdown('''
    ...   | epithet    | genus
    ... 1 | upupa      | epops
    ... 2 | acherontia | atropos
    ... 3 | bubo       | scandiacus
    ... 4 | dynastes   | hercules
    ... ''')
    >>> t_birds = pw.debug.table_from_markdown('''
    ...   | desc
    ... 2 | hoopoe
    ... 4 | owl
    ... ''')
    >>> ret = t_birds.select(t_birds.desc, latin=t_animals.ix(t_birds.id).genus)
    >>> pw.debug.compute_and_print(ret, include_id=False)
    desc   | latin
    hoopoe | atropos
    owl    | hercules
    """
    # Context inference: use the single table the expression refers to, or
    # fall back to pw.this (delayed evaluation) when there is none.
    if context is None:
        all_tables = collect_tables(expression)
        if len(all_tables) == 0:
            context = thisclass.this
        elif all(tab == all_tables[0] for tab in all_tables):
            context = all_tables[0]
    if context is None:
        # Mixed tables: require them all to share a universe and anchor on
        # the first one.
        for tab in all_tables:
            if not isinstance(tab, Table):
                raise ValueError("Table expected here.")
        if len(all_tables) == 0:
            raise ValueError("Const value provided.")
        context = all_tables[0]
        for tab in all_tables:
            assert context._universe.is_equal_to(tab._universe)
    if isinstance(context, groupbys.GroupedJoinable):
        context = thisclass.this
    if isinstance(context, thisclass.ThisMetaclass):
        # Delayed case: re-invoke ix later, once the real table is known.
        return context._delayed_op(
            lambda table, expression: self.ix(
                expression=expression, optional=optional, context=table
            ),
            expression=expression,
            qualname=f"{self}.ix(...)",
            name="ix",
        )
    restrict_universe = RestrictUniverseDesugaring(context)
    expression = restrict_universe.eval_expression(expression)
    key_col = context.select(tmp=expression).tmp
    key_dtype = self.eval_type(key_col)
    # Keys must be pointers (optionally Optional[Pointer] when optional=True).
    if (
        optional and not dt.dtype_issubclass(key_dtype, dt.Optional(dt.POINTER))
    ) or (not optional and not isinstance(key_dtype, dt.Pointer)):
        raise TypeError(
            f"Pathway supports indexing with Pointer type only. The type used was {key_dtype}."
        )
    if optional and isinstance(key_dtype, dt.Optional):
        # None keys yield None values, so every result column becomes Optional.
        self_ = self.update_types(
            **{name: dt.Optional(self.typehints()[name]) for name in self.keys()}
        )
    else:
        self_ = self
    return self_._ix(key_col, optional)
    def _ix(
        self,
        key_expression: expr.ColumnReference,
        optional: bool,
    ) -> Table:
        """Internal helper for ``ix``: reindex the table by pointer values.

        ``key_expression`` must already be a column of pointers; ``optional``
        propagates ``None`` keys into ``None`` result rows.
        """
        key_column = key_expression._column
        context = clmn.IxContext(key_column, self._id_column, optional)
        return self._table_with_context(context)
    def __lshift__(self, other: Table) -> Table:
        """Alias to update_cells method.

        Updates cells of `self`, breaking ties in favor of the values in `other`.

        Semantics:
            - result.columns == self.columns
            - result.id == self.id
            - conflicts are resolved preferring other's values

        Requires:
            - other.columns ⊆ self.columns
            - other.id ⊆ self.id

        Args:
            other: the other table.

        Returns:
            Table: `self` updated with cells from `other`.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...    | age | owner | pet
        ... 1  | 10  | Alice | 1
        ... 2  | 9   | Bob   | 1
        ... 3  | 8   | Alice | 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...    | age | owner | pet
        ... 1  | 10  | Alice | 30
        ... ''')
        >>> pw.universes.promise_is_subset_of(t2, t1)
        >>> t3 = t1 << t2
        >>> pw.debug.compute_and_print(t3, include_id=False)
        age | owner | pet
        8   | Alice | 2
        9   | Bob   | 1
        10  | Alice | 30
        """
        # _stacklevel=2 so that warnings emitted by update_cells point at the
        # `<<` call site rather than at this alias.
        return self.update_cells(other, _stacklevel=2)
    def concat(self, *others: Table[TSchema]) -> Table[TSchema]:
        """Concats `self` with every `other` ∊ `others`.

        Semantics:
            - result.columns == self.columns == other.columns
            - result.id == self.id ∪ other.id

        if self.id and other.id collide, throws an exception.

        Requires:
            - other.columns == self.columns
            - self.id disjoint with other.id

        Args:
            other: the other table.

        Returns:
            Table: The concatenated table. Id's of rows from original tables are preserved.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...    | age | owner | pet
        ... 1  | 10  | Alice | 1
        ... 2  | 9   | Bob   | 1
        ... 3  | 8   | Alice | 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...     | age | owner | pet
        ... 11  | 11  | Alice | 30
        ... 12  | 12  | Tom   | 40
        ... ''')
        >>> pw.universes.promise_are_pairwise_disjoint(t1, t2)
        >>> t3 = t1.concat(t2)
        >>> pw.debug.compute_and_print(t3, include_id=False)
        age | owner | pet
        8   | Alice | 2
        9   | Bob   | 1
        10  | Alice | 1
        11  | Alice | 30
        12  | Tom   | 40
        """
        for other in others:
            if other.keys() != self.keys():
                raise ValueError(
                    "columns do not match in the argument of Table.concat()"
                )
        # For every column, compute the least common ancestor dtype across all
        # concatenated tables so the result schema accommodates each input.
        schema = {
            key: functools.reduce(
                dt.types_lca,
                [other.schema._dtypes()[key] for other in others],
                self.schema._dtypes()[key],
            )
            for key in self.keys()
        }
        return Table._concat(
            self.cast_to_types(**schema),
            *[other.cast_to_types(**schema) for other in others],
        )
    def _concat(self, *others: Table[TSchema]) -> Table[TSchema]:
        """Internal concat: assumes schemas already unified via ``cast_to_types``.

        Verifies key-set disjointness with the universe solver before building
        the concat context.
        """
        union_ids = (self._id_column, *(other._id_column for other in others))
        if not G.universe_solver.query_are_disjoint(*(c.universe for c in union_ids)):
            raise ValueError(
                "Universes of the arguments of Table.concat() have to be disjoint.\n"
                + "Consider using Table.promise_universes_are_disjoint() to assert it.\n"
                + "(However, untrue assertion might result in runtime errors.)"
            )
        context = clmn.ConcatUnsafeContext(
            union_ids=union_ids,
            updates=tuple(
                {col_name: other._columns[col_name] for col_name in self.keys()}
                for other in others
            ),
        )
        return self._table_with_context(context)
    def update_cells(self, other: Table, _stacklevel: int = 1) -> Table:
        """Updates cells of `self`, breaking ties in favor of the values in `other`.

        Semantics:
            - result.columns == self.columns
            - result.id == self.id
            - conflicts are resolved preferring other's values

        Requires:
            - other.columns ⊆ self.columns
            - other.id ⊆ self.id

        Args:
            other: the other table.

        Returns:
            Table: `self` updated with cells from `other`.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...    | age | owner | pet
        ... 1  | 10  | Alice | 1
        ... 2  | 9   | Bob   | 1
        ... 3  | 8   | Alice | 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...     age | owner | pet
        ... 1 | 10  | Alice | 30
        ... ''')
        >>> pw.universes.promise_is_subset_of(t2, t1)
        >>> t3 = t1.update_cells(t2)
        >>> pw.debug.compute_and_print(t3, include_id=False)
        age | owner | pet
        8   | Alice | 2
        9   | Bob   | 1
        10  | Alice | 30
        """
        if names := (set(other.keys()) - set(self.keys())):
            raise ValueError(
                f"Columns of the argument in Table.update_cells() not present in the updated table: {list(names)}."
            )
        if self._universe == other._universe:
            # Same key set: a plain column overwrite is equivalent and cheaper.
            warnings.warn(
                "Key sets of self and other in update_cells are the same."
                + " Using with_columns instead of update_cells.",
                stacklevel=_stacklevel + 4,
            )
            return self.with_columns(*(other[name] for name in other))
        # Widen each updated column's dtype to cover both tables.
        schema = {
            key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
            for key in other.keys()
        }
        return Table._update_cells(
            self.cast_to_types(**schema), other.cast_to_types(**schema)
        )
    def _update_cells(self, other: Table) -> Table:
        """Internal update_cells: assumes dtypes already unified.

        Requires ``other``'s key set to be a (promised) subset of ``self``'s.
        """
        if not other._universe.is_subset_of(self._universe):
            raise ValueError(
                "Universe of the argument of Table.update_cells() needs to be "
                + "a subset of the universe of the updated table.\n"
                + "Consider using Table.promise_is_subset_of() to assert this.\n"
                + "(However, untrue assertion might result in runtime errors.)"
            )
        context = clmn.UpdateCellsContext(
            left=self._id_column,
            right=other._id_column,
            updates={name: other._columns[name] for name in other.keys()},
        )
        return self._table_with_context(context)
    def update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
        """Updates rows of `self`, breaking ties in favor for the rows in `other`.

        Semantics:
            - result.columns == self.columns == other.columns
            - result.id == self.id ∪ other.id

        Requires:
            - other.columns == self.columns

        Args:
            other: the other table.

        Returns:
            Table: `self` updated with rows from `other`.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...    | age | owner | pet
        ... 1  | 10  | Alice | 1
        ... 2  | 9   | Bob   | 1
        ... 3  | 8   | Alice | 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...     | age | owner | pet
        ... 1   | 10  | Alice | 30
        ... 12  | 12  | Tom   | 40
        ... ''')
        >>> t3 = t1.update_rows(t2)
        >>> pw.debug.compute_and_print(t3, include_id=False)
        age | owner | pet
        8   | Alice | 2
        9   | Bob   | 1
        10  | Alice | 30
        12  | Tom   | 40
        """
        if other.keys() != self.keys():
            raise ValueError(
                "Columns do not match between argument of Table.update_rows() and the updated table."
            )
        if self._universe.is_subset_of(other._universe):
            # Every row of self would be overwritten, so the result is just other.
            warnings.warn(
                "Universe of self is a subset of universe of other in update_rows. Returning other.",
                stacklevel=5,
            )
            return other
        schema = {
            key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
            for key in self.keys()
        }
        union_universes = (self._universe, other._universe)
        universe = G.universe_solver.get_union(*union_universes)
        if universe == self._universe:
            # other adds no new keys: a cell update suffices.
            return Table._update_cells(
                self.cast_to_types(**schema), other.cast_to_types(**schema)
            )
        else:
            return Table._update_rows(
                self.cast_to_types(**schema), other.cast_to_types(**schema)
            )
    def _update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
        """Internal update_rows: assumes matching columns and unified dtypes."""
        union_ids = (self._id_column, other._id_column)
        context = clmn.UpdateRowsContext(
            updates={col_name: other._columns[col_name] for col_name in self.keys()},
            union_ids=union_ids,
        )
        return self._table_with_context(context)
def with_columns(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
"""Updates columns of `self`, according to args and kwargs.
See `table.select` specification for evaluation of args and kwargs.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | age | owner | pet
... 1 | 10 | Alice | 1
... 2 | 9 | Bob | 1
... 3 | 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... | owner | pet | size
... 1 | Tom | 1 | 10
... 2 | Bob | 1 | 9
... 3 | Tom | 2 | 8
... ''')
>>> t3 = t1.with_columns(*t2)
>>> pw.debug.compute_and_print(t3, include_id=False)
age | owner | pet | size
8 | Tom | 2 | 8
9 | Bob | 1 | 9
10 | Tom | 1 | 10
"""
other = self.select(*args, **kwargs)
columns = dict(self)
columns.update(other)
return self.select(**columns)
    def with_id(self, new_index: expr.ColumnReference) -> Table:
        """Set new ids based on another column containing id-typed values.

        To generate ids based on arbitrary valued columns, use `with_id_from`.

        Values assigned must be row-wise unique.

        Args:
            new_id: column to be used as the new index.

        Returns:
            Table with updated ids.

        Example:

        >>> import pytest; pytest.xfail("with_id is hard to test")
        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...    | age | owner | pet
        ... 1  | 10  | Alice | 1
        ... 2  | 9   | Bob   | 1
        ... 3  | 8   | Alice | 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...    | new_id
        ... 1  | 2
        ... 2  | 3
        ... 3  | 4
        ... ''')
        >>> t3 = t1.promise_universe_is_subset_of(t2).with_id(t2.new_id)
        >>> pw.debug.compute_and_print(t3)
        age owner pet
        ^2 10 Alice 1
        ^3 9 Bob 1
        ^4 8 Alice 2
        """
        return self._with_new_index(new_index)
    def with_id_from(
        self,
        *args: expr.ColumnExpression | Value,
        instance: expr.ColumnReference | None = None,
    ) -> Table:
        """Compute new ids based on values in columns.

        Ids computed from `columns` must be row-wise unique.

        Args:
            columns: columns to be used as primary keys.

        Returns:
            Table: `self` updated with recomputed ids.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...    | age | owner | pet
        ... 1  | 10  | Alice | 1
        ... 2  | 9   | Bob   | 1
        ... 3  | 8   | Alice | 2
        ... ''')
        >>> t2 = t1 + t1.select(old_id=t1.id)
        >>> t3 = t2.with_id_from(t2.age)
        >>> pw.debug.compute_and_print(t3) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
                    | age | owner | pet | old_id
        ^...        | 8   | Alice | 2   | ^...
        ^...        | 9   | Bob   | 1   | ^...
        ^...        | 10  | Alice | 1   | ^...
        >>> t4 = t3.select(t3.age, t3.owner, t3.pet, same_as_old=(t3.id == t3.old_id),
        ...     same_as_new=(t3.id == t3.pointer_from(t3.age)))
        >>> pw.debug.compute_and_print(t4) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
                    | age | owner | pet | same_as_old | same_as_new
        ^...        | 8   | Alice | 2   | False       | True
        ^...        | 9   | Bob   | 1   | False       | True
        ^...        | 10  | Alice | 1   | False       | True
        """
        # new_index should be a column, so a little workaround
        new_index = self.select(
            ref_column=self.pointer_from(*args, instance=instance)
        ).ref_column
        return self._with_new_index(
            new_index=new_index,
        )
    def _with_new_index(
        self,
        new_index: expr.ColumnExpression,
    ) -> Table:
        """Reindex the table using ``new_index``, which must evaluate to pointers.

        Raises:
            TypeError: when ``new_index`` is not of Pointer type.
        """
        self._validate_expression(new_index)
        index_type = self.eval_type(new_index)
        if not isinstance(index_type, dt.Pointer):
            raise TypeError(
                f"Pathway supports reindexing Tables with Pointer type only. The type used was {index_type}."
            )
        reindex_column = self._eval(new_index)
        assert self._universe == reindex_column.universe
        context = clmn.ReindexContext(reindex_column)
        return self._table_with_context(context)
def rename_columns(self, **kwargs: str | expr.ColumnReference) -> Table:
"""Rename columns according to kwargs.
Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.
Args:
kwargs: mapping from old column names to new names.
Returns:
Table: `self` with columns renamed.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = t1.rename_columns(years_old=t1.age, animal=t1.pet)
>>> pw.debug.compute_and_print(t2, include_id=False)
owner | years_old | animal
Alice | 8 | 2
Alice | 10 | 1
Bob | 9 | 1
"""
mapping: dict[str, str] = {}
for new_name, old_name_col in kwargs.items():
if isinstance(old_name_col, expr.ColumnReference):
old_name = old_name_col.name
else:
old_name = old_name_col
if old_name not in self._columns:
raise ValueError(f"Column {old_name} does not exist in a given table.")
mapping[new_name] = old_name
renamed_columns = self._columns.copy()
for new_name, old_name in mapping.items():
renamed_columns.pop(old_name)
for new_name, old_name in mapping.items():
renamed_columns[new_name] = self._columns[old_name]
columns_wrapped = {
name: self._wrap_column_in_context(
self._rowwise_context,
column,
mapping[name] if name in mapping else name,
)
for name, column in renamed_columns.items()
}
return self._with_same_universe(*columns_wrapped.items())
    def rename_by_dict(
        self, names_mapping: dict[str | expr.ColumnReference, str]
    ) -> Table:
        """Rename columns according to a dictionary.

        Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.

        Args:
            names_mapping: mapping from old column names to new names.

        Returns:
            Table: `self` with columns renamed.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10  | Alice | 1
        ... 9   | Bob   | 1
        ... 8   | Alice | 2
        ... ''')
        >>> t2 = t1.rename_by_dict({"age": "years_old", t1.pet: "animal"})
        >>> pw.debug.compute_and_print(t2, include_id=False)
        owner | years_old | animal
        Alice | 8         | 2
        Alice | 10        | 1
        Bob   | 9         | 1
        """
        # Delegate: invert the mapping into kwargs understood by rename_columns.
        return self.rename_columns(
            **{new_name: self[old_name] for old_name, new_name in names_mapping.items()}
        )
    def with_prefix(self, prefix: str) -> Table:
        """Rename columns by adding prefix to each name of column.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10  | Alice | 1
        ... 9   | Bob   | 1
        ... 8   | Alice | 2
        ... ''')
        >>> t2 = t1.with_prefix("u_")
        >>> pw.debug.compute_and_print(t2, include_id=False)
        u_age | u_owner | u_pet
        8     | Alice   | 2
        9     | Bob     | 1
        10    | Alice   | 1
        """
        return self.rename_by_dict({name: prefix + name for name in self.keys()})
    def with_suffix(self, suffix: str) -> Table:
        """Rename columns by adding suffix to each name of column.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10  | Alice | 1
        ... 9   | Bob   | 1
        ... 8   | Alice | 2
        ... ''')
        >>> t2 = t1.with_suffix("_current")
        >>> pw.debug.compute_and_print(t2, include_id=False)
        age_current | owner_current | pet_current
        8           | Alice         | 2
        9           | Bob           | 1
        10          | Alice         | 1
        """
        return self.rename_by_dict({name: name + suffix for name in self.keys()})
    def rename(
        self,
        names_mapping: dict[str | expr.ColumnReference, str] | None = None,
        **kwargs: expr.ColumnExpression,
    ) -> Table:
        """Rename columns according either a dictionary or kwargs.

        If a mapping is provided using a dictionary, ``rename_by_dict`` will be used.
        Otherwise, ``rename_columns`` will be used with kwargs.

        Columns not in keys(kwargs) are not changed. New name of a column must not be ``id``.

        Args:
            names_mapping: mapping from old column names to new names.
            kwargs: mapping from old column names to new names.

        Returns:
            Table: `self` with columns renamed.
        """
        # The dict form takes precedence; kwargs are ignored when it is given.
        if names_mapping is not None:
            return self.rename_by_dict(names_mapping=names_mapping)
        return self.rename_columns(**kwargs)
def without(self, *columns: str | expr.ColumnReference) -> Table:
"""Selects all columns without named column references.
Args:
columns: columns to be dropped provided by `table.column_name` notation.
Returns:
Table: `self` without specified columns.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = t1.without(t1.age, pw.this.pet)
>>> pw.debug.compute_and_print(t2, include_id=False)
owner
Alice
Alice
Bob
"""
new_columns = self._columns.copy()
for col in columns:
if isinstance(col, expr.ColumnReference):
new_columns.pop(col.name)
else:
assert isinstance(col, str)
new_columns.pop(col)
columns_wrapped = {
name: self._wrap_column_in_context(self._rowwise_context, column, name)
for name, column in new_columns.items()
}
return self._with_same_universe(*columns_wrapped.items())
def having(self, *indexers: expr.ColumnReference) -> Table[TSchema]:
"""Removes rows so that indexed.ix(indexer) is possible when some rows are missing,
for each indexer in indexers"""
rets: list[Table] = []
for indexer in indexers:
rets.append(self._having(indexer))
if len(rets) == 0:
return self
elif len(rets) == 1:
[ret] = rets
return ret
else:
return rets[0].intersect(*rets[1:])
def update_types(self, **kwargs: Any) -> Table:
"""Updates types in schema. Has no effect on the runtime."""
for name in kwargs.keys():
if name not in self.keys():
raise ValueError(
"Table.update_types() argument name has to be an existing table column name."
)
from pathway.internals.common import declare_type
return self.with_columns(
**{key: declare_type(val, self[key]) for key, val in kwargs.items()}
)
def cast_to_types(self, **kwargs: Any) -> Table:
"""Casts columns to types."""
for name in kwargs.keys():
if name not in self.keys():
raise ValueError(
"Table.cast_to_types() argument name has to be an existing table column name."
)
from pathway.internals.common import cast
return self.with_columns(
**{key: cast(val, self[key]) for key, val in kwargs.items()}
)
    def _having(self, indexer: expr.ColumnReference) -> Table[TSchema]:
        """Restrict rows to those whose id appears among ``indexer``'s values."""
        context = clmn.HavingContext(
            orig_id_column=self._id_column, key_column=indexer._column
        )
        return self._table_with_context(context)
    def with_universe_of(self, other: TableLike) -> Table:
        """Returns a copy of self with exactly the same universe as others.

        Semantics: Required precondition self.universe == other.universe

        Used in situations where Pathway cannot deduce equality of universes, but
        those are equal as verified during runtime.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...   | pet
        ... 1 | Dog
        ... 7 | Cat
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...   | age
        ... 1 | 10
        ... 7 | 3
        ... 8 | 100
        ... ''')
        >>> t3 = t2.filter(pw.this.age < 30).with_universe_of(t1)
        >>> t4 = t1 + t3
        >>> pw.debug.compute_and_print(t4, include_id=False)
        pet | age
        Cat | 3
        Dog | 10
        """
        if self._universe == other._universe:
            # Universes already provably equal: nothing to promise.
            return self.copy()
        universes.promise_are_equal(self, other)
        return self._unsafe_promise_universe(other)
def flatten(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
"""Performs a flatmap operation on a column or expression given as a first
argument. Datatype of this column or expression has to be iterable or Json array.
Other columns specified in the method arguments are duplicated
as many times as the length of the iterable.
It is possible to get ids of source rows by using `table.id` column, e.g.
`table.flatten(table.column_to_be_flattened, original_id = table.id)`.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... | pet | age
... 1 | Dog | 2
... 7 | Cat | 5
... ''')
>>> t2 = t1.flatten(t1.pet)
>>> pw.debug.compute_and_print(t2, include_id=False)
pet
C
D
a
g
o
t
>>> t3 = t1.flatten(t1.pet, t1.age)
>>> pw.debug.compute_and_print(t3, include_id=False)
pet | age
C | 5
D | 2
a | 5
g | 2
o | 2
t | 5
"""
intermediate_table = self.select(*args, **kwargs)
all_args = combine_args_kwargs(args, kwargs)
if not all_args:
raise ValueError("Table.flatten() cannot have empty arguments list.")
all_names_iter = iter(all_args.keys())
flatten_name = next(all_names_iter)
return intermediate_table._flatten(flatten_name)
    def _flatten(
        self,
        flatten_name: str,
    ) -> Table:
        """Internal flatten: expand column ``flatten_name``, duplicating the rest.

        The flattened column is replaced by the context's result column; all
        other columns are re-wrapped in the flatten context.
        """
        flatten_column = self._columns[flatten_name]
        assert isinstance(flatten_column, clmn.ColumnWithExpression)
        context = clmn.FlattenContext(
            orig_universe=self._universe,
            flatten_column=flatten_column,
        )
        columns = {
            name: self._wrap_column_in_context(context, column, name)
            for name, column in self._columns.items()
            if name != flatten_name
        }
        return Table(
            _columns={
                flatten_name: context.flatten_result_column,
                **columns,
            },
            _context=context,
        )
    def sort(
        self,
        key: expr.ColumnExpression,
        instance: expr.ColumnExpression | None = None,
    ) -> Table:
        """
        Sorts a table by the specified keys.

        Args:
            table : pw.Table
                The table to be sorted.
            key (ColumnExpression[int | float | datetime | str | bytes]):
                An expression to sort by.
            instance : ColumnReference or None
                An expression with instance. Rows are sorted within an instance.
                ``prev`` and ``next`` columns will only point to rows that have the same instance.

        Returns:
            pw.Table: The sorted table. Contains two columns: ``prev`` and ``next``, containing the pointers
            to the previous and next rows.

        Example:

        >>> import pathway as pw
        >>> table = pw.debug.table_from_markdown('''
        ... name     | age | score
        ... Alice    | 25  | 80
        ... Bob      | 20  | 90
        ... Charlie  | 30  | 80
        ... ''')
        >>> table = table.with_id_from(pw.this.name)
        >>> table += table.sort(key=pw.this.age)
        >>> pw.debug.compute_and_print(table, include_id=True)
                    | name    | age | score | prev        | next
        ^GBSDEEW... | Alice   | 25  | 80    | ^EDPSSB1... | ^DS9AT95...
        ^EDPSSB1... | Bob     | 20  | 90    |             | ^GBSDEEW...
        ^DS9AT95... | Charlie | 30  | 80    | ^GBSDEEW... |
        >>> table = pw.debug.table_from_markdown('''
        ... name     | age | score
        ... Alice    | 25  | 80
        ... Bob      | 20  | 90
        ... Charlie  | 30  | 80
        ... David    | 35  | 90
        ... Eve      | 15  | 80
        ... ''')
        >>> table = table.with_id_from(pw.this.name)
        >>> table += table.sort(key=pw.this.age, instance=pw.this.score)
        >>> pw.debug.compute_and_print(table, include_id=True)
                    | name    | age | score | prev        | next
        ^GBSDEEW... | Alice   | 25  | 80    | ^T0B95XH... | ^DS9AT95...
        ^EDPSSB1... | Bob     | 20  | 90    |             | ^RT0AZWX...
        ^DS9AT95... | Charlie | 30  | 80    | ^GBSDEEW... |
        ^RT0AZWX... | David   | 35  | 90    | ^EDPSSB1... |
        ^T0B95XH... | Eve     | 15  | 80    |             | ^GBSDEEW...
        """
        # Wrap a possibly-None instance into a (constant) column expression.
        instance = clmn.ColumnExpression._wrap(instance)
        context = clmn.SortingContext(
            self._eval(key),
            self._eval(instance),
        )
        # Result carries only the linked-list pointers; callers join it back.
        return Table(
            _columns={
                "prev": context.prev_column,
                "next": context.next_column,
            },
            _context=context,
        )
    def _set_source(self, source: OutputHandle):
        """Attach lineage to this table, its columns and universe.

        Lineage is only set where missing, so the first operator that produced
        a column remains its recorded source.
        """
        self._source = source
        if not hasattr(self._id_column, "lineage"):
            self._id_column.lineage = clmn.ColumnLineage(name="id", source=source)
        for name, column in self._columns.items():
            if not hasattr(column, "lineage"):
                column.lineage = clmn.ColumnLineage(name=name, source=source)
        universe = self._universe
        if not hasattr(universe, "lineage"):
            universe.lineage = clmn.Lineage(source=source)
    def _unsafe_promise_universe(self, other: TableLike) -> Table:
        """Adopt ``other``'s universe without verification (checked at runtime)."""
        context = clmn.PromiseSameUniverseContext(self._id_column, other._id_column)
        return self._table_with_context(context)
    def _validate_expression(self, expression: expr.ColumnExpression):
        """Check that all column dependencies of ``expression`` live in this
        table's universe; raise ``ValueError`` otherwise."""
        for dep in expression._dependencies_above_reducer():
            if self._universe != dep._column.universe:
                raise ValueError(
                    f"You cannot use {dep.to_column_expression()} in this context."
                    + " Its universe is different than the universe of the table the method"
                    + " was called on. You can use <table1>.with_universe_of(<table2>)"
                    + " to assign universe of <table2> to <table1> if you're sure their"
                    + " sets of keys are equal."
                )
    def _wrap_column_in_context(
        self,
        context: clmn.Context,
        column: clmn.Column,
        name: str,
        lineage: clmn.Lineage | None = None,
    ) -> clmn.Column:
        """Contextualize column by wrapping it in expression.

        The resulting column evaluates a reference to ``column`` (under ``name``)
        inside ``context``; ``lineage`` may override the recorded provenance.
        """
        expression = expr.ColumnReference(_table=self, _column=column, _name=name)
        return expression._column_with_expression_cls(
            context=context,
            universe=context.universe,
            expression=expression,
            lineage=lineage,
        )
    def _table_with_context(self, context: clmn.Context) -> Table:
        """Build a new table with every column of ``self`` wrapped in ``context``."""
        columns = {
            name: self._wrap_column_in_context(context, column, name)
            for name, column in self._columns.items()
        }
        return Table(
            _columns=columns,
            _context=context,
        )
    def _table_restricted_context(self) -> clmn.TableRestrictedRowwiseContext:
        """Row-wise context restricted to this table (used e.g. by transformers)."""
        return clmn.TableRestrictedRowwiseContext(self._id_column, self)
    def _eval(
        self, expression: expr.ColumnExpression, context: clmn.Context | None = None
    ) -> clmn.ColumnWithExpression:
        """Desugar expression and wrap it in given context.

        Defaults to this table's row-wise context when none is given.
        """
        if context is None:
            context = self._rowwise_context
        column = expression._column_with_expression_cls(
            context=context,
            universe=context.universe,
            expression=expression,
        )
        return column
    # NOTE(review): takes ``cls`` — presumably decorated as a classmethod above
    # this view; confirm against the full file.
    def _from_schema(cls: type[TTable], schema: type[Schema]) -> TTable:
        """Create a materialized (input) table with fresh universe from ``schema``."""
        universe = Universe()
        context = clmn.MaterializedContext(universe, schema.universe_properties)
        columns = {
            name: clmn.MaterializedColumn(
                universe,
                schema.column_properties(name),
            )
            for name in schema.column_names()
        }
        return cls(_columns=columns, _schema=schema, _context=context)
def __repr__(self) -> str:
return f"<pathway.Table schema={dict(self.typehints())}>"
    def _with_same_universe(
        self,
        *columns: tuple[str, clmn.Column],
        schema: type[Schema] | None = None,
    ) -> Table:
        """Build a table with the given (name, column) pairs, reusing this
        table's row-wise context (and therefore its universe)."""
        return Table(
            _columns=dict(columns),
            _schema=schema,
            _context=self._rowwise_context,
        )
    def _sort_columns_by_other(self, other: Table):
        """Reorder this table's columns in place to match ``other``'s order."""
        assert self.keys() == other.keys()
        self._columns = {name: self._columns[name] for name in other.keys()}
    def _operator_dependencies(self) -> StableSet[Table]:
        """A table depends only on itself as an operator input."""
        return StableSet([self])
    def debug(self, name: str):
        """Attach a debug operator labeled ``name`` to this table; returns self
        so the call can be chained."""
        G.add_operator(
            lambda id: DebugOperator(name, id),
            lambda operator: operator(self),
        )
        return self
    def to(self, sink: DataSink) -> None:
        """Write this table to the given data sink."""
        from pathway.internals import table_io

        table_io.table_to_datasink(self, sink)
    def _materialize(self, universe: Universe):
        """Rebuild this table as materialized columns over ``universe``,
        keeping the schema and per-column properties."""
        context = clmn.MaterializedContext(universe)
        columns = {
            name: clmn.MaterializedColumn(universe, column.properties)
            for (name, column) in self._columns.items()
        }
        return Table(
            _columns=columns,
            _schema=self.schema,
            _context=context,
        )
    def pointer_from(
        self, *args: Any, optional=False, instance: expr.ColumnReference | None = None
    ):
        """Pseudo-random hash of its argument. Produces pointer types. Applied column-wise.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age  owner  pet
        ... 1   10  Alice  dog
        ... 2    9    Bob  dog
        ... 3    8  Alice  cat
        ... 4    7    Bob  dog''')
        >>> g = t1.groupby(t1.owner).reduce(refcol = t1.pointer_from(t1.owner)) # g.id == g.refcol
        >>> pw.debug.compute_and_print(g.select(test = (g.id == g.refcol)), include_id=False)
        test
        True
        True
        """
        # The instance column is folded in as an extra hashing argument.
        if instance is not None:
            args = (*args, instance)
        # XXX verify types for the table primary_keys
        return expr.PointerExpression(self, *args, optional=optional)
    def ix_ref(
        self,
        *args: expr.ColumnExpression | Value,
        optional: bool = False,
        context=None,
        instance: expr.ColumnReference | None = None,
    ):
        """Reindexes the table using expressions as primary keys.

        Uses keys from context, or tries to infer proper context from the expression.
        If optional is True, then None in expression values result in None values in the result columns.
        Missing values in table keys result in RuntimeError.

        Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
        (latter results in returning a delayed operation, and should be only used when using `ix` inside
        join().select() or groupby().reduce() sequence).

        Args:
            args: Column references.

        Returns:
            Row: indexed row.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... name   | pet
        ... Alice  | dog
        ... Bob    | cat
        ... Carole | cat
        ... David  | dog
        ... ''')
        >>> t2 = t1.with_id_from(pw.this.name)
        >>> t2 = t2.select(*pw.this, new_value=pw.this.ix_ref("Alice").pet)
        >>> pw.debug.compute_and_print(t2, include_id=False)
        name   | pet | new_value
        Alice  | dog | dog
        Bob    | cat | dog
        Carole | cat | dog
        David  | dog | dog

        Tables obtained by a groupby/reduce scheme always have primary keys:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... name   | pet
        ... Alice  | dog
        ... Bob    | cat
        ... Carole | cat
        ... David  | cat
        ... ''')
        >>> t2 = t1.groupby(pw.this.pet).reduce(pw.this.pet, count=pw.reducers.count())
        >>> t3 = t1.select(*pw.this, new_value=t2.ix_ref(t1.pet).count)
        >>> pw.debug.compute_and_print(t3, include_id=False)
        name   | pet | new_value
        Alice  | dog | 1
        Bob    | cat | 3
        Carole | cat | 3
        David  | cat | 3

        Single-row tables can be accessed via `ix_ref()`:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... name   | pet
        ... Alice  | dog
        ... Bob    | cat
        ... Carole | cat
        ... David  | cat
        ... ''')
        >>> t2 = t1.reduce(count=pw.reducers.count())
        >>> t3 = t1.select(*pw.this, new_value=t2.ix_ref(context=t1).count)
        >>> pw.debug.compute_and_print(t3, include_id=False)
        name   | pet | new_value
        Alice  | dog | 4
        Bob    | cat | 4
        Carole | cat | 4
        David  | cat | 4
        """
        # Hash the key expressions into a pointer and delegate to ix.
        return self.ix(
            self.pointer_from(*args, optional=optional, instance=instance),
            optional=optional,
            context=context,
        )
    def _subtables(self) -> StableSet[Table]:
        """A plain table's only subtable is itself."""
        return StableSet([self])
    def _substitutions(
        self,
    ) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
        """No column substitutions are needed for a plain table."""
        return self, {}
    def typehints(self) -> Mapping[str, Any]:
        """
        Return the types of the columns as a dictionary.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10  | Alice | dog
        ... 9   | Bob   | dog
        ... 8   | Alice | cat
        ... 7   | Bob   | dog
        ... ''')
        >>> t1.typehints()
        mappingproxy({'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>})
        """
        return self.schema.typehints()
    def eval_type(self, expression: expr.ColumnExpression) -> dt.DType:
        """Infer the dtype of ``expression`` in this table's row-wise context."""
        return (
            self._rowwise_context._get_type_interpreter()
            .eval_expression(expression, state=TypeInterpreterState())
            ._dtype
        )
    def _auto_live(self) -> Table:
        """Make self automatically live in interactive mode"""
        from pathway.internals.interactive import is_interactive_mode_enabled

        if is_interactive_mode_enabled():
            return self.live()
        else:
            return self
    def live(self) -> LiveTable[TSchema]:
        """Wrap this table in a LiveTable (experimental; emits a warning)."""
        from pathway.internals.interactive import LiveTable

        warnings.warn("live tables are an experimental feature", stacklevel=2)
        return LiveTable._create(self)
def collect_tables(expression: expr.ColumnExpression) -> list[Table]:
    """Collect all tables referenced (transitively) by ``expression``."""
    collector = TableCollector()
    collector.eval_expression(expression)
    return collector.table_list
166,696 | from __future__ import annotations
import logging
import sys
from contextlib import contextmanager
from functools import cached_property
from opentelemetry import trace
from opentelemetry.context import Context
from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.sdk.resources import (
SERVICE_INSTANCE_ID,
SERVICE_NAME,
SERVICE_NAMESPACE,
SERVICE_VERSION,
Resource,
)
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
from pathway.internals import api
propagator = TraceContextTextMapPropagator()
def get_current_context() -> tuple[Context, str | None]:
    """Capture the active OpenTelemetry context together with its W3C
    ``traceparent`` header value (``None`` when no trace is active)."""
    headers: dict[str, str | list[str]] = {}
    propagator.inject(headers)
    extracted = propagator.extract(headers)
    trace_parent = headers.get("traceparent")
    assert trace_parent is None or isinstance(trace_parent, str)
    return extracted, trace_parent
166,697 | from __future__ import annotations
import itertools
from abc import ABC, abstractmethod
from collections.abc import Iterable
from typing import ClassVar
import pathway.internals.column as clmn
import pathway.internals.expression as expr
import pathway.internals.operator as op
from pathway.internals.column_path import ColumnPath
from pathway.internals.graph_runner.path_storage import Storage
from pathway.internals.universe import Universe
class PathEvaluator(ABC):
def __init__(self, context: clmn.Context) -> None:
def compute(
self,
output_columns: Iterable[clmn.Column],
input_storages: dict[Universe, Storage],
) -> Storage:
def __init_subclass__(cls, /, context_types=[], **kwargs):
def for_context(cls, context: clmn.Context) -> type[PathEvaluator]:
class FlatStoragePathEvaluator(
PathEvaluator,
context_types=[clmn.GroupedContext],
):
def compute(
self,
output_columns: Iterable[clmn.Column],
input_storages: dict[Universe, Storage],
) -> Storage:
class Storage:
def get_columns(self) -> Iterable[Column]:
def has_column(self, column: Column) -> bool:
def get_path(self, column: Column) -> ColumnPath:
def max_depth(self) -> int:
def validate(self) -> None:
def with_updated_paths(self, paths: dict[Column, ColumnPath]) -> Storage:
def with_flattened_output(self, storage: Storage) -> Storage:
def with_flattened_inputs(self, storages: list[Storage] | None = None) -> Storage:
def restrict_to_table(self, table: Table) -> Storage:
def merge_storages(cls, universe: Universe, *storages: Storage) -> Storage:
def flat(
cls, universe: Universe, columns: Iterable[Column], shift: int = 0
) -> Storage:
# NOTE(review): signature-only skeleton mirroring
# pathway.internals.universe.Universe (imported at the top of this fragment);
# bodies stripped.
class Universe:
    def __init__(self) -> None:
    def subset(self) -> Universe:
    def superset(self) -> Universe:
    def is_subset_of(self, other: Universe) -> bool:
    def is_equal_to(self, other: Universe) -> bool:
def compute_paths(
    output_columns: Iterable[clmn.Column],
    input_storages: dict[Universe, Storage],
    operator: op.Operator,
    context: clmn.Context,
):
    """Pick a path evaluator for *operator* and compute the output storage.

    Input and row-transformer operators always get a flat layout; only
    contextualized intermediate operators dispatch on the context type.
    Raises ValueError for operator kinds that should not produce tables.
    """
    evaluator: PathEvaluator
    if isinstance(operator, op.InputOperator):
        evaluator = FlatStoragePathEvaluator(context)
    elif isinstance(operator, op.RowTransformerOperator):
        evaluator = FlatStoragePathEvaluator(context)
    elif isinstance(operator, op.ContextualizedIntermediateOperator):
        evaluator = PathEvaluator.for_context(context)(context)
    else:
        raise ValueError(
            f"Operator {operator} in update_storage() but it shouldn't produce tables."
        )
    return evaluator.compute(output_columns, input_storages)
166,698 | from __future__ import annotations
import asyncio
import contextlib
import threading
@contextlib.contextmanager
def new_event_loop():
    """Run a fresh asyncio event loop on a background thread.

    Yields the running loop. On exit, stops the loop from the loop's own
    thread (``call_soon_threadsafe`` — required when stopping from another
    thread) and joins the worker. The loop is closed by the worker thread
    itself after ``run_forever`` returns, so a still-running loop is never
    closed.

    Fix: the function body is a generator used as a context manager, but the
    ``@contextlib.contextmanager`` decorator was missing — calling it returned
    a bare generator, and ``with new_event_loop() as loop:`` would raise.
    """
    event_loop = asyncio.new_event_loop()

    def target(event_loop: asyncio.AbstractEventLoop):
        try:
            event_loop.run_forever()
        finally:
            event_loop.close()

    thread = threading.Thread(target=target, args=(event_loop,))
    thread.start()
    try:
        yield event_loop
    finally:
        event_loop.call_soon_threadsafe(event_loop.stop)
        thread.join()
166,699 | from __future__ import annotations
import boto3
from pathway.internals import api, dtype as dt, schema
from pathway.internals.table import Table
from pathway.internals.trace import trace_user_frame
class Table(
Joinable,
OperatorInput,
Generic[TSchema],
):
"""Collection of named columns over identical universes.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | dog
... 9 | Bob | dog
... 8 | Alice | cat
... 7 | Bob | dog
... ''')
>>> isinstance(t1, pw.Table)
True
"""
if TYPE_CHECKING:
from pathway.stdlib.ordered import diff # type: ignore[misc]
from pathway.stdlib.statistical import interpolate # type: ignore[misc]
from pathway.stdlib.temporal import ( # type: ignore[misc]
asof_join,
asof_join_left,
asof_join_outer,
asof_join_right,
asof_now_join,
asof_now_join_inner,
asof_now_join_left,
interval_join,
interval_join_inner,
interval_join_left,
interval_join_outer,
interval_join_right,
window_join,
window_join_inner,
window_join_left,
window_join_outer,
window_join_right,
windowby,
)
from pathway.stdlib.viz import ( # type: ignore[misc]
_repr_mimebundle_,
plot,
show,
)
_columns: dict[str, clmn.Column]
_schema: type[Schema]
_id_column: clmn.IdColumn
_rowwise_context: clmn.RowwiseContext
_source: SetOnceProperty[OutputHandle] = SetOnceProperty()
"""Lateinit by operator."""
def __init__(
    self,
    _columns: Mapping[str, clmn.Column],
    _context: clmn.Context,
    _schema: type[Schema] | None = None,
):
    # Derive the schema from the columns when the caller did not supply one.
    if _schema is None:
        _schema = schema_from_columns(_columns)
    super().__init__(_context)
    self._columns = dict(_columns)  # defensive copy of the caller's mapping
    self._schema = _schema
    self._id_column = _context.id_column
    # `pw.this` resolves to this table inside expressions bound to it.
    self._substitution = {thisclass.this: self}
    self._rowwise_context = clmn.RowwiseContext(self._id_column)
# NOTE(review): exposed as a property in the public API — the decorator is not
# visible in this extraction; confirm against the original file.
def id(self) -> expr.ColumnReference:
    """Get reference to pseudocolumn containing id's of a table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.select(ids = t1.id)
    >>> t2.typehints()['ids']
    <class 'pathway.engine.Pointer'>
    >>> pw.debug.compute_and_print(t2.select(test=t2.id == t2.ids), include_id=False)
    test
    True
    True
    True
    True
    """
    # The id pseudocolumn is backed directly by the table's IdColumn.
    return expr.ColumnReference(_table=self, _column=self._id_column, _name="id")
def column_names(self):
    """Return the names of this table's columns (a dict keys view)."""
    return self._columns.keys()
def keys(self):
    """Return a view of the column names."""
    return self._columns.keys()
def _get_column(self, name: str) -> clmn.Column:
    """Look up the column object for *name*; raises KeyError if absent."""
    columns = self._columns
    return columns[name]
def _ipython_key_completions_(self):
    """Provide IPython `table[<TAB>` key completions (the column names)."""
    return [*self.column_names()]
def __dir__(self):
    """Extend the default attribute listing with the column names."""
    entries = list(super().__dir__())
    entries.extend(self.column_names())
    return entries
# Typed alias for the column-namespace accessor `C`; presumably a @property in
# the original source (decorator not visible here) — TODO confirm.
def _C(self) -> TSchema:
    return self.C  # type: ignore
# NOTE(review): likely a @property in the original source (decorator not
# visible in this extraction).
def schema(self) -> type[Schema]:
    """Get schema of the table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t1.schema
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
    >>> t1.typehints()['age']
    <class 'int'>
    """
    return self._schema
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference:
    """Resolve *name* to a ColumnReference, raising *exception_type* if unknown.

    Applies deprecation renames first; "id" resolves to the id pseudocolumn.
    """
    name = self._column_deprecation_rename(name)
    if name == "id":
        return self.id
    if name in self.keys():
        return expr.ColumnReference(
            _table=self, _column=self._get_column(name), _name=name
        )
    raise exception_type(f"Table has no column with name {name}.")
# NOTE(review): the two stub signatures below are @overload declarations in the
# original source; the decorators are not visible in this extraction.
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...
def __getitem__(self, args: list[str | expr.ColumnReference]) -> Table: ...
def __getitem__(
    self, args: str | expr.ColumnReference | list[str | expr.ColumnReference]
) -> expr.ColumnReference | Table:
    """Get columns by name.
    Warning:
    - Does not allow repetitions of columns.
    - Fails if tries to access nonexistent column.
    Args:
    names: a singe column name or list of columns names to be extracted from `self`.
    Returns:
    Table with specified columns, or column expression (if single argument given).
    Instead of column names, column references are valid here.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1[["age", "pet"]]
    >>> t2 = t1[["age", t1.pet]]
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | pet
    7 | dog
    8 | cat
    9 | dog
    10 | dog
    """
    if isinstance(args, expr.ColumnReference):
        # References are accepted only when they point at this table or at
        # pw.this (which is resolved by name).
        if (args.table is not self) and not isinstance(
            args.table, thisclass.ThisMetaclass
        ):
            raise ValueError(
                "Table.__getitem__ argument has to be a ColumnReference to the same table or pw.this, or a string "
                + "(or a list of those)."
            )
        return self._get_colref_by_name(args.name, KeyError)
    elif isinstance(args, str):
        return self._get_colref_by_name(args, KeyError)
    else:
        # A list of names/references selects multiple columns via select().
        return self.select(*[self[name] for name in args])
# NOTE(review): no `self`/`cls` parameter — presumably a @staticmethod in the
# original source (decorator not visible in this extraction).
def from_columns(
    *args: expr.ColumnReference, **kwargs: expr.ColumnReference
) -> Table:
    """Build a table from columns.
    All columns must have the same ids. Columns' names must be pairwise distinct.
    Args:
    args: List of columns.
    kwargs: Columns with their new names.
    Returns:
    Table: Created table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> t2 = pw.Table.empty(foo=float, bar=float).with_universe_of(t1)
    >>> t3 = pw.Table.from_columns(t1.pet, qux=t2.foo)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | qux
    """
    all_args = cast(
        dict[str, expr.ColumnReference], combine_args_kwargs(args, kwargs)
    )
    if not all_args:
        raise ValueError("Table.from_columns() cannot have empty arguments list")
    else:
        # Use the first argument's table as the base; every other argument
        # must live on a provably-equal universe.
        arg = next(iter(all_args.values()))
        table: Table = arg.table
        for arg in all_args.values():
            if not G.universe_solver.query_are_equal(
                table._universe, arg.table._universe
            ):
                raise ValueError(
                    "Universes of all arguments of Table.from_columns() have to be equal.\n"
                    + "Consider using Table.promise_universes_are_equal() to assert it.\n"
                    + "(However, untrue assertion might result in runtime errors.)"
                )
        return table.select(*args, **kwargs)
def concat_reindex(self, *tables: Table) -> Table:
    """Concatenate contents of several tables.
    This is similar to PySpark union. All tables must have the same schema. Each row is reindexed.
    Args:
    tables: List of tables to concatenate. All tables must have the same schema.
    Returns:
    Table: The concatenated table. It will have new, synthetic ids.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | pet
    ... 1 | Dog
    ... 7 | Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | pet
    ... 1 | Manul
    ... 8 | Octopus
    ... ''')
    >>> t3 = t1.concat_reindex(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet
    Cat
    Dog
    Manul
    Octopus
    """
    # Reindex each table with its position as a salt so ids cannot collide,
    # then promise disjointness and concatenate in place.
    reindexed = [
        table.with_id_from(table.id, i) for i, table in enumerate([self, *tables])
    ]
    universes.promise_are_pairwise_disjoint(*reindexed)
    return Table.concat(*reindexed)
# NOTE(review): no `self`/`cls` parameter — presumably a @staticmethod in the
# original source (decorator not visible in this extraction).
def empty(**kwargs: dt.DType) -> Table:
    """Creates an empty table with a schema specified by kwargs.
    Args:
    kwargs: Dict whose keys are column names and values are column types.
    Returns:
    Table: Created empty table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.Table.empty(age=float, pet=float)
    >>> pw.debug.compute_and_print(t1, include_id=False)
    age | pet
    """
    from pathway.internals import table_io
    ret = table_io.empty_from_schema(schema_from_types(None, **kwargs))
    # Registering emptiness lets the universe solver prove subset/disjoint
    # relations involving this table for free.
    G.universe_solver.register_as_empty(ret._universe)
    return ret
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Build a new table with columns specified by kwargs.
    Output columns' names are keys(kwargs). values(kwargs) can be raw values, boxed
    values, columns. Assigning to id reindexes the table.
    Args:
    args: Column references.
    kwargs: Column expressions with their new assigned names.
    Returns:
    Table: Created table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... pet
    ... Dog
    ... Cat
    ... ''')
    >>> t2 = t1.select(animal=t1.pet, desc="fluffy")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    animal | desc
    Cat | fluffy
    Dog | fluffy
    """
    # Positional references keep their own names; kwargs rename.
    new_columns = []
    all_args = combine_args_kwargs(args, kwargs)
    for new_name, expression in all_args.items():
        self._validate_expression(expression)
        column = self._eval(expression)
        new_columns.append((new_name, column))
    # Same universe as self: select never changes the key set.
    return self._with_same_universe(*new_columns)
def __add__(self, other: Table) -> Table:
    """Build a union of `self` with `other`.
    Semantics: Returns a table C, such that
    - C.columns == self.columns + other.columns
    - C.id == self.id == other.id
    Args:
    other: The other table. `self.id` must be equal `other.id` and
    `self.columns` and `other.columns` must be disjoint (or overlapping names
    are THE SAME COLUMN)
    Returns:
    Table: Created table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... pet
    ... 1 Dog
    ... 7 Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age
    ... 1 10
    ... 7 3
    ... ''')
    >>> t3 = t1 + t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | age
    Cat | 3
    Dog | 10
    """
    if not G.universe_solver.query_are_equal(self._universe, other._universe):
        raise ValueError(
            "Universes of all arguments of Table.__add__() have to be equal.\n"
            + "Consider using Table.promise_universes_are_equal() to assert it.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    # Union of columns expressed as a select over both tables' references.
    return self.select(*self, *other)
# NOTE(review): used without parentheses in the doctest below — presumably a
# @property in the original source (decorator not visible in this extraction).
def slice(self) -> TableSlice:
    """Creates a collection of references to self columns.
    Supports basic column manipulation methods.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t1.slice.without("age")
    TableSlice({'owner': <table1>.owner, 'pet': <table1>.pet})
    """
    return TableSlice(dict(**self), self)
def filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    """Filter a table according to `filter_expression` condition.
    Args:
    filter_expression: `ColumnExpression` that specifies the filtering condition.
    Returns:
    Table: Result has the same schema as `self` and its ids are subset of `self.id`.
    Example:
    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ... 1 3
    ... 7 0
    ... ''')
    >>> filtered = vertices.filter(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(filtered, include_id=False)
    label | outdegree
    7 | 0
    """
    filter_type = self.eval_type(filter_expression)
    if filter_type != dt.BOOL:
        raise TypeError(
            f"Filter argument of Table.filter() has to be bool, found {filter_type}."
        )
    ret = self._filter(filter_expression)
    # Filtering on `col is not None` proves the column non-null in the
    # result, so Optional can be dropped from its type.
    if (
        filter_col := expr.get_column_filtered_by_is_none(filter_expression)
    ) is not None and filter_col.table == self:
        name = filter_col.name
        dtype = self._columns[name].dtype
        ret = ret.update_types(**{name: dt.unoptionalize(dtype)})
    return ret
def split(
    self, split_expression: expr.ColumnExpression
) -> tuple[Table[TSchema], Table[TSchema]]:
    """Split a table according to `split_expression` condition.
    Args:
    split_expression: `ColumnExpression` that specifies the split condition.
    Returns:
    positive_table, negative_table: tuple of tables,
    with the same schemas as `self` and with ids that are subsets of `self.id`,
    and provably disjoint.
    Example:
    >>> import pathway as pw
    >>> vertices = pw.debug.table_from_markdown('''
    ... label outdegree
    ... 1 3
    ... 7 0
    ... ''')
    >>> positive, negative = vertices.split(vertices.outdegree == 0)
    >>> pw.debug.compute_and_print(positive, include_id=False)
    label | outdegree
    7 | 0
    >>> pw.debug.compute_and_print(negative, include_id=False)
    label | outdegree
    1 | 3
    """
    positive = self.filter(split_expression)
    negative = self.filter(~split_expression)
    # Teach the universe solver what we know by construction: the halves are
    # disjoint and together cover self exactly.
    universes.promise_are_pairwise_disjoint(positive, negative)
    universes.promise_are_equal(
        self, Table.concat(positive, negative)
    )  # TODO: add API method for this
    return positive, negative
def _filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
    """Internal filter: wrap self in a FilterContext built from the mask."""
    self._validate_expression(filter_expression)
    mask_column = self._eval(filter_expression)
    assert mask_column.universe == self._universe
    return self._table_with_context(
        clmn.FilterContext(mask_column, self._id_column)
    )
def _gradual_broadcast(
    self,
    threshold_table,
    lower_column,
    value_column,
    upper_column,
) -> Table:
    """Extend *self* with the `apx_value` column from a gradual broadcast."""
    broadcast = self.__gradual_broadcast(
        threshold_table, lower_column, value_column, upper_column
    )
    return self + broadcast
def __gradual_broadcast(
    self,
    threshold_table,
    lower_column,
    value_column,
    upper_column,
):
    """Build a one-column table holding the approximated broadcast value."""
    lower, value, upper = (
        threshold_table._eval(col)
        for col in (lower_column, value_column, upper_column)
    )
    context = clmn.GradualBroadcastContext(self._id_column, lower, value, upper)
    return Table(_columns={"apx_value": context.apx_value_column}, _context=context)
def _forget(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
    mark_forgetting_records: bool,
) -> Table:
    """Wrap *self* in a ForgetContext keyed on the threshold/time columns."""
    threshold = self._eval(threshold_column)
    time = self._eval(time_column)
    return self._table_with_context(
        clmn.ForgetContext(
            self._id_column, threshold, time, mark_forgetting_records
        )
    )
def _forget_immediately(
    self,
) -> Table:
    """Wrap *self* in a context that forgets every record immediately."""
    return self._table_with_context(
        clmn.ForgetImmediatelyContext(self._id_column)
    )
def _filter_out_results_of_forgetting(
    self,
) -> Table:
    """Drop the retraction entries produced by forgetting.

    The output universe is a superset of the input universe: with forgetting
    entries filtered out, the set of keys with a +1 diff at any point in time
    can be bigger than in the input table.
    """
    return self._table_with_context(
        clmn.FilterOutForgettingContext(self._id_column)
    )
def _freeze(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    """Wrap *self* in a FreezeContext keyed on the threshold/time columns."""
    threshold = self._eval(threshold_column)
    time = self._eval(time_column)
    return self._table_with_context(
        clmn.FreezeContext(self._id_column, threshold, time)
    )
def _buffer(
    self,
    threshold_column: expr.ColumnExpression,
    time_column: expr.ColumnExpression,
) -> Table:
    """Wrap *self* in a BufferContext keyed on the threshold/time columns."""
    threshold = self._eval(threshold_column)
    time = self._eval(time_column)
    return self._table_with_context(
        clmn.BufferContext(self._id_column, threshold, time)
    )
def difference(self, other: Table) -> Table[TSchema]:
    r"""Restrict self universe to keys not appearing in the other table.
    Args:
    other: table with ids to remove from self.
    Returns:
    Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.difference(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    10 | Alice | 1
    """
    # Only key sets matter here: the context subtracts other's id column.
    context = clmn.DifferenceContext(
        left=self._id_column,
        right=other._id_column,
    )
    return self._table_with_context(context)
def intersect(self, *tables: Table) -> Table[TSchema]:
    """Restrict self universe to keys appearing in all of the tables.
    Args:
    tables: tables keys of which are used to restrict universe.
    Returns:
    Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... 4 | 300
    ... ''')
    >>> t3 = t1.intersect(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    """
    intersecting_universes = (
        self._universe,
        *tuple(table._universe for table in tables),
    )
    universe = G.universe_solver.get_intersection(*intersecting_universes)
    # If the solver proves the intersection equals one of the inputs, a cheap
    # restriction suffices; otherwise build a real intersection context.
    if universe in intersecting_universes:
        context: clmn.Context = clmn.RestrictContext(self._id_column, universe)
    else:
        intersecting_ids = (
            self._id_column,
            *tuple(table._id_column for table in tables),
        )
        context = clmn.IntersectContext(
            intersecting_ids=intersecting_ids,
        )
    return self._table_with_context(context)
def restrict(self, other: TableLike) -> Table[TSchema]:
    """Restrict self universe to keys appearing in other.
    Args:
    other: table which universe is used to restrict universe of self.
    Returns:
    Table: table with restricted universe, with the same set of columns
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...   | cost
    ... 2 | 100
    ... 3 | 200
    ... '''
    ... )
    >>> t2.promise_universe_is_subset_of(t1)
    <pathway.Table schema={'cost': <class 'int'>}>
    >>> t3 = t1.restrict(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    """
    # Restriction is only sound when the target key set is provably a subset.
    if not G.universe_solver.query_is_subset(other._universe, self._universe):
        raise ValueError(
            "Table.restrict(): other universe has to be a subset of self universe."
            + "Consider using Table.promise_universe_is_subset_of() to assert it."
        )
    context = clmn.RestrictContext(self._id_column, other._universe)
    columns = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
    }
    return Table(
        _columns=columns,
        _context=context,
    )
def copy(self) -> Table[TSchema]:
    """Returns a copy of a table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.copy()
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age | owner | pet
    7 | Bob | dog
    8 | Alice | cat
    9 | Bob | dog
    10 | Alice | dog
    >>> t1 is t2
    False
    """
    # Delegates to _copy_as with the concrete (possibly subclassed) type.
    return self._copy_as(type(self))
def _copy_as(self, table_type: type[TTable], /, **kwargs) -> TTable:
    """Re-wrap this table's columns in its rowwise context as *table_type*."""
    ctx = self._rowwise_context
    wrapped = {
        name: self._wrap_column_in_context(ctx, column, name)
        for name, column in self._columns.items()
    }
    return table_type(_columns=wrapped, _context=ctx, **kwargs)
def groupby(
    self,
    *args: expr.ColumnReference,
    id: expr.ColumnReference | None = None,
    sort_by: expr.ColumnReference | None = None,
    _filter_out_results_of_forgetting: bool = False,
    instance: expr.ColumnReference | None = None,
) -> groupbys.GroupedTable:
    """Groups table by columns from args.
    Note:
    Usually followed by `.reduce()` that aggregates the result and returns a table.
    Args:
    args: columns to group by.
    id: if provided, is the column used to set id's of the rows of the result
    sort_by: if provided, column values are used as sorting keys for particular reducers
    instance: optional argument describing partitioning of the data into separate instances
    Returns:
    GroupedTable: Groupby object.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.groupby(t1.pet, t1.owner).reduce(t1.owner, t1.pet, ageagg=pw.reducers.sum(t1.age))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | pet | ageagg
    Alice | cat | 8
    Alice | dog | 10
    Bob | dog | 16
    """
    # `instance` is just an extra grouping column.
    if instance is not None:
        args = (*args, instance)
    # `id` may replace the grouping column only when grouping by at most one
    # column, and then only if it is the same column.
    if id is not None:
        if len(args) == 0:
            args = (id,)
        elif len(args) > 1:
            raise ValueError(
                "Table.groupby() cannot have id argument when grouping by multiple columns."
            )
        elif args[0]._column != id._column:
            raise ValueError(
                "Table.groupby() received id argument and is grouped by a single column,"
                + " but the arguments are not equal.\n"
                + "Consider using <table>.groupby(id=...), skipping the positional argument."
            )
    for arg in args:
        if not isinstance(arg, expr.ColumnReference):
            if isinstance(arg, str):
                raise ValueError(
                    f"Expected a ColumnReference, found a string. Did you mean <table>.{arg}"
                    + f" instead of {repr(arg)}?"
                )
            else:
                raise ValueError(
                    "All Table.groupby() arguments have to be a ColumnReference."
                )
    return groupbys.GroupedTable.create(
        table=self,
        grouping_columns=args,
        set_id=id is not None,
        sort_by=sort_by,
        _filter_out_results_of_forgetting=_filter_out_results_of_forgetting,
    )
def reduce(
    self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
) -> Table:
    """Reduce a table to a single row.
    Equivalent to `self.groupby().reduce(*args, **kwargs)`.
    Args:
    args: reducer to reduce the table with
    kwargs: reducer to reduce the table with. Its key is the new name of a column.
    Returns:
    Table: Reduced table.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.reduce(ageagg=pw.reducers.argmin(t1.age))
    >>> pw.debug.compute_and_print(t2, include_id=False) # doctest: +ELLIPSIS
    ageagg
    ^...
    >>> t3 = t2.select(t1.ix(t2.ageagg).age, t1.ix(t2.ageagg).pet)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | pet
    7 | dog
    """
    # An empty groupby puts all rows into one group.
    return self.groupby().reduce(*args, **kwargs)
def deduplicate(
    self,
    *,
    value: expr.ColumnExpression,
    instance: expr.ColumnExpression | None = None,
    acceptor: Callable[[T, T], bool],
    persistent_id: str | None = None,
) -> Table:
    """Deduplicates rows in `self` on `value` column using acceptor function.
    It keeps rows which where accepted by the acceptor function.
    Acceptor operates on two arguments - current value and the previously accepted value.
    Args:
    value: column expression used for deduplication.
    instance: Grouping column. For rows with different
    values in this column, deduplication will be performed separately.
    Defaults to None.
    acceptor: callback telling whether two values are different.
    persistent_id: (unstable) An identifier, under which the state of the table
    will be persisted or ``None``, if there is no need to persist the state of this table.
    When a program restarts, it restores the state for all input tables according to what
    was saved for their ``persistent_id``. This way it's possible to configure the start of
    computations from the moment they were terminated last time.
    Returns:
    Table: the result of deduplication.
    Example:
    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ... val | __time__
    ...  1  |     2
    ...  2  |     4
    ...  3  |     6
    ...  4  |     8
    ... '''
    ... )
    >>>
    >>> def acceptor(new_value, old_value) -> bool:
    ...     return new_value >= old_value + 2
    ...
    >>>
    >>> result = table.deduplicate(value=pw.this.val, acceptor=acceptor)
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    val | __time__ | __diff__
    1 | 2 | 1
    1 | 6 | -1
    3 | 6 | 1
    >>>
    >>> table = pw.debug.table_from_markdown(
    ...     '''
    ... val | instance | __time__
    ...  1  |    1     |     2
    ...  2  |    1     |     4
    ...  3  |    2     |     6
    ...  4  |    1     |     8
    ...  4  |    2     |     8
    ...  5  |    1     |    10
    ... '''
    ... )
    >>>
    >>> def acceptor(new_value, old_value) -> bool:
    ...     return new_value >= old_value + 2
    ...
    >>>
    >>> result = table.deduplicate(
    ...     value=pw.this.val, instance=pw.this.instance, acceptor=acceptor
    ... )
    >>> pw.debug.compute_and_print_update_stream(result, include_id=False)
    val | instance | __time__ | __diff__
    1 | 1 | 2 | 1
    3 | 2 | 6 | 1
    1 | 1 | 8 | -1
    4 | 1 | 8 | 1
    """
    # With no instance column, a constant None puts all rows in one group.
    if instance is None:
        instance = expr.ColumnConstExpression(None)
    self._validate_expression(value)
    self._validate_expression(instance)
    value_col = self._eval(value)
    instance_col = self._eval(instance)
    context = clmn.DeduplicateContext(
        value_col,
        (instance_col,),
        acceptor,
        self._id_column,
        persistent_id,
    )
    return self._table_with_context(context)
def ix(
    self, expression: expr.ColumnExpression, *, optional: bool = False, context=None
) -> Table:
    """Reindexes the table using expression values as keys. Uses keys from context, or tries to infer
    proper context from the expression.
    If optional is True, then None in expression values result in None values in the result columns.
    Missing values in table keys result in RuntimeError.
    Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
    (latter results in returning a delayed operation, and should be only used when using `ix` inside
    join().select() or groupby().reduce() sequence).
    Returns:
    Reindexed table with the same set of columns.
    Example:
    >>> import pathway as pw
    >>> t_animals = pw.debug.table_from_markdown('''
    ...   | epithet    | genus
    ... 1 | upupa      | epops
    ... 2 | acherontia | atropos
    ... 3 | bubo       | scandiacus
    ... 4 | dynastes   | hercules
    ... ''')
    >>> t_birds = pw.debug.table_from_markdown('''
    ...   | desc
    ... 2 | hoopoe
    ... 4 | owl
    ... ''')
    >>> ret = t_birds.select(t_birds.desc, latin=t_animals.ix(t_birds.id).genus)
    >>> pw.debug.compute_and_print(ret, include_id=False)
    desc | latin
    hoopoe | atropos
    owl | hercules
    """
    # Context inference: use the single table appearing in the expression;
    # with no tables at all, fall back to pw.this (delayed evaluation).
    if context is None:
        all_tables = collect_tables(expression)
        if len(all_tables) == 0:
            context = thisclass.this
        elif all(tab == all_tables[0] for tab in all_tables):
            context = all_tables[0]
    if context is None:
        # Multiple distinct tables: require equal universes and pick the first.
        for tab in all_tables:
            if not isinstance(tab, Table):
                raise ValueError("Table expected here.")
        if len(all_tables) == 0:
            raise ValueError("Const value provided.")
        context = all_tables[0]
        for tab in all_tables:
            assert context._universe.is_equal_to(tab._universe)
    if isinstance(context, groupbys.GroupedJoinable):
        context = thisclass.this
    if isinstance(context, thisclass.ThisMetaclass):
        # Delayed: re-invoke ix once the real table is known.
        return context._delayed_op(
            lambda table, expression: self.ix(
                expression=expression, optional=optional, context=table
            ),
            expression=expression,
            qualname=f"{self}.ix(...)",
            name="ix",
        )
    restrict_universe = RestrictUniverseDesugaring(context)
    expression = restrict_universe.eval_expression(expression)
    key_col = context.select(tmp=expression).tmp
    key_dtype = self.eval_type(key_col)
    if (
        optional and not dt.dtype_issubclass(key_dtype, dt.Optional(dt.POINTER))
    ) or (not optional and not isinstance(key_dtype, dt.Pointer)):
        raise TypeError(
            f"Pathway supports indexing with Pointer type only. The type used was {key_dtype}."
        )
    # Optional keys make every result column Optional as well.
    if optional and isinstance(key_dtype, dt.Optional):
        self_ = self.update_types(
            **{name: dt.Optional(self.typehints()[name]) for name in self.keys()}
        )
    else:
        self_ = self
    return self_._ix(key_col, optional)
def _ix(
    self,
    key_expression: expr.ColumnReference,
    optional: bool,
) -> Table:
    """Reindex by an already-validated pointer column."""
    return self._table_with_context(
        clmn.IxContext(key_expression._column, self._id_column, optional)
    )
def __lshift__(self, other: Table) -> Table:
    """Alias to update_cells method.
    Updates cells of `self`, breaking ties in favor of the values in `other`.
    Semantics:
    - result.columns == self.columns
    - result.id == self.id
    - conflicts are resolved preferring other's values
    Requires:
    - other.columns ⊆ self.columns
    - other.id ⊆ self.id
    Args:
    other: the other table.
    Returns:
    Table: `self` updated with cells form `other`.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 30
    ... ''')
    >>> pw.universes.promise_is_subset_of(t2, t1)
    >>> t3 = t1 << t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    """
    # _stacklevel=2 keeps warnings pointing at the `<<` call site.
    return self.update_cells(other, _stacklevel=2)
def concat(self, *others: Table[TSchema]) -> Table[TSchema]:
    """Concats `self` with every `other` ∊ `others`.
    Semantics:
    - result.columns == self.columns == other.columns
    - result.id == self.id ∪ other.id
    if self.id and other.id collide, throws an exception.
    Requires:
    - other.columns == self.columns
    - self.id disjoint with other.id
    Args:
    other: the other table.
    Returns:
    Table: The concatenated table. Id's of rows from original tables are preserved.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    | age | owner | pet
    ... 11 | 11 | Alice | 30
    ... 12 | 12 | Tom | 40
    ... ''')
    >>> pw.universes.promise_are_pairwise_disjoint(t1, t2)
    >>> t3 = t1.concat(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 1
    11 | Alice | 30
    12 | Tom | 40
    """
    for other in others:
        if other.keys() != self.keys():
            raise ValueError(
                "columns do not match in the argument of Table.concat()"
            )
    # Per-column least common ancestor of types, so all inputs can be cast to
    # one common schema before the unsafe concat.
    schema = {
        key: functools.reduce(
            dt.types_lca,
            [other.schema._dtypes()[key] for other in others],
            self.schema._dtypes()[key],
        )
        for key in self.keys()
    }
    return Table._concat(
        self.cast_to_types(**schema),
        *[other.cast_to_types(**schema) for other in others],
    )
def _concat(self, *others: Table[TSchema]) -> Table[TSchema]:
    """Concatenate without schema reconciliation; universes must be provably disjoint."""
    union_ids = (self._id_column, *(other._id_column for other in others))
    if not G.universe_solver.query_are_disjoint(*(c.universe for c in union_ids)):
        raise ValueError(
            "Universes of the arguments of Table.concat() have to be disjoint.\n"
            + "Consider using Table.promise_universes_are_disjoint() to assert it.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    updates = tuple(
        {name: other._columns[name] for name in self.keys()} for other in others
    )
    return self._table_with_context(
        clmn.ConcatUnsafeContext(union_ids=union_ids, updates=updates)
    )
def update_cells(self, other: Table, _stacklevel: int = 1) -> Table:
    """Updates cells of `self`, breaking ties in favor of the values in `other`.
    Semantics:
    - result.columns == self.columns
    - result.id == self.id
    - conflicts are resolved preferring other's values
    Requires:
    - other.columns ⊆ self.columns
    - other.id ⊆ self.id
    Args:
    other: the other table.
    Returns:
    Table: `self` updated with cells form `other`.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...   | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 1 | 10 | Alice | 30
    ... ''')
    >>> pw.universes.promise_is_subset_of(t2, t1)
    >>> t3 = t1.update_cells(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    """
    if names := (set(other.keys()) - set(self.keys())):
        raise ValueError(
            f"Columns of the argument in Table.update_cells() not present in the updated table: {list(names)}."
        )
    # Equal universes mean every row is updated — with_columns is equivalent
    # and cheaper, so redirect (with a warning).
    if self._universe == other._universe:
        warnings.warn(
            "Key sets of self and other in update_cells are the same."
            + " Using with_columns instead of update_cells.",
            stacklevel=_stacklevel + 4,
        )
        return self.with_columns(*(other[name] for name in other))
    # Reconcile updated columns' types via least common ancestor.
    schema = {
        key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
        for key in other.keys()
    }
    return Table._update_cells(
        self.cast_to_types(**schema), other.cast_to_types(**schema)
    )
def _update_cells(self, other: Table) -> Table:
    """Overlay *other*'s cells; *other*'s universe must be a subset of ours."""
    if not other._universe.is_subset_of(self._universe):
        raise ValueError(
            "Universe of the argument of Table.update_cells() needs to be "
            + "a subset of the universe of the updated table.\n"
            + "Consider using Table.promise_is_subset_of() to assert this.\n"
            + "(However, untrue assertion might result in runtime errors.)"
        )
    updates = {name: other._columns[name] for name in other.keys()}
    return self._table_with_context(
        clmn.UpdateCellsContext(
            left=self._id_column, right=other._id_column, updates=updates
        )
    )
def update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
    """Updates rows of `self`, breaking ties in favor for the rows in `other`.
    Semantics:
    - result.columns == self.columns == other.columns
    - result.id == self.id ∪ other.id
    Requires:
    - other.columns == self.columns
    Args:
    other: the other table.
    Returns:
    Table: `self` updated with rows from `other`.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 30
    ... 12 | 12 | Tom | 40
    ... ''')
    >>> t3 = t1.update_rows(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    12 | Tom | 40
    """
    # update_rows requires the exact same column set on both sides.
    if other.keys() != self.keys():
        raise ValueError(
            "Columns do not match between argument of Table.update_rows() and the updated table."
        )
    if self._universe.is_subset_of(other._universe):
        # Every row of self is overridden by other, so the result is just other.
        warnings.warn(
            "Universe of self is a subset of universe of other in update_rows. Returning other.",
            stacklevel=5,
        )
        return other
    # Reconcile per-column dtypes via least common ancestor before merging.
    schema = {
        key: dt.types_lca(self.schema.__dtypes__[key], other.schema.__dtypes__[key])
        for key in self.keys()
    }
    union_universes = (self._universe, other._universe)
    universe = G.universe_solver.get_union(*union_universes)
    if universe == self._universe:
        # `other` introduces no new keys: a plain cell update suffices.
        return Table._update_cells(
            self.cast_to_types(**schema), other.cast_to_types(**schema)
        )
    else:
        return Table._update_rows(
            self.cast_to_types(**schema), other.cast_to_types(**schema)
        )
def _update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
    """Core of ``update_rows``: union the key sets, preferring ``other``'s rows."""
    replacement_columns = {
        col_name: other._columns[col_name] for col_name in self.keys()
    }
    update_context = clmn.UpdateRowsContext(
        updates=replacement_columns,
        union_ids=(self._id_column, other._id_column),
    )
    return self._table_with_context(update_context)
def with_columns(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Updates columns of `self`, according to args and kwargs.
    See `table.select` specification for evaluation of args and kwargs.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | owner | pet | size
    ... 1 | Tom | 1 | 10
    ... 2 | Bob | 1 | 9
    ... 3 | Tom | 2 | 8
    ... ''')
    >>> t3 = t1.with_columns(*t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet | size
    8 | Tom | 2 | 8
    9 | Bob | 1 | 9
    10 | Tom | 1 | 10
    """
    # Evaluate the requested columns, then overlay them on the existing ones;
    # new values win over same-named old columns.
    fresh = self.select(*args, **kwargs)
    merged = {**dict(self), **dict(fresh)}
    return self.select(**merged)
def with_id(self, new_index: expr.ColumnReference) -> Table:
    """Set new ids based on another column containing id-typed values.
    To generate ids based on arbitrary valued columns, use `with_id_from`.
    Values assigned must be row-wise unique.
    Args:
    new_index: column to be used as the new index.
    Returns:
    Table with updated ids.
    Example:
    >>> import pytest; pytest.xfail("with_id is hard to test")
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | new_id
    ... 1 | 2
    ... 2 | 3
    ... 3 | 4
    ... ''')
    >>> t3 = t1.promise_universe_is_subset_of(t2).with_id(t2.new_id)
    >>> pw.debug.compute_and_print(t3)
    age owner pet
    ^2 10 Alice 1
    ^3 9 Bob 1
    ^4 8 Alice 2
    """
    # Delegates to _with_new_index, which validates that the column is Pointer-typed.
    return self._with_new_index(new_index)
def with_id_from(
    self,
    *args: expr.ColumnExpression | Value,
    instance: expr.ColumnReference | None = None,
) -> Table:
    """Compute new ids based on values in columns.
    Ids computed from `columns` must be row-wise unique.
    Args:
    columns: columns to be used as primary keys.
    Returns:
    Table: `self` updated with recomputed ids.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = t1 + t1.select(old_id=t1.id)
    >>> t3 = t2.with_id_from(t2.age)
    >>> pw.debug.compute_and_print(t3) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    | age | owner | pet | old_id
    ^... | 8 | Alice | 2 | ^...
    ^... | 9 | Bob | 1 | ^...
    ^... | 10 | Alice | 1 | ^...
    >>> t4 = t3.select(t3.age, t3.owner, t3.pet, same_as_old=(t3.id == t3.old_id),
    ... same_as_new=(t3.id == t3.pointer_from(t3.age)))
    >>> pw.debug.compute_and_print(t4) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    | age | owner | pet | same_as_old | same_as_new
    ^... | 8 | Alice | 2 | False | True
    ^... | 9 | Bob | 1 | False | True
    ^... | 10 | Alice | 1 | False | True
    """
    # _with_new_index needs a materialized column, so evaluate the pointer
    # expression through a select() first.
    key_column = self.select(
        ref_column=self.pointer_from(*args, instance=instance)
    ).ref_column
    return self._with_new_index(new_index=key_column)
def _with_new_index(
    self,
    new_index: expr.ColumnExpression,
) -> Table:
    """Reindex the table by evaluating ``new_index``, which must be Pointer-typed."""
    self._validate_expression(new_index)
    index_type = self.eval_type(new_index)
    if not isinstance(index_type, dt.Pointer):
        raise TypeError(
            f"Pathway supports reindexing Tables with Pointer type only. The type used was {index_type}."
        )
    key_column = self._eval(new_index)
    assert self._universe == key_column.universe
    return self._table_with_context(clmn.ReindexContext(key_column))
def rename_columns(self, **kwargs: str | expr.ColumnReference) -> Table:
    """Rename columns according to kwargs.
    Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.
    Args:
    kwargs: mapping from old column names to new names.
    Returns:
    Table: `self` with columns renamed.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.rename_columns(years_old=t1.age, animal=t1.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | years_old | animal
    Alice | 8 | 2
    Alice | 10 | 1
    Bob | 9 | 1
    """
    # Normalize kwargs into a new-name -> old-name mapping, accepting both
    # plain strings and column references.
    mapping: dict[str, str] = {}
    for new_name, source in kwargs.items():
        old_name = source.name if isinstance(source, expr.ColumnReference) else source
        if old_name not in self._columns:
            raise ValueError(f"Column {old_name} does not exist in a given table.")
        mapping[new_name] = old_name
    # Remove all renamed sources first (so swaps work), then insert under the
    # new names; popping twice for a duplicated source raises KeyError.
    remaining = self._columns.copy()
    for old_name in mapping.values():
        remaining.pop(old_name)
    for new_name, old_name in mapping.items():
        remaining[new_name] = self._columns[old_name]
    wrapped = {
        name: self._wrap_column_in_context(
            self._rowwise_context,
            column,
            mapping.get(name, name),
        )
        for name, column in remaining.items()
    }
    return self._with_same_universe(*wrapped.items())
def rename_by_dict(
    self, names_mapping: dict[str | expr.ColumnReference, str]
) -> Table:
    """Rename columns according to a dictionary.
    Columns not in keys(kwargs) are not changed. New name of a column must not be `id`.
    Args:
    names_mapping: mapping from old column names to new names.
    Returns:
    Table: `self` with columns renamed.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.rename_by_dict({"age": "years_old", t1.pet: "animal"})
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | years_old | animal
    Alice | 8 | 2
    Alice | 10 | 1
    Bob | 9 | 1
    """
    # Invert the mapping into kwargs form (new_name=old_column) and delegate.
    inverted: dict[str, Any] = {}
    for old_name, new_name in names_mapping.items():
        inverted[new_name] = self[old_name]
    return self.rename_columns(**inverted)
def with_prefix(self, prefix: str) -> Table:
    """Rename columns by adding prefix to each name of column.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.with_prefix("u_")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    u_age | u_owner | u_pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 1
    """
    renaming = {column_name: f"{prefix}{column_name}" for column_name in self.keys()}
    return self.rename_by_dict(renaming)
def with_suffix(self, suffix: str) -> Table:
    """Rename columns by adding suffix to each name of column.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.with_suffix("_current")
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age_current | owner_current | pet_current
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 1
    """
    renaming = {column_name: f"{column_name}{suffix}" for column_name in self.keys()}
    return self.rename_by_dict(renaming)
def rename(
    self,
    names_mapping: dict[str | expr.ColumnReference, str] | None = None,
    **kwargs: expr.ColumnExpression,
) -> Table:
    """Rename columns according either a dictionary or kwargs.
    If a mapping is provided using a dictionary, ``rename_by_dict`` will be used.
    Otherwise, ``rename_columns`` will be used with kwargs.
    Columns not in keys(kwargs) are not changed. New name of a column must not be ``id``.
    Args:
    names_mapping: mapping from old column names to new names.
    kwargs: mapping from old column names to new names.
    Returns:
    Table: `self` with columns renamed.
    """
    # Dictionary form takes precedence; kwargs are only consulted without it.
    if names_mapping is None:
        return self.rename_columns(**kwargs)
    return self.rename_by_dict(names_mapping=names_mapping)
def without(self, *columns: str | expr.ColumnReference) -> Table:
    """Selects all columns without named column references.
    Args:
    columns: columns to be dropped provided by `table.column_name` notation.
    Returns:
    Table: `self` without specified columns.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | 1
    ... 9 | Bob | 1
    ... 8 | Alice | 2
    ... ''')
    >>> t2 = t1.without(t1.age, pw.this.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner
    Alice
    Alice
    Bob
    """
    # Collect names to drop, accepting both column references and strings.
    names_to_drop: list[str] = []
    for col in columns:
        if isinstance(col, expr.ColumnReference):
            names_to_drop.append(col.name)
        else:
            assert isinstance(col, str)
            names_to_drop.append(col)
    remaining = self._columns.copy()
    for name in names_to_drop:
        remaining.pop(name)
    wrapped = {
        name: self._wrap_column_in_context(self._rowwise_context, column, name)
        for name, column in remaining.items()
    }
    return self._with_same_universe(*wrapped.items())
def having(self, *indexers: expr.ColumnReference) -> Table[TSchema]:
    """Removes rows so that indexed.ix(indexer) is possible when some rows are missing,
    for each indexer in indexers"""
    filtered = [self._having(indexer) for indexer in indexers]
    if not filtered:
        # No indexers: nothing to restrict by.
        return self
    first, *rest = filtered
    if not rest:
        return first
    # Keep only the rows accepted by every indexer.
    return first.intersect(*rest)
def update_types(self, **kwargs: Any) -> Table:
    """Updates types in schema. Has no effect on the runtime."""
    for column_name in kwargs:
        if column_name not in self.keys():
            raise ValueError(
                "Table.update_types() argument name has to be an existing table column name."
            )
    from pathway.internals.common import declare_type

    redeclared = {
        column_name: declare_type(new_type, self[column_name])
        for column_name, new_type in kwargs.items()
    }
    return self.with_columns(**redeclared)
def cast_to_types(self, **kwargs: Any) -> Table:
    """Casts columns to types."""
    for column_name in kwargs:
        if column_name not in self.keys():
            raise ValueError(
                "Table.cast_to_types() argument name has to be an existing table column name."
            )
    from pathway.internals.common import cast

    converted = {
        column_name: cast(new_type, self[column_name])
        for column_name, new_type in kwargs.items()
    }
    return self.with_columns(**converted)
def _having(self, indexer: expr.ColumnReference) -> Table[TSchema]:
    """Restrict rows to those whose id appears in *indexer*'s key column."""
    having_context = clmn.HavingContext(
        orig_id_column=self._id_column, key_column=indexer._column
    )
    return self._table_with_context(having_context)
def with_universe_of(self, other: TableLike) -> Table:
    """Returns a copy of self with exactly the same universe as others.
    Semantics: Required precondition self.universe == other.universe
    Used in situations where Pathway cannot deduce equality of universes, but
    those are equal as verified during runtime.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | pet
    ... 1 | Dog
    ... 7 | Cat
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | age
    ... 1 | 10
    ... 7 | 3
    ... 8 | 100
    ... ''')
    >>> t3 = t2.filter(pw.this.age < 30).with_universe_of(t1)
    >>> t4 = t1 + t3
    >>> pw.debug.compute_and_print(t4, include_id=False)
    pet | age
    Cat | 3
    Dog | 10
    """
    # Already the same universe: no assertion needed, just copy.
    if self._universe == other._universe:
        return self.copy()
    # Record the equality promise with the universe solver, then rebind the
    # table to other's universe (unchecked at graph-build time).
    universes.promise_are_equal(self, other)
    return self._unsafe_promise_universe(other)
def flatten(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Performs a flatmap operation on a column or expression given as a first
    argument. Datatype of this column or expression has to be iterable or Json array.
    Other columns specified in the method arguments are duplicated
    as many times as the length of the iterable.
    It is possible to get ids of source rows by using `table.id` column, e.g.
    `table.flatten(table.column_to_be_flattened, original_id = table.id)`.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | pet | age
    ... 1 | Dog | 2
    ... 7 | Cat | 5
    ... ''')
    >>> t2 = t1.flatten(t1.pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    pet
    C
    D
    a
    g
    o
    t
    >>> t3 = t1.flatten(t1.pet, t1.age)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    pet | age
    C | 5
    D | 2
    a | 5
    g | 2
    o | 2
    t | 5
    """
    selected = self.select(*args, **kwargs)
    combined = combine_args_kwargs(args, kwargs)
    if not combined:
        raise ValueError("Table.flatten() cannot have empty arguments list.")
    # The first argument names the column to be exploded.
    first_name = next(iter(combined))
    return selected._flatten(first_name)
def _flatten(
    self,
    flatten_name: str,
) -> Table:
    """Build the flattened table; *flatten_name* is the column being exploded."""
    source_column = self._columns[flatten_name]
    assert isinstance(source_column, clmn.ColumnWithExpression)
    context = clmn.FlattenContext(
        orig_universe=self._universe,
        flatten_column=source_column,
    )
    # Every remaining column is re-wrapped in the flatten context.
    passthrough = {}
    for name, column in self._columns.items():
        if name == flatten_name:
            continue
        passthrough[name] = self._wrap_column_in_context(context, column, name)
    # Flattened column comes first, preserving the original relative order
    # of the rest.
    result_columns = {flatten_name: context.flatten_result_column}
    result_columns.update(passthrough)
    return Table(
        _columns=result_columns,
        _context=context,
    )
def sort(
    self,
    key: expr.ColumnExpression,
    instance: expr.ColumnExpression | None = None,
) -> Table:
    """
    Sorts a table by the specified keys.
    Args:
    table : pw.Table
    The table to be sorted.
    key (ColumnExpression[int | float | datetime | str | bytes]):
    An expression to sort by.
    instance : ColumnReference or None
    An expression with instance. Rows are sorted within an instance.
    ``prev`` and ``next`` columns will only point to rows that have the same instance.
    Returns:
    pw.Table: The sorted table. Contains two columns: ``prev`` and ``next``, containing the pointers
    to the previous and next rows.
    Example:
    >>> import pathway as pw
    >>> table = pw.debug.table_from_markdown('''
    ... name | age | score
    ... Alice | 25 | 80
    ... Bob | 20 | 90
    ... Charlie | 30 | 80
    ... ''')
    >>> table = table.with_id_from(pw.this.name)
    >>> table += table.sort(key=pw.this.age)
    >>> pw.debug.compute_and_print(table, include_id=True)
    | name | age | score | prev | next
    ^GBSDEEW... | Alice | 25 | 80 | ^EDPSSB1... | ^DS9AT95...
    ^EDPSSB1... | Bob | 20 | 90 | | ^GBSDEEW...
    ^DS9AT95... | Charlie | 30 | 80 | ^GBSDEEW... |
    >>> table = pw.debug.table_from_markdown('''
    ... name | age | score
    ... Alice | 25 | 80
    ... Bob | 20 | 90
    ... Charlie | 30 | 80
    ... David | 35 | 90
    ... Eve | 15 | 80
    ... ''')
    >>> table = table.with_id_from(pw.this.name)
    >>> table += table.sort(key=pw.this.age, instance=pw.this.score)
    >>> pw.debug.compute_and_print(table, include_id=True)
    | name | age | score | prev | next
    ^GBSDEEW... | Alice | 25 | 80 | ^T0B95XH... | ^DS9AT95...
    ^EDPSSB1... | Bob | 20 | 90 | | ^RT0AZWX...
    ^DS9AT95... | Charlie | 30 | 80 | ^GBSDEEW... |
    ^RT0AZWX... | David | 35 | 90 | ^EDPSSB1... |
    ^T0B95XH... | Eve | 15 | 80 | | ^GBSDEEW...
    """
    # Wrap `instance` (possibly a plain value or None) into a column expression
    # so it can be evaluated alongside `key`.
    instance = clmn.ColumnExpression._wrap(instance)
    context = clmn.SortingContext(
        self._eval(key),
        self._eval(instance),
    )
    # The result carries only the linked-list pointers produced by the sorting
    # context; callers typically add them back onto the source table with `+=`.
    return Table(
        _columns={
            "prev": context.prev_column,
            "next": context.next_column,
        },
        _context=context,
    )
def _set_source(self, source: OutputHandle):
    """Attach lineage pointing at *source* to the id column, data columns and universe.

    Existing lineage is never overwritten — only missing entries are filled in.
    """
    self._source = source
    if not hasattr(self._id_column, "lineage"):
        self._id_column.lineage = clmn.ColumnLineage(name="id", source=source)
    for name, column in self._columns.items():
        if not hasattr(column, "lineage"):
            column.lineage = clmn.ColumnLineage(name=name, source=source)
    if not hasattr(self._universe, "lineage"):
        self._universe.lineage = clmn.Lineage(source=source)
def _unsafe_promise_universe(self, other: TableLike) -> Table:
    """Rebind this table to other's universe without verification (caller promises equality)."""
    return self._table_with_context(
        clmn.PromiseSameUniverseContext(self._id_column, other._id_column)
    )
def _validate_expression(self, expression: expr.ColumnExpression):
    """Raise if *expression* references a column from a different universe."""
    for dep in expression._dependencies_above_reducer():
        if self._universe == dep._column.universe:
            continue
        raise ValueError(
            f"You cannot use {dep.to_column_expression()} in this context."
            + " Its universe is different than the universe of the table the method"
            + " was called on. You can use <table1>.with_universe_of(<table2>)"
            + " to assign universe of <table2> to <table1> if you're sure their"
            + " sets of keys are equal."
        )
def _wrap_column_in_context(
    self,
    context: clmn.Context,
    column: clmn.Column,
    name: str,
    lineage: clmn.Lineage | None = None,
) -> clmn.Column:
    """Contextualize column by wrapping it in expression."""
    reference = expr.ColumnReference(_table=self, _column=column, _name=name)
    column_cls = reference._column_with_expression_cls
    return column_cls(
        context=context,
        universe=context.universe,
        expression=reference,
        lineage=lineage,
    )
def _table_with_context(self, context: clmn.Context) -> Table:
    """Rebuild this table with every column re-wrapped in *context*."""
    wrapped = {
        name: self._wrap_column_in_context(context, column, name)
        for name, column in self._columns.items()
    }
    return Table(_columns=wrapped, _context=context)
def _table_restricted_context(self) -> clmn.TableRestrictedRowwiseContext:
    # Rowwise context restricted to this table (and its id column) only.
    return clmn.TableRestrictedRowwiseContext(self._id_column, self)
def _eval(
    self, expression: expr.ColumnExpression, context: clmn.Context | None = None
) -> clmn.ColumnWithExpression:
    """Desugar expression and wrap it in given context (rowwise by default)."""
    effective_context = self._rowwise_context if context is None else context
    return expression._column_with_expression_cls(
        context=effective_context,
        universe=effective_context.universe,
        expression=expression,
    )
def _from_schema(cls: type[TTable], schema: type[Schema]) -> TTable:
    """Materialize a fresh table (new universe) whose columns follow *schema*."""
    universe = Universe()
    context = clmn.MaterializedContext(universe, schema.universe_properties)
    materialized = {
        name: clmn.MaterializedColumn(universe, schema.column_properties(name))
        for name in schema.column_names()
    }
    return cls(_columns=materialized, _schema=schema, _context=context)
def __repr__(self) -> str:
    """Short debug representation showing the column typehints."""
    return "<pathway.Table schema={}>".format(dict(self.typehints()))
def _with_same_universe(
    self,
    *columns: tuple[str, clmn.Column],
    schema: type[Schema] | None = None,
) -> Table:
    """Build a table over the current rowwise context from (name, column) pairs."""
    return Table(
        _columns=dict(columns),
        _schema=schema,
        _context=self._rowwise_context,
    )
def _sort_columns_by_other(self, other: Table):
    """Reorder this table's column dict to match *other*'s column order in place."""
    assert self.keys() == other.keys()
    reordered = {name: self._columns[name] for name in other.keys()}
    self._columns = reordered
def _operator_dependencies(self) -> StableSet[Table]:
    # A plain table's only operator dependency is itself.
    return StableSet([self])
def debug(self, name: str):
    """Register a debug operator printing this table under *name*; returns self for chaining."""
    G.add_operator(
        # avoid shadowing the builtin `id` in the factory lambda
        lambda operator_id: DebugOperator(name, operator_id),
        lambda operator: operator(self),
    )
    return self
def to(self, sink: DataSink) -> None:
    """Write this table to the given data sink."""
    # Local import to avoid a circular dependency with table_io.
    from pathway.internals import table_io

    table_io.table_to_datasink(self, sink)
def _materialize(self, universe: Universe):
    """Clone this table as materialized columns bound to *universe*."""
    context = clmn.MaterializedContext(universe)
    materialized = {
        name: clmn.MaterializedColumn(universe, column.properties)
        for name, column in self._columns.items()
    }
    return Table(
        _columns=materialized,
        _schema=self.schema,
        _context=context,
    )
def pointer_from(
    self, *args: Any, optional=False, instance: expr.ColumnReference | None = None
):
    """Pseudo-random hash of its argument. Produces pointer types. Applied column-wise.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age owner pet
    ... 1 10 Alice dog
    ... 2 9 Bob dog
    ... 3 8 Alice cat
    ... 4 7 Bob dog''')
    >>> g = t1.groupby(t1.owner).reduce(refcol = t1.pointer_from(t1.owner)) # g.id == g.refcol
    >>> pw.debug.compute_and_print(g.select(test = (g.id == g.refcol)), include_id=False)
    test
    True
    True
    """
    # The instance column, when given, participates as the last key component.
    key_parts = args if instance is None else (*args, instance)
    # XXX verify types for the table primary_keys
    return expr.PointerExpression(self, *key_parts, optional=optional)
def ix_ref(
    self,
    *args: expr.ColumnExpression | Value,
    optional: bool = False,
    context=None,
    instance: expr.ColumnReference | None = None,
):
    """Reindexes the table using expressions as primary keys.
    Uses keys from context, or tries to infer proper context from the expression.
    If optional is True, then None in expression values result in None values in the result columns.
    Missing values in table keys result in RuntimeError.
    Context can be anything that allows for `select` or `reduce`, or `pathway.this` construct
    (latter results in returning a delayed operation, and should be only used when using `ix` inside
    join().select() or groupby().reduce() sequence).
    Args:
    args: Column references.
    Returns:
    Row: indexed row.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... name | pet
    ... Alice | dog
    ... Bob | cat
    ... Carole | cat
    ... David | dog
    ... ''')
    >>> t2 = t1.with_id_from(pw.this.name)
    >>> t2 = t2.select(*pw.this, new_value=pw.this.ix_ref("Alice").pet)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    name | pet | new_value
    Alice | dog | dog
    Bob | cat | dog
    Carole | cat | dog
    David | dog | dog
    Tables obtained by a groupby/reduce scheme always have primary keys:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... name | pet
    ... Alice | dog
    ... Bob | cat
    ... Carole | cat
    ... David | cat
    ... ''')
    >>> t2 = t1.groupby(pw.this.pet).reduce(pw.this.pet, count=pw.reducers.count())
    >>> t3 = t1.select(*pw.this, new_value=t2.ix_ref(t1.pet).count)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    name | pet | new_value
    Alice | dog | 1
    Bob | cat | 3
    Carole | cat | 3
    David | cat | 3
    Single-row tables can be accessed via `ix_ref()`:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... name | pet
    ... Alice | dog
    ... Bob | cat
    ... Carole | cat
    ... David | cat
    ... ''')
    >>> t2 = t1.reduce(count=pw.reducers.count())
    >>> t3 = t1.select(*pw.this, new_value=t2.ix_ref(context=t1).count)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    name | pet | new_value
    Alice | dog | 4
    Bob | cat | 4
    Carole | cat | 4
    David | cat | 4
    """
    # Hash the key expressions into a pointer and delegate row lookup to ix();
    # `optional` is forwarded twice: once to make the pointer nullable, once
    # so ix() propagates None rather than failing.
    return self.ix(
        self.pointer_from(*args, optional=optional, instance=instance),
        optional=optional,
        context=context,
    )
def _subtables(self) -> StableSet[Table]:
    # A plain table contributes just itself.
    return StableSet([self])
def _substitutions(
    self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
    # A plain table requires no column substitutions: itself, empty mapping.
    return self, {}
def typehints(self) -> Mapping[str, Any]:
    """
    Return the types of the columns as a dictionary.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t1.typehints()
    mappingproxy({'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>})
    """
    # Delegates to the schema, which owns the column typing information.
    return self.schema.typehints()
def eval_type(self, expression: expr.ColumnExpression) -> dt.DType:
    """Infer the dtype of *expression* evaluated in this table's rowwise context."""
    interpreter = self._rowwise_context._get_type_interpreter()
    typed = interpreter.eval_expression(expression, state=TypeInterpreterState())
    return typed._dtype
def _auto_live(self) -> Table:
    """Make self automatically live in interactive mode"""
    from pathway.internals.interactive import is_interactive_mode_enabled

    return self.live() if is_interactive_mode_enabled() else self
def live(self) -> LiveTable[TSchema]:
    """Turn this table into a LiveTable (experimental interactive feature)."""
    # Local import to avoid a circular dependency with the interactive module.
    from pathway.internals.interactive import LiveTable

    warnings.warn("live tables are an experimental feature", stacklevel=2)
    return LiveTable._create(self)
def _format_output_value_fields(table: Table) -> list[api.ValueField]:
    """Build an ANY-typed output ValueField for every column of *table*."""
    return [
        api.ValueField(column_name, api.PathwayType.ANY)
        for column_name in table._columns.keys()
    ]
166,700 | from __future__ import annotations
from typing import TYPE_CHECKING
from pathway.internals.parse_graph import G
G = ParseGraph()
The provided code snippet includes necessary dependencies for implementing the `promise_are_pairwise_disjoint` function. Write a Python function `def promise_are_pairwise_disjoint(self: TableLike, *others: TableLike) -> None` to solve the following problem:
Asserts to Pathway that the universes of self and each of the others are pairwise disjoint. Semantics: Used in situations where Pathway cannot deduce universes are disjoint. Returns: None Note: The assertion works in place. >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... | age | owner | pet ... 1 | 10 | Alice | 1 ... 2 | 9 | Bob | 1 ... 3 | 8 | Alice | 2 ... ''') >>> t2 = pw.debug.table_from_markdown(''' ... | age | owner | pet ... 11 | 11 | Alice | 30 ... 12 | 12 | Tom | 40 ... ''') >>> pw.universes.promise_are_pairwise_disjoint(t1, t2) >>> t3 = t1.concat(t2) >>> pw.debug.compute_and_print(t3, include_id=False) age | owner | pet 8 | Alice | 2 9 | Bob | 1 10 | Alice | 1 11 | Alice | 30 12 | Tom | 40
Here is the function:
def promise_are_pairwise_disjoint(self: TableLike, *others: TableLike) -> None:
    """Asserts to Pathway that the universes of self and each of the others are pairwise disjoint.
    Semantics: Used in situations where Pathway cannot deduce universes are disjoint.
    Returns: None
    Note: The assertion works in place.
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 11 | 11 | Alice | 30
    ... 12 | 12 | Tom | 40
    ... ''')
    >>> pw.universes.promise_are_pairwise_disjoint(t1, t2)
    >>> t3 = t1.concat(t2)
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 1
    11 | Alice | 30
    12 | Tom | 40
    """
    # Register the disjointness promise with the global universe solver.
    G.universe_solver.register_as_disjoint(
        self._universe, *(other._universe for other in others)
    )
166,701 | from __future__ import annotations
from typing import TYPE_CHECKING
from pathway.internals.parse_graph import G
G = ParseGraph()
The provided code snippet includes necessary dependencies for implementing the `promise_is_subset_of` function. Write a Python function `def promise_is_subset_of(self: TableLike, *others: TableLike) -> None` to solve the following problem:
Asserts to Pathway that an universe of self is a subset of universe of each of the others. Semantics: Used in situations where Pathway cannot deduce one universe being a subset of another. Returns: None Note: The assertion works in place. Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... | age | owner | pet ... 1 | 10 | Alice | 1 ... 2 | 9 | Bob | 1 ... 3 | 8 | Alice | 2 ... ''') >>> t2 = pw.debug.table_from_markdown(''' ... | age | owner | pet ... 1 | 10 | Alice | 30 ... ''') >>> pw.universes.promise_is_subset_of(t2, t1) >>> t3 = t1 << t2 >>> pw.debug.compute_and_print(t3, include_id=False) age | owner | pet 8 | Alice | 2 9 | Bob | 1 10 | Alice | 30
Here is the function:
def promise_is_subset_of(self: TableLike, *others: TableLike) -> None:
    """Asserts to Pathway that an universe of self is a subset of universe of each of the others.
    Semantics: Used in situations where Pathway cannot deduce one universe being a subset of another.
    Returns: None
    Note: The assertion works in place.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 1
    ... 2 | 9 | Bob | 1
    ... 3 | 8 | Alice | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... | age | owner | pet
    ... 1 | 10 | Alice | 30
    ... ''')
    >>> pw.universes.promise_is_subset_of(t2, t1)
    >>> t3 = t1 << t2
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner | pet
    8 | Alice | 2
    9 | Bob | 1
    10 | Alice | 30
    """
    # Register self's universe as a subset of every listed superset.
    subset_universe = self._universe
    for superset in others:
        G.universe_solver.register_as_subset(subset_universe, superset._universe)
166,702 | from __future__ import annotations
from typing import TYPE_CHECKING
from pathway.internals.parse_graph import G
G = ParseGraph()
The provided code snippet includes necessary dependencies for implementing the `promise_are_equal` function. Write a Python function `def promise_are_equal(self: TableLike, *others: TableLike) -> None` to solve the following problem:
r"""Asserts to Pathway that an universe of self is equal to each of the others universes. Semantics: Used in situations where Pathway cannot deduce one universe being equal to another universe. Returns: None Note: The assertion works in place. Example: >>> import pathway as pw >>> import pytest >>> t1 = pw.debug.table_from_markdown( ... ''' ... | age | owner | pet ... 1 | 8 | Alice | cat ... 2 | 9 | Bob | dog ... 3 | 15 | Alice | tortoise ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | age | owner ... 1 | 11 | Alice ... 2 | 12 | Tom ... 3 | 7 | Eve ... 4 | 99 | Papa ... ''' ... ).filter(pw.this.age<20) >>> t3 = t2.filter(pw.this.age > 10) >>> with pytest.raises(ValueError): ... t1.update_cells(t3) >>> pw.universes.promise_are_equal(t1, t2) >>> result = t1.update_cells(t3) >>> pw.debug.compute_and_print(result, include_id=False) age | owner | pet 11 | Alice | cat 12 | Tom | dog 15 | Alice | tortoise
Here is the function:
def promise_are_equal(self: TableLike, *others: TableLike) -> None:
    r"""Asserts to Pathway that an universe of self is equal to each of the others universes.
    Semantics: Used in situations where Pathway cannot deduce one universe being equal to another universe.
    Returns: None
    Note: The assertion works in place.
    Example:
    >>> import pathway as pw
    >>> import pytest
    >>> t1 = pw.debug.table_from_markdown(
    ... '''
    ... | age | owner | pet
    ... 1 | 8 | Alice | cat
    ... 2 | 9 | Bob | dog
    ... 3 | 15 | Alice | tortoise
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ... '''
    ... | age | owner
    ... 1 | 11 | Alice
    ... 2 | 12 | Tom
    ... 3 | 7 | Eve
    ... 4 | 99 | Papa
    ... '''
    ... ).filter(pw.this.age<20)
    >>> t3 = t2.filter(pw.this.age > 10)
    >>> with pytest.raises(ValueError):
    ... t1.update_cells(t3)
    >>> pw.universes.promise_are_equal(t1, t2)
    >>> result = t1.update_cells(t3)
    >>> pw.debug.compute_and_print(result, include_id=False)
    age | owner | pet
    11 | Alice | cat
    12 | Tom | dog
    15 | Alice | tortoise
    """
    # Register self's universe as equal to every listed counterpart.
    reference_universe = self._universe
    for counterpart in others:
        G.universe_solver.register_as_equal(reference_universe, counterpart._universe)
166,703 | import contextlib
import logging
from enum import Enum
from typing import Any
from rich import box
from rich.align import Align
from rich.console import Console, ConsoleOptions, Group, RenderResult
from rich.layout import Layout
from rich.live import Live
from rich.logging import RichHandler
from rich.panel import Panel
from rich.segment import Segment
from rich.table import Table
from pathway.internals import api
class StatsMonitor:
    """Holds the live ``rich`` layout shown while a computation runs.

    The layout has two panes: a "monitoring" pane with runtime stats and a
    "logs" pane fed by a ``RichHandler`` attached to the logging machinery.
    """

    def __init__(self, node_names: list[tuple[int, str]]) -> None:
        self.layout = Layout(name="root")
        # Give the monitoring pane extra room when per-operator rows are shown.
        monitoring_ratio = 2 if node_names else 1
        self.layout.split(
            Layout(name="monitoring", ratio=monitoring_ratio),
            Layout(name="logs"),
        )
        self.layout["monitoring"].update("")
        buffered_console = ConsolePrintingToBuffer()
        self.handler = RichHandler(console=buffered_console, show_path=False)
        self.layout["logs"].update(LogsOutput(buffered_console))
        self.node_names = node_names

    def get_logging_handler(self) -> RichHandler:
        """Return the handler that routes log records into the logs pane."""
        return self.handler

    def update_monitoring(self, data: Any, now: int) -> None:
        """Refresh the monitoring pane with the latest stats snapshot."""
        self.layout["monitoring"].update(MonitoringOutput(self.node_names, data, now))
class MonitoringLevel(Enum):
    """Specifies a verbosity of Pathway monitoring mechanism."""

    AUTO = 0
    """
    Automatically sets IN_OUT in an interactive terminal and jupyter notebook.
    Sets NONE otherwise.
    """
    AUTO_ALL = 1
    """
    Automatically sets ALL in an interactive terminal and jupyter notebook.
    Sets NONE otherwise.
    """
    NONE = 2
    """No monitoring."""
    IN_OUT = 3
    """
    Monitor input connectors and input and output latency. The latency is measured as
    the difference between the time when the operator processed the data and the time
    when pathway acquired the data.
    """
    ALL = 4
    """
    Monitor input connectors and latency for each operator in the execution graph. The
    latency is measured as the difference between the time when the operator processed
    the data and the time when pathway acquired the data.
    """

    def to_internal(self) -> api.MonitoringLevel:
        """Translate this user-facing level into the engine-level enum."""
        # The AUTO levels degrade to NONE when the output is neither an
        # interactive terminal nor a Jupyter notebook.
        if (
            self in {MonitoringLevel.AUTO, MonitoringLevel.AUTO_ALL}
            and _disable_monitoring_when_auto()
        ):
            return api.MonitoringLevel.NONE
        return {
            MonitoringLevel.AUTO: api.MonitoringLevel.IN_OUT,
            MonitoringLevel.AUTO_ALL: api.MonitoringLevel.ALL,
            MonitoringLevel.NONE: api.MonitoringLevel.NONE,
            MonitoringLevel.IN_OUT: api.MonitoringLevel.IN_OUT,
            MonitoringLevel.ALL: api.MonitoringLevel.ALL,
        }[self]
def monitor_stats(
    monitoring_level: api.MonitoringLevel,
    node_names: list[tuple[int, str]],
    default_logging: bool,
    refresh_per_second: int = 4,
):
    """Set up (and tear down) live stats monitoring; yields a StatsMonitor or None.

    NOTE(review): this is a one-shot generator and is presumably wrapped with
    ``contextlib.contextmanager`` at its definition site (the decorator is not
    visible in this excerpt) — confirm before calling it directly.
    """
    # Per-operator rows are only meaningful at the ALL level.
    if monitoring_level != api.MonitoringLevel.ALL:
        node_names = []
    if monitoring_level != api.MonitoringLevel.NONE:
        stats_monitor = StatsMonitor(node_names)
        handler = stats_monitor.get_logging_handler()
        # Clear default handlers and route root-logger records into the live
        # layout's logs pane for the duration of the run.
        logging.basicConfig(level=logging.INFO, handlers=[])
        logging.getLogger().addHandler(handler)
        with Live(
            stats_monitor.layout, refresh_per_second=refresh_per_second, screen=True
        ):
            yield stats_monitor
        logging.getLogger().removeHandler(handler)
    else:
        if default_logging:
            logging.basicConfig(
                level=logging.INFO,
                format="[%(asctime)s]:%(levelname)s:%(message)s",
                datefmt="%Y-%m-%dT%H:%M:%S",
            )
        yield None
import contextlib
import logging
from enum import Enum
from typing import Any
from rich import box
from rich.align import Align
from rich.console import Console, ConsoleOptions, Group, RenderResult
from rich.layout import Layout
from rich.live import Live
from rich.logging import RichHandler
from rich.panel import Panel
from rich.segment import Segment
from rich.table import Table
from pathway.internals import api
def _disable_monitoring_when_auto() -> bool:
    """Return True when AUTO monitoring levels should fall back to NONE.

    Monitoring is kept only for interactive terminals and Jupyter outputs.
    """
    probe = Console()
    interactive = probe.is_interactive or probe.is_jupyter
    return not interactive
from pathway.internals import parse_graph
from pathway.internals.graph_runner import GraphRunner
from pathway.internals.monitoring import MonitoringLevel
from pathway.internals.runtime_type_check import check_arg_types
from pathway.persistence import Config as PersistenceConfig
class GraphRunner:
    """Runs evaluation of ParseGraph."""

    # Parsed operator graph to evaluate.
    _graph: graph.ParseGraph
    debug: bool
    ignore_asserts: bool
    runtime_typechecking: bool
    telemetry: telemetry.Telemetry

    def __init__(
        self,
        input_graph: graph.ParseGraph,
        *,
        debug: bool = False,
        ignore_asserts: bool | None = None,
        monitoring_level: MonitoringLevel = MonitoringLevel.AUTO,
        with_http_server: bool = False,
        default_logging: bool = True,
        persistence_config: PersistenceConfig | None = None,
        runtime_typechecking: bool | None = None,
        license_key: str | None = None,
    ) -> None:
        self._graph = input_graph
        self.debug = debug
        # Unset options fall back to process-wide pathway_config defaults.
        if ignore_asserts is None:
            ignore_asserts = pathway_config.ignore_asserts
        self.ignore_asserts = ignore_asserts
        self.monitoring_level = monitoring_level
        self.with_http_server = with_http_server
        self.default_logging = default_logging
        self.persistence_config = persistence_config or pathway_config.replay_config
        if runtime_typechecking is None:
            self.runtime_typechecking = pathway_config.runtime_typechecking
        else:
            self.runtime_typechecking = runtime_typechecking
        if license_key is None:
            license_key = pathway_config.license_key
        self.license_key = license_key
        self.telemetry = telemetry.Telemetry.create(
            license_key=self.license_key,
            telemetry_server=pathway_config.telemetry_server,
        )

    def run_nodes(
        self,
        nodes: Iterable[Operator],
        /,
        *,
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
    ):
        # Run the given nodes plus everything they transitively depend on.
        all_nodes = self._tree_shake(self._graph.global_scope, nodes)
        self._run(all_nodes, after_build=after_build)

    def run_tables(
        self,
        *tables: table.Table,
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
    ) -> list[api.CapturedStream]:
        # Run only the subgraph producing the given tables; capture their streams.
        nodes = self.tree_shake_tables(self._graph.global_scope, tables)
        return self._run(nodes, output_tables=tables, after_build=after_build)

    def run_all(
        self,
        *,
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
    ) -> None:
        # No tree-shaking: evaluate every normal node in the global scope.
        self._run(
            self._graph.global_scope.normal_nodes, after_build=after_build, run_all=True
        )

    def run_outputs(
        self,
        *,
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
    ) -> None:
        # Run the subgraph reachable from the output (sink) nodes.
        self.run_nodes(self._graph.global_scope.output_nodes, after_build=after_build)

    def has_bounded_input(self, table: table.Table) -> bool:
        """Return True iff every input feeding *table* is a bounded (finite) source."""
        nodes = self.tree_shake_tables(self._graph.global_scope, [table])
        for node in nodes:
            if isinstance(node, InputOperator) and not node.datasource.is_bounded():
                return False
        return True

    def _run(
        self,
        nodes: Iterable[Operator],
        /,
        *,
        output_tables: Collection[table.Table] = (),
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
        run_all: bool = False,
    ) -> list[api.CapturedStream]:
        # Core driver: builds the storage graph, wires monitoring/telemetry/
        # persistence, and hands control to the engine.
        with self.telemetry.tracer.start_as_current_span("graph_runner.run"):
            trace_context, trace_parent = telemetry.get_current_context()
            context = ScopeContext(
                nodes=StableSet(nodes),
                runtime_typechecking=self.runtime_typechecking,
                run_all=run_all,
            )
            storage_graph = OperatorStorageGraph.from_scope_context(
                context, self, output_tables
            )
            # NOTE(review): the argument lines below are orphaned — the call
            # that opened this argument list (apparently a telemetry event or
            # span named "graph_runner.build") is missing from this excerpt;
            # confirm against the upstream file before relying on this block.
                "graph_runner.build",
                context=trace_context,
                attributes=dict(
                    graph=repr(self._graph),
                    debug=self.debug,
                ),
            )

            def logic(
                scope: api.Scope,
                /,
                *,
                storage_graph: OperatorStorageGraph = storage_graph,
                output_tables: Collection[table.Table] = output_tables,
            ) -> list[tuple[api.Table, list[ColumnPath]]]:
                # Executed inside the engine: materialize the storage graph in
                # the given scope and collect the requested output tables.
                state = ScopeState(scope)
                storage_graph.build_scope(scope, state, self)
                if after_build is not None:
                    after_build(state, storage_graph)
                return storage_graph.get_output_tables(output_tables, state)

            # Names shown per-operator in the monitoring UI (ALL level only).
            node_names = [
                (operator.id, operator.label())
                for operator in context.nodes
                if isinstance(operator, ContextualizedIntermediateOperator)
            ]
            monitoring_level = self.monitoring_level.to_internal()
            with (
                new_event_loop() as event_loop,
                monitor_stats(
                    monitoring_level, node_names, self.default_logging
                ) as stats_monitor,
                self.telemetry.with_logging_handler(),
                get_persistence_engine_config(
                    self.persistence_config
                ) as persistence_engine_config,
            ):
                try:
                    return api.run_with_new_graph(
                        logic,
                        event_loop=event_loop,
                        ignore_asserts=self.ignore_asserts,
                        stats_monitor=stats_monitor,
                        monitoring_level=monitoring_level,
                        with_http_server=self.with_http_server,
                        persistence_config=persistence_engine_config,
                        license_key=self.license_key,
                        telemetry_server=pathway_config.telemetry_server,
                        trace_parent=trace_parent,
                    )
                except api.EngineErrorWithTrace as e:
                    # Re-attach the Pathway-level source frame to the engine
                    # error so users see where in their code it originated.
                    error, frame = e.args
                    if frame is not None:
                        trace.add_pathway_trace_note(
                            error,
                            trace.Frame(
                                filename=frame.file_name,
                                line_number=frame.line_number,
                                line=frame.line,
                                function=frame.function,
                            ),
                        )
                    raise error from None

    def tree_shake_tables(
        self, graph_scope: graph.Scope, tables: Iterable[table.Table]
    ) -> StableSet[Operator]:
        """Return all operators needed to compute the given tables."""
        starting_nodes = (table._source.operator for table in tables)
        return self._tree_shake(graph_scope, starting_nodes)

    def _tree_shake(
        self,
        graph_scope: graph.Scope,
        starting_nodes: Iterable[Operator],
    ) -> StableSet[Operator]:
        # In debug mode, debug-only nodes are added as extra roots so their
        # output survives tree-shaking.
        if self.debug:
            starting_nodes = chain(starting_nodes, graph_scope.debug_nodes)
        nodes = StableSet(graph_scope.relevant_nodes(starting_nodes))
        return nodes
class MonitoringLevel(Enum):
    """Specifies a verbosity of Pathway monitoring mechanism."""

    AUTO = 0
    """
    Automatically sets IN_OUT in an interactive terminal and jupyter notebook.
    Sets NONE otherwise.
    """
    AUTO_ALL = 1
    """
    Automatically sets ALL in an interactive terminal and jupyter notebook.
    Sets NONE otherwise.
    """
    NONE = 2
    """No monitoring."""
    IN_OUT = 3
    """
    Monitor input connectors and input and output latency. The latency is measured as
    the difference between the time when the operator processed the data and the time
    when pathway acquired the data.
    """
    ALL = 4
    """
    Monitor input connectors and latency for each operator in the execution graph. The
    latency is measured as the difference between the time when the operator processed
    the data and the time when pathway acquired the data.
    """

    def to_internal(self) -> api.MonitoringLevel:
        """Translate this user-facing level into the engine-level enum."""
        # The AUTO levels degrade to NONE when the output is neither an
        # interactive terminal nor a Jupyter notebook.
        if (
            self in {MonitoringLevel.AUTO, MonitoringLevel.AUTO_ALL}
            and _disable_monitoring_when_auto()
        ):
            return api.MonitoringLevel.NONE
        return {
            MonitoringLevel.AUTO: api.MonitoringLevel.IN_OUT,
            MonitoringLevel.AUTO_ALL: api.MonitoringLevel.ALL,
            MonitoringLevel.NONE: api.MonitoringLevel.NONE,
            MonitoringLevel.IN_OUT: api.MonitoringLevel.IN_OUT,
            MonitoringLevel.ALL: api.MonitoringLevel.ALL,
        }[self]
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run( *, debug: bool = False, monitoring_level: MonitoringLevel = MonitoringLevel.AUTO, with_http_server: bool = False, default_logging: bool = True, persistence_config: PersistenceConfig | None = None, runtime_typechecking: bool | None = None, license_key: str | None = None, ) -> None` to solve the following problem:
Runs the computation graph. Args: debug: enable output out of table.debug() operators monitoring_level: the verbosity of stats monitoring mechanism. One of pathway.MonitoringLevel.NONE, pathway.MonitoringLevel.IN_OUT, pathway.MonitoringLevel.ALL. If unset, pathway will choose between NONE and IN_OUT based on output interactivity. with_http_server: whether to start a http server with runtime metrics. Learn more in a `tutorial </developers/user-guide/deployment/prometheus-monitoring/>`_ . default_logging: whether to allow pathway to set its own logging handler. Set it to False if you want to set your own logging handler. persistence_config: the config for persisting the state in case this persistence is required. runtime_typechecking: enables additional strict type checking at runtime
Here is the function:
def run(
    *,
    debug: bool = False,
    monitoring_level: MonitoringLevel = MonitoringLevel.AUTO,
    with_http_server: bool = False,
    default_logging: bool = True,
    persistence_config: PersistenceConfig | None = None,
    runtime_typechecking: bool | None = None,
    license_key: str | None = None,
) -> None:
    """Runs the computation graph.
    Args:
        debug: enable output out of table.debug() operators
        monitoring_level: the verbosity of stats monitoring mechanism. One of
            pathway.MonitoringLevel.NONE, pathway.MonitoringLevel.IN_OUT,
            pathway.MonitoringLevel.ALL. If unset, pathway will choose between
            NONE and IN_OUT based on output interactivity.
        with_http_server: whether to start a http server with runtime metrics. Learn
            more in a `tutorial </developers/user-guide/deployment/prometheus-monitoring/>`_ .
        default_logging: whether to allow pathway to set its own logging handler. Set
            it to False if you want to set your own logging handler.
        persistence_config: the config for persisting the state in case this
            persistence is required.
        runtime_typechecking: enables additional strict type checking at runtime
        license_key: the license key to use for this run; when unset, the
            globally configured Pathway license key is used.
    """
    # Delegates to GraphRunner over the global parse graph; run_outputs()
    # executes only the subgraph reachable from output nodes (tree-shaken).
    GraphRunner(
        parse_graph.G,
        debug=debug,
        monitoring_level=monitoring_level,
        with_http_server=with_http_server,
        default_logging=default_logging,
        persistence_config=persistence_config,
        license_key=license_key,
        runtime_typechecking=runtime_typechecking,
    ).run_outputs()
from pathway.internals import parse_graph
from pathway.internals.graph_runner import GraphRunner
from pathway.internals.monitoring import MonitoringLevel
from pathway.internals.runtime_type_check import check_arg_types
from pathway.persistence import Config as PersistenceConfig
class GraphRunner:
    """Runs evaluation of ParseGraph."""

    # Parsed operator graph to evaluate.
    _graph: graph.ParseGraph
    debug: bool
    ignore_asserts: bool
    runtime_typechecking: bool
    telemetry: telemetry.Telemetry

    def __init__(
        self,
        input_graph: graph.ParseGraph,
        *,
        debug: bool = False,
        ignore_asserts: bool | None = None,
        monitoring_level: MonitoringLevel = MonitoringLevel.AUTO,
        with_http_server: bool = False,
        default_logging: bool = True,
        persistence_config: PersistenceConfig | None = None,
        runtime_typechecking: bool | None = None,
        license_key: str | None = None,
    ) -> None:
        self._graph = input_graph
        self.debug = debug
        # Unset options fall back to process-wide pathway_config defaults.
        if ignore_asserts is None:
            ignore_asserts = pathway_config.ignore_asserts
        self.ignore_asserts = ignore_asserts
        self.monitoring_level = monitoring_level
        self.with_http_server = with_http_server
        self.default_logging = default_logging
        self.persistence_config = persistence_config or pathway_config.replay_config
        if runtime_typechecking is None:
            self.runtime_typechecking = pathway_config.runtime_typechecking
        else:
            self.runtime_typechecking = runtime_typechecking
        if license_key is None:
            license_key = pathway_config.license_key
        self.license_key = license_key
        self.telemetry = telemetry.Telemetry.create(
            license_key=self.license_key,
            telemetry_server=pathway_config.telemetry_server,
        )

    def run_nodes(
        self,
        nodes: Iterable[Operator],
        /,
        *,
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
    ):
        # Run the given nodes plus everything they transitively depend on.
        all_nodes = self._tree_shake(self._graph.global_scope, nodes)
        self._run(all_nodes, after_build=after_build)

    def run_tables(
        self,
        *tables: table.Table,
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
    ) -> list[api.CapturedStream]:
        # Run only the subgraph producing the given tables; capture their streams.
        nodes = self.tree_shake_tables(self._graph.global_scope, tables)
        return self._run(nodes, output_tables=tables, after_build=after_build)

    def run_all(
        self,
        *,
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
    ) -> None:
        # No tree-shaking: evaluate every normal node in the global scope.
        self._run(
            self._graph.global_scope.normal_nodes, after_build=after_build, run_all=True
        )

    def run_outputs(
        self,
        *,
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
    ) -> None:
        # Run the subgraph reachable from the output (sink) nodes.
        self.run_nodes(self._graph.global_scope.output_nodes, after_build=after_build)

    def has_bounded_input(self, table: table.Table) -> bool:
        """Return True iff every input feeding *table* is a bounded (finite) source."""
        nodes = self.tree_shake_tables(self._graph.global_scope, [table])
        for node in nodes:
            if isinstance(node, InputOperator) and not node.datasource.is_bounded():
                return False
        return True

    def _run(
        self,
        nodes: Iterable[Operator],
        /,
        *,
        output_tables: Collection[table.Table] = (),
        after_build: Callable[[ScopeState, OperatorStorageGraph], None] | None = None,
        run_all: bool = False,
    ) -> list[api.CapturedStream]:
        # Core driver: builds the storage graph, wires monitoring/telemetry/
        # persistence, and hands control to the engine.
        with self.telemetry.tracer.start_as_current_span("graph_runner.run"):
            trace_context, trace_parent = telemetry.get_current_context()
            context = ScopeContext(
                nodes=StableSet(nodes),
                runtime_typechecking=self.runtime_typechecking,
                run_all=run_all,
            )
            storage_graph = OperatorStorageGraph.from_scope_context(
                context, self, output_tables
            )
            # NOTE(review): the argument lines below are orphaned — the call
            # that opened this argument list (apparently a telemetry event or
            # span named "graph_runner.build") is missing from this excerpt;
            # confirm against the upstream file before relying on this block.
                "graph_runner.build",
                context=trace_context,
                attributes=dict(
                    graph=repr(self._graph),
                    debug=self.debug,
                ),
            )

            def logic(
                scope: api.Scope,
                /,
                *,
                storage_graph: OperatorStorageGraph = storage_graph,
                output_tables: Collection[table.Table] = output_tables,
            ) -> list[tuple[api.Table, list[ColumnPath]]]:
                # Executed inside the engine: materialize the storage graph in
                # the given scope and collect the requested output tables.
                state = ScopeState(scope)
                storage_graph.build_scope(scope, state, self)
                if after_build is not None:
                    after_build(state, storage_graph)
                return storage_graph.get_output_tables(output_tables, state)

            # Names shown per-operator in the monitoring UI (ALL level only).
            node_names = [
                (operator.id, operator.label())
                for operator in context.nodes
                if isinstance(operator, ContextualizedIntermediateOperator)
            ]
            monitoring_level = self.monitoring_level.to_internal()
            with (
                new_event_loop() as event_loop,
                monitor_stats(
                    monitoring_level, node_names, self.default_logging
                ) as stats_monitor,
                self.telemetry.with_logging_handler(),
                get_persistence_engine_config(
                    self.persistence_config
                ) as persistence_engine_config,
            ):
                try:
                    return api.run_with_new_graph(
                        logic,
                        event_loop=event_loop,
                        ignore_asserts=self.ignore_asserts,
                        stats_monitor=stats_monitor,
                        monitoring_level=monitoring_level,
                        with_http_server=self.with_http_server,
                        persistence_config=persistence_engine_config,
                        license_key=self.license_key,
                        telemetry_server=pathway_config.telemetry_server,
                        trace_parent=trace_parent,
                    )
                except api.EngineErrorWithTrace as e:
                    # Re-attach the Pathway-level source frame to the engine
                    # error so users see where in their code it originated.
                    error, frame = e.args
                    if frame is not None:
                        trace.add_pathway_trace_note(
                            error,
                            trace.Frame(
                                filename=frame.file_name,
                                line_number=frame.line_number,
                                line=frame.line,
                                function=frame.function,
                            ),
                        )
                    raise error from None

    def tree_shake_tables(
        self, graph_scope: graph.Scope, tables: Iterable[table.Table]
    ) -> StableSet[Operator]:
        """Return all operators needed to compute the given tables."""
        starting_nodes = (table._source.operator for table in tables)
        return self._tree_shake(graph_scope, starting_nodes)

    def _tree_shake(
        self,
        graph_scope: graph.Scope,
        starting_nodes: Iterable[Operator],
    ) -> StableSet[Operator]:
        # In debug mode, debug-only nodes are added as extra roots so their
        # output survives tree-shaking.
        if self.debug:
            starting_nodes = chain(starting_nodes, graph_scope.debug_nodes)
        nodes = StableSet(graph_scope.relevant_nodes(starting_nodes))
        return nodes
class MonitoringLevel(Enum):
    """Specifies a verbosity of Pathway monitoring mechanism."""

    AUTO = 0
    """
    Automatically sets IN_OUT in an interactive terminal and jupyter notebook.
    Sets NONE otherwise.
    """
    AUTO_ALL = 1
    """
    Automatically sets ALL in an interactive terminal and jupyter notebook.
    Sets NONE otherwise.
    """
    NONE = 2
    """No monitoring."""
    IN_OUT = 3
    """
    Monitor input connectors and input and output latency. The latency is measured as
    the difference between the time when the operator processed the data and the time
    when pathway acquired the data.
    """
    ALL = 4
    """
    Monitor input connectors and latency for each operator in the execution graph. The
    latency is measured as the difference between the time when the operator processed
    the data and the time when pathway acquired the data.
    """

    def to_internal(self) -> api.MonitoringLevel:
        """Translate this user-facing level into the engine-level enum."""
        # The AUTO levels degrade to NONE when the output is neither an
        # interactive terminal nor a Jupyter notebook.
        if (
            self in {MonitoringLevel.AUTO, MonitoringLevel.AUTO_ALL}
            and _disable_monitoring_when_auto()
        ):
            return api.MonitoringLevel.NONE
        return {
            MonitoringLevel.AUTO: api.MonitoringLevel.IN_OUT,
            MonitoringLevel.AUTO_ALL: api.MonitoringLevel.ALL,
            MonitoringLevel.NONE: api.MonitoringLevel.NONE,
            MonitoringLevel.IN_OUT: api.MonitoringLevel.IN_OUT,
            MonitoringLevel.ALL: api.MonitoringLevel.ALL,
        }[self]
The provided code snippet includes necessary dependencies for implementing the `run_all` function. Write a Python function `def run_all( *, debug: bool = False, monitoring_level: MonitoringLevel = MonitoringLevel.AUTO, with_http_server: bool = False, default_logging: bool = True, persistence_config: PersistenceConfig | None = None, runtime_typechecking: bool | None = None, license_key: str | None = None, ) -> None` to solve the following problem:
Runs the computation graph with disabled tree-shaking optimization. Args: debug: enable output out of table.debug() operators monitoring_level: the verbosity of stats monitoring mechanism. One of pathway.MonitoringLevel.NONE, pathway.MonitoringLevel.IN_OUT, pathway.MonitoringLevel.ALL. If unset, pathway will choose between NONE and IN_OUT based on output interactivity. with_http_server: whether to start a http server with runtime metrics. Learn more in a `tutorial </developers/user-guide/deployment/prometheus-monitoring/>`_ . default_logging: whether to allow pathway to set its own logging handler. Set it to False if you want to set your own logging handler. persistence_config: the config for persisting the state in case this persistence is required. runtime_typechecking: enables additional strict type checking at runtime
Here is the function:
def run_all(
    *,
    debug: bool = False,
    monitoring_level: MonitoringLevel = MonitoringLevel.AUTO,
    with_http_server: bool = False,
    default_logging: bool = True,
    persistence_config: PersistenceConfig | None = None,
    runtime_typechecking: bool | None = None,
    license_key: str | None = None,
) -> None:
    """Runs the computation graph with disabled tree-shaking optimization.
    Args:
        debug: enable output out of table.debug() operators
        monitoring_level: the verbosity of stats monitoring mechanism. One of
            pathway.MonitoringLevel.NONE, pathway.MonitoringLevel.IN_OUT,
            pathway.MonitoringLevel.ALL. If unset, pathway will choose between
            NONE and IN_OUT based on output interactivity.
        with_http_server: whether to start a http server with runtime metrics. Learn
            more in a `tutorial </developers/user-guide/deployment/prometheus-monitoring/>`_ .
        default_logging: whether to allow pathway to set its own logging handler. Set
            it to False if you want to set your own logging handler.
        persistence_config: the config for persisting the state in case this
            persistence is required.
        runtime_typechecking: enables additional strict type checking at runtime
        license_key: the license key to use for this run; when unset, the
            globally configured Pathway license key is used.
    """
    # Delegates to GraphRunner over the global parse graph; run_all() evaluates
    # every normal node instead of only those reachable from outputs.
    GraphRunner(
        parse_graph.G,
        debug=debug,
        monitoring_level=monitoring_level,
        with_http_server=with_http_server,
        default_logging=default_logging,
        persistence_config=persistence_config,
        runtime_typechecking=runtime_typechecking,
        license_key=license_key,
    ).run_all()
import pickle
from abc import ABC, abstractmethod
from collections import Counter
from typing import ParamSpec, Protocol, TypeVar
from typing_extensions import Self
from pathway.internals import api, expression as expr
from pathway.internals.column import ColumnExpression
from pathway.internals.common import apply_with_type
from pathway.internals.reducers import StatefulManyReducer
from pathway.internals.shadows.inspect import signature
def mark_stub(fun):
    """Tag *fun* as a stub implementation and return it unchanged.

    The ``__pw_stub`` marker lets ``_is_overridden`` distinguish inherited
    stubs from user-provided implementations.
    """
    setattr(fun, "__pw_stub", True)
    return fun
import pickle
from abc import ABC, abstractmethod
from collections import Counter
from typing import ParamSpec, Protocol, TypeVar
from typing_extensions import Self
from pathway.internals import api, expression as expr
from pathway.internals.column import ColumnExpression
from pathway.internals.common import apply_with_type
from pathway.internals.reducers import StatefulManyReducer
from pathway.internals.shadows.inspect import signature
# Generic type of accumulated reducer state; constrained to engine values.
S = TypeVar("S", bound=api.Value)
class ReducerProtocol(Protocol):
    """Callable shape of a reducer: column expressions/values in, expression out."""

    def __call__(
        self, *args: expr.ColumnExpression | api.Value
    ) -> expr.ColumnExpression: ...
def stateful_many(
    combine_many: api.CombineMany[S],
) -> ReducerProtocol:
    """Turn a batch combine function into a reducer usable inside ``reduce``."""

    def reducer(*columns: expr.ColumnExpression | api.Value) -> expr.ColumnExpression:
        # Each call builds a reducer expression over the given argument columns.
        return expr.ReducerExpression(StatefulManyReducer(combine_many), *columns)

    return reducer
class CombineSingle(Protocol[S, P]):
    """Folds a single row into the accumulated state (``None`` = empty state)."""

    def __call__(self, state: S | None, /, *args: P.args, **kwargs: P.kwargs) -> S: ...
def stateful_single(combine_single: CombineSingle[S, ...]) -> ReducerProtocol:
    """Turn a one-row-at-a-time combine function into a reducer.

    Built on top of ``stateful_many``; rows are folded one by one, so the
    resulting reducer only supports additions (no retractions).
    """

    def accumulate(state: S | None, rows: list[tuple[list[api.Value], int]]) -> S:
        for values, multiplicity in rows:
            # Single-row combiners cannot undo updates; negative counts
            # (retractions) are therefore rejected.
            assert multiplicity > 0
            for _ in range(multiplicity):
                state = combine_single(state, *values)
        assert state is not None
        return state

    return stateful_many(accumulate)
import pickle
from abc import ABC, abstractmethod
from collections import Counter
from typing import ParamSpec, Protocol, TypeVar
from typing_extensions import Self
from pathway.internals import api, expression as expr
from pathway.internals.column import ColumnExpression
from pathway.internals.common import apply_with_type
from pathway.internals.reducers import StatefulManyReducer
from pathway.internals.shadows.inspect import signature
def stateful_many(
    combine_many: api.CombineMany[S],
) -> ReducerProtocol:
    """Turn a batch combine function into a reducer usable inside ``reduce``."""

    def wrapper(*args: expr.ColumnExpression | api.Value) -> expr.ColumnExpression:
        # Each call builds a reducer expression over the given argument columns.
        return expr.ReducerExpression(StatefulManyReducer(combine_many), *args)

    return wrapper
class BaseCustomAccumulator(ABC):
    """Utility class for defining custom accumulators, used for custom reducers.
    Custom accumulators should inherit from this class, and should implement ``from_row``,
    ``update`` and ``compute_result``. Optionally ``neutral`` and ``retract`` can be provided
    for more efficient processing on streams with changing data.
    >>> import pathway as pw
    >>> class CustomAvgAccumulator(pw.BaseCustomAccumulator):
    ...   def __init__(self, sum, cnt):
    ...     self.sum = sum
    ...     self.cnt = cnt
    ...
    ...   @classmethod
    ...   def from_row(self, row):
    ...     [val] = row
    ...     return CustomAvgAccumulator(val, 1)
    ...
    ...   def update(self, other):
    ...     self.sum += other.sum
    ...     self.cnt += other.cnt
    ...
    ...   def compute_result(self) -> float:
    ...     return self.sum / self.cnt
    >>> import sys; sys.modules[__name__].CustomAvgAccumulator = CustomAvgAccumulator # NODOCS
    >>> custom_avg = pw.reducers.udf_reducer(CustomAvgAccumulator)
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet | price
    ... 10  | Alice | dog | 100
    ... 9   | Bob   | cat | 80
    ... 8   | Alice | cat | 90
    ... 7   | Bob   | dog | 70
    ... ''')
    >>> t2 = t1.groupby(t1.owner).reduce(t1.owner, avg_price=custom_avg(t1.price))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | avg_price
    Alice | 95.0
    Bob   | 75.0
    """

    # NOTE(review): ``neutral`` and ``from_row`` take ``cls`` and the stub
    # detection elsewhere in this file checks for a ``__pw_stub`` attribute,
    # so these methods are presumably decorated with ``@classmethod`` and
    # ``@mark_stub`` upstream; the decorators are missing from this excerpt —
    # confirm against the original source.
    def neutral(cls) -> Self:
        """Neutral element of the accumulator (aggregation of an empty list).
        This function is optional, and allows for more efficient processing on streams
        with changing data."""
        raise NotImplementedError()

    def from_row(cls, row: list[api.Value]) -> Self:
        """Construct the accumulator from a row of data.
        Row will be passed as a list of values.
        This is a mandatory function."""
        raise NotImplementedError()

    def update(self, other: Self) -> None:
        """Update the accumulator with another one.
        Method does not need to return anything, the change should be in-place.
        This is a mandatory function."""
        raise NotImplementedError()

    def retract(self, other: Self) -> None:
        """Update the accumulator by removing the value of another one.
        This function is optional, and allows more efficient reductions on streams
        with changing data.
        """
        raise NotImplementedError()

    def compute_result(self) -> api.Value:
        """Mandatory function to finalize computation.
        Used to extract answer from final state of accumulator.
        Narrowing the type of this function helps better type the output of the reducer.
        """
        raise NotImplementedError()
def _is_overridden(cls: type[BaseCustomAccumulator], name: str) -> bool:
    """Tell whether *cls* replaced the inherited stub implementation of *name*.

    Base-class stubs carry a ``__pw_stub`` attribute (presumably set via
    ``mark_stub``); a user override does not.
    """
    assert hasattr(BaseCustomAccumulator, name)
    implementation = getattr(cls, name)
    return not hasattr(implementation, "__pw_stub")
def apply_with_type(
    fun: Callable,
    ret_type: type | dt.DType,
    *args: expr.ColumnExpression | Value,
    **kwargs: expr.ColumnExpression | Value,
) -> expr.ColumnExpression:
    """Applies function to column expressions, column-wise.
    Output column type is provided explicitly.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age  owner  pet
    ... 1   10  Alice  dog
    ... 2    9    Bob  dog
    ... 3    8  Alice  cat
    ... 4    7    Bob  dog''')
    >>> t2 = t1.select(col = pw.apply_with_type(lambda left, right: left+right, str, t1.owner, t1.pet))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    col
    Alicecat
    Alicedog
    Bobdog
    Bobdog
    """
    # Keyword arguments still work (forwarded below) but are deprecated in
    # favor of positional arguments.
    if kwargs:
        warn(
            "Passing keyword arguments to the function in pw.apply_with_type is deprecated."
            + " Use positional arguments instead.",
            DeprecationWarning,
            stacklevel=2,
        )
    # Implemented as a thin wrapper over the udf machinery with an explicit
    # return type.
    return udf(fun, return_type=ret_type)(*args, **kwargs)
def signature(obj, *, follow_wrapped=True):
    """Get a signature object for the passed callable.
    Fixed for functions. Would probably break for other callables, use vanilla inspect.signature for those.
    Will be deprecated once python version is bumped to >=3.9
    """
    obj = inspect.unwrap(obj)
    sig = inspect.Signature.from_callable(obj, follow_wrapped=follow_wrapped)
    for name, param in sig.parameters.items():
        annot = param.annotation
        if isinstance(annot, str):
            # String annotations (e.g. under `from __future__ import annotations`)
            # are resolved eagerly against the function's globals.  This mutates
            # the private `_annotation` slot of the Parameter in place.
            sig.parameters[name]._annotation = eval(annot, obj.__globals__)
        if sig.parameters[name]._annotation is inspect._empty:
            # Unannotated parameters default to Any.
            sig.parameters[name]._annotation = Any
    if isinstance(sig._return_annotation, str):
        sig._return_annotation = eval(sig._return_annotation, obj.__globals__)
    if sig._return_annotation is inspect._empty:
        sig._return_annotation = Any
    return sig
The provided code snippet includes necessary dependencies for implementing the `udf_reducer` function. Write a Python function `def udf_reducer( reducer_cls: type[BaseCustomAccumulator], )` to solve the following problem:
Decorator for defining custom reducers. Requires custom accumulator as an argument. Custom accumulator should implement ``from_row``, ``update`` and ``compute_result``. Optionally ``neutral`` and ``retract`` can be provided for more efficient processing on streams with changing data. >>> import pathway as pw >>> class CustomAvgAccumulator(pw.BaseCustomAccumulator): ... def __init__(self, sum, cnt): ... self.sum = sum ... self.cnt = cnt ... ... @classmethod ... def from_row(self, row): ... [val] = row ... return CustomAvgAccumulator(val, 1) ... ... def update(self, other): ... self.sum += other.sum ... self.cnt += other.cnt ... ... def compute_result(self) -> float: ... return self.sum / self.cnt >>> import sys; sys.modules[__name__].CustomAvgAccumulator = CustomAvgAccumulator # NODOCS >>> custom_avg = pw.reducers.udf_reducer(CustomAvgAccumulator) >>> t1 = pw.debug.table_from_markdown(''' ... age | owner | pet | price ... 10 | Alice | dog | 100 ... 9 | Bob | cat | 80 ... 8 | Alice | cat | 90 ... 7 | Bob | dog | 70 ... ''') >>> t2 = t1.groupby(t1.owner).reduce(t1.owner, avg_price=custom_avg(t1.price)) >>> pw.debug.compute_and_print(t2, include_id=False) owner | avg_price Alice | 95.0 Bob | 75.0
Here is the function:
def udf_reducer(
    reducer_cls: type[BaseCustomAccumulator],
):
    """Decorator for defining custom reducers. Requires custom accumulator as an argument.
    Custom accumulator should implement ``from_row``, ``update`` and ``compute_result``.
    Optionally ``neutral`` and ``retract`` can be provided for more efficient processing on
    streams with changing data.
    >>> import pathway as pw
    >>> class CustomAvgAccumulator(pw.BaseCustomAccumulator):
    ...     def __init__(self, sum, cnt):
    ...         self.sum = sum
    ...         self.cnt = cnt
    ...
    ...     @classmethod
    ...     def from_row(self, row):
    ...         [val] = row
    ...         return CustomAvgAccumulator(val, 1)
    ...
    ...     def update(self, other):
    ...         self.sum += other.sum
    ...         self.cnt += other.cnt
    ...
    ...     def compute_result(self) -> float:
    ...         return self.sum / self.cnt
    >>> import sys; sys.modules[__name__].CustomAvgAccumulator = CustomAvgAccumulator # NODOCS
    >>> custom_avg = pw.reducers.udf_reducer(CustomAvgAccumulator)
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet | price
    ... 10 | Alice | dog | 100
    ... 9 | Bob | cat | 80
    ... 8 | Alice | cat | 90
    ... 7 | Bob | dog | 70
    ... ''')
    >>> t2 = t1.groupby(t1.owner).reduce(t1.owner, avg_price=custom_avg(t1.price))
    >>> pw.debug.compute_and_print(t2, include_id=False)
    owner | avg_price
    Alice | 95.0
    Bob | 75.0
    """
    # Detect which optional hooks the accumulator overrides; they select the
    # bookkeeping strategy used by the stateful wrapper below.
    neutral_available = _is_overridden(reducer_cls, "neutral")
    retract_available = _is_overridden(reducer_cls, "retract")

    def wrapper(*args: expr.ColumnExpression | api.Value) -> ColumnExpression:
        @stateful_many
        def stateful_wrapper(
            pickled_state: bytes | None, rows: list[tuple[list[api.Value], int]]
        ) -> bytes | None:
            # The accumulator state travels between calls as pickled bytes.
            # Without ``retract`` we additionally remember the full multiset of
            # inserted rows in ``state._positive_updates`` so the accumulator
            # can be rebuilt from scratch when a deletion arrives; with
            # ``retract`` only a live-row counter ``state._cnt`` is tracked.
            if pickled_state is not None:
                state = pickle.loads(pickled_state)
                if not retract_available:
                    # Stored as nested tuples for pickling; make mutable again.
                    state._positive_updates = list(state._positive_updates)
            else:
                state = None

            # Expand the (row, count) deltas into flat lists of insertions
            # and deletions (a negative count means |count| deletions).
            positive_updates: list[tuple[api.Value, ...]] = []
            negative_updates = []
            for row, count in rows:
                if count > 0:
                    positive_updates.extend([tuple(row)] * count)
                else:
                    negative_updates.extend([tuple(row)] * (-count))

            if not retract_available and len(negative_updates) > 0:
                # No ``retract`` hook: fold the remembered rows back into the
                # pending insertions, cancel deletions against them, and
                # rebuild the accumulator from the surviving rows.
                if state is not None:
                    positive_updates.extend(state._positive_updates)
                    state._positive_updates = []
                    state = None
                acc = Counter(positive_updates)
                acc.subtract(negative_updates)
                # Deleting a row that was never inserted is a logic error.
                assert all(x >= 0 for x in acc.values())
                positive_updates = list(acc.elements())
                negative_updates = []

            if state is None:
                # Seed the accumulator: prefer ``neutral`` when provided,
                # otherwise consume the first positive row via ``from_row``.
                if neutral_available:
                    state = reducer_cls.neutral()
                    if not retract_available:
                        state._positive_updates = []
                    else:
                        state._cnt = 0
                elif len(positive_updates) == 0:
                    if len(negative_updates) == 0:
                        # Nothing happened and there is nothing to store.
                        return None
                    else:
                        raise ValueError(
                            "Unable to process negative update with this custom reducer."
                        )
                else:
                    state = reducer_cls.from_row(list(positive_updates[0]))
                    if not retract_available:
                        state._positive_updates = positive_updates[0:1]
                    else:
                        state._cnt = 1
                    positive_updates = positive_updates[1:]

            # Merge the remaining insertions one row at a time.
            for row_up in positive_updates:
                if not retract_available:
                    state._positive_updates.append(row_up)
                else:
                    state._cnt += 1
                val = reducer_cls.from_row(list(row_up))
                state.update(val)

            # Apply deletions; only reachable with ``retract`` overridden
            # (otherwise deletions were already cancelled out above).
            for row_up in negative_updates:
                if not retract_available:
                    raise ValueError(
                        "Unable to process negative update with this custom reducer."
                    )
                else:
                    state._cnt -= 1
                val = reducer_cls.from_row(list(row_up))
                state.retract(val)

            if not retract_available:
                # Freeze the row multiset into nested tuples before pickling.
                state._positive_updates = tuple(
                    tuple(x) for x in state._positive_updates
                )
            else:
                if state._cnt == 0:
                    # this is fine in this setting, where we process values one by one
                    # if this ever becomes accumulated in a tree, we have to handle
                    # (A-B) updates, so we have to distinguish `0` from intermediate states
                    # accumulating weighted count (weighted by hash) should do fine here
                    return None
            return pickle.dumps(state)

        def extractor(x: bytes):
            # Turn the pickled accumulator back into the reducer's output value.
            unpickled = pickle.loads(x)
            assert isinstance(unpickled, reducer_cls)
            return unpickled.compute_result()

        # Result type is taken from ``compute_result``'s return annotation.
        return apply_with_type(
            extractor,
            signature(reducer_cls.compute_result).return_annotation,
            stateful_wrapper(*args),
        )

    return wrapper
166,710 | import functools
import beartype
The provided code snippet includes necessary dependencies for implementing the `check_arg_types` function. Write a Python function `def check_arg_types(f)` to solve the following problem:
Decorator that enables validating argument types at runtime.
Here is the function:
def check_arg_types(f):
    """Decorator that validates argument types at runtime (via beartype).

    Beartype violations are re-raised as plain ``TypeError`` so callers do not
    need to know about the beartype dependency.
    """
    # Cache the beartype-wrapped function instead of rebuilding it on every
    # call: ``beartype.beartype(f)`` generates a new checking wrapper each
    # time, which the original code redid per invocation. Decoration stays
    # lazy (first call) so that merely defining a decorated function never
    # fails on hints beartype cannot handle.
    validated = None

    @functools.wraps(f)
    def with_type_validation(*args, **kwargs):
        """Hides beartype dependency by reraising beartype exception as TypeError.
        Should not be needed after resolving https://github.com/beartype/beartype/issues/234
        """
        nonlocal validated
        try:
            if validated is None:
                validated = beartype.beartype(f)
            return validated(*args, **kwargs)
        except beartype.roar.BeartypeCallHintParamViolation as e:
            # Hide the beartype-specific exception type from callers.
            raise TypeError(e) from None

    return with_type_validation
166,711 | from __future__ import annotations
import abc
import asyncio
import functools
import sys
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from typing import ParamSpec, TypeVar
import pathway.internals.expression as expr
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.udfs.caches import CacheStrategy, with_cache_strategy
from pathway.internals.udfs.retries import AsyncRetryStrategy, with_retry_strategy
from pathway.internals.udfs.utils import coerce_async
class Executor(abc.ABC):
    """Base class for executors of Pathway UDFs (user-defined functions)."""

    def _wrap(self, fun: Callable) -> Callable:
        """Wrap ``fun`` for execution; provided by concrete executors."""
        ...

    def _apply_expression_type(self) -> type[expr.ApplyExpression]:
        """Return the apply-expression type this executor produces."""
        ...
class AutoExecutor(Executor):
    """Marker executor: it neither wraps functions nor supplies an apply
    expression type itself — both operations raise ``ValueError``."""

    def _wrap(self, fun: Callable) -> Callable:
        message = "You can't wrap a function using AutoExecutor."
        raise ValueError(message)

    def _apply_expression_type(self) -> type[expr.ApplyExpression]:
        message = "AutoExecutor has no apply expression type."
        raise ValueError(message)
The provided code snippet includes necessary dependencies for implementing the `auto_executor` function. Write a Python function `def auto_executor() -> Executor` to solve the following problem:
Returns the automatic executor of Pathway UDF. It deduces whether the execution should be synchronous or asynchronous from the function signature. If the function is a coroutine, then the execution is asynchronous. Otherwise, it is synchronous. Example: >>> import pathway as pw >>> import asyncio >>> import time >>> t = pw.debug.table_from_markdown( ... ''' ... a | b ... 1 | 2 ... 3 | 4 ... 5 | 6 ... ''' ... ) >>> >>> @pw.udf(executor=pw.udfs.auto_executor()) ... def mul(a: int, b: int) -> int: ... return a * b ... >>> result_1 = t.select(res=mul(pw.this.a, pw.this.b)) >>> pw.debug.compute_and_print(result_1, include_id=False) res 2 12 30 >>> >>> @pw.udf(executor=pw.udfs.auto_executor()) ... async def long_running_async_function(a: int, b: int) -> int: ... await asyncio.sleep(0.1) ... return a * b ... >>> result_2 = t.select(res=long_running_async_function(pw.this.a, pw.this.b)) >>> pw.debug.compute_and_print(result_2, include_id=False) res 2 12 30
Here is the function:
def auto_executor() -> Executor:
    """
    Return the automatic executor of Pathway UDFs.

    The execution mode is deduced from the wrapped function's signature:
    a coroutine function is executed asynchronously, any other function
    synchronously.

    Example:

    >>> import pathway as pw
    >>> import asyncio
    >>> t = pw.debug.table_from_markdown(
    ...     '''
    ...     a | b
    ...     1 | 2
    ...     3 | 4
    ...     5 | 6
    ...     '''
    ... )
    >>> @pw.udf(executor=pw.udfs.auto_executor())
    ... def mul(a: int, b: int) -> int:
    ...     return a * b
    >>> result_1 = t.select(res=mul(pw.this.a, pw.this.b))
    >>> pw.debug.compute_and_print(result_1, include_id=False)
    res
    2
    12
    30
    >>> @pw.udf(executor=pw.udfs.auto_executor())
    ... async def long_running_async_function(a: int, b: int) -> int:
    ...     await asyncio.sleep(0.1)
    ...     return a * b
    >>> result_2 = t.select(res=long_running_async_function(pw.this.a, pw.this.b))
    >>> pw.debug.compute_and_print(result_2, include_id=False)
    res
    2
    12
    30
    """
    return AutoExecutor()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.