id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
166,812 | from __future__ import annotations
import inspect
from collections import defaultdict
from collections.abc import Callable, Mapping
from functools import wraps
from typing import (
Any,
ParamSpec,
TypeVar,
get_args,
get_origin,
get_type_hints,
overload,
)
from warnings import warn
from pathway.internals import (
dtype as dt,
expression as expr,
operator as op,
schema,
table,
)
from pathway.internals.api import Value
from pathway.internals.helpers import function_spec
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import async_executor, udf
Value: TypeAlias = Union[
None,
int,
float,
str,
bytes,
bool,
Pointer,
datetime.datetime,
datetime.timedelta,
np.ndarray,
json.Json,
dict[str, _Value],
tuple[_Value, ...],
]
The provided code snippet includes necessary dependencies for implementing the `unwrap` function. Write a Python function `def unwrap(col: expr.ColumnExpression | Value) -> expr.ColumnExpression` to solve the following problem:
Changes the type of the column from Optional[T] to T. If there is any None in the column this operation will raise an exception. Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... colA | colB ... 1 | 5 ... 2 | 9 ... 3 | None ... 4 | 15''') >>> t1.schema <pathway.Schema types={'colA': <class 'int'>, 'colB': int | None}> >>> pw.debug.compute_and_print(t1, include_id=False) colA | colB 1 | 5 2 | 9 3 | 4 | 15 >>> t2 = t1.filter(t1.colA < 3) >>> t2.schema <pathway.Schema types={'colA': <class 'int'>, 'colB': int | None}> >>> pw.debug.compute_and_print(t2, include_id=False) colA | colB 1 | 5 2 | 9 >>> t3 = t2.select(colB = pw.unwrap(t2.colB)) >>> t3.schema <pathway.Schema types={'colB': <class 'int'>}> >>> pw.debug.compute_and_print(t3, include_id=False) colB 5 9
Here is the function:
def unwrap(col: expr.ColumnExpression | Value) -> expr.ColumnExpression:
    """Changes the type of the column from Optional[T] to T. If there is any None in the
    column this operation will raise an exception.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... colA | colB
    ... 1 | 5
    ... 2 | 9
    ... 3 | None
    ... 4 | 15''')
    >>> t1.schema
    <pathway.Schema types={'colA': <class 'int'>, 'colB': int | None}>
    >>> pw.debug.compute_and_print(t1, include_id=False)
    colA | colB
    1 | 5
    2 | 9
    3 |
    4 | 15
    >>> t2 = t1.filter(t1.colA < 3)
    >>> t2.schema
    <pathway.Schema types={'colA': <class 'int'>, 'colB': int | None}>
    >>> pw.debug.compute_and_print(t2, include_id=False)
    colA | colB
    1 | 5
    2 | 9
    >>> t3 = t2.select(colB = pw.unwrap(t2.colB))
    >>> t3.schema
    <pathway.Schema types={'colB': <class 'int'>}>
    >>> pw.debug.compute_and_print(t3, include_id=False)
    colB
    5
    9
    """
    # Build an UnwrapExpression node; the actual None-check happens when the
    # expression is evaluated, not here.
    return expr.UnwrapExpression(col) | Changes the type of the column from Optional[T] to T. If there is any None in the column this operation will raise an exception. Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... colA | colB ... 1 | 5 ... 2 | 9 ... 3 | None ... 4 | 15''') >>> t1.schema <pathway.Schema types={'colA': <class 'int'>, 'colB': int | None}> >>> pw.debug.compute_and_print(t1, include_id=False) colA | colB 1 | 5 2 | 9 3 | 4 | 15 >>> t2 = t1.filter(t1.colA < 3) >>> t2.schema <pathway.Schema types={'colA': <class 'int'>, 'colB': int | None}> >>> pw.debug.compute_and_print(t2, include_id=False) colA | colB 1 | 5 2 | 9 >>> t3 = t2.select(colB = pw.unwrap(t2.colB)) >>> t3.schema <pathway.Schema types={'colB': <class 'int'>}> >>> pw.debug.compute_and_print(t3, include_id=False) colB 5 9 |
166,813 | from __future__ import annotations
import inspect
from collections import defaultdict
from collections.abc import Callable, Mapping
from functools import wraps
from typing import (
Any,
ParamSpec,
TypeVar,
get_args,
get_origin,
get_type_hints,
overload,
)
from warnings import warn
from pathway.internals import (
dtype as dt,
expression as expr,
operator as op,
schema,
table,
)
from pathway.internals.api import Value
from pathway.internals.helpers import function_spec
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import async_executor, udf
T = TypeVar("T")
P = ParamSpec("P")
def table_transformer(func: Callable[P, T]) -> Callable[P, T]: ... | null |
166,814 | from __future__ import annotations
import inspect
from collections import defaultdict
from collections.abc import Callable, Mapping
from functools import wraps
from typing import (
Any,
ParamSpec,
TypeVar,
get_args,
get_origin,
get_type_hints,
overload,
)
from warnings import warn
from pathway.internals import (
dtype as dt,
expression as expr,
operator as op,
schema,
table,
)
from pathway.internals.api import Value
from pathway.internals.helpers import function_spec
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import async_executor, udf
T = TypeVar("T")
P = ParamSpec("P")
def table_transformer(
*,
allow_superset: bool | Mapping[str, bool] = True,
ignore_primary_keys: bool | Mapping[str, bool] = True,
locals: dict[str, Any] | None = None,
) -> Callable[[Callable[P, T]], Callable[P, T]]: ... | null |
166,815 | from __future__ import annotations
import inspect
from collections import defaultdict
from collections.abc import Callable, Mapping
from functools import wraps
from typing import (
Any,
ParamSpec,
TypeVar,
get_args,
get_origin,
get_type_hints,
overload,
)
from warnings import warn
from pathway.internals import (
dtype as dt,
expression as expr,
operator as op,
schema,
table,
)
from pathway.internals.api import Value
from pathway.internals.helpers import function_spec
from pathway.internals.parse_graph import G
from pathway.internals.runtime_type_check import check_arg_types
from pathway.internals.trace import trace_user_frame
from pathway.internals.udfs import async_executor, udf
T = TypeVar("T")
P = ParamSpec("P")
def assert_table_has_schema(
    table: table.Table,
    schema: type[schema.Schema],
    *,
    allow_superset: bool = True,
    ignore_primary_keys: bool = True,
) -> None:
    """Assert that the table's schema is equivalent to ``schema``.

    Args:
        table: Table for which we are asserting schema.
        schema: Schema, which we assert that the Table has.
        allow_superset: if True, the columns of the table can be a superset of
            columns in schema. The default value is True.
        ignore_primary_keys: if True, the assert won't check whether table and
            schema have the same primary keys. The default value is True.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ... 10 | Alice | dog
    ... 9 | Bob | dog
    ... 8 | Alice | cat
    ... 7 | Bob | dog
    ... ''')
    >>> t2 = t1.select(pw.this.owner, age = pw.cast(float, pw.this.age))
    >>> schema = pw.schema_builder(
    ... {"age": pw.column_definition(dtype=float), "owner": pw.column_definition(dtype=str)}
    ... )
    >>> pw.assert_table_has_schema(t2, schema)
    """
    # The table's own schema object implements the equivalence check, so we
    # simply forward the comparison options to it.
    actual_schema = table.schema
    actual_schema.assert_equal_to(
        schema,
        allow_superset=allow_superset,
        ignore_primary_keys=ignore_primary_keys,
    )
The provided code snippet includes necessary dependencies for implementing the `table_transformer` function. Write a Python function `def table_transformer( func: Callable[P, T] | None = None, *, allow_superset: bool | Mapping[str, bool] = True, ignore_primary_keys: bool | Mapping[str, bool] = True, locals: dict[str, Any] | None = None, ) -> Callable[P, T] | Callable[[Callable[P, T]], Callable[P, T]]` to solve the following problem:
Marks a function that performs operations on Tables. As a consequence, arguments and return value, which are annotated to have type pw.Table[S] are checked whether they indeed have schema S. Args: allow_superset: if True, the columns of the table can be a superset of columns in schema. Can be given either as a bool, and this value is then used for all tables, or for each argument separately, by providing a dict whose keys are names of arguments, and values are bools specifying value of allow_superset for this argument. In the latter case to provide value for return value, provide value for key "return". The default value is True. ignore_primary_keys: if True, the assert won't check whether table and schema have the same primary keys. Can be given either as a bool, and this value is then used for all tables, or for each argument separately, by providing a dict whose keys are names of arguments, and values are bools specifying value of ignore_primary_keys for this argument. The default value is True. locals: when Schema class, which is used as a parameter to `pw.Table` is defined locally, you need to pass locals() as locals argument. Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... A | B ... 1 | 6 ... 3 | 8 ... 5 | 2 ... ''') >>> schema = pw.schema_from_types(A=int, B=int) >>> result_schema = pw.schema_from_types(A=int, B=int, C=int) >>> @pw.table_transformer ... def sum_columns(t: pw.Table[schema]) -> pw.Table[result_schema]: ... result = t.with_columns(C=pw.this.A + pw.this.B) ... return result >>> pw.debug.compute_and_print(sum_columns(t1), include_id=False) A | B | C 1 | 6 | 7 3 | 8 | 11 5 | 2 | 7
Here is the function:
def table_transformer(
func: Callable[P, T] | None = None,
*,
allow_superset: bool | Mapping[str, bool] = True,
ignore_primary_keys: bool | Mapping[str, bool] = True,
locals: dict[str, Any] | None = None,
) -> Callable[P, T] | Callable[[Callable[P, T]], Callable[P, T]]:
"""
Marks a function that performs operations on Tables.
As a consequence, arguments and return value, which are annotated to have type pw.Table[S]
are checked whether they indeed have schema S.
Args:
allow_superset: if True, the columns of the table can be a superset of columns
in schema. Can be given either as a bool, and this value is then used for
all tables, or for each argument separately, by providing a dict whose keys
are names of arguments, and values are bools specifying value of allow_superset
for this argument. In the latter case to provide value for return value, provide
value for key "return". The default value is True.
ignore_primary_keys: if True, the assert won't check whether table and schema
have the same primary keys. Can be given either as a bool, and this value is then used for
all tables, or for each argument separately, by providing a dict whose keys
are names of arguments, and values are bools specifying value of ignore_primary_keys
for this argument. The default value is True.
locals: when Schema class, which is used as a parameter to `pw.Table` is defined locally,
you need to pass locals() as locals argument.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... A | B
... 1 | 6
... 3 | 8
... 5 | 2
... ''')
>>> schema = pw.schema_from_types(A=int, B=int)
>>> result_schema = pw.schema_from_types(A=int, B=int, C=int)
>>> @pw.table_transformer
... def sum_columns(t: pw.Table[schema]) -> pw.Table[result_schema]:
... result = t.with_columns(C=pw.this.A + pw.this.B)
... return result
>>> pw.debug.compute_and_print(sum_columns(t1), include_id=False)
A | B | C
1 | 6 | 7
3 | 8 | 11
5 | 2 | 7
"""
def decorator(f):
annotations = get_type_hints(f, localns=locals)
signature = inspect.signature(f)
if isinstance(allow_superset, bool):
allow_superset_dict: Mapping[str, bool] = defaultdict(
lambda: allow_superset
)
else:
allow_superset_dict = allow_superset
if isinstance(ignore_primary_keys, bool):
ignore_primary_keys_dict: Mapping[str, bool] = defaultdict(
lambda: ignore_primary_keys
)
else:
ignore_primary_keys_dict = ignore_primary_keys
def check_annotation(name, value):
annotation = annotations.get(name, None)
if get_origin(annotation) == table.Table and get_args(annotation):
try:
assert_table_has_schema(
value,
get_args(annotation)[0],
allow_superset=allow_superset_dict.get(name, True),
ignore_primary_keys=ignore_primary_keys_dict.get(name, True),
)
except AssertionError as exc:
raise AssertionError(
f"argument {name} has incorrect schema"
) from exc
@wraps(f)
def wrapper(*args, **kwargs):
bound_signature = signature.bind(*args, **kwargs)
for name, arg in bound_signature.arguments.items():
check_annotation(name, arg)
return_value = f(*args, **kwargs)
check_annotation("return", return_value)
return return_value
return wrapper
if func is not None:
return decorator(func)
else:
return decorator | Marks a function that performs operations on Tables. As a consequence, arguments and return value, which are annotated to have type pw.Table[S] are checked whether they indeed have schema S. Args: allow_superset: if True, the columns of the table can be a superset of columns in schema. Can be given either as a bool, and this value is then used for all tables, or for each argument separately, by providing a dict whose keys are names of arguments, and values are bools specifying value of allow_superset for this argument. In the latter case to provide value for return value, provide value for key "return". The default value is True. ignore_primary_keys: if True, the assert won't check whether table and schema have the same primary keys. Can be given either as a bool, and this value is then used for all tables, or for each argument separately, by providing a dict whose keys are names of arguments, and values are bools specifying value of ignore_primary_keys for this argument. The default value is True. locals: when Schema class, which is used as a parameter to `pw.Table` is defined locally, you need to pass locals() as locals argument. Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... A | B ... 1 | 6 ... 3 | 8 ... 5 | 2 ... ''') >>> schema = pw.schema_from_types(A=int, B=int) >>> result_schema = pw.schema_from_types(A=int, B=int, C=int) >>> @pw.table_transformer ... def sum_columns(t: pw.Table[schema]) -> pw.Table[result_schema]: ... result = t.with_columns(C=pw.this.A + pw.this.B) ... return result >>> pw.debug.compute_and_print(sum_columns(t1), include_id=False) A | B | C 1 | 6 | 7 3 | 8 | 11 5 | 2 | 7 |
166,816 | from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Callable, Iterable
from dataclasses import dataclass
from functools import cached_property
from itertools import chain
from types import EllipsisType
from typing import TYPE_CHECKING, Any, ClassVar
import pathway.internals as pw
from pathway.internals import column_properties as cp, dtype as dt, trace
from pathway.internals.expression import ColumnExpression, ColumnReference
from pathway.internals.helpers import SetOnceProperty, StableSet
from pathway.internals.parse_graph import G
from pathway.internals.universe import Universe
class Column(ABC):
def __init__(self, universe: Universe) -> None:
def column_dependencies(self) -> StableSet[Column]:
def trace(self) -> trace.Trace:
def properties(self) -> cp.ColumnProperties:
def dtype(self) -> dt.DType:
class Context(ABC):
def id_column(self) -> IdColumn:
def universe(self) -> Universe:
def column_dependencies_external(self) -> Iterable[Column]:
def column_dependencies_internal(self) -> Iterable[Column]:
def column_dependencies(self) -> StableSet[Column]:
def reference_column_dependencies(self, ref: ColumnReference) -> StableSet[Column]:
def _get_type_interpreter(self):
def expression_type(self, expression: ColumnExpression) -> dt.DType:
def expression_with_type(self, expression: ColumnExpression) -> ColumnExpression:
def intermediate_tables(self) -> Iterable[Table]:
def column_properties(self, column: ColumnWithContext) -> cp.ColumnProperties:
def __init_subclass__(
cls,
/,
column_properties_evaluator: type[
cp.ColumnPropertiesEvaluator
] = cp.DefaultPropsEvaluator,
**kwargs,
) -> None:
class Table(
Joinable,
OperatorInput,
Generic[TSchema],
):
def __init__(
self,
_columns: Mapping[str, clmn.Column],
_context: clmn.Context,
_schema: type[Schema] | None = None,
):
def id(self) -> expr.ColumnReference:
def column_names(self):
def keys(self):
def _get_column(self, name: str) -> clmn.Column:
def _ipython_key_completions_(self):
def __dir__(self):
def _C(self) -> TSchema:
def schema(self) -> type[Schema]:
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference:
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference:
def __getitem__(self, args: list[str | expr.ColumnReference]) -> Table:
def __getitem__(
self, args: str | expr.ColumnReference | list[str | expr.ColumnReference]
) -> expr.ColumnReference | Table:
def from_columns(
*args: expr.ColumnReference, **kwargs: expr.ColumnReference
) -> Table:
def concat_reindex(self, *tables: Table) -> Table:
def empty(**kwargs: dt.DType) -> Table:
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
def __add__(self, other: Table) -> Table:
def slice(self) -> TableSlice:
def filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
def split(
self, split_expression: expr.ColumnExpression
) -> tuple[Table[TSchema], Table[TSchema]]:
def _filter(self, filter_expression: expr.ColumnExpression) -> Table[TSchema]:
def _gradual_broadcast(
self,
threshold_table,
lower_column,
value_column,
upper_column,
) -> Table:
def __gradual_broadcast(
self,
threshold_table,
lower_column,
value_column,
upper_column,
):
def _forget(
self,
threshold_column: expr.ColumnExpression,
time_column: expr.ColumnExpression,
mark_forgetting_records: bool,
) -> Table:
def _forget_immediately(
self,
) -> Table:
def _filter_out_results_of_forgetting(
self,
) -> Table:
def _freeze(
self,
threshold_column: expr.ColumnExpression,
time_column: expr.ColumnExpression,
) -> Table:
def _buffer(
self,
threshold_column: expr.ColumnExpression,
time_column: expr.ColumnExpression,
) -> Table:
def difference(self, other: Table) -> Table[TSchema]:
def intersect(self, *tables: Table) -> Table[TSchema]:
def restrict(self, other: TableLike) -> Table[TSchema]:
def copy(self) -> Table[TSchema]:
def _copy_as(self, table_type: type[TTable], /, **kwargs) -> TTable:
def groupby(
self,
*args: expr.ColumnReference,
id: expr.ColumnReference | None = None,
sort_by: expr.ColumnReference | None = None,
_filter_out_results_of_forgetting: bool = False,
instance: expr.ColumnReference | None = None,
) -> groupbys.GroupedTable:
def reduce(
self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
) -> Table:
def deduplicate(
self,
*,
value: expr.ColumnExpression,
instance: expr.ColumnExpression | None = None,
acceptor: Callable[[T, T], bool],
persistent_id: str | None = None,
) -> Table:
def ix(
self, expression: expr.ColumnExpression, *, optional: bool = False, context=None
) -> Table:
def _ix(
self,
key_expression: expr.ColumnReference,
optional: bool,
) -> Table:
def __lshift__(self, other: Table) -> Table:
def concat(self, *others: Table[TSchema]) -> Table[TSchema]:
def _concat(self, *others: Table[TSchema]) -> Table[TSchema]:
def update_cells(self, other: Table, _stacklevel: int = 1) -> Table:
def _update_cells(self, other: Table) -> Table:
def update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
def _update_rows(self, other: Table[TSchema]) -> Table[TSchema]:
def with_columns(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
def with_id(self, new_index: expr.ColumnReference) -> Table:
def with_id_from(
self,
*args: expr.ColumnExpression | Value,
instance: expr.ColumnReference | None = None,
) -> Table:
def _with_new_index(
self,
new_index: expr.ColumnExpression,
) -> Table:
def rename_columns(self, **kwargs: str | expr.ColumnReference) -> Table:
def rename_by_dict(
self, names_mapping: dict[str | expr.ColumnReference, str]
) -> Table:
def with_prefix(self, prefix: str) -> Table:
def with_suffix(self, suffix: str) -> Table:
def rename(
self,
names_mapping: dict[str | expr.ColumnReference, str] | None = None,
**kwargs: expr.ColumnExpression,
) -> Table:
def without(self, *columns: str | expr.ColumnReference) -> Table:
def having(self, *indexers: expr.ColumnReference) -> Table[TSchema]:
def update_types(self, **kwargs: Any) -> Table:
def cast_to_types(self, **kwargs: Any) -> Table:
def _having(self, indexer: expr.ColumnReference) -> Table[TSchema]:
def with_universe_of(self, other: TableLike) -> Table:
def flatten(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
def _flatten(
self,
flatten_name: str,
) -> Table:
def sort(
self,
key: expr.ColumnExpression,
instance: expr.ColumnExpression | None = None,
) -> Table:
def _set_source(self, source: OutputHandle):
def _unsafe_promise_universe(self, other: TableLike) -> Table:
def _validate_expression(self, expression: expr.ColumnExpression):
def _wrap_column_in_context(
self,
context: clmn.Context,
column: clmn.Column,
name: str,
lineage: clmn.Lineage | None = None,
) -> clmn.Column:
def _table_with_context(self, context: clmn.Context) -> Table:
def _table_restricted_context(self) -> clmn.TableRestrictedRowwiseContext:
def _eval(
self, expression: expr.ColumnExpression, context: clmn.Context | None = None
) -> clmn.ColumnWithExpression:
def _from_schema(cls: type[TTable], schema: type[Schema]) -> TTable:
def __repr__(self) -> str:
def _with_same_universe(
self,
*columns: tuple[str, clmn.Column],
schema: type[Schema] | None = None,
) -> Table:
def _sort_columns_by_other(self, other: Table):
def _operator_dependencies(self) -> StableSet[Table]:
def debug(self, name: str):
def to(self, sink: DataSink) -> None:
def _materialize(self, universe: Universe):
def pointer_from(
self, *args: Any, optional=False, instance: expr.ColumnReference | None = None
):
def ix_ref(
self,
*args: expr.ColumnExpression | Value,
optional: bool = False,
context=None,
instance: expr.ColumnReference | None = None,
):
def _subtables(self) -> StableSet[Table]:
def _substitutions(
self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
def typehints(self) -> Mapping[str, Any]:
def eval_type(self, expression: expr.ColumnExpression) -> dt.DType:
def _auto_live(self) -> Table:
def live(self) -> LiveTable[TSchema]:
def _create_internal_table(columns: Iterable[Column], context: Context) -> Table:
    """Wrap bare columns in a fresh Table, naming them "0", "1", ... by position."""
    # Local import — presumably to avoid a circular import with
    # pathway.internals.table; TODO confirm.
    from pathway.internals.table import Table
    # Column names are simply the stringified positional indices.
    columns_dict = {f"{i}": column for i, column in enumerate(columns)}
    return Table(columns_dict, _context=context) | null |
166,817 | from __future__ import annotations
from abc import ABC, abstractmethod
from functools import cached_property, lru_cache
from typing import TYPE_CHECKING, Any
import pathway
import pathway.internals.row_transformer_table as tt
from pathway.internals import dtype as dt, operator as op, parse_graph, schema
from pathway.internals.api import Pointer, ref_scalar
from pathway.internals.column import MaterializedColumn, MethodColumn
from pathway.internals.column_properties import ColumnProperties
from pathway.internals.schema import Schema, schema_from_types
from pathway.internals.shadows import inspect
def attrs_of_type(cls: type, type_: type):
    """Yield every attribute of ``cls`` (via dir()) that is an instance of ``type_``."""
    for name in dir(cls):
        attr = getattr(cls, name)
        if isinstance(attr, type_):
            # The attribute object records its own name; it must agree with
            # the name it is bound to on the class.
            assert name == attr.name # type: ignore[attr-defined]
            yield attr | null |
166,818 | from __future__ import annotations
import itertools
from collections.abc import Iterator
from functools import lru_cache
from typing import TYPE_CHECKING, Any, cast
from pathway.internals.trace import trace_user_frame
from abc import abstractmethod
import pathway.internals.column as clmn
import pathway.internals.expression as expr
from pathway.internals import thisclass
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
reduce_args_handler,
select_args_handler,
)
from pathway.internals.column_namespace import ColumnNamespace
from pathway.internals.decorators import contextualized_operator
from pathway.internals.desugaring import (
DesugaringContext,
SubstitutionDesugaring,
TableSelectDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.helpers import StableSet
from pathway.internals.join_mode import JoinMode
from pathway.internals.operator_input import OperatorInput
from pathway.internals.shadows import operator as op
from pathway.internals.table_like import TableLike
from pathway.internals.type_interpreter import eval_type
from pathway.internals.universe import Universe
class Joinable(TableLike, DesugaringContext):
def _subtables(self) -> StableSet[Table]: ...
def keys(self): ...
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table: ...
def filter(self, filter_expression: expr.ColumnExpression) -> Joinable: ...
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...
def __iter__(self) -> Iterator[expr.ColumnReference]:
return (self[name] for name in self.keys())
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference: ...
def _operator_dependencies(self) -> StableSet[Table]: ...
def __getattr__(self, name) -> expr.ColumnReference:
"""Get columns by name.
Warning:
- Fails if it tries to access nonexistent column.
Returns:
Column expression.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | dog
... 9 | Bob | dog
... 8 | Alice | cat
... 7 | Bob | dog
... ''')
>>> t2 = t1.select(t1.age)
>>> pw.debug.compute_and_print(t2, include_id=False)
age
7
8
9
10
"""
try:
return super().__getattr__(name)
except AttributeError:
pass
return self._get_colref_by_name(name, AttributeError)
def C(self) -> ColumnNamespace:
"""Returns the namespace of all the columns of a joinable.
Allows accessing column names that might otherwise be a reserved methods.
>>> import pathway as pw
>>> tab = pw.debug.table_from_markdown('''
... age | owner | pet | filter
... 10 | Alice | dog | True
... 9 | Bob | dog | True
... 8 | Alice | cat | False
... 7 | Bob | dog | True
... ''')
>>> isinstance(tab.C.age, pw.ColumnReference)
True
>>> pw.debug.compute_and_print(tab.filter(tab.C.filter), include_id=False)
age | owner | pet | filter
7 | Bob | dog | True
9 | Bob | dog | True
10 | Alice | dog | True
"""
return ColumnNamespace(self)
def _C(self):
return self.C
def join(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
how: JoinMode = JoinMode.INNER,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""Join self with other using the given join expression.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
on: a list of column expressions. Each must have == as the top level operation
and be of the form LHS: ColumnReference == RHS: ColumnReference.
id: optional argument for id of result, can be only self.id or other.id
how: by default, inner join is performed. Possible values are JoinMode.{INNER,LEFT,RIGHT,OUTER}
correspond to inner, left, right and outer join respectively.
left_instance/right_instance: optional arguments describing partitioning of the data into
separate instances
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... age | owner | pet | size
... 10 | Alice | 3 | M
... 9 | Bob | 1 | L
... 8 | Tom | 1 | XL
... ''')
>>> t3 = t1.join(
... t2, t1.pet == t2.pet, t1.owner == t2.owner, how=pw.JoinMode.INNER
... ).select(age=t1.age, owner_name=t2.owner, size=t2.size)
>>> pw.debug.compute_and_print(t3, include_id = False)
age | owner_name | size
9 | Bob | L
"""
return JoinResult._table_join(
self,
other,
*on,
mode=how,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
def join_inner(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""Inner-joins two tables or join results.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
on: a list of column expressions. Each must have == as the top level operation
and be of the form LHS: ColumnReference == RHS: ColumnReference.
id: optional argument for id of result, can be only self.id or other.id
left_instance/right_instance: optional arguments describing partitioning of the data
into separate instances
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... age | owner | pet | size
... 10 | Alice | 3 | M
... 9 | Bob | 1 | L
... 8 | Tom | 1 | XL
... ''')
>>> t3 = t1.join_inner(t2, t1.pet == t2.pet, t1.owner == t2.owner).select(
... age=t1.age, owner_name=t2.owner, size=t2.size
... )
>>> pw.debug.compute_and_print(t3, include_id = False)
age | owner_name | size
9 | Bob | L
"""
return JoinResult._table_join(
self,
other,
*on,
mode=JoinMode.INNER,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
def join_left(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""
Left-joins two tables or join results.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
*on: Columns to join, syntax `self.col1 == other.col2`
id: optional id column of the result
left_instance/right_instance: optional arguments describing partitioning of the data into
separate instances
Remarks:
args cannot contain id column from either of tables, \
as the result table has id column with auto-generated ids; \
it can be selected by assigning it to a column with defined \
name (passed in kwargs)
Behavior:
- for rows from the left side that were not matched with the right side,
missing values on the right are replaced with `None`
- rows from the right side that were not matched with the left side are skipped
- for rows that were matched the behavior is the same as that of an inner join.
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown(
... '''
... | a | b
... 1 | 11 | 111
... 2 | 12 | 112
... 3 | 13 | 113
... 4 | 13 | 114
... '''
... )
>>> t2 = pw.debug.table_from_markdown(
... '''
... | c | d
... 1 | 11 | 211
... 2 | 12 | 212
... 3 | 14 | 213
... 4 | 14 | 214
... '''
... )
>>> pw.debug.compute_and_print(t1.join_left(t2, t1.a == t2.c
... ).select(t1.a, t2_c=t2.c, s=pw.require(t1.b + t2.d, t2.id)),
... include_id=False)
a | t2_c | s
11 | 11 | 322
12 | 12 | 324
13 | |
13 | |
"""
return JoinResult._table_join(
self,
other,
*on,
mode=JoinMode.LEFT,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
def join_right(
    self,
    other: Joinable,
    *on: expr.ColumnExpression,
    id: expr.ColumnReference | None = None,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """
    Right-joins two tables or join results.

    Args:
        other: the right side of the join, ``Table`` or ``JoinResult``.
        *on: Columns to join, syntax `self.col1 == other.col2`
        id: optional id column of the result
        left_instance/right_instance: optional arguments describing partitioning
            of the data into separate instances

    Remarks:
        args cannot contain the id column from either of the tables, as the
        result table has an id column with auto-generated ids; it can be
        selected by assigning it to a column with a defined name (in kwargs).

    Behavior:
        - rows from the left side that were not matched with the right side are skipped
        - for rows from the right side that were not matched with the left side,
          missing values on the left are replaced with `None`
        - for rows that were matched the behavior is the same as that of an inner join.

    Returns:
        JoinResult: an object on which `.select()` may be called to extract relevant
        columns from the result of the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...   | a  | b
    ... 1 | 11 | 111
    ... 2 | 12 | 112
    ... 3 | 13 | 113
    ... 4 | 13 | 114
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...   | c  | d
    ... 1 | 11 | 211
    ... 2 | 12 | 212
    ... 3 | 14 | 213
    ... 4 | 14 | 214
    ... '''
    ... )
    >>> pw.debug.compute_and_print(t1.join_right(t2, t1.a == t2.c
    ... ).select(t1.a, t2_c=t2.c, s=pw.require(pw.coalesce(t1.b,0) + t2.d,t1.id)),
    ... include_id=False)
    a  | t2_c | s
       | 14   |
       | 14   |
    11 | 11   | 322
    12 | 12   | 324
    """
    # Fix over the original: the docstring claimed "Outer-joins" and carried a
    # stray duplicate "Returns: OuterJoinResult object" section; this is a
    # right join returning a JoinResult, as the implementation below shows.
    return JoinResult._table_join(
        self,
        other,
        *on,
        mode=JoinMode.RIGHT,
        id=id,
        left_instance=left_instance,
        right_instance=right_instance,
    )
def join_outer(
    self,
    other: Joinable,
    *on: expr.ColumnExpression,
    id: expr.ColumnReference | None = None,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """Perform a full outer join of ``self`` with ``other``.

    Args:
        other: the right side of the join, ``Table`` or ``JoinResult``.
        *on: Columns to join, syntax `self.col1 == other.col2`
        id: optional id column of the result
        instance: optional argument describing partitioning of the data into
            separate instances

    Remarks:
        args cannot contain the id column from either of the tables, as the
        result table has an id column with auto-generated ids; it can be
        selected by assigning it to a column with a defined name (in kwargs).

    Behavior:
        - unmatched rows from either side are kept, with the missing side's
          values replaced by `None`
        - matched rows behave exactly as in an inner join

    Returns:
        JoinResult: an object on which `.select()` may be called to extract
        relevant columns from the result of the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...   | a  | b
    ... 1 | 11 | 111
    ... 2 | 12 | 112
    ... 3 | 13 | 113
    ... 4 | 13 | 114
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...   | c  | d
    ... 1 | 11 | 211
    ... 2 | 12 | 212
    ... 3 | 14 | 213
    ... 4 | 14 | 214
    ... '''
    ... )
    >>> pw.debug.compute_and_print(t1.join_outer(t2, t1.a == t2.c
    ... ).select(t1.a, t2_c=t2.c, s=pw.require(t1.b + t2.d, t1.id, t2.id)),
    ... include_id=False)
    a  | t2_c | s
       | 14   |
       | 14   |
    11 | 11   | 322
    12 | 12   | 324
    13 |      |
    13 |      |
    """
    join_mode = JoinMode.OUTER
    return JoinResult._table_join(
        self,
        other,
        *on,
        id=id,
        mode=join_mode,
        left_instance=left_instance,
        right_instance=right_instance,
    )
def _desugaring(self) -> TableSelectDesugaring:
    # DesugaringContext hook: expressions written against this joinable
    # (pw.this etc.) are rewritten through a TableSelectDesugaring bound to it.
    return TableSelectDesugaring(self)
# Interface stub: concrete subclasses return their backing table together with
# a colref -> expression substitution map used when desugaring chained joins.
# NOTE(review): upstream marks such stubs @abstractmethod — the decorator
# appears to have been lost in this copy; confirm before relying on it.
def _substitutions(
    self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]: ...
class JoinResult(Joinable, OperatorInput):
    """Result of a join between tables.
    Example:
    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age owner pet
    ... 1 10 Alice 1
    ... 2 9 Bob 1
    ... 3 8 Alice 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age owner pet size
    ... 11 10 Alice 3 M
    ... 12 9 Bob 1 L
    ... 13 8 Tom 1 XL
    ... ''')
    >>> joinresult= t1.join(t2, t1.pet == t2.pet, t1.owner == t2.owner) # noqa: E501
    >>> isinstance(joinresult, pw.JoinResult)
    True
    >>> pw.debug.compute_and_print(joinresult.select(t1.age, t2.size), include_id=False)
    age | size
    9   | L
    """

    # Materialized join table; joined columns live here under generated names.
    _inner_table: Table
    # Maps original-side column refs to their columns in _inner_table.
    _columns_mapping: dict[expr.InternalColRef, expr.ColumnReference]
    # Backing tables obtained via _substitutions() of each side.
    _left_table: Table
    _right_table: Table
    # The joinables as passed by the user (may themselves be JoinResults).
    _original_left: Joinable
    _original_right: Joinable
    # Resolution of pw.left / pw.right / pw.this within this join.
    _substitution: dict[thisclass.ThisMetaclass, Joinable]
    _chained_join_desugaring: SubstitutionDesugaring
    # Names of columns the join condition equated (kept visible, merged).
    _joined_on_names: StableSet[str]
    # Union of all column names from both sides.
    _all_colnames: StableSet[str]
    _join_mode: JoinMode

    def __init__(
        self,
        _context: clmn.Context,
        _inner_table: Table,
        _columns_mapping: dict[expr.InternalColRef, expr.ColumnReference],
        _left_table: Table,
        _right_table: Table,
        _original_left: Joinable,
        _original_right: Joinable,
        _substitution: dict[thisclass.ThisMetaclass, Joinable],
        _joined_on_names: StableSet[str],
        _join_mode: JoinMode,
    ):
        """Wire up the join result; built by JoinResult._table_join, not by users."""
        super().__init__(_context)
        self._inner_table = _inner_table
        self._columns_mapping = _columns_mapping
        self._left_table = _left_table
        self._right_table = _right_table
        # pw.this inside this join resolves to the join result itself.
        self._substitution = {**_substitution, thisclass.this: self}
        self._joined_on_names = _joined_on_names
        self._join_mode = _join_mode
        self._original_left = _original_left
        self._original_right = _original_right
        # A table may not appear on both sides of one join (use .copy()).
        assert _original_left._subtables().isdisjoint(_original_right._subtables())
        self._all_colnames = StableSet.union(
            _original_left.keys(), _original_right.keys()
        )
        self._chained_join_desugaring = SubstitutionDesugaring(self._substitutions()[1])
@staticmethod
def _compute_universe(
    left_table: Table,
    right_table: Table,
    id: clmn.Column | None,
    mode: JoinMode,
) -> Universe:
    """Choose the universe (key set) of the join result.

    If ``id`` is the id column of one side, the result reuses that side's
    universe when the join mode preserves all of that side's rows, or a
    subset of it for an inner join; otherwise a fresh universe is created.

    Raises:
        KeyError: when ``id`` comes from a side whose keys the join mode
            cannot guarantee to preserve.
    """
    # Fix: this helper takes no ``self`` and is invoked as
    # ``JoinResult._compute_universe(...)``; without @staticmethod an
    # instance-bound call would silently shift all arguments by one.
    if id is left_table._id_column:
        if mode == JoinMode.LEFT:
            return left_table._universe
        elif mode == JoinMode.INNER:
            return left_table._universe.subset()
        else:
            raise KeyError("Cannot assign id's for this join type.")
    elif id is right_table._id_column:
        if mode == JoinMode.RIGHT:
            return right_table._universe
        elif mode == JoinMode.INNER:
            return right_table._universe.subset()
        else:
            raise KeyError("Cannot assign id's for this join type.")
    else:
        assert id is None
        return Universe()
def _subtables(self) -> StableSet[Table]:
    # A join spans exactly the tables of both of its sides; used to reject
    # self-joins without an explicit .copy().
    return self._original_left._subtables() | self._original_right._subtables()
def keys(self):
    """Return the column names visible on this join result.

    A name present on both sides is hidden (ambiguous) unless the join was
    performed on that column, in which case the merged column stays visible.
    """
    overlapping = self._original_left.keys() & self._original_right.keys()
    ambiguous = overlapping - self._joined_on_names
    return self._all_colnames - ambiguous
def _get_colref_by_name(
    self,
    name: str,
    exception_type,
) -> expr.ColumnReference:
    """Resolve ``name`` to a column reference of this join result.

    Resolution order matters: id -> joined-on columns -> unambiguous side
    columns. ``exception_type`` (KeyError or AttributeError) is raised for
    ambiguous or unknown names, matching the access style of the caller.
    """
    name = self._column_deprecation_rename(name)
    if name == "id":
        # The join result's own id lives on the inner table.
        return self._inner_table.id
    elif name in self._joined_on_names:
        if self._join_mode is JoinMode.INNER:
            # Inner join: both sides agree, so the left original suffices.
            return self._original_left[name]
        else:
            # Outer modes: the merged (coalesced) column on the inner table.
            return self._inner_table[name]
    elif name in self._original_left.keys() and name in self._original_right.keys():
        raise exception_type(
            f"Column {name} appears on both left and right inputs of join."
        )
    elif name in self._original_left.keys():
        return self._original_left[name]
    elif name in self._original_right.keys():
        return self._original_right[name]
    else:
        raise exception_type(f"No column with name {name}.")
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference:
    """Look up a column by name or by an existing reference to this join."""
    if isinstance(args, expr.ColumnReference):
        # A reference must point at this join (or the pw.this placeholder).
        assert args.table is self or args.table is thisclass.this
        name = args.name
    else:
        name = args
    return self._get_colref_by_name(name, KeyError)
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
    """Materialize the join into a table with the requested columns.

    Args:
        args: Column references.
        kwargs: Column expressions with their new assigned names.

    Returns:
        Table: Created table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age  | owner  | pet
    ...  10  | Alice  | 1
    ...   9  | Bob    | 1
    ...   8  | Alice  | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age  | owner  | pet | size
    ...  10  | Alice  | 3   | M
    ...   9  | Bob    | 1   | L
    ...   8  | Tom    | 1   | XL
    ... ''')
    >>> t3 = t1.join(t2, t1.pet == t2.pet, t1.owner == t2.owner).select(age=t1.age, owner_name=t2.owner, size=t2.size) # noqa: E501
    >>> pw.debug.compute_and_print(t3, include_id=False)
    age | owner_name | size
    9   | Bob        | L
    """
    # Rewrite side-specific references (t1.x, t2.y, pw.this...) into the
    # inner table's columns, then let the inner table do the select.
    requested = combine_args_kwargs(args, kwargs)
    desugared = {
        new_name: self._chained_join_desugaring.eval_expression(expression)
        for new_name, expression in requested.items()
    }
    return self._inner_table.select(**desugared)
def _operator_dependencies(self) -> StableSet[Table]:
    # The join depends on every table that either side depends on.
    return (
        self._left_table._operator_dependencies()
        | self._right_table._operator_dependencies()
    )
def filter(self, filter_expression: expr.ColumnExpression) -> JoinResult:
    """Filters rows, keeping the ones satisfying the predicate.

    Returns a new JoinResult over the filtered inner table; all column
    mappings are re-pointed so further select/filter/groupby keep working.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age owner pet
    ... 1 10 Alice 1
    ... 2 9 Bob 1
    ... 3 8 Alice 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age owner pet size
    ... 11 10 Alice 3 M
    ... 12 9 Bob 1 L
    ... 13 8 Tom 1 XL
    ... ''')
    >>> result = t1.join(t2).filter(t1.owner == t2.owner).select(t1.age, t2.size) # noqa: E501
    >>> pw.debug.compute_and_print(result, include_id=False)
    age | size
    8   | M
    9   | L
    10  | M
    """
    # Rewrite side-specific references into inner-table columns first.
    desugared_filter_expression = self._chained_join_desugaring.eval_expression(
        filter_expression
    )
    inner_table = self._inner_table.filter(desugared_filter_expression)
    # Re-point every mapped column at the filtered inner table (same names).
    new_columns_mapping = {
        int_ref: inner_table[expression.name]
        for int_ref, expression in self._columns_mapping.items()
    }
    new_columns_mapping[inner_table.id._to_internal()] = inner_table.id
    context = clmn.JoinRowwiseContext.from_mapping(
        inner_table._id_column, new_columns_mapping
    )
    # Mutates the freshly created table's context; must happen before the
    # new JoinResult is built around it.
    inner_table._rowwise_context = context
    return JoinResult(
        _context=context,
        _inner_table=inner_table,
        _columns_mapping=new_columns_mapping,
        _left_table=self._left_table,
        _right_table=self._right_table,
        _original_left=self._original_left,
        _original_right=self._original_right,
        _substitution=self._substitution,
        _joined_on_names=self._joined_on_names,
        _join_mode=self._join_mode,
    )
def groupby(
    self,
    *args: expr.ColumnReference,
    id: expr.ColumnReference | None = None,
) -> GroupedJoinResult:
    """Groups join result by columns from args.

    Note:
        Usually followed by `.reduce()` that aggregates the result and returns a table.

    Args:
        args: columns to group by.
        id: if provided, is the column used to set id's of the rows of the result

    Returns:
        GroupedJoinResult: Groupby object.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... cost owner pet
    ... 1 100 Alice 1
    ... 2 90 Bob 1
    ... 3 80 Alice 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... cost owner pet size
    ... 11 100 Alice 3 M
    ... 12 90 Bob 1 L
    ... 13 80 Tom 1 XL
    ... ''')
    >>> result = (t1.join(t2, t1.owner==t2.owner).groupby(pw.this.owner)
    ... .reduce(pw.this.owner, pairs = pw.reducers.count()))
    >>> pw.debug.compute_and_print(result, include_id=False)
    owner | pairs
    Alice | 2
    Bob   | 1
    """
    # Validate eagerly so the user gets a targeted error message instead of
    # a confusing failure deeper in the pipeline.
    for arg in args:
        if isinstance(arg, expr.ColumnReference):
            continue
        if isinstance(arg, str):
            raise ValueError(
                f"Expected a ColumnReference, found a string. Did you mean this.{arg} instead of {repr(arg)}?"
            )
        raise ValueError(
            "In JoinResult.groupby() all arguments have to be a ColumnReference."
        )
    # Imported lazily to avoid a circular import at module load time.
    from pathway.internals.groupbys import GroupedJoinResult

    return GroupedJoinResult(_join_result=self, _args=args, _id=id)
def reduce(
    self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
) -> Table:
    """Reduce a join result to a single row.

    Equivalent to `self.groupby().reduce(*args, **kwargs)`.

    Args:
        args: reducer to reduce the table with
        kwargs: reducer to reduce the table with. Its key is the new name of a column.

    Returns:
        Table: Reduced table.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... cost owner pet
    ... 1 100 Alice 1
    ... 2 90 Bob 1
    ... 3 80 Alice 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... cost owner pet size
    ... 11 100 Alice 3 M
    ... 12 90 Bob 1 L
    ... 13 80 Tom 1 XL
    ... ''')
    >>> result = t1.join(t2, t1.owner==t2.owner).reduce(total_pairs = pw.reducers.count())
    >>> pw.debug.compute_and_print(result, include_id=False)
    total_pairs
    3
    """
    # Same eager validation as groupby(), with a reduce-specific message.
    for arg in args:
        if isinstance(arg, expr.ColumnReference):
            continue
        if isinstance(arg, str):
            raise ValueError(
                f"Expected a ColumnReference, found a string. Did you mean this.{arg} instead of {repr(arg)}?"
            )
        raise ValueError(
            "In JoinResult.reduce() all positional arguments have to be a ColumnReference."
        )
    return self.groupby().reduce(*args, **kwargs)
def _substitutions(
    self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
    """Expose the inner table plus a copy of the colref -> column mapping
    used when this join result is chained into a further join."""
    return self._inner_table, dict(self._columns_mapping)
@staticmethod
def _join(
    context: clmn.JoinContext, *args: expr.ColumnReference, **kwargs: Any
) -> Table:
    """Used internally to create an internal Table containing result of a join.

    Evaluates every requested expression inside the join ``context`` and
    assembles the resulting columns into a fresh Table.
    """
    # Fix: no ``self`` parameter and only ever called as
    # ``JoinResult._join(context, ...)`` — declare it @staticmethod so an
    # accidental instance-bound call cannot shift the arguments.
    columns: dict[str, clmn.Column] = {}
    all_args = combine_args_kwargs(args, kwargs)
    for new_name, expression in all_args.items():
        columns[new_name] = expression._column_with_expression_cls(
            context=context,
            universe=context.universe,
            expression=expression,
        )
    # Imported lazily to avoid a circular import at module load time.
    from pathway.internals.table import Table

    return Table(
        _columns=columns,
        _context=context,
    )
@staticmethod
def _prepare_inner_table_with_mapping(
    context: clmn.JoinContext,
    original_left: Joinable,
    original_right: Joinable,
    common_column_names: StableSet[str],
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnReference]]:
    """Build the materialized inner table of a join and the mapping from
    original-side column refs to its (generated-name) columns.

    Columns joined on by name (``common_column_names``) additionally get a
    merged column computed with ``coalesce`` of the two sides.
    """
    # Fix: self-less helper, only called as JoinResult._prepare_...(...) —
    # mark @staticmethod so it is safe against instance-bound invocation.
    left_table, left_substitutions = original_left._substitutions()
    right_table, right_substitutions = original_right._substitutions()
    cnt = itertools.count(0)
    expressions: dict[str, expr.ColumnExpression] = {}
    colref_to_name_mapping: dict[expr.InternalColRef, str] = {}
    for table, subs in [
        (left_table, left_substitutions),
        (right_table, right_substitutions),
    ]:
        if len(subs) == 0:  # tables have empty subs, so set them here
            for ref in table:
                subs[ref._to_internal()] = ref
        subs_total = subs | {table.id._to_internal(): table.id}
        for int_ref, expression in subs_total.items():
            # Generated names (_pw_0, _pw_1, ...) avoid clashes between sides.
            inner_name = f"_pw_{next(cnt)}"
            expressions[inner_name] = expression
            colref_to_name_mapping[int_ref] = inner_name
    # Imported lazily to avoid a circular import at module load time.
    from pathway.internals.common import coalesce

    for name in common_column_names:
        if name != "id":
            expressions[name] = coalesce(original_left[name], original_right[name])
    inner_table = JoinResult._join(context, **expressions)
    final_mapping = {
        colref: inner_table[name] for colref, name in colref_to_name_mapping.items()
    }
    for name in common_column_names:
        if name != "id":
            colref = inner_table[name]
            final_mapping[colref._to_internal()] = colref
    final_mapping[inner_table.id._to_internal()] = inner_table.id
    rowwise_context = clmn.JoinRowwiseContext.from_mapping(
        inner_table._id_column, final_mapping
    )
    inner_table._rowwise_context = (
        rowwise_context  # FIXME don't set _context property of table
    )
    return (inner_table, final_mapping)
@staticmethod
def _table_join(
    left: Joinable,
    right: Joinable,
    *on: expr.ColumnExpression,
    mode: JoinMode,
    id: expr.ColumnReference | None = None,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """Shared implementation behind join/join_inner/join_left/join_right/join_outer.

    Validates and desugars the join conditions, computes the result's
    universe, builds a JoinContext (swapping sides if the requested ``id``
    comes from the right table), and wraps everything into a JoinResult.

    Raises:
        ValueError: when joining a table with itself.
    """
    # Fix: self-less helper, only ever called as JoinResult._table_join(...)
    # — mark @staticmethod so the call form is unambiguous.
    if left == right:
        raise ValueError(
            "Cannot join table with itself. Use <table>.copy() as one of the arguments of the join."
        )
    left_table, left_substitutions = left._substitutions()
    right_table, right_substitutions = right._substitutions()
    chained_join_desugaring = SubstitutionDesugaring(
        {**left_substitutions, **right_substitutions}
    )
    if id is not None:
        id = chained_join_desugaring.eval_expression(id)
        id_column = id._column
    else:
        id_column = None
    common_column_names: StableSet[str] = StableSet()
    # Instance columns are just an extra equality condition on both sides.
    if left_instance is not None and right_instance is not None:
        on = (*on, left_instance == right_instance)
    else:
        assert left_instance is None and right_instance is None
    on_ = tuple(validate_shape(cond) for cond in on)
    # Conditions of the form t1.x == t2.x merge into a single visible column.
    for cond in on_:
        cond_left = cast(expr.ColumnReference, cond._left)
        cond_right = cast(expr.ColumnReference, cond._right)
        if cond_left.name == cond_right.name:
            common_column_names.add(cond_left.name)
    on_ = tuple(chained_join_desugaring.eval_expression(cond) for cond in on_)
    for cond in on_:
        validate_join_condition(cond, left_table, right_table)
    on_left = tuple(
        left_table._eval(cond._left, left_table._table_restricted_context)
        for cond in on_
    )
    on_right = tuple(
        right_table._eval(cond._right, right_table._table_restricted_context)
        for cond in on_
    )
    # If the result id comes from the right table, the engine context is
    # built with the sides swapped.
    swp = id_column is not None and id_column is right_table._id_column
    assert (
        id_column is None
        or (id_column is left_table._id_column)
        or (id_column is right_table._id_column)
    )
    left_context_table = clmn.ContextTable(universe=left._universe, columns=on_left)
    right_context_table = clmn.ContextTable(
        universe=right._universe, columns=on_right
    )
    substitution: dict[thisclass.ThisMetaclass, Joinable] = {
        thisclass.left: left,
        thisclass.right: right,
    }
    universe = JoinResult._compute_universe(
        left_table, right_table, id_column, mode
    )
    if swp:
        context = clmn.JoinContext(
            universe,
            right_table,
            left_table,
            right_context_table,
            left_context_table,
            id_column is not None,
            mode in [JoinMode.RIGHT, JoinMode.OUTER],
            mode in [JoinMode.LEFT, JoinMode.OUTER],
        )
    else:
        context = clmn.JoinContext(
            universe,
            left_table,
            right_table,
            left_context_table,
            right_context_table,
            id_column is not None,
            mode in [JoinMode.LEFT, JoinMode.OUTER],
            mode in [JoinMode.RIGHT, JoinMode.OUTER],
        )
    inner_table, columns_mapping = JoinResult._prepare_inner_table_with_mapping(
        context,
        left,
        right,
        common_column_names,
    )
    return JoinResult(
        context,
        inner_table,
        columns_mapping,
        left_table,
        right_table,
        left,
        right,
        substitution,
        common_column_names,
        mode,
    )
class JoinMode(Enum):
    """Enum used for controlling type of a join when passed to a generic join function.

    Consists of values: JoinMode.INNER, JoinMode.LEFT, JoinMode.RIGHT, JoinMode.OUTER

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age  | owner  | pet
    ...  10  | Alice  | 1
    ...   9  | Bob    | 1
    ...   8  | Alice  | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age  | owner  | pet | size
    ...  10  | Alice  | 3   | M
    ...   9  | Bob    | 1   | L
    ...   8  | Tom    | 1   | XL
    ... ''')
    >>> inner_join = t1.join(
    ...     t2, t1.pet == t2.pet, t1.owner == t2.owner, how=pw.JoinMode.INNER
    ... ).select(age=t1.age, owner_name=t2.owner, size=t2.size)
    >>> pw.debug.compute_and_print(inner_join, include_id = False)
    age | owner_name | size
    9   | Bob        | L
    >>> outer_join = t1.join(
    ...     t2, t1.pet == t2.pet, t1.owner == t2.owner, how=pw.JoinMode.OUTER
    ... ).select(age=t1.age, owner_name=t2.owner, size=t2.size)
    >>> pw.debug.compute_and_print(outer_join, include_id = False)
    age | owner_name | size
        | Alice      | M
        | Tom        | XL
    8   |            |
    9   |            |
    10  |            |
    """

    # Passed as the `how=` argument of Joinable.join(); each value also has a
    # dedicated wrapper (join_inner, join_left, join_right, join_outer).
    INNER = 0
    """Use inner join."""
    LEFT = 1
    """Use left join."""
    RIGHT = 2
    """Use right join."""
    OUTER = 3
    """Use outer join."""
The provided code snippet includes necessary dependencies for implementing the `join` function. Write a Python function `def join( left: Joinable, right: Joinable, *on: expr.ColumnExpression, id: expr.ColumnReference | None = None, how: JoinMode = JoinMode.INNER, left_instance: expr.ColumnReference | None = None, right_instance: expr.ColumnReference | None = None, ) -> JoinResult` to solve the following problem:
Join self with other using the given join expression. Args: left: the left side of the join, ``Table`` or ``JoinResult``. right: the right side of the join, ``Table`` or ``JoinResult``. on: a list of column expressions. Each must have == as the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. id: optional argument for id of result, can be only self.id or other.id how: by default, inner join is performed. Possible values are JoinMode.{INNER,LEFT,RIGHT,OUTER} correspond to inner, left, right and outer join respectively. left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Returns: JoinResult: an object on which `.select()` may be called to extract relevant columns from the result of the join. Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... age | owner | pet ... 10 | Alice | 1 ... 9 | Bob | 1 ... 8 | Alice | 2 ... ''') >>> t2 = pw.debug.table_from_markdown(''' ... age | owner | pet | size ... 10 | Alice | 3 | M ... 9 | Bob | 1 | L ... 8 | Tom | 1 | XL ... ''') >>> t3 = pw.join( ... t1, t2, t1.pet == t2.pet, t1.owner == t2.owner, how=pw.JoinMode.INNER ... ).select(age=t1.age, owner_name=t2.owner, size=t2.size) >>> pw.debug.compute_and_print(t3, include_id = False) age | owner_name | size 9 | Bob | L
Here is the function:
def join(
    left: Joinable,
    right: Joinable,
    *on: expr.ColumnExpression,
    id: expr.ColumnReference | None = None,
    how: JoinMode = JoinMode.INNER,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """Join ``left`` with ``right`` using the given join expression.

    Args:
        left: the left side of the join, ``Table`` or ``JoinResult``.
        right: the right side of the join, ``Table`` or ``JoinResult``.
        on: a list of column expressions. Each must have == as the top level operation
            and be of the form LHS: ColumnReference == RHS: ColumnReference.
        id: optional argument for id of result, can be only left.id or right.id
        how: by default, inner join is performed. Possible values are JoinMode.{INNER,LEFT,RIGHT,OUTER}
            correspond to inner, left, right and outer join respectively.
        left_instance/right_instance: optional arguments describing partitioning of the data into
            separate instances

    Returns:
        JoinResult: an object on which `.select()` may be called to extract relevant
        columns from the result of the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age  | owner  | pet
    ...  10  | Alice  | 1
    ...   9  | Bob    | 1
    ...   8  | Alice  | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age  | owner  | pet | size
    ...  10  | Alice  | 3   | M
    ...   9  | Bob    | 1   | L
    ...   8  | Tom    | 1   | XL
    ... ''')
    >>> t3 = pw.join(
    ...     t1, t2, t1.pet == t2.pet, t1.owner == t2.owner, how=pw.JoinMode.INNER
    ... ).select(age=t1.age, owner_name=t2.owner, size=t2.size)
    >>> pw.debug.compute_and_print(t3, include_id = False)
    age | owner_name | size
    9   | Bob        | L
    """
    # Fix over the original docstring: this free function joins ``left`` with
    # ``right`` (it delegates to left.join); the old text said "self"/"other".
    return left.join(
        right,
        *on,
        id=id,
        how=how,
        left_instance=left_instance,
        right_instance=right_instance,
    )
# 166,819
from __future__ import annotations
import itertools
from collections.abc import Iterator
from functools import lru_cache
from typing import TYPE_CHECKING, Any, cast
from pathway.internals.trace import trace_user_frame
from abc import abstractmethod
import pathway.internals.column as clmn
import pathway.internals.expression as expr
from pathway.internals import thisclass
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
reduce_args_handler,
select_args_handler,
)
from pathway.internals.column_namespace import ColumnNamespace
from pathway.internals.decorators import contextualized_operator
from pathway.internals.desugaring import (
DesugaringContext,
SubstitutionDesugaring,
TableSelectDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.helpers import StableSet
from pathway.internals.join_mode import JoinMode
from pathway.internals.operator_input import OperatorInput
from pathway.internals.shadows import operator as op
from pathway.internals.table_like import TableLike
from pathway.internals.type_interpreter import eval_type
from pathway.internals.universe import Universe
class Joinable(TableLike, DesugaringContext):
    # Interface shared by Table and JoinResult: anything that can take part
    # in a join. The stubs below are implemented by concrete subclasses.
    # NOTE(review): upstream marks these stubs @abstractmethod (the module
    # imports abstractmethod); the decorators appear lost in this copy.

    def _subtables(self) -> StableSet[Table]: ...

    def keys(self): ...

    def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table: ...

    def filter(self, filter_expression: expr.ColumnExpression) -> Joinable: ...

    def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...

    def __iter__(self) -> Iterator[expr.ColumnReference]:
        # Iterate column references in the order reported by keys().
        return (self[name] for name in self.keys())

    def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference: ...

    def _operator_dependencies(self) -> StableSet[Table]: ...
def __getattr__(self, name) -> expr.ColumnReference:
    """Get columns by name.

    Warning:
        - Fails if it tries to access nonexistent column.

    Returns:
        Column expression.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age  | owner  | pet
    ...  10  | Alice  | dog
    ...   9  | Bob    | dog
    ...   8  | Alice  | cat
    ...   7  | Bob    | dog
    ... ''')
    >>> t2 = t1.select(t1.age)
    >>> pw.debug.compute_and_print(t2, include_id=False)
    age
    7
    8
    9
    10
    """
    # First give the base classes a chance to resolve a real attribute; only
    # if that raises AttributeError do we treat ``name`` as a column lookup.
    try:
        return super().__getattr__(name)
    except AttributeError:
        pass
    return self._get_colref_by_name(name, AttributeError)
@property
def C(self) -> ColumnNamespace:
    """Returns the namespace of all the columns of a joinable.

    Allows accessing column names that might otherwise be reserved methods.

    >>> import pathway as pw
    >>> tab = pw.debug.table_from_markdown('''
    ... age  | owner  | pet | filter
    ...  10  | Alice  | dog | True
    ...   9  | Bob    | dog | True
    ...   8  | Alice  | cat | False
    ...   7  | Bob    | dog | True
    ... ''')
    >>> isinstance(tab.C.age, pw.ColumnReference)
    True
    >>> pw.debug.compute_and_print(tab.filter(tab.C.filter), include_id=False)
    age | owner | pet | filter
    7   | Bob   | dog | True
    9   | Bob   | dog | True
    10  | Alice | dog | True
    """
    # Fix: restored the @property decorator — the doctest above accesses
    # ``tab.C.age`` without calling C, which only works when C is a property
    # (otherwise ``tab.C`` would be a bound method).
    return ColumnNamespace(self)
def _C(self):
    # DesugaringContext hook exposing the column-namespace accessor.
    # NOTE(review): upstream declares both C and _C as properties; in this
    # copy `self.C` may evaluate to a bound method rather than a namespace
    # if C lacks @property — confirm against the original module.
    return self.C
def join(
    self,
    other: Joinable,
    *on: expr.ColumnExpression,
    id: expr.ColumnReference | None = None,
    how: JoinMode = JoinMode.INNER,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """Join ``self`` with ``other`` using the given join expression.

    Args:
        other: the right side of the join, ``Table`` or ``JoinResult``.
        on: a list of column expressions. Each must have == as the top level operation
            and be of the form LHS: ColumnReference == RHS: ColumnReference.
        id: optional argument for id of result, can be only self.id or other.id
        how: by default, inner join is performed. Possible values are JoinMode.{INNER,LEFT,RIGHT,OUTER}
            correspond to inner, left, right and outer join respectively.
        left_instance/right_instance: optional arguments describing partitioning of the data into
            separate instances

    Returns:
        JoinResult: an object on which `.select()` may be called to extract relevant
        columns from the result of the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age  | owner  | pet
    ...  10  | Alice  | 1
    ...   9  | Bob    | 1
    ...   8  | Alice  | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age  | owner  | pet | size
    ...  10  | Alice  | 3   | M
    ...   9  | Bob    | 1   | L
    ...   8  | Tom    | 1   | XL
    ... ''')
    >>> t3 = t1.join(
    ...     t2, t1.pet == t2.pet, t1.owner == t2.owner, how=pw.JoinMode.INNER
    ... ).select(age=t1.age, owner_name=t2.owner, size=t2.size)
    >>> pw.debug.compute_and_print(t3, include_id = False)
    age | owner_name | size
    9   | Bob        | L
    """
    # Single generic entry point; the join type comes from ``how``.
    return JoinResult._table_join(
        self,
        other,
        *on,
        id=id,
        mode=how,
        left_instance=left_instance,
        right_instance=right_instance,
    )
def join_inner(
    self,
    other: Joinable,
    *on: expr.ColumnExpression,
    id: expr.ColumnReference | None = None,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """Perform an inner join of ``self`` with ``other``.

    Args:
        other: the right side of the join, ``Table`` or ``JoinResult``.
        on: a list of column expressions. Each must have == as the top level operation
            and be of the form LHS: ColumnReference == RHS: ColumnReference.
        id: optional argument for id of result, can be only self.id or other.id
        left_instance/right_instance: optional arguments describing partitioning
            of the data into separate instances

    Returns:
        JoinResult: an object on which `.select()` may be called to extract relevant
        columns from the result of the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age  | owner  | pet
    ...  10  | Alice  | 1
    ...   9  | Bob    | 1
    ...   8  | Alice  | 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age  | owner  | pet | size
    ...  10  | Alice  | 3   | M
    ...   9  | Bob    | 1   | L
    ...   8  | Tom    | 1   | XL
    ... ''')
    >>> t3 = t1.join_inner(t2, t1.pet == t2.pet, t1.owner == t2.owner).select(
    ...     age=t1.age, owner_name=t2.owner, size=t2.size
    ... )
    >>> pw.debug.compute_and_print(t3, include_id = False)
    age | owner_name | size
    9   | Bob        | L
    """
    join_mode = JoinMode.INNER
    return JoinResult._table_join(
        self,
        other,
        *on,
        id=id,
        mode=join_mode,
        left_instance=left_instance,
        right_instance=right_instance,
    )
def join_left(
    self,
    other: Joinable,
    *on: expr.ColumnExpression,
    id: expr.ColumnReference | None = None,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """Perform a left join of ``self`` with ``other``.

    Args:
        other: the right side of the join, ``Table`` or ``JoinResult``.
        *on: Columns to join, syntax `self.col1 == other.col2`
        id: optional id column of the result
        left_instance/right_instance: optional arguments describing partitioning
            of the data into separate instances

    Remarks:
        args cannot contain the id column from either of the tables, as the
        result table has an id column with auto-generated ids; it can be
        selected by assigning it to a column with a defined name (in kwargs).

    Behavior:
        - every left-side row is preserved; for unmatched ones the right-side
          values are replaced with `None`
        - unmatched right-side rows are dropped
        - matched rows behave exactly as in an inner join

    Returns:
        JoinResult: an object on which `.select()` may be called to extract
        relevant columns from the result of the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...   | a  | b
    ... 1 | 11 | 111
    ... 2 | 12 | 112
    ... 3 | 13 | 113
    ... 4 | 13 | 114
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...   | c  | d
    ... 1 | 11 | 211
    ... 2 | 12 | 212
    ... 3 | 14 | 213
    ... 4 | 14 | 214
    ... '''
    ... )
    >>> pw.debug.compute_and_print(t1.join_left(t2, t1.a == t2.c
    ... ).select(t1.a, t2_c=t2.c, s=pw.require(t1.b + t2.d, t2.id)),
    ... include_id=False)
    a  | t2_c | s
    11 | 11   | 322
    12 | 12   | 324
    13 |      |
    13 |      |
    """
    join_mode = JoinMode.LEFT
    return JoinResult._table_join(
        self,
        other,
        *on,
        id=id,
        mode=join_mode,
        left_instance=left_instance,
        right_instance=right_instance,
    )
def join_right(
    self,
    other: Joinable,
    *on: expr.ColumnExpression,
    id: expr.ColumnReference | None = None,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """Right-joins two tables or join results.

    Args:
        other: the right side of the join, ``Table`` or ``JoinResult``.
        *on: Columns to join, syntax `self.col1 == other.col2`
        id: optional id column of the result
        left_instance/right_instance: optional arguments describing partitioning of the data into
            separate instances

    Remarks:
        args cannot contain id column from either of tables, \
        as the result table has id column with auto-generated ids; \
        it can be selected by assigning it to a column with defined \
        name (passed in kwargs)

    Behavior:
        - rows from the left side that were not matched with the right side are skipped
        - for rows from the right side that were not matched with the left side,
          missing values on the left are replaced with `None`
        - for rows that were matched the behavior is the same as that of an inner join.

    Returns:
        JoinResult: an object on which `.select()` may be called to extract relevant
        columns from the result of the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...   | a  | b
    ... 1 | 11 | 111
    ... 2 | 12 | 112
    ... 3 | 13 | 113
    ... 4 | 13 | 114
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...   | c  | d
    ... 1 | 11 | 211
    ... 2 | 12 | 212
    ... 3 | 14 | 213
    ... 4 | 14 | 214
    ... '''
    ... )
    >>> pw.debug.compute_and_print(t1.join_right(t2, t1.a == t2.c
    ... ).select(t1.a, t2_c=t2.c, s=pw.require(pw.coalesce(t1.b,0) + t2.d,t1.id)),
    ... include_id=False)
    a  | t2_c | s
       | 14   |
       | 14   |
    11 | 11   | 322
    12 | 12   | 324
    """
    # Fixed docstring: previously the summary said "Outer-joins" and a stale
    # duplicate "Returns: OuterJoinResult object" section trailed the example.
    # The code itself is unchanged: a thin wrapper around JoinResult._table_join.
    return JoinResult._table_join(
        self,
        other,
        *on,
        mode=JoinMode.RIGHT,
        id=id,
        left_instance=left_instance,
        right_instance=right_instance,
    )
def join_outer(
    self,
    other: Joinable,
    *on: expr.ColumnExpression,
    id: expr.ColumnReference | None = None,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """Outer-joins two tables or join results.

    Args:
        other: the right side of the join, ``Table`` or ``JoinResult``.
        *on: Columns to join, syntax `self.col1 == other.col2`
        id: optional id column of the result
        left_instance/right_instance: optional arguments describing partitioning of the data into
            separate instances

    Remarks:
        args cannot contain id column from either of tables, \
        as the result table has id column with auto-generated ids; \
        it can be selected by assigning it to a column with defined \
        name (passed in kwargs)

    Behavior:
        - for rows from the left side that were not matched with the right side,
          missing values on the right are replaced with `None`
        - for rows from the right side that were not matched with the left side,
          missing values on the left are replaced with `None`
        - for rows that were matched the behavior is the same as that of an inner join.

    Returns:
        JoinResult: an object on which `.select()` may be called to extract relevant
        columns from the result of the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...   | a  | b
    ... 1 | 11 | 111
    ... 2 | 12 | 112
    ... 3 | 13 | 113
    ... 4 | 13 | 114
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...   | c  | d
    ... 1 | 11 | 211
    ... 2 | 12 | 212
    ... 3 | 14 | 213
    ... 4 | 14 | 214
    ... '''
    ... )
    >>> pw.debug.compute_and_print(t1.join_outer(t2, t1.a == t2.c
    ... ).select(t1.a, t2_c=t2.c, s=pw.require(t1.b + t2.d, t1.id, t2.id)),
    ... include_id=False)
    a  | t2_c | s
       | 14   |
       | 14   |
    11 | 11   | 322
    12 | 12   | 324
    13 |      |
    13 |      |
    """
    # Fixed docstring: previously documented a nonexistent `instance` argument;
    # the actual parameters are `left_instance`/`right_instance`. Code unchanged:
    # a thin wrapper around JoinResult._table_join.
    return JoinResult._table_join(
        self,
        other,
        *on,
        mode=JoinMode.OUTER,
        id=id,
        left_instance=left_instance,
        right_instance=right_instance,
    )
def _desugaring(self) -> TableSelectDesugaring:
    # Desugaring context used to resolve `pw.this`-style references against
    # this joinable when evaluating select/filter expressions.
    return TableSelectDesugaring(self)
# Abstract hook: returns the flat inner table together with a mapping from the
# caller-visible column references to expressions over that inner table.
# NOTE(review): upstream this is likely decorated @abstractmethod — the
# decorator appears stripped in this dump; confirm against the original file.
def _substitutions(
    self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]: ...
class JoinResult(Joinable, OperatorInput):
    """Result of a join between tables.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    age  owner  pet
    ... 1   10  Alice    1
    ... 2    9    Bob    1
    ... 3    8  Alice    2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    age  owner  pet size
    ... 11  10  Alice    3    M
    ... 12   9    Bob    1    L
    ... 13   8    Tom    1   XL
    ... ''')
    >>> joinresult= t1.join(t2, t1.pet == t2.pet, t1.owner == t2.owner)  # noqa: E501
    >>> isinstance(joinresult, pw.JoinResult)
    True
    >>> pw.debug.compute_and_print(joinresult.select(t1.age, t2.size), include_id=False)
    age | size
    9   | L
    """

    # Flat internal table holding every (renamed) column of both join sides.
    _inner_table: Table
    # Maps original internal column refs to their renamed refs in _inner_table.
    _columns_mapping: dict[expr.InternalColRef, expr.ColumnReference]
    # Desugared (substituted) left/right tables used to evaluate join conditions.
    _left_table: Table
    _right_table: Table
    # The user-facing join operands (may themselves be JoinResults).
    _original_left: Joinable
    _original_right: Joinable
    # Maps pw.left / pw.right / pw.this markers to the joinables they stand for.
    _substitution: dict[thisclass.ThisMetaclass, Joinable]
    _chained_join_desugaring: SubstitutionDesugaring
    # Names of columns that appeared on both sides of an equality condition
    # (`t1.x == t2.x`) and are therefore merged into a single output column.
    _joined_on_names: StableSet[str]
    _all_colnames: StableSet[str]
    _join_mode: JoinMode

    def __init__(
        self,
        _context: clmn.Context,
        _inner_table: Table,
        _columns_mapping: dict[expr.InternalColRef, expr.ColumnReference],
        _left_table: Table,
        _right_table: Table,
        _original_left: Joinable,
        _original_right: Joinable,
        _substitution: dict[thisclass.ThisMetaclass, Joinable],
        _joined_on_names: StableSet[str],
        _join_mode: JoinMode,
    ):
        super().__init__(_context)
        self._inner_table = _inner_table
        self._columns_mapping = _columns_mapping
        self._left_table = _left_table
        self._right_table = _right_table
        # pw.this resolves to this join result inside select()/filter()/groupby().
        self._substitution = {**_substitution, thisclass.this: self}
        self._joined_on_names = _joined_on_names
        self._join_mode = _join_mode
        self._original_left = _original_left
        self._original_right = _original_right
        # The two sides must not share subtables (self-join requires .copy()).
        assert _original_left._subtables().isdisjoint(_original_right._subtables())
        self._all_colnames = StableSet.union(
            _original_left.keys(), _original_right.keys()
        )
        self._chained_join_desugaring = SubstitutionDesugaring(self._substitutions()[1])

    # NOTE(review): takes no `self`/`cls` — upstream this is a @staticmethod;
    # the decorator appears stripped in this dump. Confirm before running.
    def _compute_universe(
        left_table: Table,
        right_table: Table,
        id: clmn.Column | None,
        mode: JoinMode,
    ) -> Universe:
        # When the result reuses one side's id column, the universe is that
        # side's universe (exact for the matching outer mode, a subset for
        # INNER); other modes cannot preserve ids and raise.
        if id is left_table._id_column:
            if mode == JoinMode.LEFT:
                return left_table._universe
            elif mode == JoinMode.INNER:
                return left_table._universe.subset()
            else:
                raise KeyError("Cannot assign id's for this join type.")
        elif id is right_table._id_column:
            if mode == JoinMode.RIGHT:
                return right_table._universe
            elif mode == JoinMode.INNER:
                return right_table._universe.subset()
            else:
                raise KeyError("Cannot assign id's for this join type.")
        else:
            # No id column requested: fresh universe with auto-generated ids.
            assert id is None
            return Universe()

    def _subtables(self) -> StableSet[Table]:
        return self._original_left._subtables() | self._original_right._subtables()

    def keys(self):
        # Columns sharing a name on both sides are exposed only if they were part
        # of the join condition (then they're merged into one output column).
        common_colnames = self._original_left.keys() & self._original_right.keys()
        return self._all_colnames - (common_colnames - self._joined_on_names)

    def _get_colref_by_name(
        self,
        name: str,
        exception_type,
    ) -> expr.ColumnReference:
        """Resolve a column name against the join result.

        Raises ``exception_type`` for ambiguous (present on both sides, not
        joined on) or unknown names.
        """
        name = self._column_deprecation_rename(name)
        if name == "id":
            return self._inner_table.id
        elif name in self._joined_on_names:
            # For inner joins the merged column equals the left-side column;
            # otherwise use the coalesced column stored in the inner table.
            if self._join_mode is JoinMode.INNER:
                return self._original_left[name]
            else:
                return self._inner_table[name]
        elif name in self._original_left.keys() and name in self._original_right.keys():
            raise exception_type(
                f"Column {name} appears on both left and right inputs of join."
            )
        elif name in self._original_left.keys():
            return self._original_left[name]
        elif name in self._original_right.keys():
            return self._original_right[name]
        else:
            raise exception_type(f"No column with name {name}.")

    def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference:
        if isinstance(args, expr.ColumnReference):
            assert args.table is self or args.table is thisclass.this
            return self._get_colref_by_name(args.name, KeyError)
        else:
            return self._get_colref_by_name(args, KeyError)

    def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
        """Computes result of a join.

        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.

        Returns:
            Table: Created table.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ...  10 | Alice |   1
        ...   9 |   Bob |   1
        ...   8 | Alice |   2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ... age | owner | pet | size
        ...  10 | Alice |   3 |    M
        ...   9 |   Bob |   1 |    L
        ...   8 |   Tom |   1 |   XL
        ... ''')
        >>> t3 = t1.join(t2, t1.pet == t2.pet, t1.owner == t2.owner).select(age=t1.age, owner_name=t2.owner, size=t2.size)  # noqa: E501
        >>> pw.debug.compute_and_print(t3, include_id=False)
        age | owner_name | size
        9   | Bob        | L
        """
        expressions: dict[str, expr.ColumnExpression] = {}
        all_args = combine_args_kwargs(args, kwargs)
        for new_name, expression in all_args.items():
            # Rewrite references to the original sides into references over the
            # flat inner table before delegating to Table.select.
            expressions[new_name] = self._chained_join_desugaring.eval_expression(
                expression
            )
        return self._inner_table.select(**expressions)

    def _operator_dependencies(self) -> StableSet[Table]:
        return (
            self._left_table._operator_dependencies()
            | self._right_table._operator_dependencies()
        )

    def filter(self, filter_expression: expr.ColumnExpression) -> JoinResult:
        """Filters rows, keeping the ones satisfying the predicate.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...    age  owner  pet
        ... 1   10  Alice    1
        ... 2    9    Bob    1
        ... 3    8  Alice    2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...    age  owner  pet size
        ... 11  10  Alice    3    M
        ... 12   9    Bob    1    L
        ... 13   8    Tom    1   XL
        ... ''')
        >>> result = t1.join(t2).filter(t1.owner == t2.owner).select(t1.age, t2.size)  # noqa: E501
        >>> pw.debug.compute_and_print(result, include_id=False)
        age | size
        8   | M
        9   | L
        10  | M
        """
        desugared_filter_expression = self._chained_join_desugaring.eval_expression(
            filter_expression
        )
        inner_table = self._inner_table.filter(desugared_filter_expression)
        # Re-point every mapped column at the filtered inner table.
        new_columns_mapping = {
            int_ref: inner_table[expression.name]
            for int_ref, expression in self._columns_mapping.items()
        }
        new_columns_mapping[inner_table.id._to_internal()] = inner_table.id
        context = clmn.JoinRowwiseContext.from_mapping(
            inner_table._id_column, new_columns_mapping
        )
        inner_table._rowwise_context = context
        return JoinResult(
            _context=context,
            _inner_table=inner_table,
            _columns_mapping=new_columns_mapping,
            _left_table=self._left_table,
            _right_table=self._right_table,
            _original_left=self._original_left,
            _original_right=self._original_right,
            _substitution=self._substitution,
            _joined_on_names=self._joined_on_names,
            _join_mode=self._join_mode,
        )

    def groupby(
        self,
        *args: expr.ColumnReference,
        id: expr.ColumnReference | None = None,
    ) -> GroupedJoinResult:
        """Groups join result by columns from args.

        Note:
            Usually followed by `.reduce()` that aggregates the result and returns a table.

        Args:
            args: columns to group by.
            id: if provided, is the column used to set id's of the rows of the result

        Returns:
            GroupedJoinResult: Groupby object.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...    cost  owner  pet
        ... 1   100  Alice    1
        ... 2    90    Bob    1
        ... 3    80  Alice    2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...    cost  owner  pet size
        ... 11  100  Alice    3    M
        ... 12   90    Bob    1    L
        ... 13   80    Tom    1   XL
        ... ''')
        >>> result = (t1.join(t2, t1.owner==t2.owner).groupby(pw.this.owner)
        ...     .reduce(pw.this.owner, pairs = pw.reducers.count()))
        >>> pw.debug.compute_and_print(result, include_id=False)
        owner | pairs
        Alice | 2
        Bob   | 1
        """
        for arg in args:
            if not isinstance(arg, expr.ColumnReference):
                # Common mistake: passing a bare column name string.
                if isinstance(arg, str):
                    raise ValueError(
                        f"Expected a ColumnReference, found a string. Did you mean this.{arg} instead of {repr(arg)}?"
                    )
                else:
                    raise ValueError(
                        "In JoinResult.groupby() all arguments have to be a ColumnReference."
                    )
        # Local import avoids a circular dependency with groupbys.
        from pathway.internals.groupbys import GroupedJoinResult

        return GroupedJoinResult(
            _join_result=self,
            _args=args,
            _id=id,
        )

    def reduce(
        self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
    ) -> Table:
        """Reduce a join result to a single row.

        Equivalent to `self.groupby().reduce(*args, **kwargs)`.

        Args:
            args: reducer to reduce the table with
            kwargs: reducer to reduce the table with. Its key is the new name of a column.

        Returns:
            Table: Reduced table.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...    cost  owner  pet
        ... 1   100  Alice    1
        ... 2    90    Bob    1
        ... 3    80  Alice    2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...    cost  owner  pet size
        ... 11  100  Alice    3    M
        ... 12   90    Bob    1    L
        ... 13   80    Tom    1   XL
        ... ''')
        >>> result = t1.join(t2, t1.owner==t2.owner).reduce(total_pairs = pw.reducers.count())
        >>> pw.debug.compute_and_print(result, include_id=False)
        total_pairs
        3
        """
        for arg in args:
            if not isinstance(arg, expr.ColumnReference):
                if isinstance(arg, str):
                    raise ValueError(
                        f"Expected a ColumnReference, found a string. Did you mean this.{arg} instead of {repr(arg)}?"
                    )
                else:
                    raise ValueError(
                        "In JoinResult.reduce() all positional arguments have to be a ColumnReference."
                    )
        return self.groupby().reduce(*args, **kwargs)

    def _substitutions(
        self,
    ) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
        return self._inner_table, {
            int_ref: expression for int_ref, expression in self._columns_mapping.items()
        }

    # NOTE(review): no `self` — upstream this is a @staticmethod (likely also
    # @contextualized_operator); decorators appear stripped in this dump.
    def _join(
        context: clmn.JoinContext, *args: expr.ColumnReference, **kwargs: Any
    ) -> Table:
        """Used internally to create an internal Table containing result of a join."""
        columns: dict[str, clmn.Column] = {}
        all_args = combine_args_kwargs(args, kwargs)
        for new_name, expression in all_args.items():
            columns[new_name] = expression._column_with_expression_cls(
                context=context,
                universe=context.universe,
                expression=expression,
            )
        # Local import avoids a circular dependency with table.
        from pathway.internals.table import Table

        return Table(
            _columns=columns,
            _context=context,
        )

    # NOTE(review): no `self` — upstream this is a @staticmethod; decorator
    # appears stripped in this dump.
    def _prepare_inner_table_with_mapping(
        context: clmn.JoinContext,
        original_left: Joinable,
        original_right: Joinable,
        common_column_names: StableSet[str],
    ) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnReference]]:
        """Builds the flat inner table holding all columns of both join sides.

        Every side column is copied under a fresh unique name (`_pw_<n>`);
        columns joined on by name are additionally exposed under their own name
        as a coalesce of the two sides. Returns the inner table together with
        the mapping from original column refs to inner-table refs.
        """
        left_table, left_substitutions = original_left._substitutions()
        right_table, right_substitutions = original_right._substitutions()
        cnt = itertools.count(0)
        expressions: dict[str, expr.ColumnExpression] = {}
        colref_to_name_mapping: dict[expr.InternalColRef, str] = {}
        for table, subs in [
            (left_table, left_substitutions),
            (right_table, right_substitutions),
        ]:
            if len(subs) == 0:  # tables have empty subs, so set them here
                for ref in table:
                    subs[ref._to_internal()] = ref
            subs_total = subs | {table.id._to_internal(): table.id}
            for int_ref, expression in subs_total.items():
                inner_name = f"_pw_{next(cnt)}"
                expressions[inner_name] = expression
                colref_to_name_mapping[int_ref] = inner_name
        from pathway.internals.common import coalesce

        for name in common_column_names:
            if name != "id":
                # Merged join-key column: take whichever side is non-missing.
                expressions[name] = coalesce(original_left[name], original_right[name])
        inner_table = JoinResult._join(context, **expressions)
        final_mapping = {
            colref: inner_table[name] for colref, name in colref_to_name_mapping.items()
        }
        for name in common_column_names:
            if name != "id":
                colref = inner_table[name]
                final_mapping[colref._to_internal()] = colref
        final_mapping[inner_table.id._to_internal()] = inner_table.id
        rowwise_context = clmn.JoinRowwiseContext.from_mapping(
            inner_table._id_column, final_mapping
        )
        inner_table._rowwise_context = (
            rowwise_context  # FIXME don't set _context property of table
        )
        return (inner_table, final_mapping)

    # NOTE(review): no `self` — upstream this is a @staticmethod (plus
    # desugaring/tracing decorators); decorators appear stripped in this dump.
    def _table_join(
        left: Joinable,
        right: Joinable,
        *on: expr.ColumnExpression,
        mode: JoinMode,
        id: expr.ColumnReference | None = None,
        left_instance: expr.ColumnReference | None = None,
        right_instance: expr.ColumnReference | None = None,
    ) -> JoinResult:
        """Core join implementation shared by join/join_inner/left/right/outer.

        Validates the `on` conditions, computes the result universe, builds the
        JoinContext (swapping sides if the id column comes from the right), and
        assembles the JoinResult.
        """
        if left == right:
            raise ValueError(
                "Cannot join table with itself. Use <table>.copy() as one of the arguments of the join."
            )
        left_table, left_substitutions = left._substitutions()
        right_table, right_substitutions = right._substitutions()
        chained_join_desugaring = SubstitutionDesugaring(
            {**left_substitutions, **right_substitutions}
        )
        if id is not None:
            id = chained_join_desugaring.eval_expression(id)
            id_column = id._column
        else:
            id_column = None
        common_column_names: StableSet[str] = StableSet()
        # Instance partitioning is expressed as one more equality condition.
        if left_instance is not None and right_instance is not None:
            on = (*on, left_instance == right_instance)
        else:
            assert left_instance is None and right_instance is None
        on_ = tuple(validate_shape(cond) for cond in on)
        for cond in on_:
            cond_left = cast(expr.ColumnReference, cond._left)
            cond_right = cast(expr.ColumnReference, cond._right)
            # Same-named columns on both sides of `==` merge into one output column.
            if cond_left.name == cond_right.name:
                common_column_names.add(cond_left.name)
        on_ = tuple(chained_join_desugaring.eval_expression(cond) for cond in on_)
        for cond in on_:
            validate_join_condition(cond, left_table, right_table)
        on_left = tuple(
            left_table._eval(cond._left, left_table._table_restricted_context)
            for cond in on_
        )
        on_right = tuple(
            right_table._eval(cond._right, right_table._table_restricted_context)
            for cond in on_
        )
        # swp: id taken from the right table means the engine sees the sides swapped.
        swp = id_column is not None and id_column is right_table._id_column
        assert (
            id_column is None
            or (id_column is left_table._id_column)
            or (id_column is right_table._id_column)
        )
        left_context_table = clmn.ContextTable(universe=left._universe, columns=on_left)
        right_context_table = clmn.ContextTable(
            universe=right._universe, columns=on_right
        )
        substitution: dict[thisclass.ThisMetaclass, Joinable] = {
            thisclass.left: left,
            thisclass.right: right,
        }
        universe = JoinResult._compute_universe(
            left_table, right_table, id_column, mode
        )
        if swp:
            context = clmn.JoinContext(
                universe,
                right_table,
                left_table,
                right_context_table,
                left_context_table,
                id_column is not None,
                mode in [JoinMode.RIGHT, JoinMode.OUTER],
                mode in [JoinMode.LEFT, JoinMode.OUTER],
            )
        else:
            context = clmn.JoinContext(
                universe,
                left_table,
                right_table,
                left_context_table,
                right_context_table,
                id_column is not None,
                mode in [JoinMode.LEFT, JoinMode.OUTER],
                mode in [JoinMode.RIGHT, JoinMode.OUTER],
            )
        inner_table, columns_mapping = JoinResult._prepare_inner_table_with_mapping(
            context,
            left,
            right,
            common_column_names,
        )
        return JoinResult(
            context,
            inner_table,
            columns_mapping,
            left_table,
            right_table,
            left,
            right,
            substitution,
            common_column_names,
            mode,
        )
The provided code snippet includes necessary dependencies for implementing the `join_inner` function. Write a Python function `def join_inner( left: Joinable, right: Joinable, *on: expr.ColumnExpression, id: expr.ColumnReference | None = None, left_instance: expr.ColumnReference | None = None, right_instance: expr.ColumnReference | None = None, ) -> JoinResult` to solve the following problem:
Inner-joins two tables or join results. Args: left: the left side of the join, ``Table`` or ``JoinResult``. right: the right side of the join, ``Table`` or ``JoinResult``. on: a list of column expressions. Each must have == as the top level operation and be of the form LHS: ColumnReference == RHS: ColumnReference. id: optional argument for id of result, can be only self.id or other.id left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Returns: JoinResult: an object on which `.select()` may be called to extract relevant columns from the result of the join. Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown(''' ... age | owner | pet ... 10 | Alice | 1 ... 9 | Bob | 1 ... 8 | Alice | 2 ... ''') >>> t2 = pw.debug.table_from_markdown(''' ... age | owner | pet | size ... 10 | Alice | 3 | M ... 9 | Bob | 1 | L ... 8 | Tom | 1 | XL ... ''') >>> t3 = pw.join_inner(t1, t2, t1.pet == t2.pet, t1.owner == t2.owner).select( ... age=t1.age, owner_name=t2.owner, size=t2.size ... ) >>> pw.debug.compute_and_print(t3, include_id = False) age | owner_name | size 9 | Bob | L
Here is the function:
def join_inner(
    left: Joinable,
    right: Joinable,
    *on: expr.ColumnExpression,
    id: expr.ColumnReference | None = None,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """Inner-joins two tables or join results.

    Args:
        left: the left side of the join, ``Table`` or ``JoinResult``.
        right: the right side of the join, ``Table`` or ``JoinResult``.
        on: a list of column expressions. Each must have == as the top level operation
            and be of the form LHS: ColumnReference == RHS: ColumnReference.
        id: optional argument for id of result, can be only self.id or other.id
        left_instance/right_instance: optional arguments describing partitioning of the data into separate instances

    Returns:
        JoinResult: an object on which `.select()` may be called to extract relevant
        columns from the result of the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age | owner | pet
    ...  10 | Alice |   1
    ...   9 |   Bob |   1
    ...   8 | Alice |   2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age | owner | pet | size
    ...  10 | Alice |   3 |    M
    ...   9 |   Bob |   1 |    L
    ...   8 |   Tom |   1 |   XL
    ... ''')
    >>> t3 = pw.join_inner(t1, t2, t1.pet == t2.pet, t1.owner == t2.owner).select(
    ...     age=t1.age, owner_name=t2.owner, size=t2.size
    ... )
    >>> pw.debug.compute_and_print(t3, include_id = False)
    age | owner_name | size
    9   | Bob        | L
    """
    # Free-function form of Joinable.join_inner; simply delegates to the method.
    return left.join_inner(
        right, *on, id=id, left_instance=left_instance, right_instance=right_instance
    )
166,820 | from __future__ import annotations
import itertools
from collections.abc import Iterator
from functools import lru_cache
from typing import TYPE_CHECKING, Any, cast
from pathway.internals.trace import trace_user_frame
from abc import abstractmethod
import pathway.internals.column as clmn
import pathway.internals.expression as expr
from pathway.internals import thisclass
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
reduce_args_handler,
select_args_handler,
)
from pathway.internals.column_namespace import ColumnNamespace
from pathway.internals.decorators import contextualized_operator
from pathway.internals.desugaring import (
DesugaringContext,
SubstitutionDesugaring,
TableSelectDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.helpers import StableSet
from pathway.internals.join_mode import JoinMode
from pathway.internals.operator_input import OperatorInput
from pathway.internals.shadows import operator as op
from pathway.internals.table_like import TableLike
from pathway.internals.type_interpreter import eval_type
from pathway.internals.universe import Universe
class Joinable(TableLike, DesugaringContext):
def _subtables(self) -> StableSet[Table]: ...
def keys(self): ...
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table: ...
def filter(self, filter_expression: expr.ColumnExpression) -> Joinable: ...
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...
def __iter__(self) -> Iterator[expr.ColumnReference]:
return (self[name] for name in self.keys())
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference: ...
def _operator_dependencies(self) -> StableSet[Table]: ...
def __getattr__(self, name) -> expr.ColumnReference:
"""Get columns by name.
Warning:
- Fails if it tries to access nonexistent column.
Returns:
Column expression.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | dog
... 9 | Bob | dog
... 8 | Alice | cat
... 7 | Bob | dog
... ''')
>>> t2 = t1.select(t1.age)
>>> pw.debug.compute_and_print(t2, include_id=False)
age
7
8
9
10
"""
try:
return super().__getattr__(name)
except AttributeError:
pass
return self._get_colref_by_name(name, AttributeError)
def C(self) -> ColumnNamespace:
"""Returns the namespace of all the columns of a joinable.
Allows accessing column names that might otherwise be a reserved methods.
>>> import pathway as pw
>>> tab = pw.debug.table_from_markdown('''
... age | owner | pet | filter
... 10 | Alice | dog | True
... 9 | Bob | dog | True
... 8 | Alice | cat | False
... 7 | Bob | dog | True
... ''')
>>> isinstance(tab.C.age, pw.ColumnReference)
True
>>> pw.debug.compute_and_print(tab.filter(tab.C.filter), include_id=False)
age | owner | pet | filter
7 | Bob | dog | True
9 | Bob | dog | True
10 | Alice | dog | True
"""
return ColumnNamespace(self)
def _C(self):
return self.C
def join(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
how: JoinMode = JoinMode.INNER,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""Join self with other using the given join expression.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
on: a list of column expressions. Each must have == as the top level operation
and be of the form LHS: ColumnReference == RHS: ColumnReference.
id: optional argument for id of result, can be only self.id or other.id
how: by default, inner join is performed. Possible values are JoinMode.{INNER,LEFT,RIGHT,OUTER}
correspond to inner, left, right and outer join respectively.
left_instance/right_instance: optional arguments describing partitioning of the data into
separate instances
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... age | owner | pet | size
... 10 | Alice | 3 | M
... 9 | Bob | 1 | L
... 8 | Tom | 1 | XL
... ''')
>>> t3 = t1.join(
... t2, t1.pet == t2.pet, t1.owner == t2.owner, how=pw.JoinMode.INNER
... ).select(age=t1.age, owner_name=t2.owner, size=t2.size)
>>> pw.debug.compute_and_print(t3, include_id = False)
age | owner_name | size
9 | Bob | L
"""
return JoinResult._table_join(
self,
other,
*on,
mode=how,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
def join_inner(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""Inner-joins two tables or join results.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
on: a list of column expressions. Each must have == as the top level operation
and be of the form LHS: ColumnReference == RHS: ColumnReference.
id: optional argument for id of result, can be only self.id or other.id
left_instance/right_instance: optional arguments describing partitioning of the data
into separate instances
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... age | owner | pet | size
... 10 | Alice | 3 | M
... 9 | Bob | 1 | L
... 8 | Tom | 1 | XL
... ''')
>>> t3 = t1.join_inner(t2, t1.pet == t2.pet, t1.owner == t2.owner).select(
... age=t1.age, owner_name=t2.owner, size=t2.size
... )
>>> pw.debug.compute_and_print(t3, include_id = False)
age | owner_name | size
9 | Bob | L
"""
return JoinResult._table_join(
self,
other,
*on,
mode=JoinMode.INNER,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
def join_left(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""
Left-joins two tables or join results.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
*on: Columns to join, syntax `self.col1 == other.col2`
id: optional id column of the result
left_instance/right_instance: optional arguments describing partitioning of the data into
separate instances
Remarks:
args cannot contain id column from either of tables, \
as the result table has id column with auto-generated ids; \
it can be selected by assigning it to a column with defined \
name (passed in kwargs)
Behavior:
- for rows from the left side that were not matched with the right side,
missing values on the right are replaced with `None`
- rows from the right side that were not matched with the left side are skipped
- for rows that were matched the behavior is the same as that of an inner join.
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown(
... '''
... | a | b
... 1 | 11 | 111
... 2 | 12 | 112
... 3 | 13 | 113
... 4 | 13 | 114
... '''
... )
>>> t2 = pw.debug.table_from_markdown(
... '''
... | c | d
... 1 | 11 | 211
... 2 | 12 | 212
... 3 | 14 | 213
... 4 | 14 | 214
... '''
... )
>>> pw.debug.compute_and_print(t1.join_left(t2, t1.a == t2.c
... ).select(t1.a, t2_c=t2.c, s=pw.require(t1.b + t2.d, t2.id)),
... include_id=False)
a | t2_c | s
11 | 11 | 322
12 | 12 | 324
13 | |
13 | |
"""
return JoinResult._table_join(
self,
other,
*on,
mode=JoinMode.LEFT,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
def join_right(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""
Outer-joins two tables or join results.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
*on: Columns to join, syntax `self.col1 == other.col2`
id: optional id column of the result
left_instance/right_instance: optional arguments describing partitioning of the data into separate
instances
Remarks: args cannot contain id column from either of tables, \
as the result table has id column with auto-generated ids; \
it can be selected by assigning it to a column with defined \
name (passed in kwargs)
Behavior:
- rows from the left side that were not matched with the right side are skipped
- for rows from the right side that were not matched with the left side,
missing values on the left are replaced with `None`
- for rows that were matched the behavior is the same as that of an inner join.
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown(
... '''
... | a | b
... 1 | 11 | 111
... 2 | 12 | 112
... 3 | 13 | 113
... 4 | 13 | 114
... '''
... )
>>> t2 = pw.debug.table_from_markdown(
... '''
... | c | d
... 1 | 11 | 211
... 2 | 12 | 212
... 3 | 14 | 213
... 4 | 14 | 214
... '''
... )
>>> pw.debug.compute_and_print(t1.join_right(t2, t1.a == t2.c
... ).select(t1.a, t2_c=t2.c, s=pw.require(pw.coalesce(t1.b,0) + t2.d,t1.id)),
... include_id=False)
a | t2_c | s
| 14 |
| 14 |
11 | 11 | 322
12 | 12 | 324
Returns:
OuterJoinResult object
"""
return JoinResult._table_join(
self,
other,
*on,
mode=JoinMode.RIGHT,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
    def join_outer(
        self,
        other: Joinable,
        *on: expr.ColumnExpression,
        id: expr.ColumnReference | None = None,
        left_instance: expr.ColumnReference | None = None,
        right_instance: expr.ColumnReference | None = None,
    ) -> JoinResult:
        """Outer-joins two tables or join results.

        Args:
            other: the right side of the join, ``Table`` or ``JoinResult``.
            *on: Columns to join, syntax `self.col1 == other.col2`
            id: optional id column of the result
            left_instance/right_instance: optional arguments describing partitioning of the data
                into separate instances

        Remarks: args cannot contain id column from either of tables, \
        as the result table has id column with auto-generated ids; \
        it can be selected by assigning it to a column with defined \
        name (passed in kwargs)

        Behavior:
        - for rows from the left side that were not matched with the right side,
          missing values on the right are replaced with `None`
        - for rows from the right side that were not matched with the left side,
          missing values on the left are replaced with `None`
        - for rows that were matched the behavior is the same as that of an inner join.

        Returns:
            JoinResult: an object on which `.select()` may be called to extract relevant
            columns from the result of the join.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown(
        ...     '''
        ...    | a  | b
        ...  1 | 11 | 111
        ...  2 | 12 | 112
        ...  3 | 13 | 113
        ...  4 | 13 | 114
        ... '''
        ... )
        >>> t2 = pw.debug.table_from_markdown(
        ...     '''
        ...    | c  | d
        ...  1 | 11 | 211
        ...  2 | 12 | 212
        ...  3 | 14 | 213
        ...  4 | 14 | 214
        ... '''
        ... )
        >>> pw.debug.compute_and_print(t1.join_outer(t2, t1.a == t2.c
        ... ).select(t1.a, t2_c=t2.c, s=pw.require(t1.b + t2.d, t1.id, t2.id)),
        ... include_id=False)
        a  | t2_c | s
           | 14   |
           | 14   |
        11 | 11   | 322
        12 | 12   | 324
        13 |      |
        13 |      |
        """
        # Delegates to the shared join implementation with OUTER join mode.
        return JoinResult._table_join(
            self,
            other,
            *on,
            mode=JoinMode.OUTER,
            id=id,
            left_instance=left_instance,
            right_instance=right_instance,
        )
    def _desugaring(self) -> TableSelectDesugaring:
        # Desugaring context that resolves select-style arguments against self.
        return TableSelectDesugaring(self)
def _substitutions(
self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]: ...
class JoinResult(Joinable, OperatorInput):
    """Result of a join between tables.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age owner pet
    ... 1 10 Alice 1
    ... 2 9 Bob 1
    ... 3 8 Alice 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age owner pet size
    ... 11 10 Alice 3 M
    ... 12 9 Bob 1 L
    ... 13 8 Tom 1 XL
    ... ''')
    >>> joinresult= t1.join(t2, t1.pet == t2.pet, t1.owner == t2.owner)   # noqa: E501
    >>> isinstance(joinresult, pw.JoinResult)
    True
    >>> pw.debug.compute_and_print(joinresult.select(t1.age, t2.size), include_id=False)
    age | size
    9 | L
    """

    # Table physically storing every column of both sides under internal names.
    _inner_table: Table
    # Maps original (internal) column references to columns of _inner_table.
    _columns_mapping: dict[expr.InternalColRef, expr.ColumnReference]
    _left_table: Table
    _right_table: Table
    # The Joinable objects the user joined (may themselves be JoinResults).
    _original_left: Joinable
    _original_right: Joinable
    # Substitution of pw.left / pw.right / pw.this markers used when desugaring.
    _substitution: dict[thisclass.ThisMetaclass, Joinable]
    _chained_join_desugaring: SubstitutionDesugaring
    # Names of columns equated under the same name in the join condition.
    _joined_on_names: StableSet[str]
    _all_colnames: StableSet[str]
    _join_mode: JoinMode

    def __init__(
        self,
        _context: clmn.Context,
        _inner_table: Table,
        _columns_mapping: dict[expr.InternalColRef, expr.ColumnReference],
        _left_table: Table,
        _right_table: Table,
        _original_left: Joinable,
        _original_right: Joinable,
        _substitution: dict[thisclass.ThisMetaclass, Joinable],
        _joined_on_names: StableSet[str],
        _join_mode: JoinMode,
    ):
        super().__init__(_context)
        self._inner_table = _inner_table
        self._columns_mapping = _columns_mapping
        self._left_table = _left_table
        self._right_table = _right_table
        self._substitution = {**_substitution, thisclass.this: self}
        self._joined_on_names = _joined_on_names
        self._join_mode = _join_mode
        self._original_left = _original_left
        self._original_right = _original_right
        # The two sides must not share subtables (self-joins require .copy()).
        assert _original_left._subtables().isdisjoint(_original_right._subtables())
        self._all_colnames = StableSet.union(
            _original_left.keys(), _original_right.keys()
        )
        self._chained_join_desugaring = SubstitutionDesugaring(self._substitutions()[1])

    @staticmethod
    def _compute_universe(
        left_table: Table,
        right_table: Table,
        id: clmn.Column | None,
        mode: JoinMode,
    ) -> Universe:
        # Decide which universe the result rows live in, depending on whether
        # the caller pinned result ids to one side's id column.
        if id is left_table._id_column:
            if mode == JoinMode.LEFT:
                return left_table._universe
            elif mode == JoinMode.INNER:
                return left_table._universe.subset()
            else:
                raise KeyError("Cannot assign id's for this join type.")
        elif id is right_table._id_column:
            if mode == JoinMode.RIGHT:
                return right_table._universe
            elif mode == JoinMode.INNER:
                return right_table._universe.subset()
            else:
                raise KeyError("Cannot assign id's for this join type.")
        else:
            assert id is None
            return Universe()

    def _subtables(self) -> StableSet[Table]:
        return self._original_left._subtables() | self._original_right._subtables()

    def keys(self):
        # All column names except those appearing on both sides without being
        # part of the join condition (those would be ambiguous).
        common_colnames = self._original_left.keys() & self._original_right.keys()
        return self._all_colnames - (common_colnames - self._joined_on_names)

    def _get_colref_by_name(
        self,
        name: str,
        exception_type,
    ) -> expr.ColumnReference:
        name = self._column_deprecation_rename(name)
        if name == "id":
            return self._inner_table.id
        elif name in self._joined_on_names:
            # For inner joins both sides agree on joined-on columns; otherwise
            # use the coalesced column from the inner table.
            if self._join_mode is JoinMode.INNER:
                return self._original_left[name]
            else:
                return self._inner_table[name]
        elif name in self._original_left.keys() and name in self._original_right.keys():
            raise exception_type(
                f"Column {name} appears on both left and right inputs of join."
            )
        elif name in self._original_left.keys():
            return self._original_left[name]
        elif name in self._original_right.keys():
            return self._original_right[name]
        else:
            raise exception_type(f"No column with name {name}.")

    def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference:
        if isinstance(args, expr.ColumnReference):
            assert args.table is self or args.table is thisclass.this
            return self._get_colref_by_name(args.name, KeyError)
        else:
            return self._get_colref_by_name(args, KeyError)

    def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
        """Computes result of a join.

        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.

        Returns:
            Table: Created table.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10 | Alice | 1
        ... 9 | Bob | 1
        ... 8 | Alice | 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ... age | owner | pet | size
        ... 10 | Alice | 3 | M
        ... 9 | Bob | 1 | L
        ... 8 | Tom | 1 | XL
        ... ''')
        >>> t3 = t1.join(t2, t1.pet == t2.pet, t1.owner == t2.owner).select(age=t1.age, owner_name=t2.owner, size=t2.size)   # noqa: E501
        >>> pw.debug.compute_and_print(t3, include_id=False)
        age | owner_name | size
        9 | Bob | L
        """
        expressions: dict[str, expr.ColumnExpression] = {}
        all_args = combine_args_kwargs(args, kwargs)
        for new_name, expression in all_args.items():
            # Rewrite references to either side into references to _inner_table.
            expressions[new_name] = self._chained_join_desugaring.eval_expression(
                expression
            )
        return self._inner_table.select(**expressions)

    def _operator_dependencies(self) -> StableSet[Table]:
        return (
            self._left_table._operator_dependencies()
            | self._right_table._operator_dependencies()
        )

    def filter(self, filter_expression: expr.ColumnExpression) -> JoinResult:
        """Filters rows, keeping the ones satisfying the predicate.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age owner pet
        ... 1 10 Alice 1
        ... 2 9 Bob 1
        ... 3 8 Alice 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ... age owner pet size
        ... 11 10 Alice 3 M
        ... 12 9 Bob 1 L
        ... 13 8 Tom 1 XL
        ... ''')
        >>> result = t1.join(t2).filter(t1.owner == t2.owner).select(t1.age, t2.size)   # noqa: E501
        >>> pw.debug.compute_and_print(result, include_id=False)
        age | size
        8 | M
        9 | L
        10 | M
        """
        desugared_filter_expression = self._chained_join_desugaring.eval_expression(
            filter_expression
        )
        inner_table = self._inner_table.filter(desugared_filter_expression)
        # Re-point the column mapping at the filtered inner table.
        new_columns_mapping = {
            int_ref: inner_table[expression.name]
            for int_ref, expression in self._columns_mapping.items()
        }
        new_columns_mapping[inner_table.id._to_internal()] = inner_table.id
        context = clmn.JoinRowwiseContext.from_mapping(
            inner_table._id_column, new_columns_mapping
        )
        inner_table._rowwise_context = context
        return JoinResult(
            _context=context,
            _inner_table=inner_table,
            _columns_mapping=new_columns_mapping,
            _left_table=self._left_table,
            _right_table=self._right_table,
            _original_left=self._original_left,
            _original_right=self._original_right,
            _substitution=self._substitution,
            _joined_on_names=self._joined_on_names,
            _join_mode=self._join_mode,
        )

    def groupby(
        self,
        *args: expr.ColumnReference,
        id: expr.ColumnReference | None = None,
    ) -> GroupedJoinResult:
        """Groups join result by columns from args.

        Note:
            Usually followed by `.reduce()` that aggregates the result and returns a table.

        Args:
            args: columns to group by.
            id: if provided, is the column used to set id's of the rows of the result

        Returns:
            GroupedJoinResult: Groupby object.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... cost owner pet
        ... 1 100 Alice 1
        ... 2 90 Bob 1
        ... 3 80 Alice 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ... cost owner pet size
        ... 11 100 Alice 3 M
        ... 12 90 Bob 1 L
        ... 13 80 Tom 1 XL
        ... ''')
        >>> result = (t1.join(t2, t1.owner==t2.owner).groupby(pw.this.owner)
        ...     .reduce(pw.this.owner, pairs = pw.reducers.count()))
        >>> pw.debug.compute_and_print(result, include_id=False)
        owner | pairs
        Alice | 2
        Bob | 1
        """
        for arg in args:
            if not isinstance(arg, expr.ColumnReference):
                if isinstance(arg, str):
                    raise ValueError(
                        f"Expected a ColumnReference, found a string. Did you mean this.{arg} instead of {repr(arg)}?"
                    )
                else:
                    raise ValueError(
                        "In JoinResult.groupby() all arguments have to be a ColumnReference."
                    )
        # Imported lazily to avoid a circular import at module load time.
        from pathway.internals.groupbys import GroupedJoinResult

        return GroupedJoinResult(
            _join_result=self,
            _args=args,
            _id=id,
        )

    def reduce(
        self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
    ) -> Table:
        """Reduce a join result to a single row.

        Equivalent to `self.groupby().reduce(*args, **kwargs)`.

        Args:
            args: reducer to reduce the table with
            kwargs: reducer to reduce the table with. Its key is the new name of a column.

        Returns:
            Table: Reduced table.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... cost owner pet
        ... 1 100 Alice 1
        ... 2 90 Bob 1
        ... 3 80 Alice 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ... cost owner pet size
        ... 11 100 Alice 3 M
        ... 12 90 Bob 1 L
        ... 13 80 Tom 1 XL
        ... ''')
        >>> result = t1.join(t2, t1.owner==t2.owner).reduce(total_pairs = pw.reducers.count())
        >>> pw.debug.compute_and_print(result, include_id=False)
        total_pairs
        3
        """
        for arg in args:
            if not isinstance(arg, expr.ColumnReference):
                if isinstance(arg, str):
                    raise ValueError(
                        f"Expected a ColumnReference, found a string. Did you mean this.{arg} instead of {repr(arg)}?"
                    )
                else:
                    raise ValueError(
                        "In JoinResult.reduce() all positional arguments have to be a ColumnReference."
                    )
        return self.groupby().reduce(*args, **kwargs)

    def _substitutions(
        self,
    ) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
        # Expose the inner table and its column mapping so chained joins can
        # desugar references to this JoinResult's columns.
        return self._inner_table, {
            int_ref: expression for int_ref, expression in self._columns_mapping.items()
        }

    # NOTE(review): the following helpers take no `self` and are always invoked
    # via the class (e.g. JoinResult._table_join(...)); marked @staticmethod
    # accordingly — calling them on an instance would otherwise misbind `self`.
    @staticmethod
    def _join(
        context: clmn.JoinContext, *args: expr.ColumnReference, **kwargs: Any
    ) -> Table:
        """Used internally to create an internal Table containing result of a join."""
        columns: dict[str, clmn.Column] = {}
        all_args = combine_args_kwargs(args, kwargs)
        for new_name, expression in all_args.items():
            columns[new_name] = expression._column_with_expression_cls(
                context=context,
                universe=context.universe,
                expression=expression,
            )
        # Imported lazily to avoid a circular import at module load time.
        from pathway.internals.table import Table

        return Table(
            _columns=columns,
            _context=context,
        )

    @staticmethod
    def _prepare_inner_table_with_mapping(
        context: clmn.JoinContext,
        original_left: Joinable,
        original_right: Joinable,
        common_column_names: StableSet[str],
    ) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnReference]]:
        # Build the inner table, giving every column of both sides a unique
        # internal name (_pw_<n>), and return it together with a mapping from
        # the original column references to those inner columns.
        left_table, left_substitutions = original_left._substitutions()
        right_table, right_substitutions = original_right._substitutions()
        cnt = itertools.count(0)
        expressions: dict[str, expr.ColumnExpression] = {}
        colref_to_name_mapping: dict[expr.InternalColRef, str] = {}
        for table, subs in [
            (left_table, left_substitutions),
            (right_table, right_substitutions),
        ]:
            if len(subs) == 0:  # tables have empty subs, so set them here
                for ref in table:
                    subs[ref._to_internal()] = ref
            subs_total = subs | {table.id._to_internal(): table.id}
            for int_ref, expression in subs_total.items():
                inner_name = f"_pw_{next(cnt)}"
                expressions[inner_name] = expression
                colref_to_name_mapping[int_ref] = inner_name
        from pathway.internals.common import coalesce

        # Columns joined under a common name are exposed once, merged via coalesce.
        for name in common_column_names:
            if name != "id":
                expressions[name] = coalesce(original_left[name], original_right[name])
        inner_table = JoinResult._join(context, **expressions)
        final_mapping = {
            colref: inner_table[name] for colref, name in colref_to_name_mapping.items()
        }
        for name in common_column_names:
            if name != "id":
                colref = inner_table[name]
                final_mapping[colref._to_internal()] = colref
        final_mapping[inner_table.id._to_internal()] = inner_table.id
        rowwise_context = clmn.JoinRowwiseContext.from_mapping(
            inner_table._id_column, final_mapping
        )
        inner_table._rowwise_context = (
            rowwise_context  # FIXME don't set _context property of table
        )
        return (inner_table, final_mapping)

    @staticmethod
    def _table_join(
        left: Joinable,
        right: Joinable,
        *on: expr.ColumnExpression,
        mode: JoinMode,
        id: expr.ColumnReference | None = None,
        left_instance: expr.ColumnReference | None = None,
        right_instance: expr.ColumnReference | None = None,
    ) -> JoinResult:
        # Core implementation backing join/join_inner/join_left/join_right/join_outer.
        if left == right:
            raise ValueError(
                "Cannot join table with itself. Use <table>.copy() as one of the arguments of the join."
            )
        left_table, left_substitutions = left._substitutions()
        right_table, right_substitutions = right._substitutions()
        chained_join_desugaring = SubstitutionDesugaring(
            {**left_substitutions, **right_substitutions}
        )
        if id is not None:
            id = chained_join_desugaring.eval_expression(id)
            id_column = id._column
        else:
            id_column = None
        common_column_names: StableSet[str] = StableSet()
        if left_instance is not None and right_instance is not None:
            on = (*on, left_instance == right_instance)
        else:
            assert left_instance is None and right_instance is None
        on_ = tuple(validate_shape(cond) for cond in on)
        # Conditions equating same-named columns mark those names as joined-on.
        for cond in on_:
            cond_left = cast(expr.ColumnReference, cond._left)
            cond_right = cast(expr.ColumnReference, cond._right)
            if cond_left.name == cond_right.name:
                common_column_names.add(cond_left.name)
        on_ = tuple(chained_join_desugaring.eval_expression(cond) for cond in on_)
        for cond in on_:
            validate_join_condition(cond, left_table, right_table)
        on_left = tuple(
            left_table._eval(cond._left, left_table._table_restricted_context)
            for cond in on_
        )
        on_right = tuple(
            right_table._eval(cond._right, right_table._table_restricted_context)
            for cond in on_
        )
        # If result ids come from the right table, swap sides for the context.
        swp = id_column is not None and id_column is right_table._id_column
        assert (
            id_column is None
            or (id_column is left_table._id_column)
            or (id_column is right_table._id_column)
        )
        left_context_table = clmn.ContextTable(universe=left._universe, columns=on_left)
        right_context_table = clmn.ContextTable(
            universe=right._universe, columns=on_right
        )
        substitution: dict[thisclass.ThisMetaclass, Joinable] = {
            thisclass.left: left,
            thisclass.right: right,
        }
        universe = JoinResult._compute_universe(
            left_table, right_table, id_column, mode
        )
        if swp:
            context = clmn.JoinContext(
                universe,
                right_table,
                left_table,
                right_context_table,
                left_context_table,
                id_column is not None,
                mode in [JoinMode.RIGHT, JoinMode.OUTER],
                mode in [JoinMode.LEFT, JoinMode.OUTER],
            )
        else:
            context = clmn.JoinContext(
                universe,
                left_table,
                right_table,
                left_context_table,
                right_context_table,
                id_column is not None,
                mode in [JoinMode.LEFT, JoinMode.OUTER],
                mode in [JoinMode.RIGHT, JoinMode.OUTER],
            )
        inner_table, columns_mapping = JoinResult._prepare_inner_table_with_mapping(
            context,
            left,
            right,
            common_column_names,
        )
        return JoinResult(
            context,
            inner_table,
            columns_mapping,
            left_table,
            right_table,
            left,
            right,
            substitution,
            common_column_names,
            mode,
        )
The provided code snippet includes necessary dependencies for implementing the `join_left` function. Write a Python function `def join_left( left: Joinable, right: Joinable, *on: expr.ColumnExpression, id: expr.ColumnReference | None = None, left_instance: expr.ColumnReference | None = None, right_instance: expr.ColumnReference | None = None, ) -> JoinResult` to solve the following problem:
Left-joins two tables or join results. Args: self: the left side of the join, ``Table`` or ``JoinResult``. other: the right side of the join, ``Table`` or ``JoinResult``. *on: Columns to join, syntax `self.col1 == other.col2` id: optional id column of the result left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Remarks: args cannot contain id column from either of tables, \ as the result table has id column with auto-generated ids; \ it can be selected by assigning it to a column with defined \ name (passed in kwargs) Behavior: - for rows from the left side that were not matched with the right side, missing values on the right are replaced with `None` - rows from the right side that were not matched with the left side are skipped - for rows that were matched the behavior is the same as that of an inner join. Returns: JoinResult: an object on which `.select()` may be called to extract relevant columns from the result of the join. Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | b ... 1 | 11 | 111 ... 2 | 12 | 112 ... 3 | 13 | 113 ... 4 | 13 | 114 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | c | d ... 1 | 11 | 211 ... 2 | 12 | 212 ... 3 | 14 | 213 ... 4 | 14 | 214 ... ''' ... ) >>> pw.debug.compute_and_print(pw.join_left(t1, t2, t1.a == t2.c ... ).select(t1.a, t2_c=t2.c, s=pw.require(t1.b + t2.d, t2.id)), ... include_id=False) a | t2_c | s 11 | 11 | 322 12 | 12 | 324 13 | | 13 | |
Here is the function:
def join_left(
    left: Joinable,
    right: Joinable,
    *on: expr.ColumnExpression,
    id: expr.ColumnReference | None = None,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """
    Left-joins two tables or join results.

    Args:
        left: the left side of the join, ``Table`` or ``JoinResult``.
        right: the right side of the join, ``Table`` or ``JoinResult``.
        *on: Columns to join, syntax `left.col1 == right.col2`
        id: optional id column of the result
        left_instance/right_instance: optional arguments describing partitioning of the data into
            separate instances

    Remarks:
    args cannot contain id column from either of tables, \
    as the result table has id column with auto-generated ids; \
    it can be selected by assigning it to a column with defined \
    name (passed in kwargs)

    Behavior:
    - for rows from the left side that were not matched with the right side,
      missing values on the right are replaced with `None`
    - rows from the right side that were not matched with the left side are skipped
    - for rows that were matched the behavior is the same as that of an inner join.

    Returns:
        JoinResult: an object on which `.select()` may be called to extract relevant
        columns from the result of the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...    | a  | b
    ...  1 | 11 | 111
    ...  2 | 12 | 112
    ...  3 | 13 | 113
    ...  4 | 13 | 114
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...    | c  | d
    ...  1 | 11 | 211
    ...  2 | 12 | 212
    ...  3 | 14 | 213
    ...  4 | 14 | 214
    ... '''
    ... )
    >>> pw.debug.compute_and_print(pw.join_left(t1, t2, t1.a == t2.c
    ... ).select(t1.a, t2_c=t2.c, s=pw.require(t1.b + t2.d, t2.id)),
    ... include_id=False)
    a  | t2_c | s
    11 | 11   | 322
    12 | 12   | 324
    13 |      |
    13 |      |
    """
    # Free-function convenience wrapper around Joinable.join_left.
    return left.join_left(
        right, *on, id=id, left_instance=left_instance, right_instance=right_instance
    )
from __future__ import annotations
import itertools
from collections.abc import Iterator
from functools import lru_cache
from typing import TYPE_CHECKING, Any, cast
from pathway.internals.trace import trace_user_frame
from abc import abstractmethod
import pathway.internals.column as clmn
import pathway.internals.expression as expr
from pathway.internals import thisclass
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
reduce_args_handler,
select_args_handler,
)
from pathway.internals.column_namespace import ColumnNamespace
from pathway.internals.decorators import contextualized_operator
from pathway.internals.desugaring import (
DesugaringContext,
SubstitutionDesugaring,
TableSelectDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.helpers import StableSet
from pathway.internals.join_mode import JoinMode
from pathway.internals.operator_input import OperatorInput
from pathway.internals.shadows import operator as op
from pathway.internals.table_like import TableLike
from pathway.internals.type_interpreter import eval_type
from pathway.internals.universe import Universe
class Joinable(TableLike, DesugaringContext):
def _subtables(self) -> StableSet[Table]: ...
def keys(self): ...
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table: ...
def filter(self, filter_expression: expr.ColumnExpression) -> Joinable: ...
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...
def __iter__(self) -> Iterator[expr.ColumnReference]:
return (self[name] for name in self.keys())
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference: ...
def _operator_dependencies(self) -> StableSet[Table]: ...
def __getattr__(self, name) -> expr.ColumnReference:
"""Get columns by name.
Warning:
- Fails if it tries to access nonexistent column.
Returns:
Column expression.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | dog
... 9 | Bob | dog
... 8 | Alice | cat
... 7 | Bob | dog
... ''')
>>> t2 = t1.select(t1.age)
>>> pw.debug.compute_and_print(t2, include_id=False)
age
7
8
9
10
"""
try:
return super().__getattr__(name)
except AttributeError:
pass
return self._get_colref_by_name(name, AttributeError)
def C(self) -> ColumnNamespace:
"""Returns the namespace of all the columns of a joinable.
Allows accessing column names that might otherwise be a reserved methods.
>>> import pathway as pw
>>> tab = pw.debug.table_from_markdown('''
... age | owner | pet | filter
... 10 | Alice | dog | True
... 9 | Bob | dog | True
... 8 | Alice | cat | False
... 7 | Bob | dog | True
... ''')
>>> isinstance(tab.C.age, pw.ColumnReference)
True
>>> pw.debug.compute_and_print(tab.filter(tab.C.filter), include_id=False)
age | owner | pet | filter
7 | Bob | dog | True
9 | Bob | dog | True
10 | Alice | dog | True
"""
return ColumnNamespace(self)
def _C(self):
return self.C
def join(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
how: JoinMode = JoinMode.INNER,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""Join self with other using the given join expression.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
on: a list of column expressions. Each must have == as the top level operation
and be of the form LHS: ColumnReference == RHS: ColumnReference.
id: optional argument for id of result, can be only self.id or other.id
how: by default, inner join is performed. Possible values are JoinMode.{INNER,LEFT,RIGHT,OUTER}
correspond to inner, left, right and outer join respectively.
left_instance/right_instance: optional arguments describing partitioning of the data into
separate instances
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... age | owner | pet | size
... 10 | Alice | 3 | M
... 9 | Bob | 1 | L
... 8 | Tom | 1 | XL
... ''')
>>> t3 = t1.join(
... t2, t1.pet == t2.pet, t1.owner == t2.owner, how=pw.JoinMode.INNER
... ).select(age=t1.age, owner_name=t2.owner, size=t2.size)
>>> pw.debug.compute_and_print(t3, include_id = False)
age | owner_name | size
9 | Bob | L
"""
return JoinResult._table_join(
self,
other,
*on,
mode=how,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
def join_inner(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""Inner-joins two tables or join results.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
on: a list of column expressions. Each must have == as the top level operation
and be of the form LHS: ColumnReference == RHS: ColumnReference.
id: optional argument for id of result, can be only self.id or other.id
left_instance/right_instance: optional arguments describing partitioning of the data
into separate instances
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... age | owner | pet | size
... 10 | Alice | 3 | M
... 9 | Bob | 1 | L
... 8 | Tom | 1 | XL
... ''')
>>> t3 = t1.join_inner(t2, t1.pet == t2.pet, t1.owner == t2.owner).select(
... age=t1.age, owner_name=t2.owner, size=t2.size
... )
>>> pw.debug.compute_and_print(t3, include_id = False)
age | owner_name | size
9 | Bob | L
"""
return JoinResult._table_join(
self,
other,
*on,
mode=JoinMode.INNER,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
def join_left(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""
Left-joins two tables or join results.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
*on: Columns to join, syntax `self.col1 == other.col2`
id: optional id column of the result
left_instance/right_instance: optional arguments describing partitioning of the data into
separate instances
Remarks:
args cannot contain id column from either of tables, \
as the result table has id column with auto-generated ids; \
it can be selected by assigning it to a column with defined \
name (passed in kwargs)
Behavior:
- for rows from the left side that were not matched with the right side,
missing values on the right are replaced with `None`
- rows from the right side that were not matched with the left side are skipped
- for rows that were matched the behavior is the same as that of an inner join.
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown(
... '''
... | a | b
... 1 | 11 | 111
... 2 | 12 | 112
... 3 | 13 | 113
... 4 | 13 | 114
... '''
... )
>>> t2 = pw.debug.table_from_markdown(
... '''
... | c | d
... 1 | 11 | 211
... 2 | 12 | 212
... 3 | 14 | 213
... 4 | 14 | 214
... '''
... )
>>> pw.debug.compute_and_print(t1.join_left(t2, t1.a == t2.c
... ).select(t1.a, t2_c=t2.c, s=pw.require(t1.b + t2.d, t2.id)),
... include_id=False)
a | t2_c | s
11 | 11 | 322
12 | 12 | 324
13 | |
13 | |
"""
return JoinResult._table_join(
self,
other,
*on,
mode=JoinMode.LEFT,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
def join_right(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""
Outer-joins two tables or join results.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
*on: Columns to join, syntax `self.col1 == other.col2`
id: optional id column of the result
left_instance/right_instance: optional arguments describing partitioning of the data into separate
instances
Remarks: args cannot contain id column from either of tables, \
as the result table has id column with auto-generated ids; \
it can be selected by assigning it to a column with defined \
name (passed in kwargs)
Behavior:
- rows from the left side that were not matched with the right side are skipped
- for rows from the right side that were not matched with the left side,
missing values on the left are replaced with `None`
- for rows that were matched the behavior is the same as that of an inner join.
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown(
... '''
... | a | b
... 1 | 11 | 111
... 2 | 12 | 112
... 3 | 13 | 113
... 4 | 13 | 114
... '''
... )
>>> t2 = pw.debug.table_from_markdown(
... '''
... | c | d
... 1 | 11 | 211
... 2 | 12 | 212
... 3 | 14 | 213
... 4 | 14 | 214
... '''
... )
>>> pw.debug.compute_and_print(t1.join_right(t2, t1.a == t2.c
... ).select(t1.a, t2_c=t2.c, s=pw.require(pw.coalesce(t1.b,0) + t2.d,t1.id)),
... include_id=False)
a | t2_c | s
| 14 |
| 14 |
11 | 11 | 322
12 | 12 | 324
Returns:
OuterJoinResult object
"""
return JoinResult._table_join(
self,
other,
*on,
mode=JoinMode.RIGHT,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
def join_outer(
    self,
    other: Joinable,
    *on: expr.ColumnExpression,
    id: expr.ColumnReference | None = None,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """Outer-joins two tables or join results.

    Args:
        other: the right side of the join, ``Table`` or ``JoinResult``.
        *on: Columns to join, syntax `self.col1 == other.col2`
        id: optional id column of the result
        left_instance/right_instance: optional arguments describing partitioning of the data
            into separate instances

    Remarks: args cannot contain id column from either of tables, \
    as the result table has id column with auto-generated ids; \
    it can be selected by assigning it to a column with defined \
    name (passed in kwargs)

    Behavior:
    - for rows from the left side that were not matched with the right side,
      missing values on the right are replaced with `None`
    - for rows from the right side that were not matched with the left side,
      missing values on the left are replaced with `None`
    - for rows that were matched the behavior is the same as that of an inner join.

    Returns:
        JoinResult: an object on which `.select()` may be called to extract relevant
        columns from the result of the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...         | a  | b
    ...       1 | 11 | 111
    ...       2 | 12 | 112
    ...       3 | 13 | 113
    ...       4 | 13 | 114
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...         | c  | d
    ...       1 | 11 | 211
    ...       2 | 12 | 212
    ...       3 | 14 | 213
    ...       4 | 14 | 214
    ...     '''
    ... )
    >>> pw.debug.compute_and_print(t1.join_outer(t2, t1.a == t2.c
    ... ).select(t1.a, t2_c=t2.c, s=pw.require(t1.b + t2.d, t1.id, t2.id)),
    ... include_id=False)
    a  | t2_c | s
       | 14   |
       | 14   |
    11 | 11   | 322
    12 | 12   | 324
    13 |      |
    13 |      |
    """
    # Full outer join: unmatched rows from both sides are preserved, with
    # the missing side's columns filled with None.
    return JoinResult._table_join(
        self,
        other,
        *on,
        mode=JoinMode.OUTER,
        id=id,
        left_instance=left_instance,
        right_instance=right_instance,
    )
def _desugaring(self) -> TableSelectDesugaring:
    # Desugaring context used to resolve pw.this-style references in
    # expressions evaluated against this joinable.
    return TableSelectDesugaring(self)
def _substitutions(
    self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
    # Abstract hook: returns the backing table together with a mapping
    # from this joinable's internal column refs to expressions over that
    # table (used by the join machinery; see JoinResult._substitutions).
    ...
class JoinResult(Joinable, OperatorInput):
    """Result of a join between tables.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ...    age  owner  pet
    ... 1   10  Alice    1
    ... 2    9    Bob    1
    ... 3    8  Alice    2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ...    age  owner  pet size
    ... 11  10  Alice    3    M
    ... 12   9    Bob    1    L
    ... 13   8    Tom    1   XL
    ... ''')
    >>> joinresult= t1.join(t2, t1.pet == t2.pet, t1.owner == t2.owner)   # noqa: E501
    >>> isinstance(joinresult, pw.JoinResult)
    True
    >>> pw.debug.compute_and_print(joinresult.select(t1.age, t2.size), include_id=False)
    age | size
    9   | L
    """

    # Physical table holding all joined columns under generated names
    # (``_pw_0``, ``_pw_1``, ...) plus coalesced join-key columns.
    _inner_table: Table
    # Maps original column refs (from either input) to columns of
    # ``_inner_table``.
    _columns_mapping: dict[expr.InternalColRef, expr.ColumnReference]
    _left_table: Table
    _right_table: Table
    # The joinables exactly as supplied by the user (may themselves be
    # JoinResults in chained joins).
    _original_left: Joinable
    _original_right: Joinable
    # Resolution of pw.this / pw.left / pw.right markers to joinables.
    _substitution: dict[thisclass.ThisMetaclass, Joinable]
    _chained_join_desugaring: SubstitutionDesugaring
    # Column names that appeared on both sides of an equality condition
    # with identical names (these are merged into one output column).
    _joined_on_names: StableSet[str]
    _all_colnames: StableSet[str]
    _join_mode: JoinMode

    def __init__(
        self,
        _context: clmn.Context,
        _inner_table: Table,
        _columns_mapping: dict[expr.InternalColRef, expr.ColumnReference],
        _left_table: Table,
        _right_table: Table,
        _original_left: Joinable,
        _original_right: Joinable,
        _substitution: dict[thisclass.ThisMetaclass, Joinable],
        _joined_on_names: StableSet[str],
        _join_mode: JoinMode,
    ):
        # Internal constructor; instances are built via _table_join.
        super().__init__(_context)
        self._inner_table = _inner_table
        self._columns_mapping = _columns_mapping
        self._left_table = _left_table
        self._right_table = _right_table
        # pw.this inside expressions over this result refers to the result
        # itself.
        self._substitution = {**_substitution, thisclass.this: self}
        self._joined_on_names = _joined_on_names
        self._join_mode = _join_mode
        self._original_left = _original_left
        self._original_right = _original_right
        # A table may participate only once in a single join (self-joins
        # require an explicit .copy()).
        assert _original_left._subtables().isdisjoint(_original_right._subtables())
        self._all_colnames = StableSet.union(
            _original_left.keys(), _original_right.keys()
        )
        self._chained_join_desugaring = SubstitutionDesugaring(self._substitutions()[1])

    # NOTE(review): no ``self`` parameter — presumably a @staticmethod
    # upstream whose decorator was lost in extraction; confirm.
    def _compute_universe(
        left_table: Table,
        right_table: Table,
        id: clmn.Column | None,
        mode: JoinMode,
    ) -> Universe:
        # Choose the universe (id space) of the join result. Reusing a
        # side's ids is only sound when that side's rows are guaranteed to
        # all appear (its own outer mode) or to appear at most once
        # (INNER → subset).
        if id is left_table._id_column:
            if mode == JoinMode.LEFT:
                return left_table._universe
            elif mode == JoinMode.INNER:
                return left_table._universe.subset()
            else:
                raise KeyError("Cannot assign id's for this join type.")
        elif id is right_table._id_column:
            if mode == JoinMode.RIGHT:
                return right_table._universe
            elif mode == JoinMode.INNER:
                return right_table._universe.subset()
            else:
                raise KeyError("Cannot assign id's for this join type.")
        else:
            assert id is None
            # No id column requested: the result gets fresh auto-generated
            # ids in a brand-new universe.
            return Universe()

    def _subtables(self) -> StableSet[Table]:
        # All base tables reachable through both inputs.
        return self._original_left._subtables() | self._original_right._subtables()

    def keys(self):
        # Column names addressable on this result: every input column,
        # except same-named columns present on both sides that were NOT
        # join keys (those are ambiguous).
        common_colnames = self._original_left.keys() & self._original_right.keys()
        return self._all_colnames - (common_colnames - self._joined_on_names)

    def _get_colref_by_name(
        self,
        name: str,
        exception_type,
    ) -> expr.ColumnReference:
        # Resolve a column name on the join result; ``exception_type``
        # lets callers raise KeyError vs AttributeError as appropriate.
        name = self._column_deprecation_rename(name)
        if name == "id":
            return self._inner_table.id
        elif name in self._joined_on_names:
            # For inner joins both sides agree, so the left column is fine;
            # for outer joins the coalesced inner column must be used.
            if self._join_mode is JoinMode.INNER:
                return self._original_left[name]
            else:
                return self._inner_table[name]
        elif name in self._original_left.keys() and name in self._original_right.keys():
            raise exception_type(
                f"Column {name} appears on both left and right inputs of join."
            )
        elif name in self._original_left.keys():
            return self._original_left[name]
        elif name in self._original_right.keys():
            return self._original_right[name]
        else:
            raise exception_type(f"No column with name {name}.")

    def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference:
        if isinstance(args, expr.ColumnReference):
            assert args.table is self or args.table is thisclass.this
            return self._get_colref_by_name(args.name, KeyError)
        else:
            return self._get_colref_by_name(args, KeyError)

    def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
        """Computes result of a join.

        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.

        Returns:
            Table: Created table.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age  | owner  | pet
        ...  10  | Alice  | 1
        ...   9  | Bob    | 1
        ...   8  | Alice  | 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ... age  | owner  | pet | size
        ...  10  | Alice  | 3   | M
        ...   9  | Bob    | 1   | L
        ...   8  | Tom    | 1   | XL
        ... ''')
        >>> t3 = t1.join(t2, t1.pet == t2.pet, t1.owner == t2.owner).select(age=t1.age, owner_name=t2.owner, size=t2.size)   # noqa: E501
        >>> pw.debug.compute_and_print(t3, include_id=False)
        age | owner_name | size
        9   | Bob        | L
        """
        expressions: dict[str, expr.ColumnExpression] = {}
        all_args = combine_args_kwargs(args, kwargs)
        for new_name, expression in all_args.items():
            # Rewrite user-facing column refs into refs over _inner_table.
            expressions[new_name] = self._chained_join_desugaring.eval_expression(
                expression
            )
        return self._inner_table.select(**expressions)

    def _operator_dependencies(self) -> StableSet[Table]:
        return (
            self._left_table._operator_dependencies()
            | self._right_table._operator_dependencies()
        )

    def filter(self, filter_expression: expr.ColumnExpression) -> JoinResult:
        """Filters rows, keeping the ones satisfying the predicate.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...    age  owner  pet
        ... 1   10  Alice    1
        ... 2    9    Bob    1
        ... 3    8  Alice    2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...    age  owner  pet size
        ... 11  10  Alice    3    M
        ... 12   9    Bob    1    L
        ... 13   8    Tom    1   XL
        ... ''')
        >>> result = t1.join(t2).filter(t1.owner == t2.owner).select(t1.age, t2.size)   # noqa: E501
        >>> pw.debug.compute_and_print(result, include_id=False)
        age | size
        8   | M
        9   | L
        10  | M
        """
        desugared_filter_expression = self._chained_join_desugaring.eval_expression(
            filter_expression
        )
        inner_table = self._inner_table.filter(desugared_filter_expression)
        # Re-point every mapped column at the filtered inner table so the
        # returned JoinResult stays chainable.
        new_columns_mapping = {
            int_ref: inner_table[expression.name]
            for int_ref, expression in self._columns_mapping.items()
        }
        new_columns_mapping[inner_table.id._to_internal()] = inner_table.id
        context = clmn.JoinRowwiseContext.from_mapping(
            inner_table._id_column, new_columns_mapping
        )
        inner_table._rowwise_context = context
        return JoinResult(
            _context=context,
            _inner_table=inner_table,
            _columns_mapping=new_columns_mapping,
            _left_table=self._left_table,
            _right_table=self._right_table,
            _original_left=self._original_left,
            _original_right=self._original_right,
            _substitution=self._substitution,
            _joined_on_names=self._joined_on_names,
            _join_mode=self._join_mode,
        )

    def groupby(
        self,
        *args: expr.ColumnReference,
        id: expr.ColumnReference | None = None,
    ) -> GroupedJoinResult:
        """Groups join result by columns from args.

        Note:
            Usually followed by `.reduce()` that aggregates the result and returns a table.

        Args:
            args: columns to group by.
            id: if provided, is the column used to set id's of the rows of the result

        Returns:
            GroupedJoinResult: Groupby object.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...    cost  owner  pet
        ... 1   100  Alice    1
        ... 2    90    Bob    1
        ... 3    80  Alice    2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...    cost  owner  pet size
        ... 11  100  Alice    3    M
        ... 12   90    Bob    1    L
        ... 13   80    Tom    1   XL
        ... ''')
        >>> result = (t1.join(t2, t1.owner==t2.owner).groupby(pw.this.owner)
        ...     .reduce(pw.this.owner, pairs = pw.reducers.count()))
        >>> pw.debug.compute_and_print(result, include_id=False)
        owner | pairs
        Alice | 2
        Bob   | 1
        """
        for arg in args:
            if not isinstance(arg, expr.ColumnReference):
                # Give a targeted hint for the common mistake of passing a
                # bare column name string.
                if isinstance(arg, str):
                    raise ValueError(
                        f"Expected a ColumnReference, found a string. Did you mean this.{arg} instead of {repr(arg)}?"
                    )
                else:
                    raise ValueError(
                        "In JoinResult.groupby() all arguments have to be a ColumnReference."
                    )
        from pathway.internals.groupbys import GroupedJoinResult

        return GroupedJoinResult(
            _join_result=self,
            _args=args,
            _id=id,
        )

    def reduce(
        self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
    ) -> Table:
        """Reduce a join result to a single row.

        Equivalent to `self.groupby().reduce(*args, **kwargs)`.

        Args:
            args: reducer to reduce the table with
            kwargs: reducer to reduce the table with. Its key is the new name of a column.

        Returns:
            Table: Reduced table.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ...    cost  owner  pet
        ... 1   100  Alice    1
        ... 2    90    Bob    1
        ... 3    80  Alice    2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ...    cost  owner  pet size
        ... 11  100  Alice    3    M
        ... 12   90    Bob    1    L
        ... 13   80    Tom    1   XL
        ... ''')
        >>> result = t1.join(t2, t1.owner==t2.owner).reduce(total_pairs = pw.reducers.count())
        >>> pw.debug.compute_and_print(result, include_id=False)
        total_pairs
        3
        """
        for arg in args:
            if not isinstance(arg, expr.ColumnReference):
                if isinstance(arg, str):
                    raise ValueError(
                        f"Expected a ColumnReference, found a string. Did you mean this.{arg} instead of {repr(arg)}?"
                    )
                else:
                    raise ValueError(
                        "In JoinResult.reduce() all positional arguments have to be a ColumnReference."
                    )
        return self.groupby().reduce(*args, **kwargs)

    def _substitutions(
        self,
    ) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
        # Expose the inner table plus a (defensive copy of the) mapping so
        # chained joins can rewrite expressions over this result.
        return self._inner_table, {
            int_ref: expression for int_ref, expression in self._columns_mapping.items()
        }

    # NOTE(review): no ``self`` parameter — presumably a @staticmethod (and
    # possibly a contextualized operator) upstream; decorator may have been
    # lost in extraction — confirm.
    def _join(
        context: clmn.JoinContext, *args: expr.ColumnReference, **kwargs: Any
    ) -> Table:
        """Used internally to create an internal Table containing result of a join."""
        columns: dict[str, clmn.Column] = {}
        all_args = combine_args_kwargs(args, kwargs)
        for new_name, expression in all_args.items():
            columns[new_name] = expression._column_with_expression_cls(
                context=context,
                universe=context.universe,
                expression=expression,
            )
        from pathway.internals.table import Table

        return Table(
            _columns=columns,
            _context=context,
        )

    # NOTE(review): no ``self`` parameter — presumably a @staticmethod
    # upstream; confirm.
    def _prepare_inner_table_with_mapping(
        context: clmn.JoinContext,
        original_left: Joinable,
        original_right: Joinable,
        common_column_names: StableSet[str],
    ) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnReference]]:
        # Materializes every column of both inputs into one inner table
        # under generated names and returns the ref -> inner-column map.
        left_table, left_substitutions = original_left._substitutions()
        right_table, right_substitutions = original_right._substitutions()
        cnt = itertools.count(0)
        expressions: dict[str, expr.ColumnExpression] = {}
        colref_to_name_mapping: dict[expr.InternalColRef, str] = {}
        for table, subs in [
            (left_table, left_substitutions),
            (right_table, right_substitutions),
        ]:
            if len(subs) == 0:  # tables have empty subs, so set them here
                for ref in table:
                    subs[ref._to_internal()] = ref
            subs_total = subs | {table.id._to_internal(): table.id}
            for int_ref, expression in subs_total.items():
                # Generated names avoid clashes between same-named columns
                # of the two inputs.
                inner_name = f"_pw_{next(cnt)}"
                expressions[inner_name] = expression
                colref_to_name_mapping[int_ref] = inner_name
        from pathway.internals.common import coalesce

        for name in common_column_names:
            if name != "id":
                # Join-key columns shared by name are merged; coalesce
                # makes them usable in outer joins where one side is None.
                expressions[name] = coalesce(original_left[name], original_right[name])
        inner_table = JoinResult._join(context, **expressions)
        final_mapping = {
            colref: inner_table[name] for colref, name in colref_to_name_mapping.items()
        }
        for name in common_column_names:
            if name != "id":
                colref = inner_table[name]
                final_mapping[colref._to_internal()] = colref
        final_mapping[inner_table.id._to_internal()] = inner_table.id

        rowwise_context = clmn.JoinRowwiseContext.from_mapping(
            inner_table._id_column, final_mapping
        )
        inner_table._rowwise_context = (
            rowwise_context  # FIXME don't set _context property of table
        )

        return (inner_table, final_mapping)

    # NOTE(review): no ``self`` parameter — presumably a @staticmethod
    # upstream; confirm.
    def _table_join(
        left: Joinable,
        right: Joinable,
        *on: expr.ColumnExpression,
        mode: JoinMode,
        id: expr.ColumnReference | None = None,
        left_instance: expr.ColumnReference | None = None,
        right_instance: expr.ColumnReference | None = None,
    ) -> JoinResult:
        # Entry point used by all of Joinable's join/join_* methods.
        if left == right:
            raise ValueError(
                "Cannot join table with itself. Use <table>.copy() as one of the arguments of the join."
            )
        left_table, left_substitutions = left._substitutions()
        right_table, right_substitutions = right._substitutions()

        chained_join_desugaring = SubstitutionDesugaring(
            {**left_substitutions, **right_substitutions}
        )

        if id is not None:
            id = chained_join_desugaring.eval_expression(id)
            id_column = id._column
        else:
            id_column = None
        common_column_names: StableSet[str] = StableSet()
        if left_instance is not None and right_instance is not None:
            # Instance partitioning is just an extra equality condition.
            on = (*on, left_instance == right_instance)
        else:
            assert left_instance is None and right_instance is None
        on_ = tuple(validate_shape(cond) for cond in on)
        for cond in on_:
            cond_left = cast(expr.ColumnReference, cond._left)
            cond_right = cast(expr.ColumnReference, cond._right)
            if cond_left.name == cond_right.name:
                # Same-named join keys collapse into one output column.
                common_column_names.add(cond_left.name)

        on_ = tuple(chained_join_desugaring.eval_expression(cond) for cond in on_)

        for cond in on_:
            validate_join_condition(cond, left_table, right_table)
        on_left = tuple(
            left_table._eval(cond._left, left_table._table_restricted_context)
            for cond in on_
        )
        on_right = tuple(
            right_table._eval(cond._right, right_table._table_restricted_context)
            for cond in on_
        )

        # When the requested id comes from the right side, swap inputs so
        # the engine's id-preserving side is always the first one.
        swp = id_column is not None and id_column is right_table._id_column
        assert (
            id_column is None
            or (id_column is left_table._id_column)
            or (id_column is right_table._id_column)
        )

        left_context_table = clmn.ContextTable(universe=left._universe, columns=on_left)
        right_context_table = clmn.ContextTable(
            universe=right._universe, columns=on_right
        )

        substitution: dict[thisclass.ThisMetaclass, Joinable] = {
            thisclass.left: left,
            thisclass.right: right,
        }
        universe = JoinResult._compute_universe(
            left_table, right_table, id_column, mode
        )
        if swp:
            context = clmn.JoinContext(
                universe,
                right_table,
                left_table,
                right_context_table,
                left_context_table,
                id_column is not None,
                mode in [JoinMode.RIGHT, JoinMode.OUTER],
                mode in [JoinMode.LEFT, JoinMode.OUTER],
            )
        else:
            context = clmn.JoinContext(
                universe,
                left_table,
                right_table,
                left_context_table,
                right_context_table,
                id_column is not None,
                mode in [JoinMode.LEFT, JoinMode.OUTER],
                mode in [JoinMode.RIGHT, JoinMode.OUTER],
            )

        inner_table, columns_mapping = JoinResult._prepare_inner_table_with_mapping(
            context,
            left,
            right,
            common_column_names,
        )
        return JoinResult(
            context,
            inner_table,
            columns_mapping,
            left_table,
            right_table,
            left,
            right,
            substitution,
            common_column_names,
            mode,
        )
The provided code snippet includes necessary dependencies for implementing the `join_right` function. Write a Python function `def join_right( left: Joinable, right: Joinable, *on: expr.ColumnExpression, id: expr.ColumnReference | None = None, left_instance: expr.ColumnReference | None = None, right_instance: expr.ColumnReference | None = None, ) -> JoinResult` to solve the following problem:
Outer-joins two tables or join results. Args: self: the left side of the join, ``Table`` or ``JoinResult``. other: the right side of the join, ``Table`` or ``JoinResult``. *on: Columns to join, syntax `self.col1 == other.col2` id: optional id column of the result left_instance/right_instance: optional arguments describing partitioning of the data into separate instances Remarks: args cannot contain id column from either of tables, \ as the result table has id column with auto-generated ids; \ it can be selected by assigning it to a column with defined \ name (passed in kwargs) Behavior: - rows from the left side that were not matched with the right side are skipped - for rows from the right side that were not matched with the left side, missing values on the left are replaced with `None` - for rows that were matched the behavior is the same as that of an inner join. Returns: JoinResult: an object on which `.select()` may be called to extract relevant columns from the result of the join. Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | b ... 1 | 11 | 111 ... 2 | 12 | 112 ... 3 | 13 | 113 ... 4 | 13 | 114 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | c | d ... 1 | 11 | 211 ... 2 | 12 | 212 ... 3 | 14 | 213 ... 4 | 14 | 214 ... ''' ... ) >>> pw.debug.compute_and_print(pw.join_right(t1, t2, t1.a == t2.c ... ).select(t1.a, t2_c=t2.c, s=pw.require(pw.coalesce(t1.b,0) + t2.d,t1.id)), ... include_id=False) a | t2_c | s | 14 | | 14 | 11 | 11 | 322 12 | 12 | 324 Returns: OuterJoinResult object
Here is the function:
def join_right(
    left: Joinable,
    right: Joinable,
    *on: expr.ColumnExpression,
    id: expr.ColumnReference | None = None,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """
    Right-joins two tables or join results.

    Free-function form of ``Joinable.join_right``; delegates to
    ``left.join_right(right, ...)``.

    Args:
        left: the left side of the join, ``Table`` or ``JoinResult``.
        right: the right side of the join, ``Table`` or ``JoinResult``.
        *on: Columns to join, syntax `left.col1 == right.col2`
        id: optional id column of the result
        left_instance/right_instance: optional arguments describing partitioning of the data into separate
            instances

    Remarks: args cannot contain id column from either of tables, \
    as the result table has id column with auto-generated ids; \
    it can be selected by assigning it to a column with defined \
    name (passed in kwargs)

    Behavior:
    - rows from the left side that were not matched with the right side are skipped
    - for rows from the right side that were not matched with the left side,
      missing values on the left are replaced with `None`
    - for rows that were matched the behavior is the same as that of an inner join.

    Returns:
        JoinResult: an object on which `.select()` may be called to extract relevant
        columns from the result of the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...         | a  | b
    ...       1 | 11 | 111
    ...       2 | 12 | 112
    ...       3 | 13 | 113
    ...       4 | 13 | 114
    ...     '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...         | c  | d
    ...       1 | 11 | 211
    ...       2 | 12 | 212
    ...       3 | 14 | 213
    ...       4 | 14 | 214
    ...     '''
    ... )
    >>> pw.debug.compute_and_print(pw.join_right(t1, t2, t1.a == t2.c
    ... ).select(t1.a, t2_c=t2.c, s=pw.require(pw.coalesce(t1.b,0) + t2.d,t1.id)),
    ... include_id=False)
    a  | t2_c | s
       | 14   |
       | 14   |
    11 | 11   | 322
    12 | 12   | 324
    """
    # Thin delegation: all join logic lives on the Joinable method.
    return left.join_right(
        right, *on, id=id, left_instance=left_instance, right_instance=right_instance
    )
166,822 | from __future__ import annotations
import itertools
from collections.abc import Iterator
from functools import lru_cache
from typing import TYPE_CHECKING, Any, cast
from pathway.internals.trace import trace_user_frame
from abc import abstractmethod
import pathway.internals.column as clmn
import pathway.internals.expression as expr
from pathway.internals import thisclass
from pathway.internals.arg_handlers import (
arg_handler,
join_kwargs_handler,
reduce_args_handler,
select_args_handler,
)
from pathway.internals.column_namespace import ColumnNamespace
from pathway.internals.decorators import contextualized_operator
from pathway.internals.desugaring import (
DesugaringContext,
SubstitutionDesugaring,
TableSelectDesugaring,
combine_args_kwargs,
desugar,
)
from pathway.internals.helpers import StableSet
from pathway.internals.join_mode import JoinMode
from pathway.internals.operator_input import OperatorInput
from pathway.internals.shadows import operator as op
from pathway.internals.table_like import TableLike
from pathway.internals.type_interpreter import eval_type
from pathway.internals.universe import Universe
class Joinable(TableLike, DesugaringContext):
def _subtables(self) -> StableSet[Table]: ...
def keys(self): ...
def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table: ...
def filter(self, filter_expression: expr.ColumnExpression) -> Joinable: ...
def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference: ...
def __iter__(self) -> Iterator[expr.ColumnReference]:
return (self[name] for name in self.keys())
def _get_colref_by_name(self, name, exception_type) -> expr.ColumnReference: ...
def _operator_dependencies(self) -> StableSet[Table]: ...
def __getattr__(self, name) -> expr.ColumnReference:
"""Get columns by name.
Warning:
- Fails if it tries to access nonexistent column.
Returns:
Column expression.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | dog
... 9 | Bob | dog
... 8 | Alice | cat
... 7 | Bob | dog
... ''')
>>> t2 = t1.select(t1.age)
>>> pw.debug.compute_and_print(t2, include_id=False)
age
7
8
9
10
"""
try:
return super().__getattr__(name)
except AttributeError:
pass
return self._get_colref_by_name(name, AttributeError)
def C(self) -> ColumnNamespace:
"""Returns the namespace of all the columns of a joinable.
Allows accessing column names that might otherwise be a reserved methods.
>>> import pathway as pw
>>> tab = pw.debug.table_from_markdown('''
... age | owner | pet | filter
... 10 | Alice | dog | True
... 9 | Bob | dog | True
... 8 | Alice | cat | False
... 7 | Bob | dog | True
... ''')
>>> isinstance(tab.C.age, pw.ColumnReference)
True
>>> pw.debug.compute_and_print(tab.filter(tab.C.filter), include_id=False)
age | owner | pet | filter
7 | Bob | dog | True
9 | Bob | dog | True
10 | Alice | dog | True
"""
return ColumnNamespace(self)
def _C(self):
return self.C
def join(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
how: JoinMode = JoinMode.INNER,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""Join self with other using the given join expression.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
on: a list of column expressions. Each must have == as the top level operation
and be of the form LHS: ColumnReference == RHS: ColumnReference.
id: optional argument for id of result, can be only self.id or other.id
how: by default, inner join is performed. Possible values are JoinMode.{INNER,LEFT,RIGHT,OUTER}
correspond to inner, left, right and outer join respectively.
left_instance/right_instance: optional arguments describing partitioning of the data into
separate instances
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... age | owner | pet | size
... 10 | Alice | 3 | M
... 9 | Bob | 1 | L
... 8 | Tom | 1 | XL
... ''')
>>> t3 = t1.join(
... t2, t1.pet == t2.pet, t1.owner == t2.owner, how=pw.JoinMode.INNER
... ).select(age=t1.age, owner_name=t2.owner, size=t2.size)
>>> pw.debug.compute_and_print(t3, include_id = False)
age | owner_name | size
9 | Bob | L
"""
return JoinResult._table_join(
self,
other,
*on,
mode=how,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
def join_inner(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""Inner-joins two tables or join results.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
on: a list of column expressions. Each must have == as the top level operation
and be of the form LHS: ColumnReference == RHS: ColumnReference.
id: optional argument for id of result, can be only self.id or other.id
left_instance/right_instance: optional arguments describing partitioning of the data
into separate instances
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown('''
... age | owner | pet
... 10 | Alice | 1
... 9 | Bob | 1
... 8 | Alice | 2
... ''')
>>> t2 = pw.debug.table_from_markdown('''
... age | owner | pet | size
... 10 | Alice | 3 | M
... 9 | Bob | 1 | L
... 8 | Tom | 1 | XL
... ''')
>>> t3 = t1.join_inner(t2, t1.pet == t2.pet, t1.owner == t2.owner).select(
... age=t1.age, owner_name=t2.owner, size=t2.size
... )
>>> pw.debug.compute_and_print(t3, include_id = False)
age | owner_name | size
9 | Bob | L
"""
return JoinResult._table_join(
self,
other,
*on,
mode=JoinMode.INNER,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
def join_left(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""
Left-joins two tables or join results.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
*on: Columns to join, syntax `self.col1 == other.col2`
id: optional id column of the result
left_instance/right_instance: optional arguments describing partitioning of the data into
separate instances
Remarks:
args cannot contain id column from either of tables, \
as the result table has id column with auto-generated ids; \
it can be selected by assigning it to a column with defined \
name (passed in kwargs)
Behavior:
- for rows from the left side that were not matched with the right side,
missing values on the right are replaced with `None`
- rows from the right side that were not matched with the left side are skipped
- for rows that were matched the behavior is the same as that of an inner join.
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown(
... '''
... | a | b
... 1 | 11 | 111
... 2 | 12 | 112
... 3 | 13 | 113
... 4 | 13 | 114
... '''
... )
>>> t2 = pw.debug.table_from_markdown(
... '''
... | c | d
... 1 | 11 | 211
... 2 | 12 | 212
... 3 | 14 | 213
... 4 | 14 | 214
... '''
... )
>>> pw.debug.compute_and_print(t1.join_left(t2, t1.a == t2.c
... ).select(t1.a, t2_c=t2.c, s=pw.require(t1.b + t2.d, t2.id)),
... include_id=False)
a | t2_c | s
11 | 11 | 322
12 | 12 | 324
13 | |
13 | |
"""
return JoinResult._table_join(
self,
other,
*on,
mode=JoinMode.LEFT,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
def join_right(
self,
other: Joinable,
*on: expr.ColumnExpression,
id: expr.ColumnReference | None = None,
left_instance: expr.ColumnReference | None = None,
right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
"""
Outer-joins two tables or join results.
Args:
other: the right side of the join, ``Table`` or ``JoinResult``.
*on: Columns to join, syntax `self.col1 == other.col2`
id: optional id column of the result
left_instance/right_instance: optional arguments describing partitioning of the data into separate
instances
Remarks: args cannot contain id column from either of tables, \
as the result table has id column with auto-generated ids; \
it can be selected by assigning it to a column with defined \
name (passed in kwargs)
Behavior:
- rows from the left side that were not matched with the right side are skipped
- for rows from the right side that were not matched with the left side,
missing values on the left are replaced with `None`
- for rows that were matched the behavior is the same as that of an inner join.
Returns:
JoinResult: an object on which `.select()` may be called to extract relevant
columns from the result of the join.
Example:
>>> import pathway as pw
>>> t1 = pw.debug.table_from_markdown(
... '''
... | a | b
... 1 | 11 | 111
... 2 | 12 | 112
... 3 | 13 | 113
... 4 | 13 | 114
... '''
... )
>>> t2 = pw.debug.table_from_markdown(
... '''
... | c | d
... 1 | 11 | 211
... 2 | 12 | 212
... 3 | 14 | 213
... 4 | 14 | 214
... '''
... )
>>> pw.debug.compute_and_print(t1.join_right(t2, t1.a == t2.c
... ).select(t1.a, t2_c=t2.c, s=pw.require(pw.coalesce(t1.b,0) + t2.d,t1.id)),
... include_id=False)
a | t2_c | s
| 14 |
| 14 |
11 | 11 | 322
12 | 12 | 324
Returns:
OuterJoinResult object
"""
return JoinResult._table_join(
self,
other,
*on,
mode=JoinMode.RIGHT,
id=id,
left_instance=left_instance,
right_instance=right_instance,
)
def join_outer(
    self,
    other: Joinable,
    *on: expr.ColumnExpression,
    id: expr.ColumnReference | None = None,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """Outer-joins two tables or join results.

    Rows of either side with no partner on the other side are kept, with the
    missing side's values filled with `None`; matched rows behave exactly as
    in an inner join.

    Args:
        other: the right side of the join, ``Table`` or ``JoinResult``.
        *on: join conditions of the form `self.col1 == other.col2`.
        id: optional id column of the result.
        left_instance/right_instance: optional columns partitioning the data
            into separate instances.

    Remarks: args cannot contain the id column from either table, as the
    result table has an id column with auto-generated ids; it can be selected
    by assigning it to a column with a defined name (passed in kwargs).

    Returns:
        JoinResult: an object on which `.select()` may be called to extract
        relevant columns from the result of the join.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...   | a | b
    ... 1 | 11 | 111
    ... 2 | 12 | 112
    ... 3 | 13 | 113
    ... 4 | 13 | 114
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...   | c | d
    ... 1 | 11 | 211
    ... 2 | 12 | 212
    ... 3 | 14 | 213
    ... 4 | 14 | 214
    ... '''
    ... )
    >>> pw.debug.compute_and_print(t1.join_outer(t2, t1.a == t2.c
    ... ).select(t1.a, t2_c=t2.c, s=pw.require(t1.b + t2.d, t1.id, t2.id)),
    ... include_id=False)
    a | t2_c | s
    | 14 |
    | 14 |
    11 | 11 | 322
    12 | 12 | 324
    13 | |
    13 | |
    """
    # Delegate to the shared join implementation with OUTER semantics.
    result = JoinResult._table_join(
        self,
        other,
        *on,
        mode=JoinMode.OUTER,
        id=id,
        left_instance=left_instance,
        right_instance=right_instance,
    )
    return result
def _desugaring(self) -> TableSelectDesugaring:
    # Desugaring helper used when evaluating select-style expressions on this
    # joinable.
    return TableSelectDesugaring(self)
# Abstract: subclasses return the backing table plus a mapping that rewrites
# internal column references into expressions over that table.
def _substitutions(
    self,
) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]: ...
class JoinResult(Joinable, OperatorInput):
    """Result of a join between tables.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age owner pet
    ... 1 10 Alice 1
    ... 2 9 Bob 1
    ... 3 8 Alice 2
    ... ''')
    >>> t2 = pw.debug.table_from_markdown('''
    ... age owner pet size
    ... 11 10 Alice 3 M
    ... 12 9 Bob 1 L
    ... 13 8 Tom 1 XL
    ... ''')
    >>> joinresult= t1.join(t2, t1.pet == t2.pet, t1.owner == t2.owner) # noqa: E501
    >>> isinstance(joinresult, pw.JoinResult)
    True
    >>> pw.debug.compute_and_print(joinresult.select(t1.age, t2.size), include_id=False)
    age | size
    9 | L
    """

    # Physical table holding all joined columns, and the mapping used to
    # translate references from the original inputs into it.
    _inner_table: Table
    _columns_mapping: dict[expr.InternalColRef, expr.ColumnReference]
    _left_table: Table
    _right_table: Table
    _original_left: Joinable
    _original_right: Joinable
    # Maps pw.this / pw.left / pw.right to concrete joinables.
    _substitution: dict[thisclass.ThisMetaclass, Joinable]
    _chained_join_desugaring: SubstitutionDesugaring
    # Column names that appeared in equality conditions of the join.
    _joined_on_names: StableSet[str]
    _all_colnames: StableSet[str]
    _join_mode: JoinMode

    def __init__(
        self,
        _context: clmn.Context,
        _inner_table: Table,
        _columns_mapping: dict[expr.InternalColRef, expr.ColumnReference],
        _left_table: Table,
        _right_table: Table,
        _original_left: Joinable,
        _original_right: Joinable,
        _substitution: dict[thisclass.ThisMetaclass, Joinable],
        _joined_on_names: StableSet[str],
        _join_mode: JoinMode,
    ):
        super().__init__(_context)
        self._inner_table = _inner_table
        self._columns_mapping = _columns_mapping
        self._left_table = _left_table
        self._right_table = _right_table
        # `pw.this` always resolves to the join result itself.
        self._substitution = {**_substitution, thisclass.this: self}
        self._joined_on_names = _joined_on_names
        self._join_mode = _join_mode
        self._original_left = _original_left
        self._original_right = _original_right
        # The two sides must not share subtables, otherwise column references
        # would be ambiguous.
        assert _original_left._subtables().isdisjoint(_original_right._subtables())
        self._all_colnames = StableSet.union(
            _original_left.keys(), _original_right.keys()
        )
        self._chained_join_desugaring = SubstitutionDesugaring(self._substitutions()[1])

    # NOTE(review): takes no `self` — presumably declared @staticmethod in the
    # original source and the decorator was lost in this copy; confirm.
    def _compute_universe(
        left_table: Table,
        right_table: Table,
        id: clmn.Column | None,
        mode: JoinMode,
    ) -> Universe:
        # Reuse (or subset) a side's universe when that side's id column is
        # kept as the result id; otherwise a fresh universe with generated
        # ids is created.
        if id is left_table._id_column:
            if mode == JoinMode.LEFT:
                return left_table._universe
            elif mode == JoinMode.INNER:
                return left_table._universe.subset()
            else:
                raise KeyError("Cannot assign id's for this join type.")
        elif id is right_table._id_column:
            if mode == JoinMode.RIGHT:
                return right_table._universe
            elif mode == JoinMode.INNER:
                return right_table._universe.subset()
            else:
                raise KeyError("Cannot assign id's for this join type.")
        else:
            assert id is None
            return Universe()

    def _subtables(self) -> StableSet[Table]:
        return self._original_left._subtables() | self._original_right._subtables()

    def keys(self):
        # Columns present on both sides but not equi-joined on are ambiguous,
        # so they are excluded from the result's visible column set.
        common_colnames = self._original_left.keys() & self._original_right.keys()
        return self._all_colnames - (common_colnames - self._joined_on_names)

    def _get_colref_by_name(
        self,
        name: str,
        exception_type,
    ) -> expr.ColumnReference:
        # Resolve a column name to a reference; `exception_type` controls the
        # exception class raised for missing/ambiguous names.
        name = self._column_deprecation_rename(name)
        if name == "id":
            return self._inner_table.id
        elif name in self._joined_on_names:
            # Equi-joined columns: inner joins can read the left side
            # directly; other modes must use the coalesced inner column.
            if self._join_mode is JoinMode.INNER:
                return self._original_left[name]
            else:
                return self._inner_table[name]
        elif name in self._original_left.keys() and name in self._original_right.keys():
            raise exception_type(
                f"Column {name} appears on both left and right inputs of join."
            )
        elif name in self._original_left.keys():
            return self._original_left[name]
        elif name in self._original_right.keys():
            return self._original_right[name]
        else:
            raise exception_type(f"No column with name {name}.")

    def __getitem__(self, args: str | expr.ColumnReference) -> expr.ColumnReference:
        if isinstance(args, expr.ColumnReference):
            assert args.table is self or args.table is thisclass.this
            return self._get_colref_by_name(args.name, KeyError)
        else:
            return self._get_colref_by_name(args, KeyError)

    def select(self, *args: expr.ColumnReference, **kwargs: Any) -> Table:
        """Computes result of a join.

        Args:
            args: Column references.
            kwargs: Column expressions with their new assigned names.

        Returns:
            Table: Created table.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age | owner | pet
        ... 10 | Alice | 1
        ... 9 | Bob | 1
        ... 8 | Alice | 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ... age | owner | pet | size
        ... 10 | Alice | 3 | M
        ... 9 | Bob | 1 | L
        ... 8 | Tom | 1 | XL
        ... ''')
        >>> t3 = t1.join(t2, t1.pet == t2.pet, t1.owner == t2.owner).select(age=t1.age, owner_name=t2.owner, size=t2.size) # noqa: E501
        >>> pw.debug.compute_and_print(t3, include_id=False)
        age | owner_name | size
        9 | Bob | L
        """
        expressions: dict[str, expr.ColumnExpression] = {}
        all_args = combine_args_kwargs(args, kwargs)
        for new_name, expression in all_args.items():
            # Rewrite pw.this / left / right references into the inner table.
            expressions[new_name] = self._chained_join_desugaring.eval_expression(
                expression
            )
        return self._inner_table.select(**expressions)

    def _operator_dependencies(self) -> StableSet[Table]:
        return (
            self._left_table._operator_dependencies()
            | self._right_table._operator_dependencies()
        )

    def filter(self, filter_expression: expr.ColumnExpression) -> JoinResult:
        """Filters rows, keeping the ones satisfying the predicate.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... age owner pet
        ... 1 10 Alice 1
        ... 2 9 Bob 1
        ... 3 8 Alice 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ... age owner pet size
        ... 11 10 Alice 3 M
        ... 12 9 Bob 1 L
        ... 13 8 Tom 1 XL
        ... ''')
        >>> result = t1.join(t2).filter(t1.owner == t2.owner).select(t1.age, t2.size) # noqa: E501
        >>> pw.debug.compute_and_print(result, include_id=False)
        age | size
        8 | M
        9 | L
        10 | M
        """
        desugared_filter_expression = self._chained_join_desugaring.eval_expression(
            filter_expression
        )
        inner_table = self._inner_table.filter(desugared_filter_expression)
        # Re-point every original column reference at the filtered inner table.
        new_columns_mapping = {
            int_ref: inner_table[expression.name]
            for int_ref, expression in self._columns_mapping.items()
        }
        new_columns_mapping[inner_table.id._to_internal()] = inner_table.id
        context = clmn.JoinRowwiseContext.from_mapping(
            inner_table._id_column, new_columns_mapping
        )
        inner_table._rowwise_context = context
        # Return a new JoinResult over the filtered inner table; all other
        # bookkeeping (sides, substitutions, mode) is carried over unchanged.
        return JoinResult(
            _context=context,
            _inner_table=inner_table,
            _columns_mapping=new_columns_mapping,
            _left_table=self._left_table,
            _right_table=self._right_table,
            _original_left=self._original_left,
            _original_right=self._original_right,
            _substitution=self._substitution,
            _joined_on_names=self._joined_on_names,
            _join_mode=self._join_mode,
        )

    def groupby(
        self,
        *args: expr.ColumnReference,
        id: expr.ColumnReference | None = None,
    ) -> GroupedJoinResult:
        """Groups join result by columns from args.

        Note:
            Usually followed by `.reduce()` that aggregates the result and returns a table.

        Args:
            args: columns to group by.
            id: if provided, is the column used to set id's of the rows of the result

        Returns:
            GroupedJoinResult: Groupby object.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... cost owner pet
        ... 1 100 Alice 1
        ... 2 90 Bob 1
        ... 3 80 Alice 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ... cost owner pet size
        ... 11 100 Alice 3 M
        ... 12 90 Bob 1 L
        ... 13 80 Tom 1 XL
        ... ''')
        >>> result = (t1.join(t2, t1.owner==t2.owner).groupby(pw.this.owner)
        ... .reduce(pw.this.owner, pairs = pw.reducers.count()))
        >>> pw.debug.compute_and_print(result, include_id=False)
        owner | pairs
        Alice | 2
        Bob | 1
        """
        # Validate early with actionable messages; plain strings are a common
        # mistake (users forgetting to write pw.this.<name>).
        for arg in args:
            if not isinstance(arg, expr.ColumnReference):
                if isinstance(arg, str):
                    raise ValueError(
                        f"Expected a ColumnReference, found a string. Did you mean this.{arg} instead of {repr(arg)}?"
                    )
                else:
                    raise ValueError(
                        "In JoinResult.groupby() all arguments have to be a ColumnReference."
                    )
        # Imported lazily — presumably to avoid a circular import; confirm.
        from pathway.internals.groupbys import GroupedJoinResult

        return GroupedJoinResult(
            _join_result=self,
            _args=args,
            _id=id,
        )

    def reduce(
        self, *args: expr.ColumnReference, **kwargs: expr.ColumnExpression
    ) -> Table:
        """Reduce a join result to a single row.

        Equivalent to `self.groupby().reduce(*args, **kwargs)`.

        Args:
            args: reducer to reduce the table with
            kwargs: reducer to reduce the table with. Its key is the new name of a column.

        Returns:
            Table: Reduced table.

        Example:

        >>> import pathway as pw
        >>> t1 = pw.debug.table_from_markdown('''
        ... cost owner pet
        ... 1 100 Alice 1
        ... 2 90 Bob 1
        ... 3 80 Alice 2
        ... ''')
        >>> t2 = pw.debug.table_from_markdown('''
        ... cost owner pet size
        ... 11 100 Alice 3 M
        ... 12 90 Bob 1 L
        ... 13 80 Tom 1 XL
        ... ''')
        >>> result = t1.join(t2, t1.owner==t2.owner).reduce(total_pairs = pw.reducers.count())
        >>> pw.debug.compute_and_print(result, include_id=False)
        total_pairs
        3
        """
        for arg in args:
            if not isinstance(arg, expr.ColumnReference):
                if isinstance(arg, str):
                    raise ValueError(
                        f"Expected a ColumnReference, found a string. Did you mean this.{arg} instead of {repr(arg)}?"
                    )
                else:
                    raise ValueError(
                        "In JoinResult.reduce() all positional arguments have to be a ColumnReference."
                    )
        return self.groupby().reduce(*args, **kwargs)

    def _substitutions(
        self,
    ) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnExpression]]:
        return self._inner_table, {
            int_ref: expression for int_ref, expression in self._columns_mapping.items()
        }

    # NOTE(review): no `self` — presumably a lost @staticmethod; confirm.
    def _join(
        context: clmn.JoinContext, *args: expr.ColumnReference, **kwargs: Any
    ) -> Table:
        """Used internally to create an internal Table containing result of a join."""
        columns: dict[str, clmn.Column] = {}
        all_args = combine_args_kwargs(args, kwargs)
        for new_name, expression in all_args.items():
            columns[new_name] = expression._column_with_expression_cls(
                context=context,
                universe=context.universe,
                expression=expression,
            )
        from pathway.internals.table import Table

        return Table(
            _columns=columns,
            _context=context,
        )

    # NOTE(review): no `self` — presumably a lost @staticmethod; confirm.
    def _prepare_inner_table_with_mapping(
        context: clmn.JoinContext,
        original_left: Joinable,
        original_right: Joinable,
        common_column_names: StableSet[str],
    ) -> tuple[Table, dict[expr.InternalColRef, expr.ColumnReference]]:
        # Builds the physical joined table. Every column of both sides is
        # materialized under a generated unique name (_pw_<n>); equi-joined
        # common columns get an extra coalesced column under their own name.
        left_table, left_substitutions = original_left._substitutions()
        right_table, right_substitutions = original_right._substitutions()
        cnt = itertools.count(0)
        expressions: dict[str, expr.ColumnExpression] = {}
        colref_to_name_mapping: dict[expr.InternalColRef, str] = {}
        for table, subs in [
            (left_table, left_substitutions),
            (right_table, right_substitutions),
        ]:
            if len(subs) == 0:  # tables have empty subs, so set them here
                for ref in table:
                    subs[ref._to_internal()] = ref
            subs_total = subs | {table.id._to_internal(): table.id}
            for int_ref, expression in subs_total.items():
                inner_name = f"_pw_{next(cnt)}"
                expressions[inner_name] = expression
                colref_to_name_mapping[int_ref] = inner_name
        from pathway.internals.common import coalesce

        for name in common_column_names:
            if name != "id":
                expressions[name] = coalesce(original_left[name], original_right[name])
        inner_table = JoinResult._join(context, **expressions)
        final_mapping = {
            colref: inner_table[name] for colref, name in colref_to_name_mapping.items()
        }
        for name in common_column_names:
            if name != "id":
                colref = inner_table[name]
                final_mapping[colref._to_internal()] = colref
        final_mapping[inner_table.id._to_internal()] = inner_table.id
        rowwise_context = clmn.JoinRowwiseContext.from_mapping(
            inner_table._id_column, final_mapping
        )
        inner_table._rowwise_context = (
            rowwise_context  # FIXME don't set _context property of table
        )
        return (inner_table, final_mapping)

    # NOTE(review): no `self` — presumably a lost @staticmethod; confirm.
    def _table_join(
        left: Joinable,
        right: Joinable,
        *on: expr.ColumnExpression,
        mode: JoinMode,
        id: expr.ColumnReference | None = None,
        left_instance: expr.ColumnReference | None = None,
        right_instance: expr.ColumnReference | None = None,
    ) -> JoinResult:
        # Shared implementation behind join/join_left/join_right/join_outer.
        if left == right:
            raise ValueError(
                "Cannot join table with itself. Use <table>.copy() as one of the arguments of the join."
            )
        left_table, left_substitutions = left._substitutions()
        right_table, right_substitutions = right._substitutions()
        chained_join_desugaring = SubstitutionDesugaring(
            {**left_substitutions, **right_substitutions}
        )
        if id is not None:
            id = chained_join_desugaring.eval_expression(id)
            id_column = id._column
        else:
            id_column = None
        common_column_names: StableSet[str] = StableSet()
        # Instance columns act as an implicit extra equality condition.
        if left_instance is not None and right_instance is not None:
            on = (*on, left_instance == right_instance)
        else:
            assert left_instance is None and right_instance is None
        on_ = tuple(validate_shape(cond) for cond in on)
        # Columns equi-joined under the same name on both sides are exposed
        # once (coalesced) in the result.
        for cond in on_:
            cond_left = cast(expr.ColumnReference, cond._left)
            cond_right = cast(expr.ColumnReference, cond._right)
            if cond_left.name == cond_right.name:
                common_column_names.add(cond_left.name)
        on_ = tuple(chained_join_desugaring.eval_expression(cond) for cond in on_)
        for cond in on_:
            validate_join_condition(cond, left_table, right_table)
        on_left = tuple(
            left_table._eval(cond._left, left_table._table_restricted_context)
            for cond in on_
        )
        on_right = tuple(
            right_table._eval(cond._right, right_table._table_restricted_context)
            for cond in on_
        )
        # When the result id comes from the right table, the engine join is
        # performed with sides swapped (and LEFT/RIGHT semantics mirrored).
        swp = id_column is not None and id_column is right_table._id_column
        assert (
            id_column is None
            or (id_column is left_table._id_column)
            or (id_column is right_table._id_column)
        )
        left_context_table = clmn.ContextTable(universe=left._universe, columns=on_left)
        right_context_table = clmn.ContextTable(
            universe=right._universe, columns=on_right
        )
        substitution: dict[thisclass.ThisMetaclass, Joinable] = {
            thisclass.left: left,
            thisclass.right: right,
        }
        universe = JoinResult._compute_universe(
            left_table, right_table, id_column, mode
        )
        if swp:
            context = clmn.JoinContext(
                universe,
                right_table,
                left_table,
                right_context_table,
                left_context_table,
                id_column is not None,
                mode in [JoinMode.RIGHT, JoinMode.OUTER],
                mode in [JoinMode.LEFT, JoinMode.OUTER],
            )
        else:
            context = clmn.JoinContext(
                universe,
                left_table,
                right_table,
                left_context_table,
                right_context_table,
                id_column is not None,
                mode in [JoinMode.LEFT, JoinMode.OUTER],
                mode in [JoinMode.RIGHT, JoinMode.OUTER],
            )
        inner_table, columns_mapping = JoinResult._prepare_inner_table_with_mapping(
            context,
            left,
            right,
            common_column_names,
        )
        return JoinResult(
            context,
            inner_table,
            columns_mapping,
            left_table,
            right_table,
            left,
            right,
            substitution,
            common_column_names,
            mode,
        )
The provided code snippet includes necessary dependencies for implementing the `join_outer` function. Write a Python function `def join_outer( left: Joinable, right: Joinable, *on: expr.ColumnExpression, id: expr.ColumnReference | None = None, left_instance: expr.ColumnReference | None = None, right_instance: expr.ColumnReference | None = None, ) -> JoinResult` to solve the following problem:
Outer-joins two tables or join results. Args: self: the left side of the join, ``Table`` or ``JoinResult``. other: the right side of the join, ``Table`` or ``JoinResult``. *on: Columns to join, syntax `self.col1 == other.col2` id: optional id column of the result instance: optional argument describing partitioning of the data into separate instances Remarks: args cannot contain id column from either of tables, \ as the result table has id column with auto-generated ids; \ it can be selected by assigning it to a column with defined \ name (passed in kwargs) Behavior: - for rows from the left side that were not matched with the right side, missing values on the right are replaced with `None` - for rows from the right side that were not matched with the left side, missing values on the left are replaced with `None` - for rows that were matched the behavior is the same as that of an inner join. Returns: JoinResult: an object on which `.select()` may be called to extract relevant columns from the result of the join. Example: >>> import pathway as pw >>> t1 = pw.debug.table_from_markdown( ... ''' ... | a | b ... 1 | 11 | 111 ... 2 | 12 | 112 ... 3 | 13 | 113 ... 4 | 13 | 114 ... ''' ... ) >>> t2 = pw.debug.table_from_markdown( ... ''' ... | c | d ... 1 | 11 | 211 ... 2 | 12 | 212 ... 3 | 14 | 213 ... 4 | 14 | 214 ... ''' ... ) >>> pw.debug.compute_and_print(pw.join_outer(t1, t2, t1.a == t2.c ... ).select(t1.a, t2_c=t2.c, s=pw.require(t1.b + t2.d, t1.id, t2.id)), ... include_id=False) a | t2_c | s | 14 | | 14 | 11 | 11 | 322 12 | 12 | 324 13 | | 13 | |
Here is the function:
def join_outer(
    left: Joinable,
    right: Joinable,
    *on: expr.ColumnExpression,
    id: expr.ColumnReference | None = None,
    left_instance: expr.ColumnReference | None = None,
    right_instance: expr.ColumnReference | None = None,
) -> JoinResult:
    """Outer-joins two tables or join results.

    Free-function form of ``Joinable.join_outer``: forwards every argument to
    ``left.join_outer`` unchanged.

    Args:
        left: the left side of the join, ``Table`` or ``JoinResult``.
        right: the right side of the join, ``Table`` or ``JoinResult``.
        *on: join conditions of the form `left.col1 == right.col2`.
        id: optional id column of the result.
        left_instance/right_instance: optional columns partitioning the data
            into separate instances.

    Remarks: args cannot contain the id column from either table, as the
    result table has an id column with auto-generated ids; it can be selected
    by assigning it to a column with a defined name (passed in kwargs).

    Behavior:
    - unmatched rows of either side are kept, with the missing side's values
      replaced with `None`
    - matched rows behave exactly as in an inner join.

    Returns:
        JoinResult: call `.select()` on it to extract the relevant columns.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown(
    ...     '''
    ...   | a | b
    ... 1 | 11 | 111
    ... 2 | 12 | 112
    ... 3 | 13 | 113
    ... 4 | 13 | 114
    ... '''
    ... )
    >>> t2 = pw.debug.table_from_markdown(
    ...     '''
    ...   | c | d
    ... 1 | 11 | 211
    ... 2 | 12 | 212
    ... 3 | 14 | 213
    ... 4 | 14 | 214
    ... '''
    ... )
    >>> pw.debug.compute_and_print(pw.join_outer(t1, t2, t1.a == t2.c
    ... ).select(t1.a, t2_c=t2.c, s=pw.require(t1.b + t2.d, t1.id, t2.id)),
    ... include_id=False)
    a | t2_c | s
    | 14 |
    | 14 |
    11 | 11 | 322
    12 | 12 | 324
    13 | |
    13 | |
    """
    return left.join_outer(
        right,
        *on,
        id=id,
        left_instance=left_instance,
        right_instance=right_instance,
    )
166,823 | from __future__ import annotations
import collections
import datetime
import typing
from abc import ABC, abstractmethod
from enum import Enum
from functools import cached_property
from types import EllipsisType, NoneType, UnionType
import numpy as np
import numpy.typing as npt
import pandas as pd
from pathway.engine import PathwayType
from pathway.internals import api, datetime_types, json as js
class DType(ABC):
    """Abstract base of all Pathway column dtypes; instances are interned."""

    # Global intern pool: each (subclass, constructor args) pair maps to a
    # single shared instance, so dtypes compare cheaply.
    _cache: dict[typing.Any, DType] = {}

    def to_engine(self) -> api.PathwayType | None:
        # None means "no exact engine counterpart"; see map_to_engine().
        return None

    def map_to_engine(self) -> api.PathwayType:
        # Fall back to the engine's ANY type when there is no exact mapping.
        return self.to_engine() or api.PathwayType.ANY

    def is_value_compatible(self, arg) -> bool: ...

    def _set_args(self, *args): ...

    def __new__(cls, *args):
        # Interning constructor: build at most one instance per (cls, args);
        # _set_args initializes the instance before it is published.
        key = (cls, args)
        if key not in DType._cache:
            ret = super().__new__(cls)
            ret._set_args(*args)
            cls._cache[key] = ret
        return DType._cache[key]

    def __class_getitem__(cls, args):
        # Allow subscription syntax: Cls[a, b] is equivalent to Cls(a, b).
        if isinstance(args, tuple):
            return cls(*args)
        else:
            return cls(args)

    def equivalent_to(self, other: DType) -> bool:
        return dtype_equivalence(self, other)

    def is_subclass_of(self, other: DType) -> bool:
        return dtype_issubclass(self, other)

    def typehint(self) -> typing.Any: ...
NONE: DType = _NoneDType()
class Optional(DType):
    """DType meaning "wrapped dtype or None"."""

    # The dtype this Optional wraps.
    wrapped: DType

    def __init__(self, arg):
        super().__init__()

    def __repr__(self):
        return f"Optional({self.wrapped})"

    def _set_args(self, wrapped):
        self.wrapped = wrapped

    def __new__(cls, arg: DType) -> DType:  # type:ignore[misc]
        # Normalization: Optional(NONE) stays NONE, Optional(Optional(T))
        # collapses, Optional(ANY) stays ANY — Optional never nests.
        arg = wrap(arg)
        if arg == NONE or isinstance(arg, Optional) or arg == ANY:
            return arg
        return super().__new__(cls, arg)

    def is_value_compatible(self, arg):
        # None is always acceptable; anything else must fit the wrapped dtype.
        if arg is None:
            return True
        return self.wrapped.is_value_compatible(arg)

    def typehint(self) -> type[UnionType]:
        return self.wrapped.typehint | None
def unoptionalize(dtype: DType) -> DType:
    """Strip one Optional layer; non-Optional dtypes are returned unchanged."""
    if isinstance(dtype, Optional):
        return dtype.wrapped
    return dtype
The provided code snippet includes necessary dependencies for implementing the `unoptionalize_pair` function. Write a Python function `def unoptionalize_pair(left_dtype: DType, right_dtype: DType) -> tuple[DType, DType]` to solve the following problem:
Unpacks type out of typing.Optional and matches a second type with it if it is an EmptyType.
Here is the function:
def unoptionalize_pair(left_dtype: DType, right_dtype: DType) -> tuple[DType, DType]:
    """
    Unpacks type out of typing.Optional and matches
    a second type with it if it is an EmptyType.
    """
    # When one side is the None dtype and the other is Optional, let the
    # None side inherit the Optional type before both are unwrapped.
    if isinstance(right_dtype, Optional) and left_dtype == NONE:
        left_dtype = right_dtype
    if isinstance(left_dtype, Optional) and right_dtype == NONE:
        right_dtype = left_dtype
    return unoptionalize(left_dtype), unoptionalize(right_dtype)
166,824 | from __future__ import annotations
from typing import Any, TypeVar, overload
from pathway.internals import (
datasink as datasinks,
datasource as datasources,
operator as operators,
parse_graph as parse_graphs,
schema as schemas,
table as tables,
)
# NOTE(review): the first two definitions have `...` bodies and identical
# names — presumably @overload-decorated in the original source with the
# decorators lost in this copy; confirm.
def table_from_datasource(
    datasource: datasources.DataSource,
    debug_datasource: datasources.StaticDataSource | None = None,
) -> tables.Table[Any]: ...


def table_from_datasource(
    datasource: datasources.DataSource,
    debug_datasource: datasources.StaticDataSource | None = None,
    table_cls: type[TTable] = ...,
) -> TTable: ...


def table_from_datasource(
    datasource: datasources.DataSource,
    debug_datasource: datasources.StaticDataSource | None = None,
    table_cls: type[tables.Table] = tables.Table,
) -> tables.Table:
    """Register an input operator reading from the given data source and
    return the table it produces (optionally with a debug replacement)."""
    return parse_graphs.G.add_operator(
        lambda id: operators.InputOperator(datasource, id, debug_datasource),
        lambda operator: operator(table_cls),
    )
def empty_from_schema(schema: type[schemas.Schema]) -> tables.Table:
    """Create an empty table whose columns follow the given schema."""
    source = datasources.EmptyDataSource(schema=schema)
    return table_from_datasource(source)
166,825 | from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
import pandas as pd
from pathway.internals import api
from pathway.internals.schema import Schema, schema_from_pandas
class StaticDataSource(DataSource, ABC):
    """Base class for data sources whose contents are fixed up front."""

    # The backing data; concrete subclasses narrow the type.
    data: Any

    def is_bounded(self) -> bool:
        # Static data is finite by definition.
        return True
class PandasDataSource(StaticDataSource):
    """Static data source backed by a pandas DataFrame."""

    data: pd.DataFrame

    def is_append_only(self) -> bool:
        """Return True when every row is an insertion (diff column absent or all 1)."""
        if api.DIFF_PSEUDOCOLUMN not in self.data.columns:
            return True
        diffs = self.data[api.DIFF_PSEUDOCOLUMN]
        return all(diffs == 1)
def schema_from_pandas(
    dframe: pd.DataFrame,
    *,
    id_from: list[str] | None = None,
    name: str | None = None,
    exclude_columns: set[str] | None = None,
) -> type[Schema]:
    """Build a Schema type describing the columns of ``dframe``.

    Args:
        dframe: source DataFrame whose column dtypes are converted.
        id_from: column names to mark as primary keys (default: none).
        name: schema name; derived from the column list when omitted.
        exclude_columns: column names to leave out of the schema.

    Returns:
        A Schema subclass with one definition per retained column.
    """
    # A shared mutable default (`set()`) is a classic Python pitfall (B006);
    # accept None and normalize instead so each call gets its own set.
    excluded = exclude_columns if exclude_columns is not None else set()
    if name is None:
        name = "schema_from_pandas(" + str(dframe.columns) + ")"
    if id_from is None:
        id_from = []
    # Distinct loop variables avoid shadowing the `name` parameter.
    columns: dict[str, ColumnDefinition] = {
        col: column_definition(dtype=_type_converter(dframe[col]))
        for col in dframe.columns
        if col not in excluded
    }
    for key in id_from:
        # Re-declare primary-key columns; `replace` keeps the inferred dtype.
        columns[key] = dataclasses.replace(columns[key], primary_key=True)
    return schema_builder(columns=columns, name=name)
def debug_datasource(debug_data) -> StaticDataSource | None:
    """Wrap debug data into a static data source; None passes through."""
    if debug_data is None:
        return None
    if isinstance(debug_data, pd.DataFrame):
        frame = debug_data.copy()
        return PandasDataSource(data=frame, schema=schema_from_pandas(debug_data))
    raise TypeError("not supported type of debug data")
166,826 | from datetime import datetime, timedelta
from typing import Any
import pandas as pd
from dateutil import tz
from pathway.internals import json
def _timedelta_to_rust(td: timedelta) -> int:
    """Convert a timedelta into a whole number of nanoseconds."""
    # Integer division by one microsecond, then scale up to nanoseconds.
    micros = td // MICROSECOND
    return micros * 1000
The provided code snippet includes necessary dependencies for implementing the `_datetime_to_rust` function. Write a Python function `def _datetime_to_rust(dt: datetime) -> tuple[int, bool]` to solve the following problem:
Returns (timestamp [ns], is_timezone_aware)
Here is the function:
def _datetime_to_rust(dt: datetime) -> tuple[int, bool]:
    """Return (nanoseconds since the Unix epoch, is_timezone_aware)."""
    aware = dt.tzinfo is not None
    # The epoch must match the input's awareness so the subtraction is legal.
    if aware:
        origin = datetime(1970, 1, 1, tzinfo=tz.UTC)
    else:
        origin = datetime(1970, 1, 1)
    return _timedelta_to_rust(dt - origin), aware
166,827 | from datetime import datetime, timedelta
from typing import Any
import pandas as pd
from dateutil import tz
from pathway.internals import json
The provided code snippet includes necessary dependencies for implementing the `_pd_timestamp_to_rust` function. Write a Python function `def _pd_timestamp_to_rust(ts: pd.Timestamp) -> tuple[int, bool]` to solve the following problem:
Returns (timestamp [ns], is_timezone_aware)
Here is the function:
def _pd_timestamp_to_rust(ts: pd.Timestamp) -> tuple[int, bool]:
"""Returns (timestamp [ns], is_timezone_aware)"""
return ts.value, ts.tz is not None | Returns (timestamp [ns], is_timezone_aware) |
166,828 | from datetime import datetime, timedelta
from typing import Any
import pandas as pd
from dateutil import tz
from pathway.internals import json
The provided code snippet includes necessary dependencies for implementing the `_pd_timedelta_to_rust` function. Write a Python function `def _pd_timedelta_to_rust(td: pd.Timedelta) -> int` to solve the following problem:
Returns duration in ns
Here is the function:
def _pd_timedelta_to_rust(td: pd.Timedelta) -> int:
"""Returns duration in ns"""
return td.value | Returns duration in ns |
166,829 | from datetime import datetime, timedelta
from typing import Any
import pandas as pd
from dateutil import tz
from pathway.internals import json
The provided code snippet includes necessary dependencies for implementing the `_pd_timestamp_from_naive_ns` function. Write a Python function `def _pd_timestamp_from_naive_ns(timestamp: int) -> pd.Timestamp` to solve the following problem:
Accepts timestamp in ns
Here is the function:
def _pd_timestamp_from_naive_ns(timestamp: int) -> pd.Timestamp:
"""Accepts timestamp in ns"""
return pd.Timestamp(timestamp, tz=None) | Accepts timestamp in ns |
166,830 | from datetime import datetime, timedelta
from typing import Any
import pandas as pd
from dateutil import tz
from pathway.internals import json
The provided code snippet includes necessary dependencies for implementing the `_pd_timestamp_from_utc_ns` function. Write a Python function `def _pd_timestamp_from_utc_ns(timestamp: int) -> pd.Timestamp` to solve the following problem:
Accepts timestamp in ns
Here is the function:
def _pd_timestamp_from_utc_ns(timestamp: int) -> pd.Timestamp:
    """Build a UTC-aware Timestamp from nanoseconds since the epoch."""
    utc_ts = pd.Timestamp(timestamp, tz=tz.UTC)
    return utc_ts
166,831 | from datetime import datetime, timedelta
from typing import Any
import pandas as pd
from dateutil import tz
from pathway.internals import json
The provided code snippet includes necessary dependencies for implementing the `_pd_timedelta_from_ns` function. Write a Python function `def _pd_timedelta_from_ns(duration: int) -> pd.Timedelta` to solve the following problem:
Accepts duration in ns
Here is the function:
def _pd_timedelta_from_ns(duration: int) -> pd.Timedelta:
"""Accepts duration in ns"""
return pd.Timedelta(duration) | Accepts duration in ns |
166,832 | from datetime import datetime, timedelta
from typing import Any
import pandas as pd
from dateutil import tz
from pathway.internals import json
import json as _json
The provided code snippet includes necessary dependencies for implementing the `_parse_to_json` function. Write a Python function `def _parse_to_json(value: str) -> json.Json` to solve the following problem:
Parse string to value wrapped in pw.Json
Here is the function:
def _parse_to_json(value: str) -> json.Json:
    """Parse a JSON-formatted string into a pw.Json wrapper."""
    parsed = json.Json.parse(value)
    return parsed
166,833 | from datetime import datetime, timedelta
from typing import Any
import pandas as pd
from dateutil import tz
from pathway.internals import json
import json as _json
The provided code snippet includes necessary dependencies for implementing the `_value_to_json` function. Write a Python function `def _value_to_json(value: json.JsonValue) -> json.Json` to solve the following problem:
Returns value wrapped in pw.Json
Here is the function:
def _value_to_json(value: json.JsonValue) -> json.Json:
    """Wrap a plain JSON-compatible value in pw.Json."""
    wrapped = json.Json(value)
    return wrapped
166,834 | from datetime import datetime, timedelta
from typing import Any
import pandas as pd
from dateutil import tz
from pathway.internals import json
import json as _json
The provided code snippet includes necessary dependencies for implementing the `_json_dumps` function. Write a Python function `def _json_dumps(obj: Any) -> str` to solve the following problem:
Serialize obj as a JSON formatted string.
Here is the function:
def _json_dumps(obj: Any) -> str:
    """Serialize ``obj`` to a JSON-formatted string via ``pw.Json``."""
    serialized = json.Json.dumps(obj)
    return serialized
166,835 | from __future__ import annotations
from typing import Any, Protocol
from pathway.internals import datasink
from pathway.internals.api import Pointer
from pathway.internals.table_io import table_to_datasink
class OnFinishCallback(Protocol):
    """
    The callback function to be called when the stream of changes ends. It will be called \
    on each engine worker separately.
    """

    # Structural (duck-typed) protocol: any zero-argument callable conforms.
    def __call__(self) -> None:
        """
        The callable part of the callback. It will be called without arguments and its
        return result won't be used by the engine.
        """
        ...
class OnChangeCallback(Protocol):
    """
    The callback to be called on every change in the table. It is required to be
    callable and to accept four parameters: the key, the row changed, the time of the
    change in milliseconds and the flag stating if the change had been an addition
    of the row.
    """

    # Structural protocol: implementations must use these exact parameter names,
    # as the engine invokes the callback with keyword arguments.
    def __call__(
        self,
        key: Pointer,
        row: dict[str, Any],
        time: int,
        is_addition: bool,
    ) -> None:
        """
        The callable part of the callback.

        Args:
            key: the key of the changed row;
            row: the changed row as a dict mapping from the field name to the value;
            time: the time of the modification, also can be referred as minibatch ID of \
            the change;
            is_addition: boolean value, equals to true if the row is inserted into the \
            table, false otherwise. Please note that update is basically two operations: the \
            deletion of the old value and the insertion of a new value, which happen within a single \
            transaction;

        Returns:
            None
        """
        ...
class OnTimeEndCallback(Protocol):
    """
    The callback to be called on every time finished. It is required
    to accept one parameter: time.
    """

    # Structural protocol: any single-argument callable taking an int conforms.
    def __call__(self, time: int) -> None:
        """
        The callable part of the callback.

        Args:
            time: the time finished

        Returns:
            None
        """
        ...
def table_to_datasink(
    table: tables.Table, datasink: datasinks.DataSink, *, special: bool = False
) -> operators.OutputOperator:
    """Register ``datasink`` as an output of ``table`` on the global parse graph."""

    def build(id):
        # Operator construction is deferred until the graph assigns an id.
        return operators.OutputOperator(datasink, id)

    def connect(operator):
        return operator(table)

    return parse_graphs.G.add_operator(build, connect, special=special)
The provided code snippet includes necessary dependencies for implementing the `subscribe` function. Write a Python function `def subscribe( table, *, skip_persisted_batch: bool, on_change: OnChangeCallback, on_time_end: OnTimeEndCallback = lambda time: None, on_end: OnFinishCallback = lambda: None, ) -> None` to solve the following problem:
Calls a callback function on_change on every change happening in table. This method is similar to the one we expose to the user but provides more parameters for internal usage. Args: table: the table to subscribe. skip_persisted_batch: whether the output for fully-persisted data should be ignored in case the program re-runs. The default usage is True (as not outputting things twice is required from persistence). However, it can be overridden, which is required by some parts of internal functionality. on_change: the callback function to be called on every change in the table. The function is required to accept four parameters: the key, the row changed, the time of the change in milliseconds and the flag stating if the change had been an addition of the row. These parameters of the callback are expected to have names row, time and is_addition respectively. on_time_end: the callback function to be called on each closed time of computation. on_end: the callback function to be called when the stream of changes ends. Returns: None
Here is the function:
def subscribe(
    table,
    *,
    skip_persisted_batch: bool,
    on_change: OnChangeCallback,
    on_time_end: OnTimeEndCallback = lambda time: None,
    on_end: OnFinishCallback = lambda: None,
) -> None:
    """
    Calls ``on_change`` on every change happening in ``table``. Internal variant of
    the user-facing subscribe, with extra knobs for internal usage.

    Args:
        table: the table to subscribe.
        skip_persisted_batch: whether the output for fully-persisted data should be
            skipped when the program re-runs. Normally True (persistence must not
            output things twice), but some internal functionality overrides it.
        on_change: called on every change in the table; must accept the key, the
            changed row, the time of the change in milliseconds and a flag stating
            whether the change was an addition. The callback parameters are expected
            to be named ``row``, ``time`` and ``is_addition``.
        on_time_end: called on each closed time of computation.
        on_end: called when the stream of changes ends.

    Returns:
        None
    """

    def translate_change(key: Pointer, values: list[Any], time: int, diff: int) -> None:
        # The engine reports raw column values in schema order plus a +1/-1 diff;
        # repackage them as a field-name -> value dict and a boolean addition flag
        # before handing them to the user-supplied callback.
        row = dict(zip(table._columns.keys(), values))
        return on_change(key=key, row=row, time=time, is_addition=(diff == 1))

    table_to_datasink(
        table,
        datasink.CallbackDataSink(
            translate_change, on_time_end, on_end, skip_persisted_batch
        ),
    )
166,836 | from __future__ import annotations
from collections.abc import Callable, MutableMapping, Sequence
from typing import Any
def as_arg_tuple(obj) -> ArgTuple:
    """Coerce ``obj`` to an ``ArgTuple``.

    An ``ArgTuple`` passes through unchanged; mappings, sequences and scalars
    each get the matching ``ArgTuple`` flavour.
    """
    if isinstance(obj, ArgTuple):
        return obj
    if isinstance(obj, MutableMapping):
        return MappingArgTuple(obj)
    if isinstance(obj, Sequence):
        # Positional elements are keyed by their stringified index.
        return TupleArgTuple({str(index): element for index, element in enumerate(obj)})
    return ScalarArgTuple({"0": obj})
def wrap_arg_tuple(func):
    """Decorator: normalize ``func``'s result through ``as_arg_tuple`` and
    collapse single-element results via ``scalar_or_tuple``."""

    def wrapper(*args, **kwargs):
        raw_result = func(*args, **kwargs)
        return as_arg_tuple(raw_result).scalar_or_tuple()

    return wrapper
166,837 | import dataclasses
import warnings
from typing import Any
import pathway.internals as pw
from pathway.internals import api, dtype as dt
from pathway.internals._io_helpers import _form_value_fields
from pathway.internals.api import ConnectorMode, PathwayType, ReadMethod
from pathway.internals.schema import ColumnDefinition, Schema
def check_deprecated_kwargs(
    kwargs: dict[str, Any], deprecated_kwarg_names: list[str], stacklevel: int = 2
):
    """Warn about (and drop) deprecated keyword arguments, then reject leftovers.

    Every name from ``deprecated_kwarg_names`` that is present in ``kwargs``
    triggers a ``DeprecationWarning`` and is removed from ``kwargs`` in place.
    If anything remains in ``kwargs`` afterwards, a ``TypeError`` is raised.
    """
    for deprecated_name in deprecated_kwarg_names:
        if deprecated_name not in kwargs:
            continue
        warnings.warn(
            f"'{deprecated_name}' is deprecated and will be ignored",
            DeprecationWarning,
            # +1 so the warning points at the caller of our caller.
            stacklevel=stacklevel + 1,
        )
        del kwargs[deprecated_name]
    if kwargs:
        unexpected_arg_names = ", ".join(repr(name) for name in kwargs)
        raise TypeError(f"Got unexpected keyword arguments: {unexpected_arg_names}")
166,838 | import dataclasses
import warnings
from typing import Any
import pathway.internals as pw
from pathway.internals import api, dtype as dt
from pathway.internals._io_helpers import _form_value_fields
from pathway.internals.api import ConnectorMode, PathwayType, ReadMethod
from pathway.internals.schema import ColumnDefinition, Schema
METADATA_COLUMN_NAME = "_metadata"
SUPPORTED_INPUT_FORMATS: set[str] = {
"csv",
"json",
"plaintext",
"raw",
"binary",
"plaintext_by_file",
}
class RawDataSchema(pw.Schema):
data: Any
class MetadataSchema(Schema):
_metadata: dict
def get_data_format_type(format: str, supported_formats: set[str]):
if format not in _DATA_FORMAT_MAPPING or format not in supported_formats:
raise ValueError(f"data format `{format}` not supported")
return _DATA_FORMAT_MAPPING[format]
class CsvParserSettings:
    """
    Class representing settings for the CSV parser.

    Args:
        delimiter: Field delimiter to use when parsing CSV.
        quote: Quote character to use when parsing CSV.
        escape: What character to use for escaping fields in CSV.
        enable_double_quote_escapes: Enable escapes of double quotes.
        enable_quoting: Enable quoting for the fields.
        comment_character: If specified, the lines starting with the comment \
        character will be treated as comments and therefore, will be ignored by \
        parser
    """

    def __init__(
        self,
        delimiter=",",
        quote='"',
        escape=None,
        enable_double_quote_escapes=True,
        enable_quoting=True,
        comment_character=None,
    ):
        # Thin wrapper: the user-facing settings are translated positionally
        # into the engine-side api.CsvParserSettings object.
        self.api_settings = api.CsvParserSettings(
            delimiter,
            quote,
            escape,
            enable_double_quote_escapes,
            enable_quoting,
            comment_character,
        )
def read_schema(
    *,
    schema: type[Schema] | None,
    value_columns: list[str] | None = None,
    primary_key: list[str] | None = None,
    types: dict[str, api.PathwayType] | None = None,
    default_values: dict[str, Any] | None = None,
    _stacklevel: int = 1,
) -> tuple[type[Schema], dict[str, Any]]:
    """Resolve the input schema and build its engine-side description.

    Returns the resolved schema class together with a dict of keyword arguments
    (key field names and value fields) to be forwarded to ``api.DataFormat``.
    """
    resolved_schema = _read_schema(
        schema=schema,
        value_columns=value_columns,
        primary_key=primary_key,
        types=types,
        default_values=default_values,
        _stacklevel=_stacklevel + 1,
    )
    # There is a distinction between an empty set of columns denoting
    # the primary key and None. If any (including empty) set of keys is
    # provided, then it will be used to compute the primary key.
    api_description = {
        "key_field_names": resolved_schema.primary_key_columns(),
        "value_fields": _form_value_fields(resolved_schema),
    }
    return resolved_schema, api_description
def assert_schema_or_value_columns_not_none(
    schema: type[Schema] | None,
    value_columns: list[str] | None,
    data_format_type: str | None = None,
):
    """Raise ``ValueError`` when neither ``schema`` nor ``value_columns`` is given.

    For the "dsv" format the error message additionally points the user to
    ``pw.schema_from_csv`` for generating a schema from a CSV file.
    """
    if schema is not None or value_columns is not None:
        return
    if data_format_type == "dsv":
        raise ValueError(
            "Neither schema nor value_columns were specified. "
            "Consider using `pw.schema_from_csv` for generating schema from a CSV file"
        )
    raise ValueError("Neither schema nor value_columns were specified")
class Schema(metaclass=SchemaMetaclass):
    """Base class to inherit from when creating schemas.
    All schemas should be subclasses of this one.

    Example:

    >>> import pathway as pw
    >>> t1 = pw.debug.table_from_markdown('''
    ... age owner pet
    ... 1 10 Alice dog
    ... 2 9 Bob dog
    ... 3 8 Alice cat
    ... 4 7 Bob dog''')
    >>> t1.schema
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>}>
    >>> issubclass(t1.schema, pw.Schema)
    True
    >>> class NewSchema(pw.Schema):
    ...     foo: int
    >>> SchemaSum = NewSchema | t1.schema
    >>> SchemaSum
    <pathway.Schema types={'age': <class 'int'>, 'owner': <class 'str'>, 'pet': <class 'str'>, 'foo': <class 'int'>}>
    """

    def __init_subclass__(cls, /, append_only: bool | None = None, **kwargs) -> None:
        # NOTE(review): `append_only` is accepted as a class-creation keyword but is
        # not used in this body — presumably consumed by SchemaMetaclass; confirm.
        # Remaining keywords are forwarded up the MRO unchanged.
        super().__init_subclass__(**kwargs)
def construct_schema_and_data_format(
    format: str,
    *,
    schema: type[Schema] | None = None,
    with_metadata: bool = False,
    csv_settings: CsvParserSettings | None = None,
    json_field_paths: dict[str, str] | None = None,
    value_columns: list[str] | None = None,
    primary_key: list[str] | None = None,
    types: dict[str, PathwayType] | None = None,
    default_values: dict[str, Any] | None = None,
    _stacklevel: int = 1,
) -> tuple[type[Schema], api.DataFormat]:
    """Resolve the input schema and build the matching ``api.DataFormat``.

    Dispatches on the internal data-format type ("identity" for plaintext-like
    formats, "dsv" for CSV, "jsonlines" for JSON) and validates that only
    arguments relevant to the chosen format were passed.

    Raises:
        ValueError: on an unsupported format, on arguments that do not apply to
            the chosen format, or when neither ``schema`` nor ``value_columns``
            is given for a schema-bearing format.
    """
    data_format_type = get_data_format_type(format, SUPPORTED_INPUT_FORMATS)

    if data_format_type == "identity":
        # Plaintext-like formats carry a single raw payload column, so any
        # schema-shaping argument is contradictory and rejected explicitly.
        # NOTE: locals() must be captured before local temporaries are bound.
        kwargs = locals()
        unexpected_params = [
            "schema",
            "value_columns",
            "primary_key",
            "csv_settings",
            "json_field_paths",
            "types",
        ]
        for param in unexpected_params:
            if param in kwargs and kwargs[param] is not None:
                raise ValueError(f"Unexpected argument for plaintext format: {param}")
        schema = RawDataSchema
        if with_metadata:
            # Metadata is appended to the schema as an extra column.
            schema |= MetadataSchema
        schema, api_schema = read_schema(
            schema=schema,
            value_columns=None,
            primary_key=None,
            types=None,
            default_values=None,
        )
        return schema, api.DataFormat(
            format_type=data_format_type,
            **api_schema,
            # "binary" is the only identity format whose payload is not UTF-8 text.
            parse_utf8=(format != "binary"),
        )

    assert_schema_or_value_columns_not_none(schema, value_columns, data_format_type)
    if with_metadata:
        # Metadata becomes an extra column on top of whichever schema source is used.
        if schema is not None:
            schema |= MetadataSchema
        elif value_columns is not None:
            value_columns.append(METADATA_COLUMN_NAME)
        else:
            raise ValueError("Neither schema nor value_columns were specified")
    schema, api_schema = read_schema(
        schema=schema,
        value_columns=value_columns,
        primary_key=primary_key,
        types=types,
        default_values=default_values,
        _stacklevel=_stacklevel + 1,
    )
    if data_format_type == "dsv":
        if json_field_paths is not None:
            raise ValueError("Unexpected argument for csv format: json_field_paths")
        return schema, api.DataFormat(
            **api_schema,
            format_type=data_format_type,
            delimiter=",",
        )
    elif data_format_type == "jsonlines":
        if csv_settings is not None:
            raise ValueError("Unexpected argument for json format: csv_settings")
        return schema, api.DataFormat(
            **api_schema,
            format_type=data_format_type,
            column_paths=json_field_paths,
        )
    else:
        raise ValueError(f"data format `{format}` not supported")
166,839 | import dataclasses
import warnings
from typing import Any
import pathway.internals as pw
from pathway.internals import api, dtype as dt
from pathway.internals._io_helpers import _form_value_fields
from pathway.internals.api import ConnectorMode, PathwayType, ReadMethod
from pathway.internals.schema import ColumnDefinition, Schema
def internal_connector_mode(mode: str | api.ConnectorMode) -> api.ConnectorMode:
    """Translate a user-provided mode into an ``api.ConnectorMode``.

    Args:
        mode: either an ``api.ConnectorMode`` (returned unchanged) or one of the
            mode names registered in ``_INPUT_MODES_MAPPING``.

    Raises:
        ValueError: if ``mode`` is a string that does not name a known mode.
    """
    if isinstance(mode, api.ConnectorMode):
        return mode
    # Test membership explicitly instead of truth-testing the mapped value:
    # a falsy enum member must not be mistaken for an unknown mode.
    if mode not in _INPUT_MODES_MAPPING:
        raise ValueError(
            "Unknown mode: {}. Only {} are supported".format(
                mode, ", ".join(_INPUT_MODES_MAPPING.keys())
            )
        )
    return _INPUT_MODES_MAPPING[mode]
def internal_read_method(format: str) -> ReadMethod:
    """Pick how a source of the given format should be consumed: whole files at
    once for binary-like formats, line by line otherwise."""
    whole_file_formats = ("binary", "plaintext_by_file")
    return ReadMethod.FULL if format in whole_file_formats else ReadMethod.BY_LINE
class CsvParserSettings:
    """
    Class representing settings for the CSV parser.

    Args:
        delimiter: Field delimiter to use when parsing CSV.
        quote: Quote character to use when parsing CSV.
        escape: What character to use for escaping fields in CSV.
        enable_double_quote_escapes: Enable escapes of double quotes.
        enable_quoting: Enable quoting for the fields.
        comment_character: If specified, the lines starting with the comment \
        character will be treated as comments and therefore, will be ignored by \
        parser
    """

    def __init__(
        self,
        delimiter=",",
        quote='"',
        escape=None,
        enable_double_quote_escapes=True,
        enable_quoting=True,
        comment_character=None,
    ):
        # Thin wrapper: the user-facing settings are translated positionally
        # into the engine-side api.CsvParserSettings object.
        self.api_settings = api.CsvParserSettings(
            delimiter,
            quote,
            escape,
            enable_double_quote_escapes,
            enable_quoting,
            comment_character,
        )
def construct_s3_data_storage(
    path: str,
    rust_engine_s3_settings: api.AwsS3Settings,
    format: str,
    mode: str | api.ConnectorMode,
    *,
    csv_settings: CsvParserSettings | None = None,
    persistent_id: str | None = None,
):
    """Build the ``api.DataStorage`` describing an S3 input source.

    CSV sources get the dedicated ``s3_csv`` storage type (carrying the parser
    settings); every other format uses the generic ``s3`` storage with a read
    method derived from the format.
    """
    connector_mode = internal_connector_mode(mode)
    if format == "csv":
        parser_settings = csv_settings.api_settings if csv_settings else None
        return api.DataStorage(
            storage_type="s3_csv",
            path=path,
            aws_s3_settings=rust_engine_s3_settings,
            csv_parser_settings=parser_settings,
            mode=connector_mode,
            persistent_id=persistent_id,
        )
    return api.DataStorage(
        storage_type="s3",
        path=path,
        aws_s3_settings=rust_engine_s3_settings,
        mode=connector_mode,
        read_method=internal_read_method(format),
        persistent_id=persistent_id,
    )
166,840 | from __future__ import annotations
from pathway.internals.table_subscription import (
OnChangeCallback,
OnFinishCallback,
OnTimeEndCallback,
subscribe as internal_subscribe,
)
class OnFinishCallback(Protocol):
    """
    The callback function to be called when the stream of changes ends. It will be called \
    on each engine worker separately.
    """

    # Structural (duck-typed) protocol: any zero-argument callable conforms.
    def __call__(self) -> None:
        """
        The callable part of the callback. It will be called without arguments and its
        return result won't be used by the engine.
        """
        ...
class OnChangeCallback(Protocol):
    """
    The callback to be called on every change in the table. It is required to be
    callable and to accept four parameters: the key, the row changed, the time of the
    change in milliseconds and the flag stating if the change had been an addition
    of the row.
    """

    # Structural protocol: implementations must use these exact parameter names,
    # as the engine invokes the callback with keyword arguments.
    def __call__(
        self,
        key: Pointer,
        row: dict[str, Any],
        time: int,
        is_addition: bool,
    ) -> None:
        """
        The callable part of the callback.

        Args:
            key: the key of the changed row;
            row: the changed row as a dict mapping from the field name to the value;
            time: the time of the modification, also can be referred as minibatch ID of \
            the change;
            is_addition: boolean value, equals to true if the row is inserted into the \
            table, false otherwise. Please note that update is basically two operations: the \
            deletion of the old value and the insertion of a new value, which happen within a single \
            transaction;

        Returns:
            None
        """
        ...
class OnTimeEndCallback(Protocol):
    """
    The callback to be called on every time finished. It is required
    to accept one parameter: time.
    """

    # Structural protocol: any single-argument callable taking an int conforms.
    def __call__(self, time: int) -> None:
        """
        The callable part of the callback.

        Args:
            time: the time finished

        Returns:
            None
        """
        ...
The provided code snippet includes necessary dependencies for implementing the `subscribe` function. Write a Python function `def subscribe( table, on_change: OnChangeCallback, on_end: OnFinishCallback = lambda: None, on_time_end: OnTimeEndCallback = lambda time: None, )` to solve the following problem:
Calls a callback function on_change on every change happening in table. Args: table: the table to subscribe. on_change: the callback to be called on every change in the table. The function is required to accept four parameters: the key, the row changed, the time of the change in microseconds and the flag stating if the change had been an addition of the row. These parameters of the callback are expected to have names key, row, time and is_addition respectively. on_end: the callback to be called when the stream of changes ends. on_time_end: the callback function to be called on each closed time of computation. Returns: None Example: >>> from pathway.tests import utils # NODOCS >>> utils.skip_on_multiple_workers() # NODOCS >>> import pathway as pw ... >>> table = pw.debug.table_from_markdown(''' ... | pet | owner | age | __time__ | __diff__ ... 1 | dog | Alice | 10 | 0 | 1 ... 2 | cat | Alice | 8 | 2 | 1 ... 3 | dog | Bob | 7 | 4 | 1 ... 2 | cat | Alice | 8 | 6 | -1 ... ''') ... >>> def on_change(key: pw.Pointer, row: dict, time: int, is_addition: bool): ... print(f"{row}, {time}, {is_addition}") ... >>> def on_end(): ... print("End of stream.") ... >>> pw.io.subscribe(table, on_change, on_end) >>> pw.run(monitoring_level=pw.MonitoringLevel.NONE) {'pet': 'dog', 'owner': 'Alice', 'age': 10}, 0, True {'pet': 'cat', 'owner': 'Alice', 'age': 8}, 2, True {'pet': 'dog', 'owner': 'Bob', 'age': 7}, 4, True {'pet': 'cat', 'owner': 'Alice', 'age': 8}, 6, False End of stream.
Here is the function:
def subscribe(
    table,
    on_change: OnChangeCallback,
    on_end: OnFinishCallback = lambda: None,
    on_time_end: OnTimeEndCallback = lambda time: None,
):
    """
    Calls a callback function on_change on every change happening in table.

    Args:
        table: the table to subscribe.
        on_change: the callback to be called on every change in the table. The
            function is required to accept four parameters: the key, the row changed, the time
            of the change in microseconds and the flag stating if the change had been an
            addition of the row. These parameters of the callback are expected to have
            names key, row, time and is_addition respectively.
        on_end: the callback to be called when the stream of changes ends.
        on_time_end: the callback function to be called on each closed time of computation.

    Returns:
        None

    Example:

    >>> from pathway.tests import utils # NODOCS
    >>> utils.skip_on_multiple_workers() # NODOCS
    >>> import pathway as pw
    ...
    >>> table = pw.debug.table_from_markdown('''
    ...      | pet  | owner   | age | __time__ | __diff__
    ...    1 | dog  | Alice   | 10  | 0        | 1
    ...    2 | cat  | Alice   | 8   | 2        | 1
    ...    3 | dog  | Bob     | 7   | 4        | 1
    ...    2 | cat  | Alice   | 8   | 6        | -1
    ... ''')
    ...
    >>> def on_change(key: pw.Pointer, row: dict, time: int, is_addition: bool):
    ...     print(f"{row}, {time}, {is_addition}")
    ...
    >>> def on_end():
    ...     print("End of stream.")
    ...
    >>> pw.io.subscribe(table, on_change, on_end)
    >>> pw.run(monitoring_level=pw.MonitoringLevel.NONE)
    {'pet': 'dog', 'owner': 'Alice', 'age': 10}, 0, True
    {'pet': 'cat', 'owner': 'Alice', 'age': 8}, 2, True
    {'pet': 'dog', 'owner': 'Bob', 'age': 7}, 4, True
    {'pet': 'cat', 'owner': 'Alice', 'age': 8}, 6, False
    End of stream.
    """
    # Thin wrapper over the internal subscribe. skip_persisted_batch=True means a
    # re-run never re-outputs batches that were already persisted in a previous run.
    internal_subscribe(
        table,
        skip_persisted_batch=True,
        on_change=on_change,
        on_time_end=on_time_end,
        on_end=on_end,
    )
166,841 | import asyncio
import copy
import json
import logging
import threading
import time
from collections import OrderedDict
from collections.abc import Awaitable, Callable
from typing import Any, Sequence
from uuid import uuid4
from warnings import warn
import aiohttp_cors
import yaml
from aiohttp import web
import pathway.internals as pw
import pathway.io as io
from pathway.internals import api
from pathway.internals.api import Pointer, unsafe_make_pointer
from pathway.internals.dtype import unoptionalize
from pathway.internals.runtime_type_check import check_arg_types
The provided code snippet includes necessary dependencies for implementing the `_request_scheme` function. Write a Python function `def _request_scheme(request: web.Request)` to solve the following problem:
Get request scheme taking into account the forwarded headers.
Here is the function:
def _request_scheme(request: web.Request):
    """
    Get request scheme taking into account the forwarded headers.

    The first forwarded-scheme header carrying a recognized value wins;
    otherwise the transport-level scheme of the request is returned.
    """
    forwarded_headers = (
        "X-Forwarded-Proto",
        "X-Scheme",
        "X-Forwarded-Scheme",
    )
    recognized_schemes = ("http", "https")
    for header_name in forwarded_headers:
        value = request.headers.get(header_name)
        if value is None:
            continue
        value = value.lower()
        if value in recognized_schemes:
            return value
    # fallback, doesn't work for forwarded scenarios
    return request.scheme
166,842 | import asyncio
import copy
import json
import logging
import threading
import time
from collections import OrderedDict
from collections.abc import Awaitable, Callable
from typing import Any, Sequence
from uuid import uuid4
from warnings import warn
import aiohttp_cors
import yaml
from aiohttp import web
import pathway.internals as pw
import pathway.io as io
from pathway.internals import api
from pathway.internals.api import Pointer, unsafe_make_pointer
from pathway.internals.dtype import unoptionalize
from pathway.internals.runtime_type_check import check_arg_types
class EndpointDocumentation:
    """
    The settings for the automatic OpenAPI v3 docs generation for an endpoint.

    Args:
        summary: Short endpoint description shown as a hint in the endpoints list.
        description: Comprehensive description for the endpoint.
        tags: Tags for grouping the endpoints.
        method_types: If set, Pathway will document only the given method types. This \
        way, one can exclude certain endpoints and methods from being documented.
        examples: Optional request examples to embed into the generated schema.
    """

    # Boilerplate response section shared by all generated endpoint docs.
    DEFAULT_RESPONSES_DESCRIPTION = {
        "200": {
            "description": "OK",
        },
        "400": {
            "description": "The request is incorrect. Please check if "
            "it complies with the auto-generated and Pathway input "
            "table schemas"
        },
    }

    def __init__(
        self,
        *,
        summary: str | None = None,
        description: str | None = None,
        tags: Sequence[str] | None = None,
        method_types: Sequence[str] | None = None,
        examples: EndpointExamples | None = None,
    ):
        self.summary = summary
        self.description = description
        self.tags = tags
        self.method_types = None
        if method_types is not None:
            # Normalize to uppercase so later membership checks are case-insensitive.
            self.method_types = set([x.upper() for x in method_types])
        self.examples = examples

    def generate_docs(self, format, method, schema) -> dict:
        """Build the OpenAPI description for one (method, schema) pair.

        Returns {} when the method is excluded via ``method_types``; otherwise a
        single-entry dict keyed by the lowercase method name.
        """
        if not self._is_method_exposed(method):
            return {}
        if method.upper() == "GET":
            # Get requests receive parameters from CGI, so their schema description
            # is a bit different from the POST / PUT / PATCH
            endpoint_description = {
                "parameters": self._construct_openapi_get_request_schema(schema),
                # disable yaml optimisation to avoid
                # "instance type (string) does not match any allowed primitive type"
                # error from openapi validator
                "responses": copy.deepcopy(self.DEFAULT_RESPONSES_DESCRIPTION),
            }
        else:
            if format == "raw":
                content_header = "text/plain"
                openapi_schema = self._construct_openapi_plaintext_schema(schema)
            elif format == "custom":
                content_header = "application/json"
                openapi_schema = self._construct_openapi_json_schema(schema)
            else:
                raise ValueError(f"Unknown endpoint input format: {format}")
            schema_and_examples = {"schema": openapi_schema}
            if self.examples:
                schema_and_examples["examples"] = self.examples._openapi_description()
            content_description = {content_header: schema_and_examples}
            endpoint_description = {
                "requestBody": {
                    "content": content_description,
                },
                "responses": self.DEFAULT_RESPONSES_DESCRIPTION,
            }
        # Optional top-level traits are only emitted when configured.
        if self.tags is not None:
            endpoint_description["tags"] = list(self.tags)
        if self.description is not None:
            endpoint_description["description"] = self.description
        if self.summary is not None:
            endpoint_description["summary"] = self.summary
        return {method.lower(): endpoint_description}

    def _is_method_exposed(self, method):
        # None means "document every method".
        return self.method_types is None or method.upper() in self.method_types

    def _add_optional_traits_if_present(self, field_description, props):
        # Copy per-column example/description onto the OpenAPI field, if given.
        if props.example is not None:
            field_description["example"] = props.example
        if props.description is not None:
            field_description["description"] = props.description

    def _construct_openapi_plaintext_schema(self, schema) -> dict:
        """Describe the single 'raw' payload column as an OpenAPI scalar schema."""
        query_column = schema.columns().get(QUERY_SCHEMA_COLUMN)
        if query_column is None:
            raise ValueError(
                "'raw' endpoint input format requires 'value' column in schema"
            )
        # NOTE(review): these lookups are keyed by the column object itself, unlike
        # the dtype-based lookups in the other builders — confirm this is intended.
        openapi_type = _ENGINE_TO_OPENAPI_TYPE.get(query_column, "string")
        openapi_format = _ENGINE_TO_OPENAPI_FORMAT.get(query_column)
        description = {
            "type": openapi_type,
        }
        if openapi_format:
            description["format"] = openapi_format
        if query_column.has_default_value():
            description["default"] = query_column.default_value
        self._add_optional_traits_if_present(description, query_column)
        return description

    def _construct_openapi_get_request_schema(self, schema) -> list:
        """Describe each schema column as an OpenAPI query parameter (GET requests)."""
        parameters = []
        for name, props in schema.columns().items():
            field_description = {
                "in": "query",
                "name": name,
                # A column without a default must be supplied by the caller.
                "required": not props.has_default_value(),
            }
            self._add_optional_traits_if_present(field_description, props)
            openapi_type = _ENGINE_TO_OPENAPI_TYPE.get(
                unoptionalize(props.dtype).map_to_engine()
            )
            if openapi_type:
                field_description["schema"] = {
                    "type": openapi_type,
                }
            else:
                # Get request params without type make schema invalid
                field_description["schema"] = {"type": "string"}
            parameters.append(field_description)
        return parameters

    def _construct_openapi_json_schema(self, schema) -> dict:
        """Describe the schema as an OpenAPI JSON object (POST/PUT/PATCH bodies)."""
        properties = {}
        required = []
        additional_properties = False
        for name, props in schema.columns().items():
            openapi_type = _ENGINE_TO_OPENAPI_TYPE.get(
                unoptionalize(props.dtype).map_to_engine()
            )
            if openapi_type is None:
                # not something we can clearly define the type for, so it will be
                # read as an additional property
                additional_properties = True
                continue
            field_description = {
                "type": openapi_type,
            }
            if not props.has_default_value():
                required.append(name)
            else:
                field_description["default"] = props.default_value
            self._add_optional_traits_if_present(field_description, props)
            openapi_format = _ENGINE_TO_OPENAPI_FORMAT.get(props.dtype.map_to_engine())
            if openapi_format is not None:
                field_description["format"] = openapi_format
            properties[name] = field_description
        result = {
            "type": "object",
            "properties": properties,
            "additionalProperties": additional_properties,
        }
        if required:
            result["required"] = required
        return result
class PathwayWebserver:
    """
    The basic configuration class for ``pw.io.http.rest_connector``.
    It contains essential information about the host and the port on which the
    webserver should run and accept queries.
    Args:
        host: TCP/IP host or a sequence of hosts for the created endpoint.
        port: Port for the created endpoint.
        with_schema_endpoint: If set to True, the server will also provide ``/_schema`` \
endpoint containing Open API 3.0.3 schema for the handlers generated with \
``pw.io.http.rest_connector`` calls.
        with_cors: If set to True, the server will allow cross-origin requests on the \
added endpoints.
    """

    _host: str
    _port: int
    # Shared registry of in-flight requests (keyed by row pointer); written by the
    # HTTP handlers and completed by the response writer in rest_connector.
    _tasks: dict[Any, Any]
    _loop: asyncio.AbstractEventLoop
    _app: web.Application
    _is_launched: bool

    def __init__(self, host, port, with_schema_endpoint=True, with_cors=False):
        self._host = host
        self._port = port
        self._tasks = {}
        # Dedicated event loop: results are delivered to waiting handlers from
        # other threads via call_soon_threadsafe on this loop.
        self._loop = asyncio.new_event_loop()
        self._app = web.Application()
        self._registered_routes = {}
        if with_cors:
            self._cors = aiohttp_cors.setup(self._app)
        else:
            self._cors = None
        self._is_launched = False
        # Protects _is_launched so that several connectors sharing this
        # webserver start the aiohttp app exactly once.
        self._app_start_mutex = threading.Lock()
        self._openapi_description = OrderedDict(
            {
                "openapi": "3.0.3",
                "info": {
                    "title": "Pathway-generated openapi description",
                    "version": "1.0.0",
                },
                "paths": {},
                "servers": [{"url": f"http://{host}:{port}/"}],
            }
        )
        if with_schema_endpoint:
            self._add_endpoint_to_app("GET", "/_schema", self._schema_handler)

    def _add_endpoint_to_app(self, method, route, handler):
        # Register `handler` for (method, route), reusing the route resource if
        # another HTTP method was already attached to the same route.
        handler = self._wrap_handler_with_logger(handler)
        if route not in self._registered_routes:
            app_resource = self._app.router.add_resource(route)
            if self._cors is not None:
                app_resource = self._cors.add(app_resource)
            self._registered_routes[route] = app_resource
        app_resource_endpoint = self._registered_routes[route].add_route(
            method, handler
        )
        if self._cors is not None:
            self._cors.add(
                app_resource_endpoint,
                {
                    "*": aiohttp_cors.ResourceOptions(
                        expose_headers="*", allow_headers="*"
                    )
                },
            )

    def _wrap_handler_with_logger(
        self, handler_method: Callable[[web.Request], Awaitable[web.Response]]
    ):
        # Wrap a handler so every request carries an X-Pathway-Session id and the
        # response status is logged, including HTTP errors and crashes.
        async def wrapped_handler(request: web.Request):
            session_id = "uuid-" + str(uuid4())
            logging_context = _LoggingContext(request, session_id)
            try:
                headers = request.headers.copy()  # type:ignore
                headers["X-Pathway-Session"] = session_id
                request = request.clone(headers=headers)
                response = await handler_method(request)
            except web.HTTPError as http_error:
                logging_context.log_response(status=http_error.status_code)
                raise
            except Exception:
                logging.exception("Error in HTTP handler")
                # the server framework translates all non-native
                # exceptions into responses with code 500 so we use it
                logging_context.log_response(status=500)
                raise
            logging_context.log_response(response.status)
            return response

        return wrapped_handler

    async def _schema_handler(self, request: web.Request):
        # Serves the aggregated OpenAPI description; yaml by default, json on
        # request via ?format=json.
        origin = f"{_request_scheme(request)}://{request.host}"
        format = request.query.get("format", "yaml")
        if format == "json":
            return web.json_response(
                status=200,
                data=self.openapi_description_json(origin),
                dumps=pw.Json.dumps,
            )
        elif format != "yaml":
            raise web.HTTPBadRequest(
                reason=f"Unknown format: '{format}'. Supported formats: 'json', 'yaml'"
            )
        return web.Response(
            status=200,
            text=self.openapi_description(origin),
            content_type="text/x-yaml",
        )

    def _register_endpoint(
        self, route, handler, format, schema, methods, documentation
    ) -> None:
        # Attach the handler for every requested HTTP method and merge the
        # per-method OpenAPI docs into the route's entry.
        endpoint_docs = {}
        for method in methods:
            self._add_endpoint_to_app(method, route, handler)
            method_docs = documentation.generate_docs(format, method, schema)
            if method_docs:
                endpoint_docs.update(method_docs)
        if endpoint_docs:
            self._openapi_description["paths"][route] = endpoint_docs  # type: ignore[index]

    def _run(self) -> None:
        # Start the aiohttp application exactly once; later calls are no-ops.
        # Note: web.run_app blocks the calling thread while serving.
        self._app_start_mutex.acquire()
        if not self._is_launched:
            self._is_launched = True
            self._app_start_mutex.release()
            web.run_app(
                self._app,
                host=self._host,
                port=self._port,
                loop=self._loop,
                handle_signals=False,
            )
        else:
            self._app_start_mutex.release()

    def openapi_description_json(self, origin) -> dict:
        """
        Returns Open API description for the added set of endpoints in JSON format.
        """
        result = copy.deepcopy(self._openapi_description)
        result["servers"] = [{"url": origin}]
        return result

    def openapi_description(self, origin):
        """
        Returns Open API description for the added set of endpoints in yaml format.
        """
        return yaml.dump(dict(self.openapi_description_json(origin)), sort_keys=False)
class RestServerSubject(io.python.ConnectorSubject):
    """Connector subject that feeds HTTP request payloads into a Pathway table.

    Every accepted request is added as a row keyed by a fresh pointer; the
    handler then waits until the response writer publishes a result for that
    pointer and replies with it.
    """

    _webserver: PathwayWebserver
    _schema: type[pw.Schema]
    _delete_completed_queries: bool
    _format: str

    def __init__(
        self,
        webserver: PathwayWebserver,
        route: str,
        methods: Sequence[str],
        schema: type[pw.Schema],
        delete_completed_queries: bool,
        format: str = "raw",
        request_validator: Callable | None = None,
        documentation: EndpointDocumentation = EndpointDocumentation(),
    ) -> None:
        super().__init__()
        self._webserver = webserver
        # Shared with the webserver so the response writer can find our tasks.
        self._tasks = webserver._tasks
        self._schema = schema
        self._delete_completed_queries = delete_completed_queries
        self._format = format
        self._request_validator = request_validator
        webserver._register_endpoint(
            route, self.handle, format, schema, methods, documentation
        )

    def run(self):
        # Blocks while the shared webserver is serving.
        self._webserver._run()

    async def handle(self, request: web.Request):
        """Validate one request, push it into the table and await its result."""
        id = unsafe_make_pointer(uuid4().int)
        if self._format == "raw":
            payload = {QUERY_SCHEMA_COLUMN: await request.text()}
        elif self._format == "custom":
            try:
                payload = await request.json()
            except json.decoder.JSONDecodeError:
                payload = {}
            # Query-string parameters complement the body but never override it.
            query_params = request.query
            for param, value in query_params.items():
                if param not in payload:
                    payload[param] = value
        logging.info(
            json.dumps(
                {
                    "_type": "request_payload",
                    "session_id": request.headers.get("X-Pathway-Session"),
                    "payload": payload,
                }
            )
        )
        self._verify_payload(payload)
        if self._request_validator:
            try:
                validator_ret = self._request_validator(payload, request.headers)
                if validator_ret is not None:
                    raise Exception(validator_ret)
            except Exception as e:
                record = {
                    "_type": "validator_rejected_http_request",
                    "error": str(e),
                    "payload": payload,
                }
                logging.error(json.dumps(record))
                raise web.HTTPBadRequest(reason=str(e))
        self._cast_types_to_schema(payload)
        event = asyncio.Event()
        data = pw.Json.dumps(payload).encode()
        self._tasks[id] = {
            "event": event,
            "result": "-PENDING-",
        }
        self._add(id, data)
        response = await self._fetch_response(id, event)
        if self._delete_completed_queries:
            self._remove(id, data)
        return web.json_response(status=200, data=response, dumps=pw.Json.dumps)

    async def _fetch_response(self, id, event) -> Any:
        # Wait until the response writer sets the event, then hand the result back.
        await event.wait()
        task = self._tasks.pop(id)
        return task["result"]

    def _cast_types_to_schema(self, payload: dict):
        """Best-effort, in-place cast of payload values to the schema's types."""
        dtypes = self._schema._dtypes()
        for column, dtype in dtypes.items():
            if payload.get(column) is None:
                continue
            # Initialize before the try: if unoptionalize() itself raises, the
            # previous code referenced an unbound `exact_type` in the handler.
            exact_type = None
            try:
                exact_type = unoptionalize(dtype).typehint
                payload[column] = exact_type(payload[column])
            except Exception:
                logging.exception(
                    f"Failed to cast column '{column}' to type '{exact_type or dtype}'"
                )

    def _verify_payload(self, payload: dict):
        """Reject requests missing a required (non-defaulted) schema column."""
        defaults = self._schema.default_values()
        for column in self._schema.keys():
            if column not in payload and column not in defaults:
                raise web.HTTPBadRequest(reason=f"`{column}` is required")

    def _deletions_enabled(self) -> bool:
        return self._delete_completed_queries

    def _is_finite(self):
        # The HTTP request stream never ends on its own.
        return False
The provided code snippet includes necessary dependencies for implementing the `rest_connector` function. Write a Python function `def rest_connector( host: str | None = None, port: int | str | None = None, *, webserver: PathwayWebserver | None = None, route: str = "/", schema: type[pw.Schema] | None = None, methods: Sequence[str] = ("POST",), autocommit_duration_ms=1500, documentation: EndpointDocumentation = EndpointDocumentation(), keep_queries: bool | None = None, delete_completed_queries: bool | None = None, request_validator: Callable | None = None, ) -> tuple[pw.Table, Callable]` to solve the following problem:
Runs a lightweight HTTP server and inputs a collection from the HTTP endpoint, configured by the parameters of this method. On the output, the method provides a table and a callable, which needs to accept the result table of the computation, which entries will be tracked and put into respective request's responses. Args: webserver: configuration object containing host and port information. You only \ need to create only one instance of this class per single host-port pair; route: route which will be listened to by the web server; schema: schema of the resulting table; methods: HTTP methods that this endpoint will accept; autocommit_duration_ms: the maximum time between two commits. Every autocommit_duration_ms milliseconds, the updates received by the connector are committed and pushed into Pathway's computation graph; keep_queries: whether to keep queries after processing; defaults to False. [deprecated] delete_completed_queries: whether to send a deletion entry after the query is processed. Allows to remove it from the system if it is stored by operators such as ``join`` or ``groupby``; request_validator: a callable that can verify requests. A return value of `None` accepts payload. Any other returned value is treated as error and used as the response. Any exception is caught and treated as validation failure. Returns: table: the table read; response_writer: a callable, where the result table should be provided. The \ result table must contain columns `query_id` corresponding to the primary key of an \ object from the input table and `result`, corresponding to the endpoint's return value. Example: Let's consider the following example: there is a collection of words that are \ received through HTTP REST endpoint `/uppercase` located at `127.0.0.1`, port `9999`. \ The Pathway program processes this table by converting these words to the upper case. \ This conversion result must be provided to the user on the output. 
Then, you can proceed with the following REST connector configuration code. First, the schema and the webserver object need to be created: >>> import pathway as pw >>> class WordsSchema(pw.Schema): ... word: str ... >>> >>> webserver = pw.io.http.PathwayWebserver(host="127.0.0.1", port=9999) Then, the endpoint that inputs this collection can be configured: >>> words, response_writer = pw.io.http.rest_connector( ... webserver=webserver, ... route="/uppercase", ... schema=WordsSchema, ... ) Finally, you can define the logic that takes the input table `words`, calculates the result in the form of a table, and provides it for the endpoint's output: >>> uppercase_words = words.select( ... query_id=words.id, ... result=pw.apply(lambda x: x.upper(), pw.this.word) ... ) >>> response_writer(uppercase_words) Please note that you don't need to create another web server object if you need to \ have more than one endpoint running on the same host and port. For example, if you need \ to create another endpoint that converts words to lower case, in the same way, you \ need to reuse the existing `webserver` object. That is, the configuration would start \ with: >>> words_for_lowercase, response_writer_for_lowercase = pw.io.http.rest_connector( ... webserver=webserver, ... route="/lowercase", ... schema=WordsSchema, ... )
Here is the function:
def rest_connector(
    host: str | None = None,
    port: int | str | None = None,
    *,
    webserver: PathwayWebserver | None = None,
    route: str = "/",
    schema: type[pw.Schema] | None = None,
    methods: Sequence[str] = ("POST",),
    autocommit_duration_ms=1500,
    documentation: EndpointDocumentation = EndpointDocumentation(),
    keep_queries: bool | None = None,
    delete_completed_queries: bool | None = None,
    request_validator: Callable | None = None,
) -> tuple[pw.Table, Callable]:
    """
    Runs a lightweight HTTP server and inputs a collection from the HTTP endpoint,
    configured by the parameters of this method.

    On the output, the method provides a table and a callable, which needs to accept
    the result table of the computation, which entries will be tracked and put into
    respective request's responses.

    Args:
        webserver: configuration object containing host and port information. You \
need to create only one instance of this class per single host-port pair;
        route: route which will be listened to by the web server;
        schema: schema of the resulting table;
        methods: HTTP methods that this endpoint will accept;
        autocommit_duration_ms: the maximum time between two commits. Every
            autocommit_duration_ms milliseconds, the updates received by the connector are
            committed and pushed into Pathway's computation graph;
        keep_queries: whether to keep queries after processing; defaults to False. [deprecated]
        delete_completed_queries: whether to send a deletion entry after the query is processed.
            Allows to remove it from the system if it is stored by operators such as
            ``join`` or ``groupby``;
        request_validator: a callable that can verify requests. A return value of `None`
            accepts payload. Any other returned value is treated as error and used as
            the response. Any exception is caught and treated as validation failure.

    Returns:
        table: the table read;
        response_writer: a callable, where the result table should be provided. The \
result table must contain columns `query_id` corresponding to the primary key of an \
object from the input table and `result`, corresponding to the endpoint's return value.

    Example:

    Let's consider the following example: there is a collection of words that are \
received through HTTP REST endpoint `/uppercase` located at `127.0.0.1`, port `9999`. \
The Pathway program processes this table by converting these words to the upper case. \
This conversion result must be provided to the user on the output.

    Then, you can proceed with the following REST connector configuration code.
    First, the schema and the webserver object need to be created:

    >>> import pathway as pw
    >>> class WordsSchema(pw.Schema):
    ...     word: str
    ...
    >>>
    >>> webserver = pw.io.http.PathwayWebserver(host="127.0.0.1", port=9999)

    Then, the endpoint that inputs this collection can be configured:

    >>> words, response_writer = pw.io.http.rest_connector(
    ...     webserver=webserver,
    ...     route="/uppercase",
    ...     schema=WordsSchema,
    ... )

    Finally, you can define the logic that takes the input table `words`, calculates
    the result in the form of a table, and provides it for the endpoint's output:

    >>> uppercase_words = words.select(
    ...     query_id=words.id,
    ...     result=pw.apply(lambda x: x.upper(), pw.this.word)
    ... )
    >>> response_writer(uppercase_words)

    Please note that you don't need to create another web server object if you need to \
have more than one endpoint running on the same host and port. For example, if you need \
to create another endpoint that converts words to lower case, in the same way, you \
need to reuse the existing `webserver` object. That is, the configuration would start \
with:

    >>> words_for_lowercase, response_writer_for_lowercase = pw.io.http.rest_connector(
    ...     webserver=webserver,
    ...     route="/lowercase",
    ...     schema=WordsSchema,
    ... )
    """
    # Resolve the deprecated keep_queries flag into delete_completed_queries.
    if delete_completed_queries is None:
        if keep_queries is None:
            warn(
                "delete_completed_queries arg of rest_connector should be set explicitly."
                + " It will soon be required."
            )
            delete_completed_queries = True
        else:
            warn(
                "DEPRECATED: keep_queries arg of rest_connector is deprecated,"
                + " use delete_completed_queries with an opposite meaning instead."
            )
            delete_completed_queries = not keep_queries
    # Without a schema the whole request body lands in a single "query" column.
    if schema is None:
        format = "raw"
        schema = pw.schema_builder({"query": pw.column_definition()})
    else:
        format = "custom"
    # Either a shared webserver object or a (deprecated) host/port pair, never both.
    if webserver is None:
        if host is None or port is None:
            raise ValueError(
                "If webserver object isn't specified, host and port must be present"
            )
        if isinstance(port, str):
            port = int(port)
        warn(
            "The `host` and `port` arguments are deprecated. Please use `webserver` "
            "instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        webserver = PathwayWebserver(host, port)
    else:
        if host is not None or port is not None:
            raise ValueError(
                "If webserver object is specified, host and port shouldn't be set"
            )
    # Pending requests, shared with the RestServerSubject via the webserver.
    tasks = webserver._tasks
    input_table = io.python.read(
        subject=RestServerSubject(
            webserver=webserver,
            route=route,
            methods=methods,
            schema=schema,
            delete_completed_queries=delete_completed_queries,
            format=format,
            request_validator=request_validator,
            documentation=documentation,
        ),
        schema=schema,
        format="json",
        autocommit_duration_ms=autocommit_duration_ms,
    )

    def response_writer(responses: pw.Table):
        # Subscribes to the result table and completes the pending HTTP request
        # matching each added row's key.
        def on_change(key: Pointer, row: dict[str, Any], time: int, is_addition: bool):
            if not is_addition:
                return
            task = tasks.get(key, None)
            if task is None:
                if delete_completed_queries:
                    logging.info(
                        "Query response has changed. It probably indicates an error in the pipeline."
                    )
                return

            def set_task():
                task["result"] = row["result"]
                task["event"].set()

            # The waiting handler lives on the webserver's loop; hand the
            # result over thread-safely.
            webserver._loop.call_soon_threadsafe(set_task)

        io.subscribe(table=responses, on_change=on_change)

    return input_table, response_writer
Then, you can proceed with the following REST connector configuration code. First, the schema and the webserver object need to be created: >>> import pathway as pw >>> class WordsSchema(pw.Schema): ... word: str ... >>> >>> webserver = pw.io.http.PathwayWebserver(host="127.0.0.1", port=9999) Then, the endpoint that inputs this collection can be configured: >>> words, response_writer = pw.io.http.rest_connector( ... webserver=webserver, ... route="/uppercase", ... schema=WordsSchema, ... ) Finally, you can define the logic that takes the input table `words`, calculates the result in the form of a table, and provides it for the endpoint's output: >>> uppercase_words = words.select( ... query_id=words.id, ... result=pw.apply(lambda x: x.upper(), pw.this.word) ... ) >>> response_writer(uppercase_words) Please note that you don't need to create another web server object if you need to \ have more than one endpoint running on the same host and port. For example, if you need \ to create another endpoint that converts words to lower case, in the same way, you \ need to reuse the existing `webserver` object. That is, the configuration would start \ with: >>> words_for_lowercase, response_writer_for_lowercase = pw.io.http.rest_connector( ... webserver=webserver, ... route="/lowercase", ... schema=WordsSchema, ... ) |
166,843 | import json
import random
import time
from typing import Any
import requests
import pathway as pw
def unescape(message: str, row: dict[str, Any], time: int, is_addition: bool):
message = message.replace("{table.time}", str(time))
message = message.replace("{table.diff}", "1" if is_addition else "-1")
for k, v in row.items():
wildcard_to_replace = "{table." + k + "}"
message = message.replace(wildcard_to_replace, str(v))
return message
def prepare_request_payload(
row: dict[str, Any],
time: int,
is_addition: bool,
req_format: str,
text: str | None,
):
if req_format == "json":
row["time"] = time
row["diff"] = 1 if is_addition else -1
return json.dumps(row)
elif req_format == "custom":
return unescape(text or "", row, time, is_addition)
else:
raise ValueError(f"Unknown payload format: {req_format}") | null |
166,844 | from __future__ import annotations
import json
import subprocess
import sys
from importlib.abc import MetaPathFinder
from importlib.util import spec_from_file_location
from os import environ
from pathlib import Path
# Environment variables controlling the cargo build of the Rust engine.
PROFILE_ENV_VAR = "PATHWAY_PROFILE"  # cargo profile to build with (e.g. "dev")
QUIET_ENV_VAR = "PATHWAY_QUIET"  # "1"/"true"/"yes" silences cargo output
FEATURES_ENV_VAR = "PATHWAY_FEATURES"  # cargo features to enable
DEFAULT_PROFILE = "dev"  # used when PATHWAY_PROFILE is unset
RUST_PACKAGE = "pathway"  # cargo package whose compiled artifact is loaded
def cargo_build():
    """Compile the Rust extension with cargo and return the built library path.

    The build profile, verbosity and feature flags are taken from the
    PATHWAY_PROFILE / PATHWAY_QUIET / PATHWAY_FEATURES environment variables.
    Cargo's JSON output is scanned for the compiler artifact of the
    ``pathway`` package and the path to its shared object is returned.
    """
    profile = environ.get(PROFILE_ENV_VAR, DEFAULT_PROFILE)
    quiet = environ.get(QUIET_ENV_VAR, "0").lower() in ("1", "true", "yes")
    features = environ.get(FEATURES_ENV_VAR)

    command = [
        "cargo",
        "--locked",
        "build",
        "--lib",
        "--message-format=json-render-diagnostics",
        f"--profile={profile}",
    ]
    if quiet:
        command.append("--quiet")
    if features:
        command.extend(["--features", features])

    crate_dir = Path(__file__).parent.parent
    # Sanity check: the crate root must not itself be a Python package.
    assert not (crate_dir / "__init__.py").exists()

    build = subprocess.run(
        command,
        stdin=subprocess.DEVNULL,
        stdout=subprocess.PIPE,
        cwd=crate_dir,
        text=True,
        check=True,
    )

    artifact = None
    for raw_line in build.stdout.splitlines():
        message = json.loads(raw_line)
        if message["reason"] != "compiler-artifact":
            continue
        if not message["package_id"].startswith(RUST_PACKAGE + " "):
            continue
        for produced in message["filenames"]:
            candidate = Path(produced)
            # NOTE(review): only ".so" is recognized here — presumably a
            # Linux-only assumption; confirm for other platforms.
            if candidate.suffix != ".so":
                continue
            artifact = candidate
    assert artifact is not None
    return artifact
166,845 | from warnings import warn
from pathway.internals import udfs
def __getattr__(name):
    """Module-level fallback: forward attribute lookups to ``pathway.udfs``.

    Emits a DeprecationWarning on every access, since the contents of this
    module were moved to ``pathway.udfs``.
    """
    warn(
        "pathway.asynchronous module is deprecated. Its content has been moved to pathway.udfs.",
        DeprecationWarning,
        stacklevel=2,
    )
    try:
        forwarded = getattr(udfs, name)
    except AttributeError:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return forwarded
166,846 | import ast
import os
import time
import uuid
from collections import deque
from log import logger
from openai_server.backend_utils import convert_messages_to_structure
def decode(x, encoding_name="cl100k_base"):
    """Decode a sequence of token ids back to text using tiktoken.

    Returns an empty string when tiktoken is not installed (best-effort).
    """
    try:
        import tiktoken
    except ImportError:
        return ''
    return tiktoken.get_encoding(encoding_name).decode(x)
166,847 | import contextlib
import logging
import os
import sys
import ast
import json
from threading import Thread
import time
from traceback import print_exception
from typing import List, Dict
from pydantic import BaseModel, Field
import uvicorn
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import JSONResponse, Response, StreamingResponse
from sse_starlette import EventSourceResponse
from starlette.responses import PlainTextResponse
from openai_server.log import logger
def verify_api_key(authorization: str = Header(None)) -> None:
    """FastAPI dependency validating the ``Authorization: Bearer`` header.

    The expected key comes from the H2OGPT_OPENAI_API_KEY environment
    variable; the literal value 'EMPTY' disables authentication entirely.

    Raises:
        HTTPException: 401 when the header is missing or does not match.
    """
    server_api_key = os.getenv('H2OGPT_OPENAI_API_KEY', 'EMPTY')
    if server_api_key == 'EMPTY':
        # authentication disabled ('' cannot be used for this purpose)
        return
    expected = f"Bearer {server_api_key}"
    # A missing header (None) also fails the comparison below.
    if server_api_key and authorization != expected:
        raise HTTPException(status_code=401, detail="Unauthorized")
166,848 | import contextlib
import logging
import os
import sys
import ast
import json
from threading import Thread
import time
from traceback import print_exception
from typing import List, Dict
from pydantic import BaseModel, Field
import uvicorn
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import JSONResponse, Response, StreamingResponse
from sse_starlette import EventSourceResponse
from starlette.responses import PlainTextResponse
from openai_server.log import logger
The provided code snippet includes necessary dependencies for implementing the `health` function. Write a Python function `async def health() -> Response` to solve the following problem:
Health check.
Here is the function:
async def health() -> Response:
    """Liveness probe: always answers HTTP 200 with an empty body."""
    ok = Response(status_code=200)
    return ok
166,849 | import contextlib
import logging
import os
import sys
import ast
import json
from threading import Thread
import time
from traceback import print_exception
from typing import List, Dict
from pydantic import BaseModel, Field
import uvicorn
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import JSONResponse, Response, StreamingResponse
from sse_starlette import EventSourceResponse
from starlette.responses import PlainTextResponse
from openai_server.log import logger
class InvalidRequestError(Exception):
    """Raised when an incoming API request fails validation."""
async def validation_exception_handler(request, exc):
    """Log a validation failure and answer with HTTP 400 plain text."""
    print_exception(exc)
    wrapped = InvalidRequestError(str(exc))
    return PlainTextResponse(str(wrapped), status_code=400)
166,850 | import contextlib
import logging
import os
import sys
import ast
import json
from threading import Thread
import time
from traceback import print_exception
from typing import List, Dict
from pydantic import BaseModel, Field
import uvicorn
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import JSONResponse, Response, StreamingResponse
from sse_starlette import EventSourceResponse
from starlette.responses import PlainTextResponse
from openai_server.log import logger
async def options_route():
    """Answer OPTIONS / CORS preflight probes with a JSON "OK"."""
    acknowledgement = "OK"
    return JSONResponse(content=acknowledgement)
166,851 | import contextlib
import logging
import os
import sys
import ast
import json
from threading import Thread
import time
from traceback import print_exception
from typing import List, Dict
from pydantic import BaseModel, Field
import uvicorn
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import JSONResponse, Response, StreamingResponse
from sse_starlette import EventSourceResponse
from starlette.responses import PlainTextResponse
from openai_server.log import logger
class TextRequest(Generation, CompletionParams):
def completions(body: dict) -> dict:
def stream_completions(body: dict):
async def openai_completions(request: Request, request_data: TextRequest):
    """Serve the completions endpoint: SSE stream or a single JSON response."""
    if request_data.stream:
        async def generator():
            from openai_server.backend import stream_completions
            response = stream_completions(dict(request_data))
            for resp in response:
                # Stop generating as soon as the client has gone away.
                disconnected = await request.is_disconnected()
                if disconnected:
                    break
                yield {"data": json.dumps(resp)}

        return EventSourceResponse(generator())
    else:
        from openai_server.backend import completions
        response = completions(dict(request_data))
        return JSONResponse(response)
166,852 | import contextlib
import logging
import os
import sys
import ast
import json
from threading import Thread
import time
from traceback import print_exception
from typing import List, Dict
from pydantic import BaseModel, Field
import uvicorn
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import JSONResponse, Response, StreamingResponse
from sse_starlette import EventSourceResponse
from starlette.responses import PlainTextResponse
from openai_server.log import logger
class ChatRequest(Generation, ChatParams):
    """Request body for the chat-completions endpoint.

    See https://platform.openai.com/docs/api-reference/chat/create
    """
def chat_completions(body: dict) -> dict:
    """Run a non-streaming chat completion and return the final response dict."""
    # Drain the generator, keeping only its last yielded item.
    final = deque(chat_completion_action(body, stream_output=False), maxlen=1)
    return final.pop()
def stream_chat_completions(body: dict):
    """Yield streaming chat-completion chunks as they are produced."""
    yield from chat_completion_action(body, stream_output=True)
async def openai_chat_completions(request: Request, request_data: ChatRequest):
    """Serve the chat-completions endpoint: SSE stream or a single JSON response."""
    if request_data.stream:
        from openai_server.backend import stream_chat_completions

        async def generator():
            response = stream_chat_completions(dict(request_data))
            for resp in response:
                # Stop generating as soon as the client has gone away.
                disconnected = await request.is_disconnected()
                if disconnected:
                    break
                yield {"data": json.dumps(resp)}

        return EventSourceResponse(generator())
    else:
        from openai_server.backend import chat_completions
        response = chat_completions(dict(request_data))
        return JSONResponse(response)
166,853 | import contextlib
import logging
import os
import sys
import ast
import json
from threading import Thread
import time
from traceback import print_exception
from typing import List, Dict
from pydantic import BaseModel, Field
import uvicorn
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import JSONResponse, Response, StreamingResponse
from sse_starlette import EventSourceResponse
from starlette.responses import PlainTextResponse
from openai_server.log import logger
# Shared gradio client used by the HTTP handlers to talk to the h2oGPT backend.
gradio_client = get_gradio_client()
async def handle_models(request: Request):
    """Serve /v1/models and /v1/models/{model}.

    Without a trailing model name the full list of base models is returned;
    with one, the matching model's metadata dict, or ``{'model_name':
    'INVALID'}`` when the name is unknown.
    """
    path = request.url.path
    model_name = path[len('/v1/models/'):]
    from openai_server.backend import gradio_client
    model_dict = ast.literal_eval(gradio_client.predict(api_name='/model_names'))
    base_models = [x['base_model'] for x in model_dict]
    if not model_name:
        response = {
            "object": "list",
            "data": base_models,
        }
    elif model_name in base_models:
        response = model_dict[base_models.index(model_name)]
    else:
        # Membership is checked first: list.index raises ValueError for an
        # unknown name, so the previous `index(...) >= 0` test could never
        # reach this INVALID branch.
        response = dict(model_name='INVALID')
    return JSONResponse(response)
166,854 | import contextlib
import logging
import os
import sys
import ast
import json
from threading import Thread
import time
from traceback import print_exception
from typing import List, Dict
from pydantic import BaseModel, Field
import uvicorn
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import JSONResponse, Response, StreamingResponse
from sse_starlette import EventSourceResponse
from starlette.responses import PlainTextResponse
from openai_server.log import logger
def get_model_info():
async def handle_model_info():
    """Expose the backend's current model information as JSON."""
    from openai_server.backend import get_model_info
    payload = get_model_info()
    return JSONResponse(content=payload)
166,855 | import contextlib
import logging
import os
import sys
import ast
import json
from threading import Thread
import time
from traceback import print_exception
from typing import List, Dict
from pydantic import BaseModel, Field
import uvicorn
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import JSONResponse, Response, StreamingResponse
from sse_starlette import EventSourceResponse
from starlette.responses import PlainTextResponse
from openai_server.log import logger
def get_model_list():
    """Return the backend's base-model names as ``{'model_names': [...]}``."""
    # concurrent gradio client
    gradio = get_client()
    raw_listing = gradio.predict(api_name='/model_names')
    models = ast.literal_eval(raw_listing)
    return dict(model_names=[entry['base_model'] for entry in models])
async def handle_list_models():
    """HTTP handler exposing the backend's model list as JSON."""
    from openai_server.backend import get_model_list
    listing = get_model_list()
    return JSONResponse(content=listing)
166,856 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
def parse_rst_file(filepath):
    """Parse an RST FAQ file into a {question: answer} dict.

    A text node ending in '?' starts a new question; all following text until
    the next question is collected as its answer. Requires docutils.
    """
    with open(filepath, 'r') as f:
        input_data = f.read()
    settings_overrides = {'initial_header_level': 2}
    from docutils import core
    document = core.publish_doctree(
        source=input_data,
        source_path=filepath,
        settings_overrides=settings_overrides,
    )
    qa_pairs = []
    current_section = None
    current_question = ""
    current_answer = ""
    for node in document.traverse():
        if node.__class__.__name__ == 'section':
            current_section = ""
        elif current_section is not None:
            if node.__class__.__name__ == 'Text':
                text = node.astext()
                # endswith() is safe for empty text nodes; indexing text[-1]
                # raised IndexError on them.
                if text.endswith("?"):
                    if current_question:
                        qa_pairs.append((current_question, current_answer))
                    current_question = text
                    current_answer = ""
                else:
                    current_answer += text
    # Flush the last collected pair.
    if current_answer:
        qa_pairs.append((current_question, current_answer))
    return dict(qa_pairs)
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
# Placeholder; the tests below import the real list from `prompter` at call time.
prompt_types = []
def test_scrape_dai_docs():
    """Scrape the DAI FAQ rst into a human_bot-style instruction JSON file."""
    faq_path = os.path.join(os.path.expanduser('~'), 'h2oai/docs/faq.rst')
    pairs = parse_rst_file(faq_path)
    prompt_type = 'human_bot'
    from prompter import prompt_types
    assert prompt_type in prompt_types
    records = [
        {"instruction": question, "output": answer, 'prompt_type': prompt_type}
        for question, answer in pairs.items()
    ]
    output_file = "dai_faq.json"
    with open(output_file, "wt") as f:
        f.write(json.dumps(records, indent=2))
166,857 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
def get_sentences(blob, length):
    """
    break-up input text into sentences and then output list of sentences of about length in size

    :param blob: text to split
    :param length: approximate maximum chunk size in characters
    :return: list of chunk strings (always at least one entry, possibly empty)
    """
    import nltk
    nltk.download('punkt')
    from nltk.tokenize import sent_tokenize
    sentences = sent_tokenize(blob)
    my_sentences = []
    my_string = ""
    for sentence in sentences:
        if len(my_string) + len(sentence) <= length:
            if my_string:
                my_string += " " + sentence
            else:
                my_string = sentence
        else:
            if my_string:
                my_sentences.append(my_string)
            # FIX: start the next chunk with the sentence that overflowed
            # instead of discarding it (was: my_string = "").
            my_string = sentence
    if my_string and my_sentences:
        # FIX: don't drop the trailing partial chunk.
        my_sentences.append(my_string)
    return my_sentences or [my_string]
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
The provided code snippet includes necessary dependencies for implementing the `test_scrape_dai_docs_all` function. Write a Python function `def test_scrape_dai_docs_all()` to solve the following problem:
pytest create_data.py::test_scrape_dai_docs_all
Here is the function:
def test_scrape_dai_docs_all():
    """
    pytest create_data.py::test_scrape_dai_docs_all

    Split all DAI docs rst files 95/5 into train/valid, strip RST markup, and
    chunk each file at several lengths into plain-output JSON records.
    """
    import glob
    import nltk
    nltk.download('punkt')
    chunks_by_output = {}
    np.random.seed(1234)
    home = os.path.expanduser('~')
    rst_files = list(glob.glob(os.path.join(home, "h2oai/docs/**/*rst")))
    np.random.shuffle(rst_files)
    n_valid = int(0.05 * len(rst_files))
    splits = [
        ("dai_docs.train.json", rst_files[n_valid:]),
        ("dai_docs.valid.json", rst_files[:n_valid]),
    ]
    for chunk_len in [100, 200, 500]:
        for output_file, split_files in splits:
            bucket = chunks_by_output.setdefault(output_file, [])
            for fname in split_files:
                with open(fname) as fh:
                    blob = fh.read()
                # strip common RST markup tokens before sentence chunking
                for token in ("~~", "==", "''", "--", "**"):
                    blob = blob.replace(token, "")
                bucket.extend(get_sentences(blob, length=chunk_len))
    for output_file, _ in splits:
        records = [{"output": chunk.strip(), 'prompt_type': 'plain'}
                   for chunk in chunks_by_output[output_file]]
        with open(output_file, "wt") as f:
            f.write(json.dumps(records, indent=2))
166,858 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
def setup_dai_docs(path=None, dst="working_dir_docs", from_hf=False):
    """
    Only supported if have access to source code or HF token for HF spaces and from_hf=True

    Stages the Driverless AI documentation tree into a flat working directory so
    later rst/pandoc processing can use simple local paths.

    :param path: glob pattern of doc files; when None, derived from ~/h2oai*, or from the HF zip if from_hf
    :param dst: working directory that receives a flat copy of all doc files
    :param from_hf: if True, download dai_docs.zip from the h2oai/dai_docs HF dataset and unzip it
    :return: dst
    """
    home = os.path.expanduser('~')
    if from_hf:
        # assumes
        from huggingface_hub import hf_hub_download
        # True for case when locally already logged in with correct token, so don't have to set key
        token = os.getenv('HUGGING_FACE_HUB_TOKEN', True)
        path_to_zip_file = hf_hub_download('h2oai/dai_docs', 'dai_docs.zip', token=token, repo_type='dataset')
        path = 'h2oai'
        import zipfile
        with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
            zip_ref.extractall(path)
        path = os.path.join(path, 'docs/**/*')
    if path is None:
        if os.path.isdir(os.path.join(home, 'h2oai')):
            path = os.path.join(home, "h2oai/docs/**/*")
        else:
            # NOTE(review): path is still None here, so the assertion message
            # formats as 'None does not exist' — confirm intended.
            assert os.path.isdir(os.path.join(home, 'h2oai.superclean')), '%s does not exist' % path
            path = os.path.join(home, "h2oai.superclean/docs/**/*")
    import glob
    files = list(glob.glob(path, recursive=True))
    # pandoc can't find include files
    remove(dst)
    os.makedirs(dst)
    # copy full tree, for absolute paths in rst
    for fil in files:
        if os.path.isfile(fil):
            shutil.copy(fil, dst)
    # hack for relative path
    scorers_dir = os.path.join(dst, 'scorers')
    makedirs(scorers_dir)
    for fil in glob.glob(os.path.join(dst, '*.frag')):
        shutil.copy(fil, scorers_dir)
    return dst
def rst_to_outputs(files, min_len=30, max_len=2048 // 2 - 30):
    """
    Convert rst files to plain-text chunks via pandoc.

    :param files: list of rst file paths (processed with chdir into each file's directory)
    :param min_len: chunks shorter than this (after strip) are dropped
    :param max_len: target maximum chunk length in characters
    :return: list of [text, filename] pairs
    """
    # account for sequence length (context window) including prompt and input and output
    # os.system('pandoc -f rst -t plain ./expert_settings/nlp_settings.rst')
    import pypandoc
    basedir = os.path.abspath(os.getcwd())
    outputs = []
    for fil in files:
        os.chdir(basedir)
        os.chdir(os.path.dirname(fil))
        fil = os.path.basename(fil)
        print("Processing %s" % fil, flush=True)
        # out_format can be one of: asciidoc, asciidoctor, beamer, biblatex, bibtex, commonmark, commonmark_x,
        # context, csljson, docbook, docbook4, docbook5, docx, dokuwiki,
        # dzslides, epub, epub2, epub3, fb2, gfm, haddock, html, html4, html5, icml,
        # ipynb, jats, jats_archiving, jats_articleauthoring, jats_publishing, jira,
        # json, latex, man,
        # markdown, markdown_github, markdown_mmd, markdown_phpextra, markdown_strict,
        # mediawiki, ms, muse, native, odt, opendocument, opml, org, pdf, plain, pptx,
        # revealjs, rst, rtf, s5, slideous, slidy, tei, texinfo, textile, xwiki, zimwiki
        out_format = 'plain'
        # avoid extra new lines injected into text
        # NOTE(review): the second element is a single literal string containing
        # '%s' and 'dst' — it never formats anything; confirm intended.
        extra_args = ['--wrap=preserve', '--resource path="%s" % dst']
        plain_list = []
        try:
            # valid for expert settings
            input_rst = pypandoc.convert_file(fil, 'rst')
            input_list = input_rst.split('\n``')
            for input_subrst in input_list:
                input_plain = pypandoc.convert_text(input_subrst, format='rst', to='plain')
                plain_list.append([input_plain, fil])
        except Exception as e:
            print("file exception: %s %s" % (fil, str(e)), flush=True)
        if not plain_list:
            # if failed to process as pieces of rst, then
            output = pypandoc.convert_file(fil, out_format, extra_args=extra_args, format='rst')
            outputs1 = get_sentences(output, length=max_len)
            for oi, output in enumerate(outputs1):
                output = output.replace('\n\n', '\n')
                plain_list.append([output, fil])
        outputs.extend(plain_list)
    # report:
    # [print(len(x)) for x in outputs]
    # deal with blocks longer than context size (sequence length) of 2048
    new_outputs = []
    num_truncated = 0
    num_orig = len(outputs)
    for output, fil in outputs:
        if len(output) < max_len:
            new_outputs.append([output, fil])
            continue
        # re-chunk oversize blocks sentence-wise
        outputs1 = get_sentences(output, length=max_len)
        for oi, output1 in enumerate(outputs1):
            output1 = output1.replace('\n\n', '\n')
            new_outputs.append([output1, fil])
        num_truncated += 1
    print('num_orig: %s num_truncated: %s' % (num_orig, num_truncated), flush=True)
    new_outputs = [[k.strip(), fil] for k, fil in new_outputs if len(k.strip()) > min_len]
    return new_outputs
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def remove(path: str):
    """Best-effort removal of a file or directory tree; swallows all errors."""
    try:
        if path is None or not os.path.exists(path):
            return
        if os.path.isdir(path):
            shutil_rmtree(path, ignore_errors=True)
        else:
            with contextlib.suppress(FileNotFoundError):
                os.remove(path)
    except:
        pass
The provided code snippet includes necessary dependencies for implementing the `test_scrape_dai_docs_all_pandoc` function. Write a Python function `def test_scrape_dai_docs_all_pandoc()` to solve the following problem:
pytest -s -v create_data.py::test_scrape_dai_docs_all_pandoc
Here is the function:
def test_scrape_dai_docs_all_pandoc():
    """
    pytest -s -v create_data.py::test_scrape_dai_docs_all_pandoc
    :return:
    """
    dst = setup_dai_docs()
    import glob
    files = list(glob.glob(os.path.join(dst, '*rst'), recursive=True))
    basedir = os.path.abspath(os.getcwd())
    new_outputs = rst_to_outputs(files)
    os.chdir(basedir)
    remove(dst)
    # FIX: rst_to_outputs returns [text, filename] pairs, so unpack each pair;
    # the original called .strip() on the pair itself (AttributeError).
    save_thing = [{"output": text.strip(), 'prompt_type': 'plain'} for text, _fil in new_outputs]
    output_file = "dai_docs.train_cleaned.json"
    with open(output_file, "wt") as f:
        f.write(json.dumps(save_thing, indent=2))
166,859 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
The provided code snippet includes necessary dependencies for implementing the `test_config_to_json` function. Write a Python function `def test_config_to_json()` to solve the following problem:
Needs to run from the Driverless AI source directory. E.g. (base) jon@gpu:~/h2oai$ pytest -s -v /data/jon/h2ogpt/create_data.py::test_config_to_json ; cp config.json /data/jon/h2ogpt/
Here is the function:
def test_config_to_json():
    """
    Needs to run from Driverless AI source directory.
    E.g. (base) jon@gpu:~/h2oai$ pytest -s -v /data/jon/h2ogpt/create_data.py::test_config_to_json ; cp config.json /data/jon/h2ogpt/
    :return:
    """
    try:
        # Arrange
        import json
        from h2oaicore.systemutils import config
        toml_list = []
        # For every documented config.toml key, emit several phrasings of the
        # same Q/A so the model learns multiple ways to ask about a setting.
        for k, v in config.get_meta_dict().items():
            title = (v.title + ": ") if v.title else ''
            comment = v.comment or ''
            if not (title or comment):
                # undocumented key: nothing to teach
                continue
            toml_list.extend(
                [
                    {
                        'prompt_type': 'plain',
                        'instruction': f"<human>: What does {k} do?\n<bot>: {k.replace('_', ' ')} config.toml: {comment or title}\n<human>:".replace(
                            "\n", ""),
                    },
                    {
                        'prompt_type': 'plain',
                        'instruction': f"<human>: Explain {k}.\n<bot>: {k.replace('_', ' ')} config.toml: {comment or title}\n<human>:".replace(
                            "\n", ""),
                    },
                    # only emitted when both a title and a comment exist
                    {
                        'prompt_type': 'plain',
                        'instruction': f"<human>: How can I do this: {title}.\n<bot>: Set the {k.replace('_', ' ')} config.toml\n<human>:".replace(
                            "\n", ""),
                    } if title and comment else None,
                    {
                        'prompt_type': 'human_bot',
                        'instruction': f'Explain the following expert setting for Driverless AI',
                        'input': f"{k}",
                        'output': f"{k.replace('_', ' ')} config.toml: {comment or title}".replace("\n", ""),
                    },
                    {
                        'prompt_type': 'human_bot',
                        'instruction': f'Explain the following expert setting for Driverless AI',
                        'input': f"{k}",
                        'output': f"{k.replace('_', ' ')} config.toml: {title}{comment}".replace("\n", ""),
                    },
                    {
                        'prompt_type': 'human_bot',
                        'instruction': f'Explain the following expert setting for Driverless AI',
                        'input': f"{k.replace('_', ' ')}",
                        'output': f"{k.replace('_', ' ')} config.toml: {title}{comment}".replace("\n", ""),
                    },
                    {
                        'prompt_type': 'human_bot',
                        'instruction': f'Explain the following expert setting for Driverless AI',
                        'input': f"{title}",
                        'output': f"{k.replace('_', ' ')} config.toml: {title}{comment}".replace("\n", ""),
                    },
                    {
                        'prompt_type': 'human_bot',
                        'instruction': f'Provide a short explanation of the expert setting {k}',
                        'output': f"{k.replace('_', ' ')} config.toml: {comment or title}".replace("\n", ""),
                    },
                    {
                        'prompt_type': 'human_bot',
                        'instruction': f'Provide a detailed explanation of the expert setting {k}',
                        'output': f"{k.replace('_', ' ')} config.toml: {title}{comment}".replace("\n", ""),
                    },
                ]
            )
        # drop the conditional None entries
        toml_list = [x for x in toml_list if x]
        with open("config.json", "wt") as f:
            f.write(json.dumps(toml_list, indent=2))
    except Exception as e:
        print("Exception: %s" % str(e), flush=True)
166,860 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
def atomic_copy(src=None, dst=None, with_permissions=True):
    """Copy src to dst via a uniquely-named temp file so readers never see a partial dst."""
    if os.path.isfile(dst):
        # destination already present: nothing to do
        return
    import uuid
    tmp_dst = dst + str(uuid.uuid4())
    makedirs(os.path.dirname(dst), exist_ok=True)
    copier = shutil.copy if with_permissions else shutil.copyfile
    copier(src, tmp_dst)
    atomic_move(tmp_dst, dst)
    remove(tmp_dst)
def makedirs(path, exist_ok=True):
    """
    Avoid some inefficiency in os.makedirs()
    :param path: directory to create
    :param exist_ok: if False, raise AssertionError when the directory already exists
    :return: path (whether it already existed or was just created)
    """
    if os.path.isdir(path) and os.path.exists(path):
        assert exist_ok, "Path already exists"
        return path
    os.makedirs(path, exist_ok=exist_ok)
    # FIX: also return the path on the creation branch so callers get a
    # consistent return value (previously returned None here).
    return path
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def copy_tree(src, dst, follow_symlink=False):
    """Recursively mirror src into dst using atomic per-file copies; vanished files are skipped."""
    makedirs(dst, exist_ok=True)
    for root, _dirs, filenames in os.walk(src, followlinks=follow_symlink):
        mirror_root = root.replace(src, dst)
        makedirs(mirror_root, exist_ok=True)
        for name in filenames:
            src_file = os.path.join(root, name)
            dst_file = os.path.join(mirror_root, name)
            # print("%s -> %s" % (src_file, dst_file))
            try:
                atomic_copy(src_file, dst_file)
            except FileNotFoundError:
                # source disappeared mid-walk: best-effort, keep going
                continue
166,861 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def test_prep_instruct_vicuna():
    """Convert ShareGPT/Vicuna conversations into <human>/<bot> plain-text training rows."""
    from datasets import load_dataset
    filename = 'ShareGPT_unfiltered_cleaned_split.json'
    if not os.path.exists(filename):
        os.system(
            'wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/%s' % filename)
    data = load_dataset("json", data_files={"train": filename})["train"]
    training_rows = []
    for i in range(data.num_rows):
        conversations = data[i]['conversations']
        assert isinstance(conversations, list), conversations
        convo = ""
        for j, conv in enumerate(conversations):
            # Get ready for generate.py prompt_type=human_bot
            # But train with prompt_type=plain
            if conv['from'] == 'human':
                FROM = '<human>: '
            elif conv['from'] == 'gpt':
                FROM = '<bot>: '
            # NOTE(review): if conv['from'] is neither 'human' nor 'gpt', FROM is
            # stale (or unbound on the first turn, raising NameError) — confirm
            # the dataset only ever contains these two roles.
            convo += f"{FROM}" + conv['value'] + "\n"
        if convo:
            training_rows.append(dict(input=convo))
    with open(filename + ".generate_human_bot.train_plain.json", "wt") as f:
        f.write(json.dumps(training_rows, indent=2))
166,862 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
POSTFIX = ".generate_human_bot.train_plain.json"
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def test_get_small_sample_oig_data(filename):
    """Download one OIG jsonl shard if absent and rewrite it as {'input': text} records."""
    if not os.path.exists(filename):
        os.system('wget https://huggingface.co/datasets/laion/OIG/resolve/main/%s' % filename)
    import json
    records = []
    with open(filename, "r") as f:
        for raw_line in f.readlines():
            parsed = json.loads(raw_line)
            records.append(dict(input=parsed["text"]))
    with open(filename + POSTFIX, "w") as f:
        f.write(json.dumps(records, indent=2))
166,863 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
useful_oig_files = ['unified_rallio_safety_and_prosocial.jsonl.parquet',
'unified_chip2.jsonl.parquet',
'unified_cuad.jsonl.parquet',
'unified_essays.jsonl.parquet',
'unified_flan.jsonl.gz.parquet',
'unified_grade_school_math_instructions.jsonl.parquet',
'unified_hc3_human.jsonl.parquet',
'unified_mathqa_flanv2_kojma_cot.jsonl.parquet',
'unified_merged_code_xp3.jsonl.parquet',
'unified_multi_news.jsonl.parquet',
# 'unified_multi_sum.jsonl.parquet'
'unified_ni.jsonl.gz.parquet',
'unified_openai_summarize_tldr.jsonl.parquet',
# 'unified_oscar_en_sample_dialog.jsonl.parquet', # create text containing these N words, not specific
'unified_plot_screenplay_books_dialog.jsonl.parquet',
'unified_soda_dialog.jsonl.parquet',
'unified_unnatural_instructions.jsonl.parquet',
]
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def test_download_useful_data_as_parquet(filename):
    """Fetch one OIG jsonl file (if needed) and convert it to parquet, skipping non-useful shards."""
    parquet_name = filename + '.parquet'
    if parquet_name not in useful_oig_files:
        pytest.skip('file declared not useful')
    if not os.path.exists(filename):
        os.system('wget https://huggingface.co/datasets/laion/OIG/resolve/main/%s' % filename)
    if not os.path.exists(parquet_name):
        frame = pd.read_json(path_or_buf=filename, lines=True)
        frame.to_parquet(parquet_name, index=False)
166,864 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
POSTFIX = ".generate_human_bot.train_plain.json"
OIG_DATASETS = [
"unified_chip2.jsonl",
"unified_grade_school_math_instructions.jsonl",
"unified_poetry_2_song.jsonl",
"unified_plot_screenplay_books_dialog.jsonl",
]
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def test_merge_shuffle_small_sample_oig_data():
    """Concatenate the small OIG samples, shuffle deterministically, and write one merged JSON."""
    np.random.seed(1234)
    merged = []
    for filename in OIG_DATASETS:
        with open(filename + POSTFIX, "r") as f:
            merged.extend(json.loads(f.read()))
    np.random.shuffle(merged)
    digest = hashlib.sha256(str(OIG_DATASETS).encode()).hexdigest()[:10]
    with open("merged_shuffled_OIG_%s.json" % digest, "w") as f:
        f.write(json.dumps(merged, indent=2))
166,865 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def test_join_jsons():
files = ['config.json'] * 1 + \
['dai_docs.train_cleaned.json'] * 2 + \
['dai_faq.json'] * 3
print(files)
lst = []
[lst.extend(json.load(open(fil, 'rt'))) for fil in files]
print(len(lst))
json.dump(lst, open("merged.json", "wt"), indent=2) | null |
166,866 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
POSTFIX = ".generate_human_bot.train_plain.json"
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def test_make_rlhf_good_data(filename):
    """Convert an RLHF dataset's 'chosen' conversations into <human>/<bot> plain-text records."""
    from datasets import load_dataset
    chosen = load_dataset(filename)["train"]["chosen"]
    records = []
    for convo in chosen:
        text = convo[2:] if convo[:2] == "\n\n" else convo
        text = text.replace("Human: ", "<human>: ")
        text = text.replace("Assistant: ", "<bot>: ")
        records.append(dict(input=text))
    with open(filename.replace("/", "_") + POSTFIX, "w") as f:
        f.write(json.dumps(records, indent=2))
166,867 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def generate_prompt(data_point, prompt_type, prompt_dict, reduced, making_context, system_prompt=None,
                    histi=-1):
    """
    Render one data point (instruction/input/output) into a full prompt string
    using the template pieces returned by get_prompt().

    :param data_point: dict with optional 'context', 'instruction', 'input', 'output',
        and per-record 'prompt_type'/'prompt_dict' overrides
    :param prompt_type: default prompt type, overridable per record
    :param prompt_dict: default prompt dict, overridable per record
    :param reduced: passed through to get_prompt()
    :param making_context: passed through to get_prompt()
    :param system_prompt: optional system prompt forwarded to get_prompt()
    :param histi: history index forwarded to get_prompt()
    :return: (prompt, pre_response, terminate_response, chat_sep, chat_turn_sep)
    """
    context = data_point.get('context')
    if context is None:
        context = ''
    instruction = data_point.get('instruction')
    input = data_point.get('input')
    output = data_point.get('output')
    # per-record overrides take precedence over the function arguments
    prompt_type = data_point.get('prompt_type', prompt_type)
    prompt_dict = data_point.get('prompt_dict', prompt_dict)
    assert prompt_type in prompt_types, "Bad prompt type: %s" % prompt_type
    promptA, promptB, PreInstruct, PreInput, PreResponse, \
        terminate_response, chat_sep, chat_turn_sep, humanstr, botstr, \
        generates_leading_space, system_prompt, can_handle_system_prompt = \
        get_prompt(prompt_type, prompt_dict,
                   context, reduced, making_context,
                   system_prompt=system_prompt,
                   histi=histi)
    # could avoid if reduce=True, but too complex for parent functions to handle
    prompt = context
    if input and promptA:
        prompt += f"""{promptA}"""
    elif promptB:
        prompt += f"""{promptB}"""
    # Branch order matters: earlier, more specific combinations win.
    if instruction and PreInstruct is not None and input and PreInput is not None:
        prompt += f"""{PreInstruct}{instruction}{PreInput}{input}"""
        prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
    elif instruction and input and PreInstruct is None and PreInput is not None:
        prompt += f"""{PreInput}{instruction}
{input}"""
        prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
    elif input and instruction and PreInput is None and PreInstruct is not None:
        prompt += f"""{PreInstruct}{instruction}
{input}"""
        prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
    elif instruction and PreInstruct is not None:
        prompt += f"""{PreInstruct}{instruction}"""
        prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
    elif input and PreInput is not None:
        prompt += f"""{PreInput}{input}"""
        prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
    # NOTE(review): the next two branches appear unreachable — any case they
    # match is already consumed by the earlier, less specific branches above.
    elif input and instruction and PreInput is not None:
        prompt += f"""{PreInput}{instruction}{input}"""
        prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
    elif input and instruction and PreInstruct is not None:
        prompt += f"""{PreInstruct}{instruction}{input}"""
        prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
    elif input and instruction:
        # i.e. for simple_instruct
        prompt += f"""{instruction}: {input}"""
        prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
    elif input:
        prompt += f"""{input}"""
        prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
    elif instruction:
        prompt += f"""{instruction}"""
        prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
    if PreResponse is not None:
        prompt += f"""{PreResponse}"""
        pre_response = PreResponse  # Don't use strip
    else:
        pre_response = ''
    if output:
        prompt += f"""{output}"""
    return prompt, pre_response, terminate_response, chat_sep, chat_turn_sep
def test_show_prompts():
    """Print the rendered plain-style prompt for every record in a few local JSON files."""
    source_files = ['config.json'] * 1 + \
                   ['dai_docs.train_cleaned.json'] * 1 + \
                   ['dai_faq.json'] * 1
    from prompter import generate_prompt
    for fil in source_files:
        with open(fil, 'rt') as fh:
            records = json.load(fh)
        for record in records:
            rendered = generate_prompt(record, 'plain', '', False, False)[0]
            print(rendered)
166,868 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
def do_one(data_id, num_downloads):
    """
    Download one HF dataset (all of its configs) and save each split to parquet.

    :param data_id: HF dataset id, e.g. 'laion/OIG'
    :param num_downloads: download count, used only for logging
    """
    from datasets import load_dataset
    out_file = "data_%s.parquet" % str(data_id.replace('/', '_'))
    # skip if a large (>1GB) output already exists for this dataset
    if os.path.isfile(out_file) and os.path.getsize(out_file) > 1024 ** 3:
        return
    try:
        print("Loading data_id %s num_downloads: %s" % (data_id, num_downloads), flush=True)
        avail_list = None
        try:
            # deliberately request a bogus config: the error message lists the
            # real config names, which are parsed out of the exception text
            data = load_dataset(data_id, 'foobar')
        except Exception as e:
            if 'Available: ' in str(e):
                avail_list = ast.literal_eval(str(e).split('Available:')[1].strip())
            else:
                avail_list = None
        if avail_list is None:
            # dataset has a single default config
            avail_list = [None]
        print("%s avail_list: %s" % (data_id, avail_list), flush=True)
        for name in avail_list:
            out_file = "data_%s_%s.parquet" % (str(data_id.replace('/', '_')), str(name))
            if os.path.isfile(out_file):
                continue
            data = load_dataset(data_id, name)
            column_names_dict = data.column_names
            column_names = column_names_dict[list(column_names_dict.keys())[0]]
            print("Processing data_id %s num_downloads: %s columns: %s" % (data_id, num_downloads, column_names),
                  flush=True)
            data_dict = data.data
            col_dict = data.num_columns
            first_col = list(col_dict.keys())[0]
            # prefer the 'train' split; otherwise take the first split available
            if 'train' in data_dict:
                df = data['train'].to_pandas()
            else:
                df = data[first_col].to_pandas()
            # csv has issues with escaping chars, even for datasets I know I want
            df.to_parquet(out_file, index=False)
    except Exception as e:
        t, v, tb = sys.exc_info()
        ex = ''.join(traceback.format_exception(t, v, tb))
        print("Exception: %s %s" % (data_id, ex), flush=True)
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def flatten_list(lis):
    """Given a list, possibly nested to any level, return it flattened.

    :param lis: (possibly nested) list
    :return: new flat list with non-list items in original order
    """
    new_lis = []
    for item in lis:
        # FIX: isinstance is the idiomatic type check and also accepts list
        # subclasses, unlike the original ``type(item) == type([])``.
        if isinstance(item, list):
            new_lis.extend(flatten_list(item))
        else:
            new_lis.append(item)
    return new_lis
The provided code snippet includes necessary dependencies for implementing the `test_get_open_datasets` function. Write a Python function `def test_get_open_datasets()` to solve the following problem:
https://huggingface.co/datasets/wikihow/blob/main/wikihow.py https://github.com/mahnazkoupaee/WikiHow-Dataset https://ucsb.box.com/s/ap23l8gafpezf4tq3wapr6u8241zz358 https://ucsb.app.box.com/s/ap23l8gafpezf4tq3wapr6u8241zz358
Here is the function:
def test_get_open_datasets():
    """
    Build a filtered list of small, open-licensed, English, task-relevant HF
    datasets and download each survivor to parquet with a per-dataset timeout.

    https://huggingface.co/datasets/wikihow/blob/main/wikihow.py
    https://github.com/mahnazkoupaee/WikiHow-Dataset
    https://ucsb.box.com/s/ap23l8gafpezf4tq3wapr6u8241zz358
    https://ucsb.app.box.com/s/ap23l8gafpezf4tq3wapr6u8241zz358
    """
    # HF changed things so don't get raw list of all datasets, so not have to filter, but can't do negative filter
    open_tags = ['license:Apache License 2.0',
                 'license:mit',
                 'license:apache',
                 'license:apache2',
                 'license:apache-2.0',
                 'license:bsd',
                 'license:bsd-2-clause',
                 'license:bsd-3-clause',
                 'license:bsd-3-clause-clear',
                 'license:lgpl-2.1',
                 'license:lgpl-3.0',
                 'license:lgpl-lr',
                 'license:lgpl',
                 'license:openrail++',
                 'license:openrail',
                 'license:bigscience-bloom-rail-1.0',
                 # 'license:agpl-3.0',
                 'license:other',
                 'license:unknown',
                 # 'license:mpl-2.0',  # ok, but would have to include original copyright, license, source, copies in distribution
                 # Attribution required:
                 'license:odc-by',
                 'license:cc-by-4.0',
                 'license:cc-by-3.0',
                 'license:cc-by-2.0',
                 'license:cc-by-2.5',
                 # 'license:cc-by-sa-4.0',  # would require same license
                 'license:odbl',
                 'license:pddl',
                 'license:ms-pl',
                 'license:zlib',
                 ]
    # bad license: cc-by-nc-4.0
    from huggingface_hub import list_datasets
    datasets = flatten_list([[x for x in list_datasets(filter=y)] for y in open_tags])
    datasets += [x for x in list_datasets(author='openai')]
    # check all:
    all_license_tags = set(flatten_list([[y for y in x.tags if 'license' in y] for x in datasets]))
    print(len(all_license_tags))
    open_datasets = [x for x in datasets if any([y in x.tags for y in open_tags]) or 'license:' not in str(x.tags)]
    print('open_datasets', len(open_datasets))
    all_task_tags = set(flatten_list([[y for y in x.tags if 'task' in y] for x in open_datasets]))
    print('all_task_tags', len(all_task_tags))
    # task categories we do NOT want (vision, audio, low-signal NLP tasks, ...)
    excluded_tags = ['image', 'hate', 'tabular', 'table-', 'classification', 'retrieval',
                     'translation', 'identification', 'object', 'mask', 'to-text',
                     'face-detection', 'audio', 'voice', 'reinforcement', 'depth-est',
                     'forecasting', 'parsing', 'visual', 'speech', 'multiple-choice',
                     'slot-filling', 'irds/argsme', '-scoring', 'other', 'graph-ml',
                     'feature-extraction', 'keyword-spotting',
                     'coreference-resolution', 'segmentation',
                     'word-sense-disambiguation',
                     'lemmatization']
    task_tags = [x.replace('task_categories:', '').replace('task_ids:', '')
                 for x in all_task_tags if not any([y in x for y in
                                                    excluded_tags])]
    print('task_tags', len(task_tags))
    # str(x.tags) to catch any pattern match to anything in list
    open_tasked_datasets = [x for x in open_datasets if
                            any([y in str([x for x in x.tags if 'task' in x]) for y in task_tags]) and
                            not any([y in str([x for x in x.tags if 'task' in x]) for y in excluded_tags]) or
                            'task_categories' not in str(x.tags) and 'task_ids' not in str(x.tags)]
    open_tasked_datasets = [x for x in open_tasked_datasets if not x.disabled]
    open_tasked_datasets = [x for x in open_tasked_datasets if not x.gated]
    open_tasked_datasets = [x for x in open_tasked_datasets if not x.private]
    print('open_tasked_datasets', len(open_tasked_datasets))
    sizes = list(set(flatten_list([[(y, x.id) for y in x.tags if 'size' in y] for x in open_tasked_datasets])))
    languages = list(set(flatten_list([[(y, x.id) for y in x.tags if 'language:' in y] for x in open_tasked_datasets])))
    open_english_tasked_datasets = [x for x in open_tasked_datasets if
                                    'language:' not in str(x.tags) or
                                    'language:en' in str(x.tags)]
    small_open_english_tasked_datasets = [x for x in open_english_tasked_datasets if
                                          'n<1K' in str(x.tags) or
                                          '1K<n<10K' in str(x.tags) or
                                          '1K0<n<100K' in str(x.tags) or
                                          '100K<n<1M' in str(x.tags) or
                                          'size_category' not in str(x.tags)
                                          ]
    # 'aeslc' : email_body, subject -> summarization?
    # load_dataset(open_tasked_datasets[0].id).data['train'].to_pandas()
    ids = [x.id for x in small_open_english_tasked_datasets]
    # sanity checks
    # https://bair.berkeley.edu/blog/2023/04/03/koala/
    assert 'alespalla/chatbot_instruction_prompts' in ids
    assert 'laion/OIG' in ids
    assert 'openai/webgpt_comparisons' in ids
    assert 'openai/summarize_from_feedback' in ids
    assert 'Anthropic/hh-rlhf' in ids
    # useful but not allowed for commercial purposes:
    # https://huggingface.co/datasets/squad
    print('open_english_tasked_datasets: ', ids, flush=True)
    # manual curation: datasets that pass the filters but are unusable
    exclude_ids = ['allenai/nllb',  # translation only
                   'hf-internal-testing/fixtures_image_utils',  # testing
                   'allenai/c4',  # search-url
                   'agemagician/uniref50',  # unknown
                   'huggingface-course/documentation-images',  # images
                   'smilegate-ai/kor_unsmile',  # korean
                   'MohamedRashad/ChatGPT-prompts',  # ChatGPT/LearnGPT/https://www.emergentmind.com/
                   'humarin/chatgpt-paraphrases',  # Paraphrase using ChatGPT
                   'Jeska/vaccinchat',  # not useful
                   'alespalla/chatbot_instruction_prompts',  # mixes alpaca
                   'allenai/prosocial-dialog',
                   # already excluded, but wrongly in other datasets that say more permissive license
                   'AlekseyKorshuk/persona-chat',  # low quality
                   'bavard/personachat_truecased',  # low quality
                   'adamlin/daily_dialog',  # medium quality conversations
                   'adamlin/FewShotWoz',  # low quality
                   'benjaminbeilharz/better_daily_dialog',  # low quality
                   'benjaminbeilharz/daily_dialog_w_turn_templates',  # low
                   'benjaminbeilharz/empathetic_dialogues_for_lm',  # low
                   'GEM-submissions/GEM__bart_base_schema_guided_dialog__1645547915',  # NA
                   'ia-bentebib/conv_ai_2_fr',  # low fr
                   'ia-bentebib/daily_dialog_fr',  # low fr
                   'ia-bentebib/dialog_re_fr',  # low fr
                   'ia-bentebib/empathetic_dialogues_fr',  # low fr
                   'roskoN/dailydialog',  # low
                   'VadorMazer/skyrimdialogstest',  # low
                   'bigbio/med_qa',  # med specific Q/A
                   'biu-nlp/qa_srl2018',  # low quality Q/A
                   'biu-nlp/qa_discourse',  # low quality Q/A
                   'iarfmoose/qa_evaluator',  # low quality Q/A
                   'jeopardy',  # low quality Q/A -- no reasoning
                   'narrativeqa',  # low quality Q/A
                   'nomic-ai/gpt4all_prompt_generations',  # bad license
                   'nomic-ai/gpt4all_prompt_generations_with_p3',  # bad license
                   'HuggingFaceH4/alpaca',  # bad license
                   'tatsu-lab/alpaca',  # ToS breaking
                   'yahma/alpaca-cleaned',  # ToS breaking
                   'Hello-SimpleAI/HC3',  # bad license
                   'glue',  # no reasoning QA
                   'sahil2801/CodeAlpaca-20k',  # bad license
                   'Short-Answer-Feedback/saf_communication_networks_english',  # long Q, medium A
                   ]
    small_open_english_tasked_datasets = [x for x in small_open_english_tasked_datasets if x.id not in exclude_ids]
    # some ids clearly speech related
    small_open_english_tasked_datasets = [x for x in small_open_english_tasked_datasets if 'speech' not in x.id]
    # HF testing
    small_open_english_tasked_datasets = [x for x in small_open_english_tasked_datasets if
                                          'hf-internal-testing' not in x.id]
    small_open_english_tasked_datasets = [x for x in small_open_english_tasked_datasets if
                                          'chinese' not in x.id]
    # most-downloaded first
    sorted_small_open_english_tasked_datasets = sorted([(x.downloads, x) for x in small_open_english_tasked_datasets],
                                                       key=lambda x: x[0], reverse=True)
    # NOTES:
    # Run like pytest -s -v create_data.py::test_get_open_datasets &> getdata9.log
    # See what needs config passed and add:
    # grep 'load_dataset(' getdata9.log|grep -v data_id|less -S
    # grep "pip install" getdata9.log
    # NOTE: Some datasets have default config, but others are there.  Don't know how to access them.
    """
    https://huggingface.co/datasets/wikihow/blob/main/wikihow.py
    https://github.com/mahnazkoupaee/WikiHow-Dataset
    https://ucsb.box.com/s/ap23l8gafpezf4tq3wapr6u8241zz358
    https://ucsb.app.box.com/s/ap23l8gafpezf4tq3wapr6u8241zz358
    """
    """
    # some ambiguous or non-commercial datasets
    https://github.com/PhoebusSi/alpaca-CoT
    """
    timeout = 3 * 60
    # laion/OIG takes longer
    for num_downloads, dataset in sorted_small_open_english_tasked_datasets:
        data_id = dataset.id
        func = do_one
        args = (data_id, num_downloads)
        kwargs = {}
        # run each download in its own process so a hang can be timed out
        with ProcessPoolExecutor(max_workers=1) as executor:
            future = executor.submit(func, *args, **kwargs)
            try:
                future.result(timeout=timeout)
            except concurrent.futures.TimeoutError:
                print("\n\ndata_id %s timeout\n\n" % data_id, flush=True)
        # reap any stray worker processes left behind after a timeout
        for child in psutil.Process(os.getpid()).children(recursive=True):
            os.kill(child.pid, signal.SIGINT)
            os.kill(child.pid, signal.SIGTERM)
            os.kill(child.pid, signal.SIGKILL)
166,869 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def flatten_list(lis):
    """Given a list, possibly nested to any level, return it flattened.

    Only ``list`` instances are descended into (tuples, sets, etc. are kept
    as single items). The input list is not modified.

    :param lis: possibly-nested list
    :return: new flat list
    """
    new_lis = []
    for item in lis:
        # isinstance() is the idiomatic, subclass-aware type check,
        # replacing the old `type(item) == type([])` comparison.
        if isinstance(item, list):
            new_lis.extend(flatten_list(item))
        else:
            new_lis.append(item)
    return new_lis
def test_otherlic():
    """Count Hugging Face datasets under assorted permissive licenses,
    excluding anything tagged as a translation dataset."""
    from huggingface_hub import list_datasets
    permissive_filters = [
        'license:odc-by',
        'license:cc-by-4.0',
        'license:cc-by-3.0',
        'license:cc-by-2.0',
        'license:cc-by-2.5',
        'license:cc-by-sa-4.0',
        'license:odbl',
        'license:pddl',
        'license:ms-pl',
        'license:zlib',
    ]
    # one sub-list per license filter, then flattened into a single list
    per_license = [
        [ds for ds in list_datasets(filter=lic_filter) if 'translation' not in str(ds.tags)]
        for lic_filter in permissive_filters
    ]
    datasets = flatten_list(per_license)
    print(len(datasets))
166,870 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
def get_sentences(blob, length):
    """
    break-up input text into sentences and then output list of sentences of about length in size
    :param blob: text to split
    :param length: approximate maximum character length of each output chunk
    :return: non-empty list of sentence chunks, each at most ``length`` characters
    """
    import nltk
    nltk.download('punkt')
    from nltk.tokenize import sent_tokenize
    sentences = sent_tokenize(blob)
    my_sentences = []
    my_string = ""
    for sentence in sentences:
        if len(my_string) + len(sentence) <= length:
            if my_string:
                my_string += " " + sentence
            else:
                my_string = sentence
        else:
            # Flush the filled buffer, then start a NEW chunk with the current
            # sentence instead of discarding it (the old code dropped it).
            if my_string:
                my_sentences.append(my_string)
            # A single sentence longer than `length` is hard-truncated so the
            # "chunks are at most `length` chars" invariant still holds.
            my_string = sentence if len(sentence) <= length else sentence[:length]
    # Flush the trailing partial chunk (previously lost whenever at least one
    # chunk had already been emitted).
    if my_string:
        my_sentences.append(my_string)
    return my_sentences or [my_string]
# Curated subset of LAION OIG parquet shards judged useful for instruct tuning;
# commented-out entries were reviewed and deliberately excluded.
useful_oig_files = ['unified_rallio_safety_and_prosocial.jsonl.parquet',
                    'unified_chip2.jsonl.parquet',
                    'unified_cuad.jsonl.parquet',
                    'unified_essays.jsonl.parquet',
                    'unified_flan.jsonl.gz.parquet',
                    'unified_grade_school_math_instructions.jsonl.parquet',
                    'unified_hc3_human.jsonl.parquet',
                    'unified_mathqa_flanv2_kojma_cot.jsonl.parquet',
                    'unified_merged_code_xp3.jsonl.parquet',
                    'unified_multi_news.jsonl.parquet',
                    # 'unified_multi_sum.jsonl.parquet'
                    'unified_ni.jsonl.gz.parquet',
                    'unified_openai_summarize_tldr.jsonl.parquet',
                    # 'unified_oscar_en_sample_dialog.jsonl.parquet', # create text containing these N words, not specific
                    'unified_plot_screenplay_books_dialog.jsonl.parquet',
                    'unified_soda_dialog.jsonl.parquet',
                    'unified_unnatural_instructions.jsonl.parquet',
                    ]
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def test_assemble_and_detox():
    """Merge the useful OIG shards into one parquet, re-chunking each dialog
    into ~2kB human/bot interactions and dropping rows flagged as profane.

    Side effect: writes h2oGPT.cleaned.human_bot.shorter.parquet in the CWD.
    """
    import re
    from profanity_check import predict_prob
    df_list = []
    for data in useful_oig_files:
        print("Processing %s" % data, flush=True)
        df = pd.read_parquet(data)
        df = df.reset_index(drop=True)
        # chop up into human/bot interactions of no more than 10kB per row
        text_list = df[['text']].values.ravel().tolist()
        new_text = []
        max_len = 2048  # uber cutoff
        MAX_LEN = 2048 // 2 - 30  # max len per question/answer
        for text in tqdm(text_list):
            human_starts = [m.start() for m in re.finditer('<human>: ', text)]
            if len(human_starts) == 1:
                human_starts = [0, len(text)]  # always go into for loop below
            blurb = ''
            for i in range(len(human_starts) - 1):
                interaction = text[human_starts[i]: human_starts[i + 1]][:max_len]
                blurb += interaction
                if len(blurb) >= MAX_LEN:
                    # NOTE(review): only the first sentence-chunk of the blurb is
                    # kept here; the rest of the accumulated text is discarded.
                    blurb = get_sentences(blurb, length=MAX_LEN)[0]
                    new_text.append(blurb + "\n<human>:")
                    blurb = ''
            if blurb:
                # flush the trailing partial interaction
                blurb = get_sentences(blurb, length=MAX_LEN)[0]
                new_text.append(blurb + "\n<human>:")
        if len(new_text) > len(text_list):
            print("Added %d new rows (before: %d)" % (len(new_text) - df.shape[0], df.shape[0]))
        df = pd.DataFrame({"text": new_text, "source": [data] * len(new_text)})
        df = df.drop_duplicates(keep='first')
        print(df['text'].apply(lambda x: len(x)).describe())
        assert df['text'].apply(lambda x: len(x)).max() <= 2 * max_len
        # faster than better_profanity, do early
        df['profanity'] = predict_prob(df['text'])
        before_rows = df.shape[0]
        df = df[df['profanity'] < 0.25]  # drop any low quality stuff
        after_rows = df.shape[0]
        print("Dropped %d rows out of %d due to alt-profanity-check" % (before_rows - after_rows, before_rows))
        df_list.append(df)
        print("Done processing %s -> %s rows" % (data, df.shape[0]), flush=True)
        print("So far have %d rows" % sum([len(x) for x in df_list]))
    df_final = pd.concat(df_list)
    # fixed seed so the shuffled output is reproducible
    df_final = df_final.sample(frac=1, random_state=1234).reset_index(drop=True)
    df_final.to_parquet('h2oGPT.cleaned.human_bot.shorter.parquet', index=False)
166,871 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
# Curated subset of LAION OIG parquet shards judged useful for instruct tuning;
# commented-out entries were reviewed and deliberately excluded.
useful_oig_files = ['unified_rallio_safety_and_prosocial.jsonl.parquet',
                    'unified_chip2.jsonl.parquet',
                    'unified_cuad.jsonl.parquet',
                    'unified_essays.jsonl.parquet',
                    'unified_flan.jsonl.gz.parquet',
                    'unified_grade_school_math_instructions.jsonl.parquet',
                    'unified_hc3_human.jsonl.parquet',
                    'unified_mathqa_flanv2_kojma_cot.jsonl.parquet',
                    'unified_merged_code_xp3.jsonl.parquet',
                    'unified_multi_news.jsonl.parquet',
                    # 'unified_multi_sum.jsonl.parquet'
                    'unified_ni.jsonl.gz.parquet',
                    'unified_openai_summarize_tldr.jsonl.parquet',
                    # 'unified_oscar_en_sample_dialog.jsonl.parquet', # create text containing these N words, not specific
                    'unified_plot_screenplay_books_dialog.jsonl.parquet',
                    'unified_soda_dialog.jsonl.parquet',
                    'unified_unnatural_instructions.jsonl.parquet',
                    ]
# Role prefixes used throughout the human/bot prompt format.
human = '<human>:'
bot = '<bot>:'
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def test_basic_cleaning():
    """First-pass cleaning of the useful OIG shards: drop profane rows and rows
    whose average human/bot turn length falls outside sane bounds.

    Side effect: writes h2oGPT.cleaned.human_bot.parquet in the CWD.
    """
    # from better_profanity import profanity
    # https://pypi.org/project/alt-profanity-check/
    from profanity_check import predict
    df_list = []
    for data in useful_oig_files:
        # for data in useful_oig_files[:5]:
        # for data in ['unified_openai_summarize_tldr.jsonl.parquet']:
        print("Processing %s" % data, flush=True)
        df = pd.read_parquet(data)
        df = df.reset_index(drop=True)
        # NOTE: Not correct if multiple human-bot interactions, but those dialogs even more desired
        # avg_chars = len(df['text'][0])/(df['text'][0].count(human)+df['text'][0].count(bot))
        df['avg_words'] = df['text'].apply(lambda x: x.count(' ') / (x.count(human) + x.count(bot)) / 2.0)
        # NOTE(review): assumes every row contains at least one bot turn, else IndexError/ZeroDivisionError
        df['avg_bot_words'] = df['text'].apply(lambda x: x.split(bot)[1].count(' ') / x.count(bot))
        # df['bad_words'] = df['text'].apply(lambda x: profanity.contains_profanity(x))
        # low_quality_patterns = ['Write the rest of this wikipedia article']
        res = predict(df['text'])
        df['bad_words'] = res
        df = df.reset_index(drop=True)
        df = df[df['bad_words'] == 0]
        df = df[['text', 'avg_words', 'avg_bot_words']]
        df = df.drop_duplicates(keep='first')
        print(df[df['avg_words'] == df['avg_words'].max()]['text'].values)
        # median is computed BEFORE the length filters below and reused for the
        # bot-word threshold, so the filter order here matters
        median_words = np.median(df['avg_words'])
        min_words_per_entity = max(30, 0.8 * median_words)
        max_words_per_entity = 2048  # too hard to learn from for now
        df = df[df['avg_words'] > min_words_per_entity]
        df = df[df['avg_words'] < max_words_per_entity]
        min_words_per_entity = max(20, 0.5 * median_words)  # bot should say stuff for now
        max_words_per_entity = 2048  # too hard to learn from for now
        df = df[df['avg_bot_words'] > min_words_per_entity]
        df = df[df['avg_bot_words'] < max_words_per_entity]
        df_list.append(df)
        print("Done processing %s -> %s rows" % (data, df.shape[0]), flush=True)
    df_final = pd.concat(df_list)
    df_final.to_parquet('h2oGPT.cleaned.human_bot.parquet', index=False)
166,872 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def count_human_bot_lengths(df, human=None, bot=None):
def test_chop_by_lengths():
    """Filter the detoxed parquet by human/bot message-length statistics,
    probabilistically keeping some shorter rows via uniform random draws.

    Depends on count_human_bot_lengths() to add the len_* columns.
    Side effect: writes h2oGPT.cleaned.chopped.human_bot.shorter.parquet.
    """
    file = "h2oGPT.cleaned.human_bot.shorter.parquet"
    df = pd.read_parquet(file).reset_index(drop=True)
    df = count_human_bot_lengths(df)
    # rand/rand2 drive the "keep short rows with some probability" filters below
    df['rand'] = np.random.rand(df.shape[0])
    df['rand2'] = np.random.rand(df.shape[0])
    before_rows = df.shape[0]
    # throw away short human/bot responses with higher likelihood
    df = df[(df['len_human_mean'] > 20)]  # never keep very short ones
    df = df[(df['len_human_mean'] > 30) | (df['rand'] < 0.2)]
    df = df[(df['len_human_mean'] > 50) | (df['rand'] < 0.5)]
    df = df[(df['len_human_max'] < 10000)]  # drop super long (basically only human) ones
    df = df[(df['len_bot_mean'] > 20)]  # never keep very short ones
    df = df[(df['len_bot_mean'] > 30) | (df['rand2'] < 0.2)]
    df = df[(df['len_bot_mean'] > 50) | (df['rand2'] < 0.5)]
    df = df[(df['len_bot_max'] < 10000)]  # drop super long (only bot) ones
    assert df['text'].apply(lambda x: len(x)).max() < 20000
    df = df.drop(['rand', 'rand2'], axis=1)
    after_rows = df.shape[0]
    print("Chopped off %d out of %d rows due to length" % (before_rows - after_rows, before_rows))
    print(df.describe())
    df.to_parquet('h2oGPT.cleaned.chopped.human_bot.shorter.parquet', index=False)
166,873 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def add_better_profanity_flag(df):
    """Add a boolean 'better_profanity' column, computed in parallel over 'text'.

    Mutates and returns ``df``.
    """
    from better_profanity import profanity

    def _is_profane(text):
        return profanity.contains_profanity(text)

    df['better_profanity'] = parallel_apply(df['text'], _is_profane, n_jobs=-1)
    return df
def add_textstat_grade(df):
    """Add a 'flesch_grade' column: the Flesch-Kincaid reading grade of 'text'.

    Computed in parallel across all available cores.
    Mutates and returns ``df``.
    """
    import textstat

    def myfunc(x):
        return textstat.flesch_kincaid_grade(x)  # simple grade

    # The old `if False:` dask branch and `if True:` wrapper were dead
    # benchmarking scaffolding; parallel_apply was the chosen fast path.
    df['flesch_grade'] = parallel_apply(df['text'], myfunc, n_jobs=-1)
    return df
def add_deberta_grade(df):
    """Score each (question, answer) pair in df with the OpenAssistant DeBERTa
    reward model, storing the result in a new 'grade_deberta' column.

    Progress is checkpointed to a per-host pickle so an interrupted run can
    resume; CUDA OOMs are handled by halving the micro-batch and retrying.
    Also adds 'question'/'answer' columns. Mutates and returns ``df``.
    """
    from transformers import AutoModelForSequenceClassification, AutoTokenizer
    import torch
    reward_name = "OpenAssistant/reward-model-deberta-v3-large-v2"
    # NOTE(review): rank_model/tokenizer are loaded and moved to the device but
    # not referenced afterwards -- scoring goes through the pipeline() below.
    rank_model, tokenizer = AutoModelForSequenceClassification.from_pretrained(
        reward_name), AutoTokenizer.from_pretrained(reward_name)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    rank_model.to(device)
    def get_question(x):
        # text before the first bot turn, with the human prefix stripped
        return x.replace('<human>: ', '').split('<bot>:')[0]
    def get_answer(x):
        # first bot turn; the bare except falls back to the no-space '<bot>:' variant
        try:
            answer = x.split('<bot>: ')[1].split('<human>:')[0].replace('<bot>: ', '')
        except:
            answer = x.split('<bot>:')[1].split('<human>:')[0].replace('<bot>:', '')
        return answer
    df['question'] = parallel_apply(df['text'], get_question, n_jobs=-1)
    df['answer'] = parallel_apply(df['text'], get_answer, n_jobs=-1)
    from datasets import Dataset
    from transformers import pipeline
    from transformers.pipelines.pt_utils import KeyPairDataset
    import tqdm
    pipe = pipeline(
        "text-classification",
        model=reward_name,
        device="cuda:0" if torch.cuda.is_available() else "cpu"
    )
    start = 0
    batch_size = 64 * 16  # rows processed between checkpoints
    micro_batch = orig_micro_batch = 16  # rows per forward pass
    end = 0
    import socket
    # per-host checkpoint file so concurrent machines don't clobber each other
    checkpoint = "grades.%s.pkl" % socket.gethostname()
    grades = []
    import pickle
    if os.path.exists(checkpoint):
        # resume from a previous partial run
        with open(checkpoint, "rb") as f:
            start, grades = pickle.loads(f.read())
    last_oom = 0
    while end < df.shape[0]:
        # manual batching to handle OOM more gracefully
        end = min(start + batch_size, df.shape[0])
        if start == end:
            break
        dataset = Dataset.from_pandas(df.iloc[start:end, :])
        try:
            grades.extend([
                x['score'] for x in tqdm.tqdm(
                    pipe(KeyPairDataset(dataset, "question", "answer"), batch_size=micro_batch)
                )
            ])
        except torch.cuda.OutOfMemoryError:
            # shrink the micro-batch and retry the same [start, end) window
            last_oom = start
            micro_batch = max(1, micro_batch // 2)
            print("OOM - retrying with micro_batch=%d" % micro_batch)
            continue
        if last_oom == start:
            # the reduced micro-batch succeeded; restore the full size
            micro_batch = orig_micro_batch
            print("Returning to micro_batch=%d" % micro_batch)
        assert len(grades) == end
        start = end
        with open(checkpoint, "wb") as f:
            f.write(pickle.dumps((end, grades)))
        print("%d/%d" % (end, df.shape[0]))
    df['grade_deberta'] = grades
    if os.path.exists(checkpoint):
        os.remove(checkpoint)
    return df
def test_grade():
    """Run the staged grading pipeline; each stage is cached to its own parquet
    so reruns skip completed stages:
      1. Flesch-Kincaid grade filter (textstat)
      2. better_profanity flag filter
      3. DeBERTa reward-model grade filter
    Writes h2oGPT.cleaned.graded.human_bot.shorter.parquet at the end.
    """
    df = None
    file = "h2oGPT.cleaned.chopped.human_bot.shorter.parquet"
    output_file = "h2oGPT.cleaned.graded1.human_bot.shorter.parquet"
    if not os.path.exists(output_file):
        # stage 1: keep rows whose reading grade lies in [10, 25]
        if df is None:
            df = pd.read_parquet(file).reset_index(drop=True)
        df = add_textstat_grade(df)
        min_grade = 10
        max_grade = 25
        df = df[df['flesch_grade'] >= min_grade]
        df = df[df['flesch_grade'] <= max_grade]
        print("After Flesch grade")
        print(df.describe())
        df.to_parquet(output_file, index=False)
    file = output_file
    output_file = "h2oGPT.cleaned.graded2.human_bot.shorter.parquet"
    if not os.path.exists(output_file):
        # slower than alt-profanity, do last, but do before deberta grading, since that's slower
        if df is None:
            df = pd.read_parquet(file).reset_index(drop=True)
        df = add_better_profanity_flag(df)
        before_rows = df.shape[0]
        df = df[df['better_profanity'] == 0]
        df = df.drop(['better_profanity'], axis=1)
        after_rows = df.shape[0]
        print("Dropped %d rows out of %d due to better_profanity" % (before_rows - after_rows, before_rows))
        print(df.describe())
        df.to_parquet(output_file, index=False)
    file = output_file
    output_file = 'h2oGPT.cleaned.graded3.human_bot.shorter.parquet'
    if not os.path.exists(output_file):
        # stage 3: keep rows the reward model scores at >= 0.3
        if df is None:
            df = pd.read_parquet(file).reset_index(drop=True)
        df = add_deberta_grade(df)
        min_grade = 0.3
        max_grade = np.inf
        before_rows = df.shape[0]
        df = df[df['grade_deberta'] >= min_grade]
        df = df[df['grade_deberta'] <= max_grade]
        after_rows = df.shape[0]
        print("Dropped %d rows out of %d due to deberta grade" % (before_rows - after_rows, before_rows))
        print("After DeBERTa grade")
        print(df.describe())
        df.to_parquet(output_file, index=False)
    file = output_file
    output_file = 'h2oGPT.cleaned.graded.human_bot.shorter.parquet'
    if df is None:
        df = pd.read_parquet(file).reset_index(drop=True)
    df.to_parquet(output_file, index=False)
166,874 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def add_deberta_grade(df):
    """Score each (question, answer) pair in df with the OpenAssistant DeBERTa
    reward model, storing the result in a new 'grade_deberta' column.

    Progress is checkpointed to a per-host pickle so an interrupted run can
    resume; CUDA OOMs are handled by halving the micro-batch and retrying.
    Also adds 'question'/'answer' columns. Mutates and returns ``df``.
    """
    from transformers import AutoModelForSequenceClassification, AutoTokenizer
    import torch
    reward_name = "OpenAssistant/reward-model-deberta-v3-large-v2"
    # NOTE(review): rank_model/tokenizer are loaded and moved to the device but
    # not referenced afterwards -- scoring goes through the pipeline() below.
    rank_model, tokenizer = AutoModelForSequenceClassification.from_pretrained(
        reward_name), AutoTokenizer.from_pretrained(reward_name)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    rank_model.to(device)
    def get_question(x):
        # text before the first bot turn, with the human prefix stripped
        return x.replace('<human>: ', '').split('<bot>:')[0]
    def get_answer(x):
        # first bot turn; the bare except falls back to the no-space '<bot>:' variant
        try:
            answer = x.split('<bot>: ')[1].split('<human>:')[0].replace('<bot>: ', '')
        except:
            answer = x.split('<bot>:')[1].split('<human>:')[0].replace('<bot>:', '')
        return answer
    df['question'] = parallel_apply(df['text'], get_question, n_jobs=-1)
    df['answer'] = parallel_apply(df['text'], get_answer, n_jobs=-1)
    from datasets import Dataset
    from transformers import pipeline
    from transformers.pipelines.pt_utils import KeyPairDataset
    import tqdm
    pipe = pipeline(
        "text-classification",
        model=reward_name,
        device="cuda:0" if torch.cuda.is_available() else "cpu"
    )
    start = 0
    batch_size = 64 * 16  # rows processed between checkpoints
    micro_batch = orig_micro_batch = 16  # rows per forward pass
    end = 0
    import socket
    # per-host checkpoint file so concurrent machines don't clobber each other
    checkpoint = "grades.%s.pkl" % socket.gethostname()
    grades = []
    import pickle
    if os.path.exists(checkpoint):
        # resume from a previous partial run
        with open(checkpoint, "rb") as f:
            start, grades = pickle.loads(f.read())
    last_oom = 0
    while end < df.shape[0]:
        # manual batching to handle OOM more gracefully
        end = min(start + batch_size, df.shape[0])
        if start == end:
            break
        dataset = Dataset.from_pandas(df.iloc[start:end, :])
        try:
            grades.extend([
                x['score'] for x in tqdm.tqdm(
                    pipe(KeyPairDataset(dataset, "question", "answer"), batch_size=micro_batch)
                )
            ])
        except torch.cuda.OutOfMemoryError:
            # shrink the micro-batch and retry the same [start, end) window
            last_oom = start
            micro_batch = max(1, micro_batch // 2)
            print("OOM - retrying with micro_batch=%d" % micro_batch)
            continue
        if last_oom == start:
            # the reduced micro-batch succeeded; restore the full size
            micro_batch = orig_micro_batch
            print("Returning to micro_batch=%d" % micro_batch)
        assert len(grades) == end
        start = end
        with open(checkpoint, "wb") as f:
            f.write(pickle.dumps((end, grades)))
        print("%d/%d" % (end, df.shape[0]))
    df['grade_deberta'] = grades
    if os.path.exists(checkpoint):
        os.remove(checkpoint)
    return df
def create_personality_data(prompt_type="llama2"):
    """Build the h2oGPT "personality" Q/A rows and persist them as
    h2ogpt-personality.json (side effect). Returns the list of row dicts."""
    import itertools
    questions = [
        "What's your name?",
        "What is your name?",
        "What are you?",
        "Who are you?",
        "Do you have a name?",
        "Who trained you?",
        "Who created you?",
        "Who made you?",
    ]
    answers = [
        "I'm h2oGPT, a large language model by H2O.ai.",
        "I'm h2oGPT, a large language model by H2O.ai, the visionary leader in democratizing AI.",
        "My name is h2oGPT. I'm a large language model by H2O.ai, the visionary leader in democratizing AI.",
        "My name is h2oGPT. I'm a large language model trained by H2O.ai.",
        "Hi! I'm h2oGPT, a large language model by H2O.ai.",
        "Hi! I'm h2oGPT, a large language model by H2O.ai, the visionary leader in democratizing AI.",
    ]
    suffixes = [
        "",
        " How can I help you?",
        " How may I assist you?",
        " Nice to meet you.",
    ]
    # one row per (question, answer, greeting-suffix) combination
    rows = [
        dict(input=q, output=a + s, prompt_type=prompt_type, source="H2O.ai")
        for q, a, s in itertools.product(questions, answers, suffixes)
    ]
    democratize = "H2O.ai is a technology company that aims to democratize AI and make it accessible to a broader audience by simplifying the process of creating and deploying machine learning models."
    visionary = "H2O.ai is the visionary leader in democratizing AI."
    company_qa = [
        ("What is H2O.ai?", democratize),
        ("What is h2o.ai?", democratize),
        ("What is H2O?", democratize),
        ("Who is h2o.ai?", democratize),
        ("who is h2o.ai?", democratize),
        ("who is h2o?", democratize),
        ("what is H2O.ai?", visionary),
        ("who is H2O.ai?", visionary),
        ("who is H2O?", visionary),
        ("Who is h20?", visionary),
    ]
    rows.extend(
        dict(input=q, output=a, prompt_type=prompt_type, source='H2O.ai')
        for q, a in company_qa
    )
    print(len(rows))
    with open("h2ogpt-personality.json", "w") as f:
        f.write(json.dumps(rows, indent=2))
    return rows
def get_unhelpful_list():
    """Return the list of "unhelpful response" phrases used as substring filters
    to drop low-value assistant turns.

    Fixes two spots where missing commas caused implicit string-literal
    concatenation, merging adjacent phrases into one string that could never
    match (so those filters were silently inert).
    """
    # base versions
    unhelpful = ["I'm sorry, I didn't quite understand your question, could you please rephrase it?",
                 "I'm sorry, but I don't understand your question. Could you please rephrase it?",
                 "I'm sorry, I don't quite understand your question",
                 "I'm sorry, I don't know",
                 "I'm sorry, but I don't know",
                 "I don't know anything",
                 "I do not know",
                 "I don't know",
                 "I don't know how",
                 "I do not know how",
                 "Can you please explain what you mean",
                 "please explain what you mean",
                 "please explain",
                 "I'm sorry, but I don't know how to tell a story. Can you please explain what you mean by",
                 "I'm sorry but I don't understand what you mean",
                 "I don't understand",
                 "I don't have the ability",
                 "I do not have the ability",
                 "I do not have",
                 "I am a language model,",
                 "I am a large language model,",
                 "I do not understand your question. Can you please try to make it clearer?",
                 "I'm sorry, but as an AI language model",
                 "I apologize, but I cannot rephrase text that I cannot understand. Your post is difficult to read and follow.",
                 "I apologize, but I am not h2oGPT. I am a language model developed by H2O.ai. How may I help you?",
                 "Sorry, but I am not an actual Linux shell, nor am I capable of emulating one. I am an open source chat assistant and would be glad t",
                 "I apologize, but I cannot perform the task you have requested.",
                 "I'm sorry, I cannot perform this task as I am an AI language model and do not have access",
                 "I'm sorry, I'm not sure what you're asking for here.",
                 "I'm not sure what you are asking",
                 "You need to provide more context",
                 ]
    # reduced versions, with redundant parts, just to give context for where they came from
    unhelpful += ["sorry, I didn't quite understand your question",
                  "I didn't quite understand your question",
                  "I didn't understand your question",
                  "I did not understand your question",
                  "I did not understand the question",
                  # commas added below: these were previously concatenated into
                  # one never-matching string by implicit literal concatenation
                  "could you please rephrase",
                  "could you rephrase",
                  "I do not understand your question.",
                  "I do not understand the question.",
                  "I do not understand that question.",
                  "Can you please try to make it clearer",
                  "Can you try to make it clearer",
                  "sorry, but as an AI language model",
                  "as an AI language model",
                  "I apologize, but I cannot",
                  "I cannot rephrase text",
                  # commas added below: same implicit-concatenation bug as above
                  "I cannot understand. Your post is difficult to read and follow.",
                  "Your post is difficult to read and follow.",
                  "I apologize, but I am",
                  "Sorry, but I am not ",
                  "nor am I capable",
                  "I am not capable of",
                  "I apologize, but I cannot perform the task you have requested",
                  "I cannot perform the task",
                  "I cannot complete the task",
                  "I'm sorry",
                  "I am sorry",
                  "do not have access",
                  "not sure what you're asking for",
                  "not sure what you are asking for",
                  "not sure what is being asked",
                  "I'm not sure what you are asking",
                  "not sure what you are asking",
                  "You need to provide more context",
                  "provide more context",
                  ]
    unhelpful += ["As a large language model",
                  "cannot provide any information",
                  "As an artificial intelligence I do not have the capability",
                  "As an artificial intelligence I don't have the capability",
                  "As an artificial intelligence I can't",
                  "As an artificial intelligence I cannot",
                  "I am sorry but I do not understand",
                  "Can you please explain",
                  "(sorry couldn't resist)",
                  "(sorry could not resist)",
                  " :)",
                  " ;)",
                  " :-)",
                  " ;-)",
                  " lol ",
                  "Thanks so much!!!",
                  "Thank You :)!!!",
                  "Please try not to repeat",
                  "I am an AI language model",
                  "I'm a AI assistant that",
                  "I'm an AI assistant that",
                  "I am an AI assistant that",
                  "etc.",
                  "etc.etc.",
                  "etc. etc.",
                  "etc etc",
                  ]
    return unhelpful
The provided code snippet includes necessary dependencies for implementing the `test_add_open_assistant` function. Write a Python function `def test_add_open_assistant(fixup_personality, only_personality, deberta_grading, prompt_type, save_json=True)` to solve the following problem:
Flatten tree structure into one row per path from root to leaf Also turn into human_bot prompting format: <human>: question\n<bot>: answer <human>: question2\n<bot>: answer2 Etc. Also saves a .json locally as side-effect returns list of dicts, containing input, prompt_type and source
Here is the function:
def test_add_open_assistant(fixup_personality, only_personality, deberta_grading, prompt_type, save_json=True):
    """
    Flatten tree structure into one row per path from root to leaf
    Also turn into human_bot prompting format:
    <human>: question\n<bot>: answer <human>: question2\n<bot>: answer2 Etc.
    Also saves a .json locally as side-effect
    returns list of dicts, containing input, prompt_type and source
    """
    from datasets import load_dataset
    data_file = "OpenAssistant/oasst1"
    ds = load_dataset(data_file)
    df = pd.concat([ds['train'].to_pandas(), ds['validation'].to_pandas()], axis=0)
    # rows: message_tree_id -> list of {message_id, parent_id, text} entries
    rows = {}
    message_ids = df['message_id'].values.tolist()
    message_tree_ids = df['message_tree_id'].values.tolist()
    parent_ids = df['parent_id'].values.tolist()
    texts = df['text'].values.tolist()
    roles = df['role'].values.tolist()
    deleteds = df['deleted'].values.tolist()
    for i in range(df.shape[0]):
        # collect all trees
        message_id = message_ids[i]
        message_tree_id = message_tree_ids[i]
        parent_id = parent_ids[i]
        text = texts[i]
        deleted = deleteds[i]
        if deleted:
            continue
        if fixup_personality:
            # rebrand Open Assistant / LAION mentions (incl. common misspellings)
            text = text.replace("Open Assistant", "h2oGPT")
            text = text.replace("Open-Assistant", "h2oGPT")
            text = text.replace("open-assistant", "h2oGPT")
            text = text.replace("OpenAssistant", "h2oGPT")
            text = text.replace("open assistant", "h2oGPT")
            text = text.replace("Open Assistand", "h2oGPT")
            text = text.replace("Open Assitant", "h2oGPT")
            text = text.replace("Open Assistent", "h2oGPT")
            text = text.replace("Open Assisstant", "h2oGPT")
            text = text.replace("Open Assitent", "h2oGPT")
            text = text.replace("Open Assitiant", "h2oGPT")
            text = text.replace("Open Assistiant", "h2oGPT")
            text = text.replace("Open Assitan ", "h2oGPT ")
            text = text.replace("Open Assistan ", "h2oGPT ")
            text = text.replace("Open Asistant", "h2oGPT")
            text = text.replace("Open Assiant", "h2oGPT")
            text = text.replace("Assistant", "h2oGPT")
            text = text.replace("LAION AI", "H2O.ai")
            text = text.replace("LAION-AI", "H2O.ai")
            text = text.replace("LAION,", "H2O.ai,")
            text = text.replace("LAION.ai", "H2O.ai")
            text = text.replace("LAION.", "H2O.ai.")
            text = text.replace("LAION", "H2O.ai")
        role = roles[i]
        if prompt_type == "llama2":
            new_data = ('[INST] ' if role == 'prompter' else ' [/INST] ') + text
            if parent_id and role == 'prompter':
                new_data = " " + new_data
        elif prompt_type == "human_bot":
            new_data = ('<human>: ' if role == 'prompter' else '<bot>: ') + text
        else:
            raise NotImplementedError("prompt_type not supported")
        entry = dict(message_id=message_id, parent_id=parent_id, text=new_data)
        if message_tree_id not in rows:
            rows[message_tree_id] = [entry]
        else:
            rows[message_tree_id].append(entry)
    all_rows = []
    for node_id in rows:
        # order responses in tree, based on message/parent relationship
        conversations = []
        list_msgs = rows[node_id]
        # find start
        while len(list_msgs):
            for i, leaf in enumerate(list_msgs):
                found = False
                parent_id = leaf['parent_id']
                if parent_id is None:
                    # conversation starter
                    conversations.append(leaf)
                    found = True
                else:
                    # message_id is a concatenation of the chain of ids, so
                    # suffix checks below test "is my parent the last turn"
                    for conv in conversations:
                        # find all conversations to add my message to
                        if parent_id in conv['message_id'] and parent_id != conv['message_id'][-len(parent_id):]:
                            # my message doesn't follow conversation
                            continue
                        if parent_id == conv['message_id'][-len(parent_id):]:
                            # my message follows conversation, but fork first, so another follow-on message can do same
                            conversations.append(conv.copy())
                            if prompt_type == "llama2":
                                conv['text'] += f"""{leaf['text']}"""
                            elif prompt_type == "human_bot":
                                conv['text'] += f"""
{leaf['text']}
"""
                            else:
                                raise NotImplementedError
                            conv['message_id'] += leaf['message_id']
                            found = True
                            break
                if found:
                    # my content was used, so nuke from list
                    del list_msgs[i]
                    break
        # now reduce down to final conversations, find the longest chains of message ids
        for i, conv in enumerate(conversations):
            for j, conv2 in enumerate(conversations):
                if i == j:
                    continue
                if conv['message_id'] and conv2['message_id']:
                    assert conv['message_id'] != conv2['message_id']
                    # delete the shorter conversation, if one contains the other
                    # NOTE(review): if the first branch fires, conv['message_id']
                    # becomes None and the second `in` test would raise TypeError
                    # ("argument of type 'NoneType' is not iterable") -- verify
                    # whether that case can occur in practice.
                    if conv['message_id'] in conv2['message_id']:
                        conv['message_id'] = None
                    if conv2['message_id'] in conv['message_id']:
                        conv2['message_id'] = None
        conversations = [c for c in conversations if c['message_id']]
        if only_personality:
            # keep only conversations that mention h2oGPT (post-rebranding)
            if prompt_type == "human_bot":
                all_rows.extend(
                    [dict(input=c['text'] + "\n<human>:", output="", prompt_type='plain', source=data_file) for c in conversations if
                     'h2oGPT' in c['text']])
            elif prompt_type == "llama2":
                all_rows.extend(
                    [dict(input=c['text'] +
                               ("" if c['text'].rfind("[/INST]") > c['text'].rfind("[INST]") else " [/INST]"),
                          output="", prompt_type='plain', source=data_file) for c in conversations if
                     'h2oGPT' in c['text']])
            else:
                raise NotImplementedError
        else:
            # keep everything except the canned "What is H2O.ai" rows
            if prompt_type == "human_bot":
                all_rows.extend(
                    [dict(input=c['text'] + "\n<human>:", output="", prompt_type='plain', source=data_file) for c in conversations
                     if
                     "What is H2O.ai" not in c['text']])
            elif prompt_type == "llama2":
                all_rows.extend(
                    [dict(input=c['text'] +
                               (" " if c['text'].rfind("[/INST]") > c['text'].rfind("[INST]") else " [/INST]"),
                          output="", prompt_type='plain', source=data_file) for c in conversations if
                     "What is H2O.ai" not in c['text']])
            else:
                raise NotImplementedError
    # drop rows containing any known "unhelpful assistant" phrase
    unhelpful = get_unhelpful_list()
    all_rows = [x for x in all_rows if not any(u in x['input'] for u in unhelpful)]
    # oversample the personality rows 10x, then shuffle deterministically
    personality = create_personality_data(prompt_type=prompt_type)
    all_rows.extend(personality * 10)
    np.random.seed(123)
    np.random.shuffle(all_rows)
    print(len(all_rows))
    if deberta_grading:
        df = pd.DataFrame(all_rows)
        df = df.rename(columns={'input': 'text'})
        df = add_deberta_grade(df)
        df = df.rename(columns={'text': 'input'})
        drop = True
        if drop:
            min_grade = 0.3
            max_grade = np.inf
            before_rows = df.shape[0]
            df = df[df['grade_deberta'] >= min_grade]
            df = df[df['grade_deberta'] <= max_grade]
            after_rows = df.shape[0]
            print("Dropped %d rows out of %d due to deberta grade" % (before_rows - after_rows, before_rows))
            print("After DeBERTa grade")
            print(df.describe())
        all_rows = []
        for i in range(df.shape[0]):
            all_rows.append(
                dict(
                    input=df['input'].iloc[i],
                    output=df['output'].iloc[i],
                    source=df['source'].iloc[i],
                    prompt_type=df['prompt_type'].iloc[i],
                    grade_deberta=df['grade_deberta'].iloc[i],
                )
            )
    if save_json:
        # output filename encodes which processing options were enabled
        data_file = data_file + \
                    ("_h2ogpt" if fixup_personality else "") + \
                    ("_only" if only_personality else "") + \
                    ("_graded" if deberta_grading else "") + \
                    ("_llama2_chat" if prompt_type == "llama2" else "")
        for i in range(len(all_rows)):
            all_rows[i]['id'] = i
        with open(data_file.lower().replace("/", "_") + ".json", "w") as f:
            f.write(json.dumps(all_rows, indent=2))
    return all_rows
166,875 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def parallel_apply(df, func, n_jobs=-1, **kwargs):
    """Apply *func* over a pandas object in parallel via joblib.

    The input is partitioned into evenly sized slices (sklearn utilities),
    each slice is processed by ``df.apply`` in a worker, and the partial
    results are concatenated back together.

    Args:
        df: Pandas DataFrame, Series, or any object supporting slicing and apply.
        func: Callable to apply to each element/row.
        n_jobs: Desired number of workers; -1 means all available cores.
        **kwargs: Forwarded to the underlying ``apply`` call.

    Returns:
        Same as a plain ``df.apply(func, **kwargs)``.
    """
    n_workers = effective_n_jobs(n_jobs)
    if n_workers == 1:
        # Nothing to parallelize; avoid joblib overhead entirely.
        return df.apply(func, **kwargs)
    slices = gen_even_slices(_num_samples(df), n_workers)
    partials = Parallel(n_jobs=n_jobs)(
        delayed(type(df).apply)(df[sl], func, **kwargs) for sl in slices
    )
    return pd.concat(partials)
def get_unhelpful_list():
    """Return substrings that identify unhelpful / refusal bot responses.

    Rows whose text contains any of these substrings get filtered out of the
    training data.  Matching is plain substring containment, so entries only
    need to be distinctive fragments, not complete sentences.

    NOTE: every entry must end with a comma — a missing comma silently merges
    neighboring literals via implicit string concatenation, producing a bogus
    phrase that never matches (this bug existed here previously and is fixed).

    :return: list of phrase substrings
    """
    # base versions
    unhelpful = ["I'm sorry, I didn't quite understand your question, could you please rephrase it?",
                 "I'm sorry, but I don't understand your question. Could you please rephrase it?",
                 "I'm sorry, I don't quite understand your question",
                 "I'm sorry, I don't know",
                 "I'm sorry, but I don't know",
                 "I don't know anything",
                 "I do not know",
                 "I don't know",
                 "I don't know how",
                 "I do not know how",
                 "Can you please explain what you mean",
                 "please explain what you mean",
                 "please explain",
                 "I'm sorry, but I don't know how to tell a story. Can you please explain what you mean by",
                 "I'm sorry but I don't understand what you mean",
                 "I don't understand",
                 "I don't have the ability",
                 "I do not have the ability",
                 "I do not have",
                 "I am a language model,",
                 "I am a large language model,",
                 "I do not understand your question. Can you please try to make it clearer?",
                 "I'm sorry, but as an AI language model",
                 "I apologize, but I cannot rephrase text that I cannot understand. Your post is difficult to read and follow.",
                 "I apologize, but I am not h2oGPT. I am a language model developed by H2O.ai. How may I help you?",
                 "Sorry, but I am not an actual Linux shell, nor am I capable of emulating one. I am an open source chat assistant and would be glad t",
                 "I apologize, but I cannot perform the task you have requested.",
                 "I'm sorry, I cannot perform this task as I am an AI language model and do not have access",
                 "I'm sorry, I'm not sure what you're asking for here.",
                 "I'm not sure what you are asking",
                 "You need to provide more context",
                 ]
    # reduced versions, with redundant parts, just to give context for where they came from
    # (commas added after "could you please rephrase", "could you rephrase",
    #  "I cannot understand. ..." and "Your post is difficult to read and follow."
    #  — they were previously missing, merging those entries into unusable strings)
    unhelpful += ["sorry, I didn't quite understand your question",
                  "I didn't quite understand your question",
                  "I didn't understand your question",
                  "I did not understand your question",
                  "I did not understand the question",
                  "could you please rephrase",
                  "could you rephrase",
                  "I do not understand your question.",
                  "I do not understand the question.",
                  "I do not understand that question.",
                  "Can you please try to make it clearer",
                  "Can you try to make it clearer",
                  "sorry, but as an AI language model",
                  "as an AI language model",
                  "I apologize, but I cannot",
                  "I cannot rephrase text",
                  "I cannot understand. Your post is difficult to read and follow.",
                  "Your post is difficult to read and follow.",
                  "I apologize, but I am",
                  "Sorry, but I am not ",
                  "nor am I capable",
                  "I am not capable of",
                  "I apologize, but I cannot perform the task you have requested",
                  "I cannot perform the task",
                  "I cannot complete the task",
                  "I'm sorry",
                  "I am sorry",
                  "do not have access",
                  "not sure what you're asking for",
                  "not sure what you are asking for",
                  "not sure what is being asked",
                  "I'm not sure what you are asking",
                  "not sure what you are asking",
                  "You need to provide more context",
                  "provide more context",
                  ]
    unhelpful += ["As a large language model",
                  "cannot provide any information",
                  "As an artificial intelligence I do not have the capability",
                  "As an artificial intelligence I don't have the capability",
                  "As an artificial intelligence I can't",
                  "As an artificial intelligence I cannot",
                  "I am sorry but I do not understand",
                  "Can you please explain",
                  "(sorry couldn't resist)",
                  "(sorry could not resist)",
                  " :)",
                  " ;)",
                  " :-)",
                  " ;-)",
                  " lol ",
                  "Thanks so much!!!",
                  "Thank You :)!!!",
                  "Please try not to repeat",
                  "I am an AI language model",
                  "I'm a AI assistant that",
                  "I'm an AI assistant that",
                  "I am an AI assistant that",
                  "etc.",
                  "etc.etc.",
                  "etc. etc.",
                  "etc etc",
                  ]
    return unhelpful
def test_finalize_to_json():
    """Merge the graded human_bot parquet data with the graded OpenAssistant
    JSON, drop profane and unhelpful rows, and write the final
    h2ogpt-oig-oasst1-instruct-cleaned-v3.json training file.

    Side effects: reads two local data files, writes one JSON file, prints
    progress/statistics to stdout.
    """
    df = pd.read_parquet('h2oGPT.cleaned.graded.human_bot.shorter.parquet')
    df = df.rename(columns={'text': 'input'})
    print("Number of high-quality human_bot interactions: %s" % df.shape[0], flush=True)
    print("Adding open assistant data")
    with open("openassistant_oasst1_h2ogpt_graded.json") as f:
        open_assistant = json.loads(f.read())
    df = pd.concat([df, pd.DataFrame(open_assistant)], axis=0)

    def final_clean(df):
        # Flag rows containing profanity (row-parallel via parallel_apply)
        # and keep only the clean ones.
        from better_profanity import profanity
        profanity.load_censor_words_from_file("data/censor_words.txt")
        df['profanity'] = parallel_apply(
            df['input'],
            lambda x: profanity.contains_profanity(x),
            n_jobs=-1,
        )
        return df[(df['profanity'] == 0)].reset_index(drop=True)

    print("Before cleaning: Number of final high-quality human_bot interactions: %s" % df.shape[0], flush=True)
    df = final_clean(df)
    print("After cleaning: Number of final high-quality human_bot interactions: %s" % df.shape[0], flush=True)
    print(df.describe())
    print(df.shape)
    row_list = []
    for i in range(df.shape[0]):
        row_list.append(
            dict(
                input=df.loc[i, 'input'],
                source=df.loc[i, 'source'],
                prompt_type='plain',
            )
        )
    np.random.seed(1234)
    np.random.shuffle(row_list)
    unhelpful = get_unhelpful_list()
    # Drop any conversation containing a known unhelpful/refusal phrase.
    row_list = [x for x in row_list if not any(u in x['input'] for u in unhelpful)]
    for i in range(len(row_list)):
        row_list[i]['id'] = i
        # Normalize bot-turn markers onto their own line.
        row_list[i]['input'] = row_list[i]['input'].replace(" <bot>:", "\n<bot>:")
    with open('h2ogpt-oig-oasst1-instruct-cleaned-v3.json', "w") as f:
        f.write(json.dumps(row_list, indent=2))
166,876 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def generate_and_tokenize_prompt(data_point, prompt_type=None, train_on_inputs=False, add_eos_token=False,
                                 cutoff_len=None, tokenizer=None):
    """Render the full training prompt for one record and tokenize it.

    When ``train_on_inputs`` is False, the label positions covering the user
    part of the prompt are set to -100 so CrossEntropyLoss ignores them and
    the model is only trained on the response tokens.
    """
    assert prompt_type is not None
    assert cutoff_len is not None
    assert tokenizer is not None
    prompt_dict = ''  # only relevant for the custom prompt_type, unsupported here
    assert prompt_type != PromptType.custom.name, "custom not setup for finetune"
    full_prompt = generate_prompt(data_point, prompt_type, prompt_dict, False, False)[0]
    tokenized_full_prompt = tokenize(full_prompt, tokenizer, cutoff_len, add_eos_token=add_eos_token)
    if train_on_inputs:
        return tokenized_full_prompt
    # Re-render with an empty output to measure how many tokens the user
    # portion of the prompt occupies.
    user_prompt = generate_prompt({**data_point, "output": ""}, prompt_type, prompt_dict, False, False)[0]
    tokenized_user_prompt = tokenize(user_prompt, tokenizer, cutoff_len, add_eos_token=add_eos_token)
    n_user_tokens = len(tokenized_user_prompt["input_ids"])
    if add_eos_token:
        n_user_tokens -= 1
    # ignore_index=-100 ensures torch/tf don't include padding token id in CrossEntropyLoss
    masked_labels = [-100] * n_user_tokens
    tokenized_full_prompt["labels"] = masked_labels + tokenized_full_prompt["labels"][n_user_tokens:]
    return tokenized_full_prompt
def get_loaders(model_name, reward_type, llama_type=None,
                load_gptq='',
                use_autogptq=False,
                load_awq='',
                load_exllama=False,
                config=None,
                rope_scaling=None, max_seq_len=None, model_name_exllama_if_no_config='',
                exllama_dict=None, gptq_dict=None,
                hf_model_dict={},
                ):
    """Pick the (model_loader, tokenizer_loader, conditional_type) triple for a model.

    Selection is driven by the quantization flags (exllama / GPTQ / AWQ) and
    by substring matching on ``model_name``.  Most branches return a
    ``functools.partial`` of a ``from_pretrained`` factory plus a tokenizer
    class; the exllama branch instead returns an already-instantiated
    ``(generator, tokenizer, False)``.  The third element (``conditional_type``)
    is True for the seq2seq-style branches (mbart, t5, bigbird).

    NOTE(review): ``hf_model_dict`` is a mutable default argument; it is only
    read (never mutated) here, but a ``None`` sentinel would be safer.
    """
    # NOTE: Some models need specific new prompt_type
    # E.g. t5_xxl_true_nli_mixture has input format: "premise: PREMISE_TEXT hypothesis: HYPOTHESIS_TEXT".)
    if load_exllama:
        if exllama_dict is None:
            exllama_dict = {}
        from src.llm_exllama import H2OExLlamaTokenizer, H2OExLlamaGenerator
        from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig
        import os, glob
        if config:
            # then use HF path
            from transformers import TRANSFORMERS_CACHE
            model_directory = os.path.join(TRANSFORMERS_CACHE, 'models--' + config.name_or_path.replace('/', '--'),
                                           'snapshots', config._commit_hash)
        else:
            # then use path in env file
            # Directory containing model, tokenizer, generator
            model_directory = model_name_exllama_if_no_config
        # download model
        # NOTE(review): this reads config._commit_hash unconditionally, so the
        # exllama path appears to require a non-None config even when
        # model_name_exllama_if_no_config was used above — confirm.
        revision = config._commit_hash
        from huggingface_hub import snapshot_download
        snapshot_download(repo_id=model_name, revision=revision)
        # Locate files we need within that directory
        tokenizer_path = os.path.join(model_directory, "tokenizer.model")
        assert os.path.isfile(tokenizer_path), "Missing %s" % tokenizer_path
        model_config_path = os.path.join(model_directory, "config.json")
        assert os.path.isfile(model_config_path), "Missing %s" % model_config_path
        st_pattern = os.path.join(model_directory, "*.safetensors")
        model_path = glob.glob(st_pattern)[0]
        assert os.path.isfile(model_path), "Missing %s" % model_path
        # Create config, model, tokenizer and generator
        exconfig = ExLlamaConfig(model_config_path)  # create config from config.json
        rope_scaling = rope_scaling or {}
        exconfig.alpha_value = rope_scaling.get('alpha_value', 1)  # rope
        exconfig.compress_pos_emb = rope_scaling.get('compress_pos_emb', 1)  # related rope
        # update max_seq_len
        assert hasattr(config, 'max_position_embeddings') or hasattr(config,
                                                                     'max_sequence_length'), "Improve code if no such argument"
        if hasattr(config, 'max_position_embeddings'):
            exconfig.max_seq_len = int(config.max_position_embeddings * exconfig.alpha_value)
        else:
            exconfig.max_seq_len = int(config.max_sequence_length * exconfig.alpha_value)
        if 'Llama-2'.lower() in model_name.lower():
            # override bad defaults
            exconfig.max_seq_len = int(4096 * exconfig.alpha_value)
        if max_seq_len is not None:
            # explicit caller-provided max_seq_len wins over everything above
            exconfig.max_seq_len = max_seq_len
        exconfig.model_path = model_path  # supply path to model weights file
        # arbitrary config overrides supplied by the caller
        for k, v in exllama_dict.items():
            setattr(exconfig, k, v)
        if 'set_auto_map' in exllama_dict:
            exconfig.auto_map = [float(alloc) for alloc in exllama_dict['set_auto_map'].split(",")]
        model = ExLlama(exconfig)  # create ExLlama instance and load the weights
        tokenizer = H2OExLlamaTokenizer(tokenizer_path)  # create tokenizer from tokenizer model file
        tokenizer.model_max_length = exconfig.max_seq_len
        cache = ExLlamaCache(model)  # create cache for inference
        generator = H2OExLlamaGenerator(model, tokenizer, cache)  # create generator
        return generator, tokenizer, False
    if load_gptq and use_autogptq:
        if gptq_dict is None:
            gptq_dict = {}
        from transformers import AutoTokenizer
        from auto_gptq import AutoGPTQForCausalLM
        if 'use_triton' not in gptq_dict:
            gptq_dict['use_triton'] = False
        # NOTE(review): the literal contains uppercase characters while the
        # right-hand side is lowercased, so this condition can never be True —
        # likely should compare against 'llama-2-70b-chat-gptq'.
        if 'llama-2-70B-chat-GPTQ' in model_name.lower() and 'inject_fused_attention' not in gptq_dict:
            gptq_dict.update(dict(inject_fused_attention=False))
        model_loader = functools.partial(AutoGPTQForCausalLM.from_quantized,
                                         quantize_config=None,
                                         **gptq_dict,
                                         )
        return model_loader, AutoTokenizer, False
    if load_gptq and not use_autogptq:
        # HF-transformers GPTQ path needs optimum installed; falls through to
        # the generic branches below for the actual loader choice.
        assert have_optimum, "To use HF transformers GPTQ, please: pip install optimum"
    if load_awq:
        from transformers import AutoTokenizer
        from awq import AutoAWQForCausalLM
        model_loader = functools.partial(AutoAWQForCausalLM.from_quantized,
                                         fuse_layers=True,
                                         )
        return model_loader, AutoTokenizer, False
    if llama_type is None:
        # infer llama family from the model name when not given explicitly
        llama_type = "llama" in model_name.lower()
    if llama_type and not load_gptq:
        from transformers import LlamaForCausalLM, LlamaTokenizer
        return functools.partial(LlamaForCausalLM.from_pretrained, **hf_model_dict), LlamaTokenizer, False
    elif 'distilgpt2' in model_name.lower():
        from transformers import AutoModelForCausalLM, AutoTokenizer
        return functools.partial(AutoModelForCausalLM.from_pretrained, **hf_model_dict), AutoTokenizer, False
    elif 'gpt2' in model_name.lower():
        from transformers import GPT2LMHeadModel, GPT2Tokenizer
        return functools.partial(GPT2LMHeadModel.from_pretrained, **hf_model_dict), GPT2Tokenizer, False
    elif 'mbart-' in model_name.lower():
        from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
        return functools.partial(MBartForConditionalGeneration.from_pretrained, **hf_model_dict), MBart50TokenizerFast, True
    elif t5_type(model_name):
        from transformers import AutoTokenizer, T5ForConditionalGeneration
        return functools.partial(T5ForConditionalGeneration.from_pretrained, **hf_model_dict), AutoTokenizer, True
    elif 'bigbird' in model_name:
        from transformers import BigBirdPegasusForConditionalGeneration, AutoTokenizer
        return functools.partial(BigBirdPegasusForConditionalGeneration.from_pretrained, **hf_model_dict), AutoTokenizer, True
    elif 'bart-large-cnn-samsum' in model_name or 'flan-t5-base-samsum' in model_name:
        # summarization models go through the HF pipeline API instead of a class
        from transformers import pipeline
        return pipeline, "summarization", False
    elif reward_type or 'OpenAssistant/reward-model'.lower() in model_name.lower():
        from transformers import AutoModelForSequenceClassification, AutoTokenizer
        return functools.partial(AutoModelForSequenceClassification.from_pretrained, **hf_model_dict), AutoTokenizer, False
    else:
        # generic causal-LM fallback
        from transformers import AutoTokenizer, AutoModelForCausalLM
        model_loader = functools.partial(AutoModelForCausalLM.from_pretrained, **hf_model_dict)
        tokenizer_loader = AutoTokenizer
        return model_loader, tokenizer_loader, False
def get_tokenizer(tokenizer_loader, tokenizer_base_model, local_files_only, resume_download, use_auth_token):
    """Instantiate a tokenizer configured for left-padded batched inference."""
    load_kwargs = dict(
        local_files_only=local_files_only,
        resume_download=resume_download,
        token=use_auth_token,
        padding_side='left',
    )
    tokenizer = tokenizer_loader.from_pretrained(tokenizer_base_model, **load_kwargs)
    tokenizer.pad_token_id = 0  # different from the eos token
    # When generating, the logits of the right-most token predict the next
    # token, so padding must sit on the left to allow batched inference,
    # e.g. see: https://huggingface.co/transformers/v4.11.3/model_doc/t5.html#inference
    tokenizer.padding_side = "left"
    return tokenizer
def test_check_stats_data():
    """Plot character- and token-count histograms for the cleaned dataset.

    Reads h2ogpt-oig-oasst1-instruct-cleaned-v3.json and writes
    chars_hist.png and token_hist_<cutoff>.png as side effects.  Token stats
    use the same tokenization path as fine-tuning, on a 10% train split.
    """
    filename = 'h2ogpt-oig-oasst1-instruct-cleaned-v3.json'
    df = pd.read_json(filename)
    # get word stats
    df['char_count'] = df['input'].apply(lambda x: len(x))
    import matplotlib.pyplot as plt
    plt.figure(figsize=(10, 10))
    plt.hist(df['char_count'], bins=100)
    chars_avg = np.mean(df['char_count'])
    chars_median = np.median(df['char_count'])
    plt.title("char_count avg: %s median: %s" % (chars_avg, chars_median))
    plt.savefig('chars_hist.png')
    plt.close()
    # get tokenize stats for random sample of 1000 rows
    from finetune import generate_and_tokenize_prompt
    from loaders import get_loaders, get_tokenizer
    from functools import partial
    llama_type = False
    tokenizer_base_model = base_model = 'h2oai/h2ogpt-oasst1-512-20b'
    model_loader, tokenizer_loader, conditional_type = (
        get_loaders(model_name=base_model, reward_type=False, llama_type=llama_type))
    local_files_only = False
    resume_download = True
    use_auth_token = False
    tokenizer = get_tokenizer(tokenizer_loader, tokenizer_base_model, local_files_only, resume_download, use_auth_token)
    prompt_type = 'plain'  # trained with data already in human bot form
    train_on_inputs = True
    add_eos_token = False
    cutoff_len = 512  # can choose 2048
    generate_and_tokenize_prompt_fun = partial(generate_and_tokenize_prompt, prompt_type=prompt_type,
                                               train_on_inputs=train_on_inputs, add_eos_token=add_eos_token,
                                               cutoff_len=cutoff_len, tokenizer=tokenizer)
    from datasets import load_dataset
    data = load_dataset("json", data_files={"train": filename})
    # NOTE(review): with test_size=0.90 only 10% of rows land in "train",
    # which is then the set being measured — presumably intentional sampling.
    val_set_size = 0.90
    train_val = data["train"].train_test_split(
        test_size=val_set_size, shuffle=True, seed=42
    )
    train_data = train_val["train"]
    train_data = train_data.shuffle().map(generate_and_tokenize_prompt_fun, num_proc=os.cpu_count())
    df_tokens = pd.DataFrame([len(x) for x in train_data['input_ids']], columns=['token_count'])
    plt.figure(figsize=(10, 10))
    plt.hist(df_tokens['token_count'], bins=100)
    token_avg = np.mean(df_tokens['token_count'])
    token_median = np.median(df_tokens['token_count'])
    plt.title("token_count with cutoff=%s avg: %s median: %s" % (cutoff_len, token_avg, token_median))
    plt.savefig('token_hist_%s.png' % cutoff_len)
    plt.close()
166,877 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
# Turn markers delimiting speaker turns in the plain-text conversation format.
human = '<human>:'
bot = '<bot>:'
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def get_unhelpful_list():
def test_check_unhelpful():
    """Audit a dataset file for known unhelpful/refusal phrases.

    Counts phrase occurrences over the raw records and again over just the
    bot turns, after optional filtering by reward score, BLEU similarity, or
    sentence-embedding cosine similarity, then asserts none remain in the
    bot turns.  Prints the offending phrase counts when any are found.
    """
    # file = '/home/jon/Downloads/openassistant_oasst1_h2ogpt_graded.json'
    file = '/home/jon/Downloads/openassistant_oasst1_h2ogpt_grades.json'
    # file = 'h2ogpt-oig-oasst1-instruct-cleaned-v2.json'
    unhelpful = get_unhelpful_list()
    # data = json.load(open(file, 'rt'))
    df = pd.read_json(file)
    use_reward_score_threshold = False
    use_bleu_threshold = False
    use_sentence_sim = True
    from sacrebleu.metrics import BLEU
    bleu = BLEU()
    from nltk.translate.bleu_score import sentence_bleu

    def get_bleu(actual, expected_list):
        # BLEU similarity of one candidate against the unhelpful phrase list
        # return bleu.sentence_score(actual, expected_list).score
        return sentence_bleu(expected_list, actual)

    threshold = 0.0
    if use_reward_score_threshold:
        df = df[df['grade_deberta'] > threshold]
    # back to as if original json load
    data = df.to_dict(orient='records')
    # count phrase occurrences over the stringified records as a coarse scan
    bads = {}
    string_all = str(data)
    for sub in unhelpful:
        bads[sub] = string_all.count(sub)
    bads = {k: v for k, v in bads.items() if v > 0}
    import pprint
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(bads)
    total_bads = sum(list(bads.values()))
    print('total_bads: %s' % total_bads, flush=True)
    # check just bot
    import re
    # split each conversation on the <human>:/<bot>: markers; even indices are
    # human turns, odd indices are bot turns
    convs = [[x.strip() for x in re.split(r'%s|%s' % (human, bot), y['input']) if x.strip()] for y in data]
    humans = [[x for i, x in enumerate(y) if i % 2 == 0] for y in convs]
    bots = [[x for i, x in enumerate(y) if i % 2 == 1] for y in convs]
    # FIXME: apply back to json etc., just see for now
    bleu_threshold = 0.9
    if use_bleu_threshold:
        bots = [[x for x in y if get_bleu(x, unhelpful) < bleu_threshold] for y in tqdm(bots)]
    cosine_sim_threshold = 0.8
    if use_sentence_sim:
        # pip install sentence_transformers-2.2.2
        from sentence_transformers import SentenceTransformer
        # sent_model = 'bert-base-nli-mean-tokens'
        # sent_model = 'nli-distilroberta-base-v2'
        sent_model = 'all-MiniLM-L6-v2'
        model = SentenceTransformer(sent_model)
        sentence_embeddings = model.encode(unhelpful)
        from sklearn.metrics.pairwise import cosine_similarity
        # NOTE(review): unlike the BLEU branch (which filters individual bot
        # messages), this drops the entire conversation when any bot message
        # is similar to an unhelpful phrase — confirm the asymmetry is intended.
        bots = [x for x in tqdm(bots) if
                np.max(cosine_similarity(model.encode(x), sentence_embeddings)) < cosine_sim_threshold]
    # recount phrase occurrences over the surviving bot turns only
    bads_bots = {}
    string_all = str(bots)
    for sub in unhelpful:
        bads_bots[sub] = string_all.count(sub)
    bads_bots = {k: v for k, v in bads_bots.items() if v > 0}
    import pprint
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(bads_bots)
    total_bads_bots = sum(list(bads_bots.values()))
    print('threshold: %g use_bleu_threshold: %g total_bads_bots: %s total_bots: %s total_humans: %s' % (
        threshold, use_bleu_threshold, total_bads_bots, len(bots), len(humans)), flush=True)
    # assert len(bads) == 0, bads
    assert len(bads_bots) == 0, bads_bots
166,878 | import ast
import concurrent.futures
import contextlib
import hashlib
import json
import os
import shutil
import signal
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor
import psutil
import pytest
import pandas as pd
import numpy as np
from tqdm import tqdm
from utils import flatten_list, remove
def get_sentences(blob, length):
    """
    Break input text into sentences and pack them into chunks of about
    *length* characters each.

    Fixes over the previous version: the sentence that triggered a chunk
    flush was discarded (it now starts the next chunk), an over-long first
    sentence produced an empty chunk, and a trailing partial chunk was
    dropped whenever earlier chunks existed.

    :param blob: input text
    :param length: approximate maximum chunk size in characters
    :return: non-empty list of chunk strings
    """
    import nltk
    nltk.download('punkt')
    from nltk.tokenize import sent_tokenize
    sentences = sent_tokenize(blob)
    my_sentences = []
    my_string = ""
    for sentence in sentences:
        if len(my_string) + len(sentence) <= length:
            if my_string:
                my_string += " " + sentence
            else:
                my_string = sentence
        else:
            # flush the current chunk and start a new one with this sentence
            # (a sentence longer than `length` becomes its own chunk)
            if my_string:
                my_sentences.append(my_string)
            my_string = sentence
    # keep the trailing partial chunk instead of discarding it
    if my_string:
        my_sentences.append(my_string)
    return my_sentences or [""]
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
from sklearn.utils.validation import _num_samples
def create_personality_data(prompt_type="llama2"):
    """Generate Q/A rows that teach the model its h2oGPT identity.

    Builds the cross product of identity questions, answers, and optional
    help suffixes, plus a fixed set of H2O.ai facts.  The rows are also
    written to h2ogpt-personality.json as a side effect, and the row count
    is printed.

    :param prompt_type: prompt_type tag stored on every generated row
    :return: list of dicts with input/output/prompt_type/source keys
    """
    questions = [
        "What's your name?",
        "What is your name?",
        "What are you?",
        "Who are you?",
        "Do you have a name?",
        "Who trained you?",
        "Who created you?",
        "Who made you?",
    ]
    answers = [
        "I'm h2oGPT, a large language model by H2O.ai.",
        "I'm h2oGPT, a large language model by H2O.ai, the visionary leader in democratizing AI.",
        "My name is h2oGPT. I'm a large language model by H2O.ai, the visionary leader in democratizing AI.",
        "My name is h2oGPT. I'm a large language model trained by H2O.ai.",
        "Hi! I'm h2oGPT, a large language model by H2O.ai.",
        "Hi! I'm h2oGPT, a large language model by H2O.ai, the visionary leader in democratizing AI.",
    ]
    # renamed from `help` to avoid shadowing the builtin
    help_suffixes = [
        "",
        " How can I help you?",
        " How may I assist you?",
        " Nice to meet you.",
    ]
    import itertools
    rows = []
    for question, answer, suffix in itertools.product(questions, answers, help_suffixes):
        rows.append(
            dict(input=f"{question}", output=f"{answer}{suffix}", prompt_type=prompt_type, source="H2O.ai")
        )
    # shared answers for the fixed fact pairs below (deduplicated)
    tech_answer = ("H2O.ai is a technology company that aims to democratize AI and make it accessible to a "
                   "broader audience by simplifying the process of creating and deploying machine learning models.")
    visionary_answer = "H2O.ai is the visionary leader in democratizing AI."
    for q, a in [
        ("What is H2O.ai?", tech_answer),
        ("What is h2o.ai?", tech_answer),
        ("What is H2O?", tech_answer),
        ("Who is h2o.ai?", tech_answer),
        ("who is h2o.ai?", tech_answer),
        ("who is h2o?", tech_answer),
        ("what is H2O.ai?", visionary_answer),
        ("who is H2O.ai?", visionary_answer),
        ("who is H2O?", visionary_answer),
        ("Who is h20?", visionary_answer),
    ]:
        rows.append(dict(input=q, output=a, prompt_type=prompt_type, source='H2O.ai'))
    print(len(rows))
    with open("h2ogpt-personality.json", "w") as f:
        f.write(json.dumps(rows, indent=2))
    return rows
def test_fortune2000_personalized():
row_list = []
import glob
if not os.path.isdir("wikitext"):
raise RuntimeError("download https://github.com/h2oai/h2ogpt/files/11423008/wikitext.zip and unzip")
for file in glob.glob("wikitext/*.txt"):
with open(file, "r") as f:
blob = f.read()
N = 512 * 4
row_list.extend([{'input': s, 'prompt_type': 'plain', 'source': "%s" % os.path.basename(file)}
for s in get_sentences(blob, N) if s])
personality = create_personality_data()
import copy
for i in range(10):
row_list.extend(copy.deepcopy(personality))
np.random.seed(123)
np.random.shuffle(row_list)
for i in range(len(row_list)):
row_list[i]['id'] = i
for i in range(len(row_list)):
assert row_list[i]['id'] == i
with open("h2ogpt-fortune2000-personalized.json", "w") as ff:
ff.write(json.dumps(row_list, indent=2)) | null |
166,879 | import ast
import asyncio
import copy
import functools
import glob
import gzip
import inspect
import json
import os
import pathlib
import pickle
import re
import shutil
import subprocess
import tempfile
import time
import traceback
import types
import typing
import urllib.error
import uuid
import zipfile
import tarfile
from collections import defaultdict
from datetime import datetime
from functools import reduce
from operator import concat
from urllib.parse import urlparse
import filelock
import tabulate
from joblib import delayed
from langchain.callbacks import streaming_stdout
from langchain.callbacks.base import Callbacks
from langchain.document_transformers import Html2TextTransformer, BeautifulSoupTransformer
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain_community.llms.huggingface_pipeline import VALID_TASKS
from langchain.llms.utils import enforce_stop_tokens
from langchain.prompts.chat import ChatPromptValue
from langchain.schema import LLMResult, Generation, PromptValue
from langchain.schema.output import GenerationChunk
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatResult
from langchain_experimental.tools import PythonREPLTool
from langchain.tools.json.tool import JsonSpec
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_mistralai import ChatMistralAI
from pydantic.v1 import root_validator
from tqdm import tqdm
from src.db_utils import length_db1, set_dbid, set_userid, get_dbid, get_userid_direct, get_username_direct, \
set_userid_direct
from src.image_utils import fix_image_file, get_image_types, get_image_file
from src.output_parser import H2OPythonMRKLOutputParser
from src.pandas_agent_langchain import create_csv_agent, create_pandas_dataframe_agent
from utils import wrapped_partial, EThread, import_matplotlib, sanitize_filename, makedirs, get_url, flatten_list, \
get_device, ProgressParallel, remove, hash_file, clear_torch_cache, NullContext, get_hf_server, FakeTokenizer, \
have_libreoffice, have_arxiv, have_playwright, have_selenium, have_tesseract, have_doctr, have_pymupdf, set_openai, \
get_list_or_str, have_pillow, only_selenium, only_playwright, only_unstructured_urls, get_short_name, \
get_accordion, have_jq, get_doc, get_source, have_chromamigdb, get_token_count, reverse_ucurve_list, get_size, \
get_test_name_core, download_simple, have_fiftyone, have_librosa, return_good_url, n_gpus_global, \
get_accordion_named, hyde_titles, have_cv2, FullSet, create_relative_symlink, split_list, get_gradio_tmp, merge_dict
from enums import DocumentSubset, no_lora_str, model_token_mapping, source_prefix, source_postfix, non_query_commands, \
LangChainAction, LangChainMode, DocumentChoice, LangChainTypes, font_size, head_acc, super_source_prefix, \
super_source_postfix, langchain_modes_intrinsic, get_langchain_prompts, LangChainAgent, docs_joiner_default, \
docs_ordering_types_default, langchain_modes_non_db, does_support_functiontools, doc_json_mode_system_prompt, \
auto_choices, max_docs_public, max_chunks_per_doc_public, max_docs_public_api, max_chunks_per_doc_public_api, \
user_prompt_for_fake_system_prompt, does_support_json_mode
from evaluate_params import gen_hyper, gen_hyper0
from gen import SEED, get_limited_prompt, get_docs_tokens, get_relaxed_max_new_tokens, get_model_retry, gradio_to_llm, \
get_client_from_inference_server
from prompter import non_hf_types, PromptType, Prompter, get_vllm_extra_dict, system_docqa, system_summary, \
is_vision_model
from src.serpapi import H2OSerpAPIWrapper
from utils_langchain import StreamingGradioCallbackHandler, _chunk_sources, _add_meta, add_parser, fix_json_meta, \
load_general_summarization_chain, H2OHuggingFaceHubEmbeddings
import numpy as np
import pandas as pd
import requests
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.document_loaders import PyPDFLoader, TextLoader, CSVLoader, PythonLoader, TomlLoader, \
UnstructuredURLLoader, UnstructuredHTMLLoader, UnstructuredWordDocumentLoader, UnstructuredMarkdownLoader, \
EverNoteLoader, UnstructuredEmailLoader, UnstructuredODTLoader, UnstructuredPowerPointLoader, \
UnstructuredEPubLoader, UnstructuredImageLoader, UnstructuredRTFLoader, ArxivLoader, UnstructuredPDFLoader, \
UnstructuredExcelLoader, JSONLoader, AsyncHtmlLoader, AsyncChromiumLoader
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter, TextSplitter
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain.llms import HuggingFaceTextGenInference, HuggingFacePipeline
from langchain.vectorstores import Chroma
from chromamig import ChromaMig
from langchain.embeddings import FakeEmbeddings
from functools import partial
from typing import Any, Dict, List, Optional, Iterable
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
from langchain.chat_models import ChatAnthropic as ChatAnthropic2
from langchain_anthropic import ChatAnthropic as ChatAnthropic3
from langchain.llms import OpenAI, AzureOpenAI, Replicate
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream
)
import posthog
def get_answer_from_sources(chain, sources, question):
    """Run a QA chain over the given source documents and return the answer text."""
    chain_inputs = {
        "input_documents": sources,
        "question": question,
    }
    result = chain(chain_inputs, return_only_outputs=True)
    return result["output_text"]
166,880 | import ast
import asyncio
import copy
import functools
import glob
import gzip
import inspect
import json
import os
import pathlib
import pickle
import re
import shutil
import subprocess
import tempfile
import time
import traceback
import types
import typing
import urllib.error
import uuid
import zipfile
import tarfile
from collections import defaultdict
from datetime import datetime
from functools import reduce
from operator import concat
from urllib.parse import urlparse
import filelock
import tabulate
from joblib import delayed
from langchain.callbacks import streaming_stdout
from langchain.callbacks.base import Callbacks
from langchain.document_transformers import Html2TextTransformer, BeautifulSoupTransformer
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain_community.llms.huggingface_pipeline import VALID_TASKS
from langchain.llms.utils import enforce_stop_tokens
from langchain.prompts.chat import ChatPromptValue
from langchain.schema import LLMResult, Generation, PromptValue
from langchain.schema.output import GenerationChunk
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatResult
from langchain_experimental.tools import PythonREPLTool
from langchain.tools.json.tool import JsonSpec
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_mistralai import ChatMistralAI
from pydantic.v1 import root_validator
from tqdm import tqdm
from src.db_utils import length_db1, set_dbid, set_userid, get_dbid, get_userid_direct, get_username_direct, \
set_userid_direct
from src.image_utils import fix_image_file, get_image_types, get_image_file
from src.output_parser import H2OPythonMRKLOutputParser
from src.pandas_agent_langchain import create_csv_agent, create_pandas_dataframe_agent
from utils import wrapped_partial, EThread, import_matplotlib, sanitize_filename, makedirs, get_url, flatten_list, \
get_device, ProgressParallel, remove, hash_file, clear_torch_cache, NullContext, get_hf_server, FakeTokenizer, \
have_libreoffice, have_arxiv, have_playwright, have_selenium, have_tesseract, have_doctr, have_pymupdf, set_openai, \
get_list_or_str, have_pillow, only_selenium, only_playwright, only_unstructured_urls, get_short_name, \
get_accordion, have_jq, get_doc, get_source, have_chromamigdb, get_token_count, reverse_ucurve_list, get_size, \
get_test_name_core, download_simple, have_fiftyone, have_librosa, return_good_url, n_gpus_global, \
get_accordion_named, hyde_titles, have_cv2, FullSet, create_relative_symlink, split_list, get_gradio_tmp, merge_dict
from enums import DocumentSubset, no_lora_str, model_token_mapping, source_prefix, source_postfix, non_query_commands, \
LangChainAction, LangChainMode, DocumentChoice, LangChainTypes, font_size, head_acc, super_source_prefix, \
super_source_postfix, langchain_modes_intrinsic, get_langchain_prompts, LangChainAgent, docs_joiner_default, \
docs_ordering_types_default, langchain_modes_non_db, does_support_functiontools, doc_json_mode_system_prompt, \
auto_choices, max_docs_public, max_chunks_per_doc_public, max_docs_public_api, max_chunks_per_doc_public_api, \
user_prompt_for_fake_system_prompt, does_support_json_mode
from evaluate_params import gen_hyper, gen_hyper0
from gen import SEED, get_limited_prompt, get_docs_tokens, get_relaxed_max_new_tokens, get_model_retry, gradio_to_llm, \
get_client_from_inference_server
from prompter import non_hf_types, PromptType, Prompter, get_vllm_extra_dict, system_docqa, system_summary, \
is_vision_model
from src.serpapi import H2OSerpAPIWrapper
from utils_langchain import StreamingGradioCallbackHandler, _chunk_sources, _add_meta, add_parser, fix_json_meta, \
load_general_summarization_chain, H2OHuggingFaceHubEmbeddings
import numpy as np
import pandas as pd
import requests
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.document_loaders import PyPDFLoader, TextLoader, CSVLoader, PythonLoader, TomlLoader, \
UnstructuredURLLoader, UnstructuredHTMLLoader, UnstructuredWordDocumentLoader, UnstructuredMarkdownLoader, \
EverNoteLoader, UnstructuredEmailLoader, UnstructuredODTLoader, UnstructuredPowerPointLoader, \
UnstructuredEPubLoader, UnstructuredImageLoader, UnstructuredRTFLoader, ArxivLoader, UnstructuredPDFLoader, \
UnstructuredExcelLoader, JSONLoader, AsyncHtmlLoader, AsyncChromiumLoader
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter, TextSplitter
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain.llms import HuggingFaceTextGenInference, HuggingFacePipeline
from langchain.vectorstores import Chroma
from chromamig import ChromaMig
from langchain.embeddings import FakeEmbeddings
from functools import partial
from typing import Any, Dict, List, Optional, Iterable
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
from langchain.chat_models import ChatAnthropic as ChatAnthropic2
from langchain_anthropic import ChatAnthropic as ChatAnthropic3
from langchain.llms import OpenAI, AzureOpenAI, Replicate
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream
)
import posthog
def get_image_types():
def get_supported_types():
    """Return the supported input file extensions as three lists.

    :return: tuple ``(non_image_types0, image_types0, video_types0)``,
        each a list of lower-case extension strings.
    """
    # Document-like (non-image, non-video) extensions.
    # "msg" deliberately excluded: its loader is GPL3.
    doc_exts = [
        "pdf", "txt", "csv", "toml", "py", "rst", "xml", "rtf",
        "md",
        "html", "mhtml", "htm",
        "enex", "eml", "epub", "odt", "pptx", "ppt",
        "zip",
        "gz",
        "gzip",
        "urls",
    ]
    # Video container/codec extensions, normalized to lower case
    # (note '.PV' keeps its leading dot, matching historical behavior).
    video_exts = [ext.lower() for ext in (
        'WEBM',
        'MPG', 'MP2', 'MPEG', 'MPE', '.PV',
        'OGG',
        'MP4', 'M4P', 'M4V',
        'AVI', 'WMV',
        'MOV', 'QT',
        'FLV', 'SWF',
        'AVCHD',
    )]
    image_exts = get_image_types()
    return doc_exts, image_exts, video_exts
166,881 | import ast
import asyncio
import copy
import functools
import glob
import gzip
import inspect
import json
import os
import pathlib
import pickle
import re
import shutil
import subprocess
import tempfile
import time
import traceback
import types
import typing
import urllib.error
import uuid
import zipfile
import tarfile
from collections import defaultdict
from datetime import datetime
from functools import reduce
from operator import concat
from urllib.parse import urlparse
import filelock
import tabulate
from joblib import delayed
from langchain.callbacks import streaming_stdout
from langchain.callbacks.base import Callbacks
from langchain.document_transformers import Html2TextTransformer, BeautifulSoupTransformer
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain_community.llms.huggingface_pipeline import VALID_TASKS
from langchain.llms.utils import enforce_stop_tokens
from langchain.prompts.chat import ChatPromptValue
from langchain.schema import LLMResult, Generation, PromptValue
from langchain.schema.output import GenerationChunk
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatResult
from langchain_experimental.tools import PythonREPLTool
from langchain.tools.json.tool import JsonSpec
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_mistralai import ChatMistralAI
from pydantic.v1 import root_validator
from tqdm import tqdm
from src.db_utils import length_db1, set_dbid, set_userid, get_dbid, get_userid_direct, get_username_direct, \
set_userid_direct
from src.image_utils import fix_image_file, get_image_types, get_image_file
from src.output_parser import H2OPythonMRKLOutputParser
from src.pandas_agent_langchain import create_csv_agent, create_pandas_dataframe_agent
from utils import wrapped_partial, EThread, import_matplotlib, sanitize_filename, makedirs, get_url, flatten_list, \
get_device, ProgressParallel, remove, hash_file, clear_torch_cache, NullContext, get_hf_server, FakeTokenizer, \
have_libreoffice, have_arxiv, have_playwright, have_selenium, have_tesseract, have_doctr, have_pymupdf, set_openai, \
get_list_or_str, have_pillow, only_selenium, only_playwright, only_unstructured_urls, get_short_name, \
get_accordion, have_jq, get_doc, get_source, have_chromamigdb, get_token_count, reverse_ucurve_list, get_size, \
get_test_name_core, download_simple, have_fiftyone, have_librosa, return_good_url, n_gpus_global, \
get_accordion_named, hyde_titles, have_cv2, FullSet, create_relative_symlink, split_list, get_gradio_tmp, merge_dict
from enums import DocumentSubset, no_lora_str, model_token_mapping, source_prefix, source_postfix, non_query_commands, \
LangChainAction, LangChainMode, DocumentChoice, LangChainTypes, font_size, head_acc, super_source_prefix, \
super_source_postfix, langchain_modes_intrinsic, get_langchain_prompts, LangChainAgent, docs_joiner_default, \
docs_ordering_types_default, langchain_modes_non_db, does_support_functiontools, doc_json_mode_system_prompt, \
auto_choices, max_docs_public, max_chunks_per_doc_public, max_docs_public_api, max_chunks_per_doc_public_api, \
user_prompt_for_fake_system_prompt, does_support_json_mode
from evaluate_params import gen_hyper, gen_hyper0
from gen import SEED, get_limited_prompt, get_docs_tokens, get_relaxed_max_new_tokens, get_model_retry, gradio_to_llm, \
get_client_from_inference_server
from prompter import non_hf_types, PromptType, Prompter, get_vllm_extra_dict, system_docqa, system_summary, \
is_vision_model
from src.serpapi import H2OSerpAPIWrapper
from utils_langchain import StreamingGradioCallbackHandler, _chunk_sources, _add_meta, add_parser, fix_json_meta, \
load_general_summarization_chain, H2OHuggingFaceHubEmbeddings
import numpy as np
import pandas as pd
import requests
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.document_loaders import PyPDFLoader, TextLoader, CSVLoader, PythonLoader, TomlLoader, \
UnstructuredURLLoader, UnstructuredHTMLLoader, UnstructuredWordDocumentLoader, UnstructuredMarkdownLoader, \
EverNoteLoader, UnstructuredEmailLoader, UnstructuredODTLoader, UnstructuredPowerPointLoader, \
UnstructuredEPubLoader, UnstructuredImageLoader, UnstructuredRTFLoader, ArxivLoader, UnstructuredPDFLoader, \
UnstructuredExcelLoader, JSONLoader, AsyncHtmlLoader, AsyncChromiumLoader
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter, TextSplitter
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain.llms import HuggingFaceTextGenInference, HuggingFacePipeline
from langchain.vectorstores import Chroma
from chromamig import ChromaMig
from langchain.embeddings import FakeEmbeddings
from functools import partial
from typing import Any, Dict, List, Optional, Iterable
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
from langchain.chat_models import ChatAnthropic as ChatAnthropic2
from langchain_anthropic import ChatAnthropic as ChatAnthropic3
from langchain.llms import OpenAI, AzureOpenAI, Replicate
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream
)
def get_wiki_sources(first_para=True, text_limit=None):
    """
    Get specific named sources from wikipedia

    :param first_para: if True, presumably fetch only the first paragraph of
        each page (forwarded to get_wiki_data -- TODO confirm semantics there)
    :param text_limit: optional text limit forwarded to get_wiki_data
    :return: list with one get_wiki_data() result per configured source
    """
    default_wiki_sources = ['Unix', 'Microsoft_Windows', 'Linux']
    wiki_sources_env = os.getenv('WIKI_SOURCES')
    if wiki_sources_env is None:
        wiki_sources = list(default_wiki_sources)
    else:
        # BUG FIX: os.getenv returns a *string* when the variable is set, and
        # the old list(<str>) call split it into single characters
        # (e.g. 'Unix' -> ['U', 'n', 'i', 'x']).  Accept either a python
        # literal list/tuple or a comma-separated string instead.
        try:
            parsed = ast.literal_eval(wiki_sources_env)
        except (ValueError, SyntaxError):
            parsed = None
        if isinstance(parsed, (list, tuple)):
            wiki_sources = list(parsed)
        else:
            wiki_sources = [x.strip() for x in wiki_sources_env.split(',') if x.strip()]
    return [get_wiki_data(x, first_para, text_limit=text_limit) for x in wiki_sources]
def get_dai_docs(from_hf=False, get_pickle=True):
    """
    Consume DAI documentation, or consume from public pickle

    :param from_hf: get DAI docs from HF, then generate pickle for later use by LangChain
    :param get_pickle: Avoid raw DAI docs, just get pickle directly from HF
    :return: list of Document objects, one per documentation snippet, with
        'source' metadata pointing at the doc file (symlinked next to cwd).
    """
    import pickle
    if get_pickle:
        get_dai_pickle()

    dai_store = 'dai_docs.pickle'
    dst = "working_dir_docs"
    if not os.path.isfile(dai_store):
        # No cached pickle: build outputs from the raw rst documentation.
        from create_data import setup_dai_docs
        dst = setup_dai_docs(dst=dst, from_hf=from_hf)
        import glob
        files = list(glob.glob(os.path.join(dst, '*rst'), recursive=True))
        basedir = os.path.abspath(os.getcwd())
        from create_data import rst_to_outputs
        new_outputs = rst_to_outputs(files)
        # NOTE(review): presumably rst_to_outputs may change cwd; restore it.
        os.chdir(basedir)
        # BUG FIX: use context managers instead of leaking the file handles
        # previously opened inline in pickle.dump/pickle.load.
        with open(dai_store, 'wb') as f:
            pickle.dump(new_outputs, f)
    else:
        with open(dai_store, 'rb') as f:
            new_outputs = pickle.load(f)

    sources = []
    for line, file in new_outputs:
        # gradio requires any linked file to be with app.py
        sym_src = os.path.abspath(os.path.join(dst, file))
        sym_dst = os.path.abspath(os.path.join(os.getcwd(), file))
        if os.path.lexists(sym_dst):
            os.remove(sym_dst)
        os.symlink(sym_src, sym_dst)
        itm = Document(page_content=str(line), metadata={"source": file})
        # NOTE: yield has issues when going into db, loses metadata
        # yield itm
        sources.append(itm)
    return sources
import posthog
def get_existing_db(db, persist_directory,
                    load_db_if_exists, db_type, use_openai_embedding,
                    langchain_mode, langchain_mode_paths, langchain_mode_types,
                    hf_embedding_model,
                    migrate_embedding_model,
                    auto_migrate_db=False,
                    verbose=False, check_embedding=True, migrate_meta=True,
                    n_jobs=-1,
                    embedding_gpu_id=0):
    """Load (or reuse) an on-disk Chroma vector database, migrating it if needed.

    Only acts when load_db_if_exists is set, db_type is a chroma variant, and
    persist_directory exists; otherwise the inputs are returned unchanged.

    :param db: already-loaded db instance, or None to load from disk
    :param persist_directory: directory holding the chroma database
    :param hf_embedding_model: either a model-name string or a dict with
        'name' and preloaded 'model' keys (both shapes are handled below)
    :param check_embedding: if True, verify/update the stored embedding config
    :param migrate_meta: if True, run metadata migration on the loaded db
    :return: tuple (db, use_openai_embedding, hf_embedding_model) -- the
        embedding settings may be overridden by what was saved with the db
    """
    if load_db_if_exists and db_type in ['chroma', 'chroma_old'] and os.path.isdir(persist_directory):
        # chroma>=0.4 stores a sqlite file; chroma<0.4 stored an 'index' dir.
        if os.path.isfile(os.path.join(persist_directory, 'chroma.sqlite3')):
            must_migrate = False
        elif os.path.isdir(os.path.join(persist_directory, 'index')):
            must_migrate = True
        else:
            # Directory exists but holds neither layout: nothing to load.
            return db, use_openai_embedding, hf_embedding_model
        chroma_settings = dict(is_persistent=True)
        use_chromamigdb = False
        if must_migrate:
            if auto_migrate_db:
                # In-place migration of the old duckdb-backed database.
                print("Detected chromadb<0.4 database, require migration, doing now....", flush=True)
                from chroma_migrate.import_duckdb import migrate_from_duckdb
                import chromadb
                api = chromadb.PersistentClient(path=persist_directory)
                did_migration = migrate_from_duckdb(api, persist_directory)
                assert did_migration, "Failed to migrate chroma collection at %s, see https://docs.trychroma.com/migration for CLI tool" % persist_directory
            elif have_chromamigdb:
                # Keep using the legacy duckdb backend via the chromamigdb shim.
                print(
                    "Detected chroma<0.4 database but --auto_migrate_db=False, but detected chromamigdb package, so using old database that still requires duckdb",
                    flush=True)
                chroma_settings = dict(chroma_db_impl="duckdb+parquet")
                use_chromamigdb = True
            else:
                raise ValueError(
                    "Detected chromadb<0.4 database, require migration, but did not detect chromamigdb package or did not choose auto_migrate_db=False (see FAQ.md)")
        if db is None:
            if verbose:
                print("DO Loading db: %s" % langchain_mode, flush=True)
            # Prefer the embedding configuration that was saved alongside the db.
            got_embedding, use_openai_embedding0, hf_embedding_model0 = load_embed(persist_directory=persist_directory,
                                                                                   use_openai_embedding=use_openai_embedding)
            if got_embedding and hf_embedding_model and 'name' in hf_embedding_model and hf_embedding_model0 == \
                    hf_embedding_model['name']:
                # already have
                embedding = hf_embedding_model['model']
            else:
                if got_embedding:
                    # doesn't match, must load new
                    use_openai_embedding, hf_embedding_model = use_openai_embedding0, hf_embedding_model0
                else:
                    if hf_embedding_model and 'name' in hf_embedding_model:
                        # if no embedding, use same as preloaded
                        hf_embedding_model = hf_embedding_model['name']
                embedding = get_embedding(use_openai_embedding, hf_embedding_model=hf_embedding_model,
                                          gpu_id=embedding_gpu_id)
            import logging
            logging.getLogger("chromadb").setLevel(logging.ERROR)
            # Pick the db class matching old vs new chroma backends.
            if use_chromamigdb:
                from chromamigdb.config import Settings
                chroma_class = ChromaMig
                api_kwargs = {}
            else:
                from chromadb.config import Settings
                chroma_class = Chroma
                if os.path.isdir(persist_directory):
                    import chromadb
                    api_kwargs = dict(client=chromadb.PersistentClient(path=persist_directory))
                else:
                    api_kwargs = {}
            if not api_kwargs:
                client_settings = Settings(anonymized_telemetry=False,
                                           **chroma_settings,
                                           persist_directory=persist_directory)
                api_kwargs = dict(client_settings=client_settings)
            db = chroma_class(persist_directory=persist_directory, embedding_function=embedding,
                              collection_name=langchain_mode.replace(' ', '_'),
                              **api_kwargs)
            try:
                # Probe query to force the index open and surface dim mismatches.
                with get_context_cast():
                    db.similarity_search('')
            except BaseException as e:
                # migration when no embed_info
                if 'Dimensionality of (768) does not match index dimensionality (384)' in str(e) or \
                        'Embedding dimension 768 does not match collection dimensionality 384' in str(e) or \
                        'Embedding dimension 768 does not match collection dimensionality 1536' in str(e) or \
                        'Dimensionality of (1536) does not match index dimensionality (384)' in str(e):
                    # Fall back to the 384-dim MiniLM model the index was built with.
                    hf_embedding_model = "sentence-transformers/all-MiniLM-L6-v2"
                    embedding = get_embedding(use_openai_embedding, hf_embedding_model=hf_embedding_model)
                    db = chroma_class(persist_directory=persist_directory, embedding_function=embedding,
                                      collection_name=langchain_mode.replace(' ', '_'),
                                      **api_kwargs)
                    # should work now, let fail if not
                    with get_context_cast():
                        db.similarity_search('')
                    save_embed(db, use_openai_embedding, hf_embedding_model)
                else:
                    raise
            if verbose:
                print("DONE Loading db: %s" % langchain_mode, flush=True)
        else:
            if not migrate_embedding_model:
                # OVERRIDE embedding choices if could load embedding info when not migrating
                got_embedding, use_openai_embedding, hf_embedding_model = load_embed(db=db,
                                                                                     use_openai_embedding=use_openai_embedding)
            if verbose:
                print("USING already-loaded db: %s" % langchain_mode, flush=True)
        if check_embedding:
            db_trial, changed_db = check_update_chroma_embedding(db,
                                                                 db_type,
                                                                 use_openai_embedding,
                                                                 hf_embedding_model,
                                                                 migrate_embedding_model,
                                                                 auto_migrate_db,
                                                                 langchain_mode,
                                                                 langchain_mode_paths,
                                                                 langchain_mode_types,
                                                                 n_jobs=n_jobs,
                                                                 verbose=verbose)
            if changed_db:
                db = db_trial
                # only call persist if really changed db, else takes too long for large db
                if db is not None:
                    db.persist()
                    clear_embedding(db)
                save_embed(db, use_openai_embedding, hf_embedding_model)
        if migrate_meta:
            db_trial, changed_db = migrate_meta_func(db, langchain_mode)
            if changed_db:
                db = db_trial
        return db, use_openai_embedding, hf_embedding_model
    return db, use_openai_embedding, hf_embedding_model
def make_db(**langchain_kwargs):
    """Build/refresh a vector database by delegating to ``_make_db``.

    Any ``_make_db`` parameters not supplied by the caller are filled in from
    the defaults declared on ``run_qa_db``; keyword arguments that ``_make_db``
    does not accept are dropped before the call.

    :raises AssertionError: if a required ``_make_db`` parameter is neither
        supplied nor defaulted by ``run_qa_db``.
    :return: whatever ``_make_db`` returns.
    """
    expected_names = list(inspect.signature(_make_db).parameters)
    run_qa_defaults = {name: param.default
                       for name, param in inspect.signature(run_qa_db).parameters.items()}
    # Fill any gaps from run_qa_db's declared defaults.
    for name in expected_names:
        if name not in langchain_kwargs and name in run_qa_defaults:
            langchain_kwargs[name] = run_qa_defaults[name]
    # Final check: everything _make_db needs must now be present.
    missing_kwargs = [x for x in expected_names if x not in langchain_kwargs]
    assert not missing_kwargs, "Missing kwargs for make_db: %s" % missing_kwargs
    # Only forward what _make_db actually accepts.
    filtered_kwargs = {k: v for k, v in langchain_kwargs.items() if k in expected_names}
    return _make_db(**filtered_kwargs)
# Built-in langchain modes that never correspond to an on-disk document
# collection: disabled mode, plain-LLM mode, and the per-user scratch space.
# NOTE(review): a name of the same spelling is also imported from enums above;
# this statement rebinds it locally -- verify the two stay in sync.
langchain_modes_intrinsic = [LangChainMode.DISABLED.value,
                             LangChainMode.LLM.value,
                             LangChainMode.MY_DATA.value]
The provided code snippet includes necessary dependencies for implementing the `prep_langchain` function. Write a Python function `def prep_langchain(persist_directory, load_db_if_exists, db_type, use_openai_embedding, langchain_mode, langchain_mode_paths, langchain_mode_types, hf_embedding_model, migrate_embedding_model, auto_migrate_db, n_jobs=-1, embedding_gpu_id=0, kwargs_make_db={}, verbose=False)` to solve the following problem:
Perform the first-time preparation, which involves downloads. # FIXME: Add github caching, then add it here. :return: the prepared database (or None).
Here is the function:
def prep_langchain(persist_directory,
                   load_db_if_exists,
                   db_type, use_openai_embedding,
                   langchain_mode, langchain_mode_paths, langchain_mode_types,
                   hf_embedding_model,
                   migrate_embedding_model,
                   auto_migrate_db,
                   n_jobs=-1, embedding_gpu_id=0,
                   kwargs_make_db=None,
                   verbose=False):
    """
    do prep first time, involving downloads
    # FIXME: Add github caching then add here

    :param persist_directory: on-disk location of the vector database
    :param langchain_mode: collection to prepare; intrinsic modes are skipped
    :param kwargs_make_db: extra keyword arguments forwarded to make_db();
        None means empty (BUG FIX: was a mutable default argument ``{}``)
    :return: the prepared db, or None for intrinsic modes
    """
    if kwargs_make_db is None:
        kwargs_make_db = {}
    if os.getenv("HARD_ASSERTS"):
        assert langchain_mode not in ['MyData'], "Should not prep scratch/personal data"
    if langchain_mode in langchain_modes_intrinsic:
        # Nothing on disk for disabled/LLM/scratch modes.
        return None

    db_dir_exists = os.path.isdir(persist_directory)
    user_path = langchain_mode_paths.get(langchain_mode)

    if db_dir_exists and user_path is None:
        # Existing db and no user path: just load it as-is.
        if verbose:
            print("Prep: persist_directory=%s exists, using" % persist_directory, flush=True)
        db, use_openai_embedding, hf_embedding_model = \
            get_existing_db(None, persist_directory, load_db_if_exists,
                            db_type, use_openai_embedding,
                            langchain_mode, langchain_mode_paths, langchain_mode_types,
                            hf_embedding_model, migrate_embedding_model, auto_migrate_db,
                            n_jobs=n_jobs, embedding_gpu_id=embedding_gpu_id)
    else:
        if db_dir_exists and user_path is not None:
            if verbose:
                print("Prep: persist_directory=%s exists, user_path=%s passed, adding any changed or new documents" % (
                    persist_directory, user_path), flush=True)
        elif not db_dir_exists:
            if verbose:
                print("Prep: persist_directory=%s does not exist, regenerating" % persist_directory, flush=True)
        db = None
        if langchain_mode in ['DriverlessAI docs']:
            # FIXME: Could also just use dai_docs.pickle directly and upload that
            get_dai_docs(from_hf=True)
        if langchain_mode in ['wiki']:
            get_wiki_sources(first_para=kwargs_make_db['first_para'], text_limit=kwargs_make_db['text_limit'])

        langchain_kwargs = kwargs_make_db.copy()
        # NOTE: locals() deliberately sweeps db, verbose, user_path, etc. into
        # the kwargs; make_db() filters down to what _make_db actually accepts.
        langchain_kwargs.update(locals())
        db, num_new_sources, new_sources_metadata = make_db(**langchain_kwargs)

    return db
166,882 | import ast
import asyncio
import copy
import functools
import glob
import gzip
import inspect
import json
import os
import pathlib
import pickle
import re
import shutil
import subprocess
import tempfile
import time
import traceback
import types
import typing
import urllib.error
import uuid
import zipfile
import tarfile
from collections import defaultdict
from datetime import datetime
from functools import reduce
from operator import concat
from urllib.parse import urlparse
import filelock
import tabulate
from joblib import delayed
from langchain.callbacks import streaming_stdout
from langchain.callbacks.base import Callbacks
from langchain.document_transformers import Html2TextTransformer, BeautifulSoupTransformer
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain_community.llms.huggingface_pipeline import VALID_TASKS
from langchain.llms.utils import enforce_stop_tokens
from langchain.prompts.chat import ChatPromptValue
from langchain.schema import LLMResult, Generation, PromptValue
from langchain.schema.output import GenerationChunk
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatResult
from langchain_experimental.tools import PythonREPLTool
from langchain.tools.json.tool import JsonSpec
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_mistralai import ChatMistralAI
from pydantic.v1 import root_validator
from tqdm import tqdm
from src.db_utils import length_db1, set_dbid, set_userid, get_dbid, get_userid_direct, get_username_direct, \
set_userid_direct
from src.image_utils import fix_image_file, get_image_types, get_image_file
from src.output_parser import H2OPythonMRKLOutputParser
from src.pandas_agent_langchain import create_csv_agent, create_pandas_dataframe_agent
from utils import wrapped_partial, EThread, import_matplotlib, sanitize_filename, makedirs, get_url, flatten_list, \
get_device, ProgressParallel, remove, hash_file, clear_torch_cache, NullContext, get_hf_server, FakeTokenizer, \
have_libreoffice, have_arxiv, have_playwright, have_selenium, have_tesseract, have_doctr, have_pymupdf, set_openai, \
get_list_or_str, have_pillow, only_selenium, only_playwright, only_unstructured_urls, get_short_name, \
get_accordion, have_jq, get_doc, get_source, have_chromamigdb, get_token_count, reverse_ucurve_list, get_size, \
get_test_name_core, download_simple, have_fiftyone, have_librosa, return_good_url, n_gpus_global, \
get_accordion_named, hyde_titles, have_cv2, FullSet, create_relative_symlink, split_list, get_gradio_tmp, merge_dict
from enums import DocumentSubset, no_lora_str, model_token_mapping, source_prefix, source_postfix, non_query_commands, \
LangChainAction, LangChainMode, DocumentChoice, LangChainTypes, font_size, head_acc, super_source_prefix, \
super_source_postfix, langchain_modes_intrinsic, get_langchain_prompts, LangChainAgent, docs_joiner_default, \
docs_ordering_types_default, langchain_modes_non_db, does_support_functiontools, doc_json_mode_system_prompt, \
auto_choices, max_docs_public, max_chunks_per_doc_public, max_docs_public_api, max_chunks_per_doc_public_api, \
user_prompt_for_fake_system_prompt, does_support_json_mode
from evaluate_params import gen_hyper, gen_hyper0
from gen import SEED, get_limited_prompt, get_docs_tokens, get_relaxed_max_new_tokens, get_model_retry, gradio_to_llm, \
get_client_from_inference_server
from prompter import non_hf_types, PromptType, Prompter, get_vllm_extra_dict, system_docqa, system_summary, \
is_vision_model
from src.serpapi import H2OSerpAPIWrapper
from utils_langchain import StreamingGradioCallbackHandler, _chunk_sources, _add_meta, add_parser, fix_json_meta, \
load_general_summarization_chain, H2OHuggingFaceHubEmbeddings
import numpy as np
import pandas as pd
import requests
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.document_loaders import PyPDFLoader, TextLoader, CSVLoader, PythonLoader, TomlLoader, \
UnstructuredURLLoader, UnstructuredHTMLLoader, UnstructuredWordDocumentLoader, UnstructuredMarkdownLoader, \
EverNoteLoader, UnstructuredEmailLoader, UnstructuredODTLoader, UnstructuredPowerPointLoader, \
UnstructuredEPubLoader, UnstructuredImageLoader, UnstructuredRTFLoader, ArxivLoader, UnstructuredPDFLoader, \
UnstructuredExcelLoader, JSONLoader, AsyncHtmlLoader, AsyncChromiumLoader
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter, TextSplitter
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain.llms import HuggingFaceTextGenInference, HuggingFacePipeline
from langchain.vectorstores import Chroma
from chromamig import ChromaMig
from langchain.embeddings import FakeEmbeddings
from functools import partial
from typing import Any, Dict, List, Optional, Iterable
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
from langchain.chat_models import ChatAnthropic as ChatAnthropic2
from langchain_anthropic import ChatAnthropic as ChatAnthropic3
from langchain.llms import OpenAI, AzureOpenAI, Replicate
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream
)
import posthog
def get_metadatas(db, full_required=True, k_max=10000):
    """Collect per-document metadata dicts from a vector database.

    :param db: FAISS, chroma (new or old), some other langchain store, or None
    :param full_required: if True, force a full scan even for large chroma dbs
    :param k_max: cap on results when falling back to similarity search
    :return: list of metadata dicts (possibly empty)
    """
    from langchain.vectorstores import FAISS
    if isinstance(db, FAISS):
        # FAISS keeps every document in an in-memory docstore dict.
        return [doc.metadata for doc in db.docstore._dict.values()]
    if is_chroma_db(db):
        full_scan = full_required or not (large_chroma_db(db) and is_new_chroma_db(db))
        if not full_scan:
            # Too many entries for a full scan: approximate via sim search.
            return [doc.metadata for doc in sim_search(db, k=k_max, with_score=False)]
        db_get = get_documents(db)
        documents = db_get['documents']
        if documents is None:
            documents = []
        metadatas = db_get['metadatas']
        if metadatas is None:
            if documents is not None:
                # No stored metadata: one empty dict per document.
                metadatas = [{}] * len(documents)
            else:
                metadatas = []
        return metadatas
    if db is not None:
        # FIXME: Hack due to https://github.com/weaviate/weaviate/issues/1947
        # seems no way to get all metadata, so need to avoid this approach for weaviate
        with get_context_cast():
            return [doc.metadata for doc in db.similarity_search("", k=k_max)]
    return []
def get_existing_files(db):
    """Return the set of distinct 'source' values stored in db's metadata."""
    # Note: get_metadatas may do a full scan if used, but this function not used yet
    return {meta['source'] for meta in get_metadatas(db)}
166,883 |
def noop_load(*args, **kwargs):
    """No-op loader stub.

    Accepts and ignores any positional/keyword arguments; always returns None.
    """
    del args, kwargs  # explicitly unused
    return None
166,884 | import copy
import torch
from evaluate_params import eval_func_param_names, input_args_list
from gen import evaluate, check_locals
from prompter import non_hf_types
from utils import clear_torch_cache, NullContext, get_kwargs
# Ordered names of the gradio state objects passed as the leading positional
# arguments into evaluate() -- order must match evaluate()'s signature.
input_args_list = ['model_state', 'my_db_state', 'selection_docs_state', 'requests_state', 'roles_state']
# Ordered names of all UI/API-supplied evaluate() parameters, in the exact
# order evaluate() declares them after the state arguments.
# NOTE(review): gen_hyper and reader_names are defined elsewhere
# (evaluate_params / loader config) -- verify they are in scope here.
eval_func_param_names = ['instruction',
                         'iinput',
                         'context',
                         'stream_output',
                         'prompt_type',
                         'prompt_dict'] + \
                        gen_hyper + \
                        ['chat',
                         'instruction_nochat',
                         'iinput_nochat',
                         'langchain_mode',
                         'add_chat_history_to_context',
                         'langchain_action',
                         'langchain_agents',
                         'top_k_docs',
                         'chunk',
                         'chunk_size',
                         'document_subset',
                         'document_choice',
                         'document_source_substrings',
                         'document_source_substrings_op',
                         'document_content_substrings',
                         'document_content_substrings_op',
                         'pre_prompt_query',
                         'prompt_query',
                         'pre_prompt_summary',
                         'prompt_summary',
                         'hyde_llm_prompt',
                         'system_prompt',
                         ] + \
                        reader_names + \
                        ['visible_models',
                         'h2ogpt_key',
                         'add_search_to_context',
                         'chat_conversation',
                         'text_context_list',
                         'docs_ordering_type',
                         'min_max_new_tokens',
                         'max_input_tokens',
                         'max_total_input_tokens',
                         'docs_token_handling',
                         'docs_joiner',
                         'hyde_level',
                         'hyde_template',
                         'hyde_show_only_final',
                         'doc_json_mode',
                         'metadata_in_context',
                         'chatbot_role',
                         'speaker',
                         'tts_language',
                         'tts_speed',
                         'image_file',
                         'image_control',
                         ]
def evaluate(
model_state,
my_db_state,
selection_docs_state,
requests_state,
roles_state,
# START NOTE: Examples must have same order of parameters
instruction,
iinput,
context,
stream_output,
prompt_type,
prompt_dict,
temperature,
top_p,
top_k,
penalty_alpha,
num_beams,
max_new_tokens,
min_new_tokens,
early_stopping,
max_time,
repetition_penalty,
num_return_sequences,
do_sample,
chat,
instruction_nochat,
iinput_nochat,
langchain_mode,
add_chat_history_to_context,
langchain_action,
langchain_agents,
top_k_docs,
chunk,
chunk_size,
document_subset,
document_choice,
document_source_substrings,
document_source_substrings_op,
document_content_substrings,
document_content_substrings_op,
pre_prompt_query,
prompt_query,
pre_prompt_summary,
prompt_summary,
hyde_llm_prompt,
system_prompt,
image_audio_loaders,
pdf_loaders,
url_loaders,
jq_schema,
extract_frames,
llava_prompt,
visible_models,
h2ogpt_key,
add_search_to_context,
chat_conversation,
text_context_list,
docs_ordering_type,
min_max_new_tokens,
max_input_tokens,
max_total_input_tokens,
docs_token_handling,
docs_joiner,
hyde_level,
hyde_template,
hyde_show_only_final,
doc_json_mode,
metadata_in_context,
chatbot_role,
speaker,
tts_language,
tts_speed,
image_file,
image_control,
# END NOTE: Examples must have same order of parameters
captions_model=None,
caption_loader=None,
doctr_loader=None,
pix2struct_loader=None,
llava_model=None,
image_gen_loader=None,
image_gen_loader_high=None,
image_change_loader=None,
enable_imagegen_high_sd=None,
asr_model=None,
asr_loader=None,
async_output=None,
num_async=None,
src_lang=None,
tgt_lang=None,
debug=False,
concurrency_count=None,
save_dir=None,
sanitize_bot_response=False,
model_state0=None,
memory_restriction_level=None,
max_max_new_tokens=None,
is_public=None,
from_ui=True,
regenerate_clients=None,
regenerate_gradio_clients=None,
max_max_time=None,
raise_generate_gpu_exceptions=None,
lora_weights=None,
use_llm_if_no_docs=True,
load_db_if_exists=True,
dbs=None,
detect_user_path_changes_every_query=None,
use_openai_embedding=None,
use_openai_model=None,
hf_embedding_model=None,
migrate_embedding_model=None,
auto_migrate_db=None,
cut_distance=None,
db_type=None,
n_jobs=None,
first_para=None,
text_limit=None,
show_accordions=None,
hyde_show_intermediate_in_accordion=None,
top_k_docs_max_show=None,
show_link_in_sources=None,
langchain_instruct_mode=None,
verbose=False,
gradio=True,
cli=False,
use_cache=None,
auto_reduce_chunks=None,
max_chunks=None,
headsize=None,
model_lock=None,
force_langchain_evaluate=None,
model_state_none=None,
llamacpp_path=None,
llamacpp_dict=None,
exllama_dict=None,
gptq_dict=None,
attention_sinks=None,
sink_dict=None,
truncation_generation=None,
hf_model_dict=None,
load_exllama=None,
answer_with_sources=None,
append_sources_to_answer=None,
append_sources_to_chat=None,
image_audio_loaders_options0=None,
pdf_loaders_options0=None,
url_loaders_options0=None,
jq_schema0=None,
extract_frames0=None,
keep_sources_in_context=None,
gradio_errors_to_chatbot=None,
allow_chat_system_prompt=None,
# carry defaults to know what forced-off means
use_pymupdf=None,
use_unstructured_pdf=None,
use_pypdf=None,
enable_pdf_ocr=None,
enable_pdf_doctr=None,
try_pdf_as_html=None,
load_awq=None,
):
# ensure passed these
assert concurrency_count is not None
assert memory_restriction_level is not None
assert raise_generate_gpu_exceptions is not None
assert use_openai_embedding is not None
assert use_openai_model is not None
assert hf_embedding_model is not None
assert migrate_embedding_model is not None
assert auto_migrate_db is not None
assert db_type is not None
assert top_k_docs is not None and isinstance(top_k_docs, int)
assert chunk is not None and isinstance(chunk, bool)
assert chunk_size is not None and isinstance(chunk_size, int)
assert n_jobs is not None
assert first_para is not None
assert isinstance(add_chat_history_to_context, bool)
assert isinstance(add_search_to_context, bool)
assert load_exllama is not None
# for lazy client (even chat client)
if image_audio_loaders is None:
image_audio_loaders = image_audio_loaders_options0
if pdf_loaders is None:
pdf_loaders = pdf_loaders_options0
if url_loaders is None:
url_loaders = url_loaders_options0
if jq_schema is None:
jq_schema = jq_schema0
if extract_frames is None:
extract_frames = extract_frames0
if isinstance(langchain_agents, str):
if langchain_agents.strip().startswith('['):
# already list, but as string
langchain_agents = str_to_list(langchain_agents)
else:
# just 1 item and make list
langchain_agents = [langchain_agents]
chat_conversation = str_to_list(chat_conversation)
text_context_list = str_to_list(text_context_list)
langchain_modes = selection_docs_state['langchain_modes']
langchain_mode_paths = selection_docs_state['langchain_mode_paths']
langchain_mode_types = selection_docs_state['langchain_mode_types']
if debug:
locals_dict = locals().copy()
locals_dict.pop('model_state', None)
locals_dict.pop('model_state0', None)
locals_dict.pop('model_states', None)
print(locals_dict)
if langchain_action in [LangChainAction.IMAGE_GENERATE.value, LangChainAction.IMAGE_GENERATE_HIGH.value]:
t_generate = time.time()
if langchain_action in [LangChainAction.IMAGE_GENERATE.value]:
assert image_gen_loader, "Generating image, but image_gen_loader is None"
from src.vision.sdxl import make_image
pipe = image_gen_loader
elif langchain_action in [LangChainAction.IMAGE_GENERATE_HIGH.value]:
assert image_gen_loader_high, "Generating image, but image_gen_loader_high is None"
if enable_imagegen_high_sd:
from src.vision.stable_diffusion_xl import make_image
else:
from src.vision.playv2 import make_image
pipe = image_gen_loader_high
else:
raise ValueError("No such langchain_action=%s" % langchain_action)
filename_image = sanitize_filename("image_%s_%s.png" % (instruction, str(uuid.uuid4())),
file_length_limit=50)
gradio_tmp = get_gradio_tmp()
image_file_gen = make_image(instruction,
filename=os.path.join(gradio_tmp, filename_image),
pipe=pipe,
)
response = (image_file_gen,)
# FIXME: Could run this through image model if was selected
extra_dict = dict(t_generate=time.time() - t_generate,
instruction=instruction,
prompt_raw=instruction,
prompt_type=prompt_type,
base_model=LangChainAction.IMAGE_GENERATE.value)
save_dict = dict(prompt=instruction, output=response, extra_dict=extra_dict)
yield dict(response=response, sources=[], save_dict=save_dict, llm_answers={},
response_no_refs="Generated image for %s" % instruction,
sources_str="", prompt_raw=instruction)
return
no_model_msg = "Please choose a base model with --base_model (CLI) or load in Models Tab (gradio).\n" \
"Then start New Conversation"
if model_state is None:
model_state = model_state_none.copy()
if model_state0 is None:
# e.g. for no gradio case, set dummy value, else should be set
model_state0 = model_state_none.copy()
# model_state['model] is only 'model' if should use model_state0
# model could also be None
have_model_lock = model_lock is not None
have_fresh_model = model_state['model'] not in [None, 'model', no_model_str]
# for gradio UI control, expect model_state and model_state0 to match, so if have_model_lock=True, then should have_fresh_model=True
# but gradio API control will only use nochat api etc. and won't use fresh model, so can't assert in general
# if have_model_lock:
# assert have_fresh_model, "Expected model_state and model_state0 to match if have_model_lock"
have_cli_model = model_state0['model'] not in [None, 'model', no_model_str]
no_llm_ok = langchain_action in [LangChainAction.IMAGE_GENERATE.value,
LangChainAction.IMAGE_GENERATE_HIGH.value,
LangChainAction.IMAGE_CHANGE.value,
]
chosen_model_state = model_state0
if have_fresh_model:
# USE FRESH MODEL
if not have_model_lock:
# model_state0 is just one of model_state if model_lock, so don't nuke
# try to free-up original model (i.e. list was passed as reference)
if model_state0['model'] and hasattr(model_state0['model'], 'cpu'):
model_state0['model'].cpu()
model_state0['model'] = None
# try to free-up original tokenizer (i.e. list was passed as reference)
if model_state0['tokenizer']:
model_state0['tokenizer'] = None
clear_torch_cache()
chosen_model_state = model_state
elif have_cli_model:
# USE MODEL SETUP AT CLI
assert isinstance(model_state['model'], (type(None), str)) # expect no fresh model
elif not no_llm_ok:
raise AssertionError(no_model_msg)
# get variables
model = chosen_model_state['model']
tokenizer = chosen_model_state['tokenizer']
device = chosen_model_state['device']
base_model = chosen_model_state['base_model']
tokenizer_base_model = chosen_model_state['tokenizer_base_model']
lora_weights = chosen_model_state['lora_weights']
inference_server = chosen_model_state['inference_server']
visible_models = chosen_model_state['visible_models']
# use overall key if have, so key for this gradio and any inner gradio
if chosen_model_state['h2ogpt_key'] is not None:
h2ogpt_key = chosen_model_state['h2ogpt_key']
# prefer use input from API over model state
prompt_type = prompt_type or chosen_model_state['prompt_type']
prompt_dict = prompt_dict or chosen_model_state['prompt_dict']
if base_model is None and not no_llm_ok:
raise AssertionError(no_model_msg)
assert base_model.strip(), no_model_msg
assert model is not None, "Model is missing"
assert tokenizer is not None, "Tokenizer is missing"
# choose chat or non-chat mode
if not chat:
instruction = instruction_nochat
iinput = iinput_nochat
# avoid instruction in chat_conversation itself, since always used as additional context to prompt in what follows
if isinstance(chat_conversation, list) and \
len(chat_conversation) > 0 and \
len(chat_conversation[-1]) == 2 and \
chat_conversation[-1][0] == instruction and \
chat_conversation[-1][1] in [None, '']:
chat_conversation = chat_conversation[:-1]
if not add_chat_history_to_context:
# make it easy to ignore without needing add_chat_history_to_context
# some langchain or unit test may need to then handle more general case
chat_conversation = []
# in some cases, like lean nochat API, don't want to force sending prompt_type, allow default choice
# This doesn't do switch-a-roo, assume already done, so might be wrong model and can't infer
model_lower = base_model.lower()
llamacpp_dict = str_to_dict(llamacpp_dict)
if not prompt_type and prompt_type != 'custom':
prompt_type_trial = model_name_to_prompt_type(base_model,
llamacpp_dict=llamacpp_dict)
if prompt_type_trial:
prompt_type = prompt_type_trial
if verbose:
print("Auto-selecting prompt_type=%s for %s" % (prompt_type, base_model), flush=True)
assert prompt_type is not None, "prompt_type was None"
# Control generation hyperparameters
# adjust for bad inputs, e.g. in case also come from API that doesn't get constrained by gradio sliders
# below is for TGI server, not required for HF transformers
# limits are chosen similar to gradio_runner.py sliders/numbers
top_p = min(max(1e-3, top_p), 1.0 - 1e-3)
top_k = min(max(1, int(top_k)), 100)
penalty_alpha = min(2.0, max(0.0, penalty_alpha))
if temperature == 0.0:
# override
do_sample = False
# Note: Could do below, but for now gradio way can control do_sample directly
# elif temperature >= 0.01:
# do_sample = True
temperature = min(max(0.01, temperature), 2.0)
max_input_tokens = int(max_input_tokens) if max_input_tokens is not None else -1
max_total_input_tokens = int(max_total_input_tokens) if max_total_input_tokens is not None else -1
# FIXME: https://github.com/h2oai/h2ogpt/issues/106
num_beams = 1 if stream_output else num_beams # See max_beams in gradio_runner
if model_lower == 'distilgpt2':
# always truncate for certain models that totally fail otherwise
truncation_generation = True
max_max_new_tokens = get_max_max_new_tokens(chosen_model_state,
memory_restriction_level=memory_restriction_level,
max_new_tokens=max_new_tokens,
attention_sinks=attention_sinks,
max_max_new_tokens=max_max_new_tokens,
truncation_generation=truncation_generation)
if min_max_new_tokens is None:
# default for nochat api
min_max_new_tokens = 512
if max_input_tokens is None:
max_input_tokens = -1
if max_total_input_tokens is None:
max_total_input_tokens = -1
if docs_ordering_type is None:
docs_ordering_type = docs_ordering_types_default
if docs_token_handling is None:
docs_token_handling = docs_token_handling_default
if docs_joiner is None:
docs_joiner = docs_joiner_default
model_max_length = get_model_max_length(chosen_model_state)
max_new_tokens = min(max(1, int(max_new_tokens)), max_max_new_tokens)
min_new_tokens = min(max(0, int(min_new_tokens)), max_new_tokens)
max_time = min(max(0, max_time), max_max_time)
repetition_penalty = min(max(0.01, repetition_penalty), 3.0)
num_return_sequences = 1 if chat else min(max(1, int(num_return_sequences)), 10)
min_top_k_docs, max_top_k_docs, label_top_k_docs = get_minmax_top_k_docs(is_public, from_ui)
# limit total tokens processed, e.g. for summarization, if public instance
if is_public:
# control API too for public case
if from_ui:
max_input_tokens = max_input_tokens_public
else:
max_input_tokens = max_input_tokens_public_api
if from_ui:
max_total_input_tokens = min(max_total_input_tokens, max_total_input_tokens_public)
else:
max_total_input_tokens = min(max_total_input_tokens, max_total_input_tokens_public_api)
top_k_docs = min(max(min_top_k_docs, int(top_k_docs)), max_top_k_docs)
chunk_size = min(max(128, int(chunk_size)), 2048)
if not context:
context = ''
# NOTE!!!!!!!!!! Choice of developer. But only possible to force stream if num_beams=1
# stream if can, so can control task iteration and time of iteration
# not required, but helpful for max_time control etc.
stream_output0 = stream_output
stream_output = gradio and num_beams == 1
# get prompter
prompter = Prompter(prompt_type, prompt_dict, debug=debug, stream_output=stream_output,
system_prompt=system_prompt)
# THIRD PLACE where LangChain referenced, but imports only occur if enabled and have db to use
assert langchain_mode in langchain_modes, "Invalid langchain_mode %s not in %s" % (langchain_mode, langchain_modes)
assert langchain_action in langchain_actions, "Invalid langchain_action %s not in %s" % (
langchain_action, langchain_actions)
assert len(
set(langchain_agents).difference(langchain_agents_list)) == 0, "Invalid langchain_agents %s" % langchain_agents
# get db, but also fill db state so return already has my_db_state and dbs filled so faster next query
if langchain_mode != LangChainMode.DISABLED.value:
from src.gpt_langchain import get_any_db
db = get_any_db(my_db_state, langchain_mode, langchain_mode_paths, langchain_mode_types,
dbs=dbs,
load_db_if_exists=load_db_if_exists,
db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
for_sources_list=True,
verbose=verbose,
n_jobs=n_jobs,
)
else:
db = None
t_generate = time.time()
langchain_only_model = base_model in non_hf_types or \
load_exllama or \
inference_server.startswith('replicate') or \
inference_server.startswith('sagemaker') or \
inference_server.startswith('openai_azure_chat') or \
inference_server.startswith('openai_azure') or \
inference_server.startswith('anthropic') or \
inference_server.startswith('google') or \
inference_server.startswith('mistralai')
do_langchain_path = langchain_mode not in [False, 'Disabled', 'LLM'] or \
langchain_only_model or \
force_langchain_evaluate or \
len(text_context_list) > 0
if len(langchain_agents) > 0:
do_langchain_path = True
if add_search_to_context:
# easier to manage prompt etc. by doing full langchain path
do_langchain_path = True
gen_hyper_dict = dict(do_sample=do_sample,
temperature=temperature,
repetition_penalty=repetition_penalty,
top_p=top_p,
top_k=top_k,
penalty_alpha=penalty_alpha,
num_beams=num_beams,
min_new_tokens=min_new_tokens,
max_new_tokens=max_new_tokens,
early_stopping=early_stopping,
max_time=max_time,
num_return_sequences=num_return_sequences,
)
extra_dict = gen_hyper_dict.copy()
extra_dict.update(dict(base_model=base_model,
prompt_type=prompt_type,
inference_server=inference_server,
langchain_mode=langchain_mode,
langchain_action=langchain_action,
langchain_agents=langchain_agents,
document_subset=document_subset,
document_choice=document_choice,
document_source_substrings=document_source_substrings,
document_source_substrings_op=document_source_substrings_op,
document_content_substrings=document_content_substrings,
document_content_substrings_op=document_content_substrings_op,
add_search_to_context=add_search_to_context,
instruction=instruction,
iinput=iinput,
context=context,
ntokens=None,
tokens_persecond=None,
llamacpp_dict=llamacpp_dict,
))
save_dict = dict(base_model=base_model, save_dir=save_dir, extra_dict=extra_dict)
if do_langchain_path:
text = ''
sources = []
sources_str = ''
response = ''
response_no_refs = ''
prompt_raw = ''
# use smaller cut_distance for wiki_full since so many matches could be obtained, and often irrelevant unless close
from gpt_langchain import run_qa_db
loaders_dict, captions_model, asr_model = gr_to_lg(image_audio_loaders,
pdf_loaders,
url_loaders,
use_pymupdf=use_pymupdf,
use_unstructured_pdf=use_unstructured_pdf,
use_pypdf=use_pypdf,
enable_pdf_ocr=enable_pdf_ocr,
enable_pdf_doctr=enable_pdf_doctr,
try_pdf_as_html=try_pdf_as_html,
captions_model=captions_model,
asr_model=asr_model,
)
loaders_dict.update(dict(captions_model=captions_model,
caption_loader=caption_loader,
doctr_loader=doctr_loader,
pix2struct_loader=pix2struct_loader,
llava_model=llava_model,
asr_model=asr_model,
asr_loader=asr_loader,
jq_schema=jq_schema,
extract_frames=extract_frames,
llava_prompt=llava_prompt,
))
data_point = dict(context=context, instruction=instruction, input=iinput)
# no longer stuff chat history directly into context this early
prompt_basic = prompter.generate_prompt(data_point, context_from_history=False)
prompt = prompt_basic
num_prompt_tokens = 0
llm_answers = {}
for r in run_qa_db(
inference_server=inference_server,
regenerate_clients=regenerate_clients,
regenerate_gradio_clients=regenerate_gradio_clients,
model_name=base_model, model=model, tokenizer=tokenizer,
langchain_only_model=langchain_only_model,
load_awq=load_awq,
async_output=async_output,
num_async=num_async,
prompter=prompter,
use_llm_if_no_docs=use_llm_if_no_docs,
load_db_if_exists=load_db_if_exists,
db=db,
langchain_mode_paths=langchain_mode_paths,
langchain_mode_types=langchain_mode_types,
detect_user_path_changes_every_query=detect_user_path_changes_every_query,
cut_distance=1.1 if langchain_mode in ['wiki_full'] else cut_distance,
answer_with_sources=answer_with_sources,
append_sources_to_answer=append_sources_to_answer,
append_sources_to_chat=append_sources_to_chat,
add_chat_history_to_context=add_chat_history_to_context,
add_search_to_context=add_search_to_context,
keep_sources_in_context=keep_sources_in_context,
gradio_errors_to_chatbot=gradio_errors_to_chatbot,
memory_restriction_level=memory_restriction_level,
system_prompt=system_prompt,
allow_chat_system_prompt=allow_chat_system_prompt,
use_openai_embedding=use_openai_embedding,
use_openai_model=use_openai_model,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
first_para=first_para,
text_limit=text_limit,
show_accordions=show_accordions,
hyde_show_intermediate_in_accordion=hyde_show_intermediate_in_accordion,
top_k_docs_max_show=top_k_docs_max_show,
show_link_in_sources=show_link_in_sources,
langchain_instruct_mode=langchain_instruct_mode,
# evaluate args items
query=instruction,
iinput=iinput,
context=context,
stream_output0=stream_output0,
stream_output=stream_output,
chunk=chunk,
chunk_size=chunk_size,
**loaders_dict,
langchain_mode=langchain_mode,
langchain_action=langchain_action,
langchain_agents=langchain_agents,
document_subset=document_subset,
document_choice=document_choice,
document_source_substrings=document_source_substrings,
document_source_substrings_op=document_source_substrings_op,
document_content_substrings=document_content_substrings,
document_content_substrings_op=document_content_substrings_op,
top_k_docs=top_k_docs,
prompt_type=prompt_type,
prompt_dict=prompt_dict,
pre_prompt_query=pre_prompt_query,
prompt_query=prompt_query,
pre_prompt_summary=pre_prompt_summary,
prompt_summary=prompt_summary,
hyde_llm_prompt=hyde_llm_prompt,
text_context_list=text_context_list,
chat_conversation=chat_conversation,
visible_models=visible_models,
h2ogpt_key=h2ogpt_key,
docs_ordering_type=docs_ordering_type,
min_max_new_tokens=min_max_new_tokens,
max_input_tokens=max_input_tokens,
max_total_input_tokens=max_total_input_tokens,
docs_token_handling=docs_token_handling,
docs_joiner=docs_joiner,
hyde_level=hyde_level,
hyde_template=hyde_template,
hyde_show_only_final=hyde_show_only_final,
doc_json_mode=doc_json_mode,
metadata_in_context=metadata_in_context,
**gen_hyper_dict,
db_type=db_type,
n_jobs=n_jobs,
verbose=verbose,
cli=cli,
sanitize_bot_response=sanitize_bot_response,
lora_weights=lora_weights,
llamacpp_path=llamacpp_path,
llamacpp_dict=llamacpp_dict,
exllama_dict=exllama_dict,
gptq_dict=gptq_dict,
attention_sinks=attention_sinks,
sink_dict=sink_dict,
truncation_generation=truncation_generation,
hf_model_dict=hf_model_dict,
auto_reduce_chunks=auto_reduce_chunks,
max_chunks=max_chunks,
headsize=headsize,
image_file=image_file,
image_control=image_control,
):
# doesn't accumulate, new answer every yield, so only save that full answer
response = r['response']
sources = r['sources']
num_prompt_tokens = r['num_prompt_tokens']
llm_answers = r['llm_answers']
response_no_refs = r['response_no_refs']
sources_str = r['sources_str']
prompt_raw = str(r['prompt_raw'])
if stream_output:
yield dict(response=response, sources=[], save_dict={}, llm_answers=llm_answers,
response_no_refs=response_no_refs, sources_str='', prompt_raw='')
extra_dict.update(dict(num_prompt_tokens=num_prompt_tokens,
t_generate=time.time() - t_generate,
# tokens_persecond computed in save_generate_output
sources_str=sources_str,
sources=sources,
))
save_dict.update(dict(prompt=prompt, output=response, where_from="run_qa_db", extra_dict=extra_dict))
yield dict(response=response, sources=sources, save_dict=save_dict, llm_answers=llm_answers,
response_no_refs=response_no_refs, sources_str=sources_str, prompt_raw=prompt_raw)
if verbose:
print(
'Post-Generate Langchain: %s decoded_output: %s' %
(str(datetime.now()), len(response) if response else -1),
flush=True)
if response or sources or langchain_only_model:
# if got no response (e.g. not showing sources and got no sources,
# so nothing to give to LLM), then slip through and ask LLM
# Or if llama/gptj, then just return since they had no response and can't go down below code path
# don't clear torch cache here, delays multi-generation, and bot(), all_bot(), and evaluate_nochat() do it
return
# NOT LANGCHAIN PATH, raw LLM
# restrict instruction + , typically what has large input
from gradio_utils.grclient import GradioClient
from gradio_client import Client
gradio_server = inference_server.startswith('http') and (
isinstance(model, GradioClient) or isinstance(model, Client))
prompt, \
instruction, iinput, context, \
num_prompt_tokens, max_new_tokens, num_prompt_tokens0, num_prompt_tokens_actual, \
history_to_use_final, external_handle_chat_conversation, \
top_k_docs_trial, one_doc_size, truncation_generation, system_prompt = \
get_limited_prompt(instruction,
iinput,
tokenizer,
prompter=prompter,
inference_server=inference_server,
# prompt_type=prompt_type, # use prompter
# prompt_dict=prompt_dict, # use prompter
# chat=chat, # use prompter
max_new_tokens=max_new_tokens,
# system_prompt=system_prompt, # use prompter
allow_chat_system_prompt=allow_chat_system_prompt,
context=context,
chat_conversation=chat_conversation,
keep_sources_in_context=keep_sources_in_context,
model_max_length=model_max_length,
memory_restriction_level=memory_restriction_level,
langchain_mode=langchain_mode,
add_chat_history_to_context=add_chat_history_to_context,
min_max_new_tokens=min_max_new_tokens,
max_input_tokens=max_input_tokens,
max_total_input_tokens=max_total_input_tokens,
truncation_generation=truncation_generation,
gradio_server=gradio_server,
attention_sinks=attention_sinks,
hyde_level=hyde_level,
gradio_errors_to_chatbot=gradio_errors_to_chatbot,
)
if inference_server.startswith('vllm') or \
inference_server.startswith('openai') or \
inference_server.startswith('http'):
text = ''
gen_server_kwargs = {}
if inference_server.startswith('vllm') or inference_server.startswith('openai'):
assert not inference_server.startswith('openai_azure_chat'), "Not fo Azure, use langchain path"
assert not inference_server.startswith('openai_azure'), "Not for Azure, use langchain path"
if isinstance(model, dict):
openai_client, openai_async_client, inf_type = model['client'], model['async_client'], model['inf_type']
else:
openai_client, openai_async_client, \
inf_type, _, _, _, _ = set_openai(inference_server, model_name=base_model)
where_from = inf_type
responses = None
terminate_response = prompter.terminate_response or []
stop_sequences = list(set(terminate_response + [prompter.PreResponse]))
stop_sequences = [x for x in stop_sequences if x]
# OpenAI will complain if ask for too many new tokens, takes it as min in some sense, wrongly so.
max_new_tokens_openai = min(max_new_tokens, model_max_length - num_prompt_tokens)
gen_server_kwargs = dict(temperature=temperature if do_sample else 0.001,
max_tokens=max_new_tokens_openai,
top_p=top_p if do_sample else 1,
frequency_penalty=0,
seed=SEED,
n=num_return_sequences,
presence_penalty=(repetition_penalty - 1.0) * 2.0 + 0.0, # so good default
)
try:
if inf_type == 'vllm' or inf_type == 'openai':
if inf_type == 'vllm':
vllm_extra_dict = get_vllm_extra_dict(tokenizer, stop_sequences=stop_sequences,
# repetition_penalty=repetition_penalty, # could pass
)
other_dict = dict(timeout=max_time)
else:
vllm_extra_dict = {}
other_dict = dict(timeout=max_time)
responses = openai_client.completions.create(
model=base_model,
prompt=prompt,
**gen_server_kwargs,
stop=stop_sequences,
**vllm_extra_dict,
stream=stream_output,
**other_dict,
)
text = ''
sources = []
response = ''
if not stream_output:
text = responses.choices[0].text
response = prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=sanitize_bot_response)
else:
collected_events = []
tgen0 = time.time()
for event in responses:
collected_events.append(event) # save the event response
delta = event.choices[0].text # extract the text
text += delta # append the text
if delta:
response = prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=sanitize_bot_response)
yield dict(response=response, sources=sources, save_dict={}, llm_answers={},
response_no_refs=response, sources_str='', prompt_raw='')
if time.time() - tgen0 > max_time:
if verbose:
print("Took too long for OpenAI or VLLM: %s" % (time.time() - tgen0), flush=True)
break
time.sleep(0.01)
elif inf_type == 'vllm_chat' or inf_type == 'openai_chat':
other_dict = dict(timeout=max_time)
if system_prompt in [None, 'None', 'auto']:
openai_system_prompt = "You are a helpful assistant."
else:
openai_system_prompt = system_prompt
messages0 = []
if openai_system_prompt:
messages0.append({"role": "system", "content": openai_system_prompt})
if chat_conversation and add_chat_history_to_context:
assert external_handle_chat_conversation, "Should be handling only externally"
# history_to_use_final handles token counting issues
for message1 in history_to_use_final:
if len(message1) == 2 and (message1[0] is None or message1[1] is None):
# then not really part of LLM, internal, so avoid
continue
if len(message1) == 2:
if message1[0]:
messages0.append(
{'role': 'user', 'content': gradio_to_llm(message1[0], bot=False)})
if message1[1]:
messages0.append(
{'role': 'assistant', 'content': gradio_to_llm(message1[1], bot=True)})
if prompt:
messages0.append({'role': 'user', 'content': prompt})
responses = openai_client.chat.completions.create(
model=base_model,
messages=messages0,
stream=stream_output,
**gen_server_kwargs,
**other_dict,
)
text = ""
sources = []
response = ""
if not stream_output:
text = responses.choices[0].message.content
response = prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=sanitize_bot_response)
else:
tgen0 = time.time()
for chunk in responses:
delta = chunk.choices[0].delta.content
if delta:
text += delta
response = prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=sanitize_bot_response)
yield dict(response=response, sources=sources, save_dict={}, llm_answers={},
response_no_refs=response, sources_str='', prompt_raw='')
if time.time() - tgen0 > max_time:
if verbose:
print("Took too long for OpenAI or VLLM Chat: %s" % (time.time() - tgen0),
flush=True)
break
else:
raise RuntimeError("No such OpenAI mode: %s" % inference_server)
finally:
if responses is not None:
try:
responses.close()
except Exception as e:
print("Failed to close OpenAI response: %s" % str(e), flush=True)
if regenerate_clients and openai_client is not None:
try:
openai_client.close()
except Exception as e:
print("Failed to close OpenAI client: %s" % str(e), flush=True)
elif inference_server.startswith('http') and is_vision_model(base_model):
where_from = "gr_client for llava"
sources = []
inference_server, headers = get_hf_server(inference_server)
if isinstance(model, GradioClient) and not regenerate_gradio_clients:
gr_client = model.clone()
elif isinstance(model, Client) and not regenerate_gradio_clients:
gr_client = model
else:
inference_server, gr_client, hf_client = get_client_from_inference_server(inference_server,
base_model=base_model)
assert gr_client is not None
assert hf_client is None
# NOTE: llava doesn't handle context or system prompt directly
img_file = get_image_file(image_file, image_control, document_choice)
llava_kwargs = dict(file=img_file,
llava_model=inference_server,
# prompt=instruction,
prompt=prompt, # prepared prompt with chat history etc.
chat_conversation=chat_conversation,
allow_prompt_auto=False,
image_model=base_model, temperature=temperature,
top_p=top_p, max_new_tokens=max_new_tokens,
client=gr_client if not regenerate_gradio_clients else None,
)
if not stream_output:
from src.vision.utils_vision import get_llava_response
response, _ = get_llava_response(**llava_kwargs)
yield dict(response=response, sources=[], save_dict={}, error='', llm_answers={},
response_no_refs=response, sources_str='', prompt_raw='')
else:
response = ''
tgen0 = time.time()
from src.vision.utils_vision import get_llava_stream
for response in get_llava_stream(**llava_kwargs):
yield dict(response=response, sources=[], save_dict={}, error='', llm_answers={},
response_no_refs=response, sources_str='', prompt_raw='')
if time.time() - tgen0 > max_time:
if verbose:
print("Took too long for TGI: %s" % (time.time() - tgen0), flush=True)
break
elif inference_server.startswith('http'):
inference_server, headers = get_hf_server(inference_server)
from text_generation import Client as HFClient
if isinstance(model, GradioClient) and not regenerate_gradio_clients:
gr_client = model.clone()
hf_client = None
elif isinstance(model, HFClient) and not regenerate_gradio_clients:
gr_client = None
hf_client = model
else:
inference_server, gr_client, hf_client = get_client_from_inference_server(inference_server,
base_model=base_model)
if gr_client is not None:
# Note: h2oGPT gradio server could handle input token size issues for prompt,
# but best to handle here so send less data to server
chat_client = chat
where_from = "gr_client"
client_langchain_mode = 'Disabled'
client_add_chat_history_to_context = add_chat_history_to_context
client_add_search_to_context = False
client_langchain_action = LangChainAction.QUERY.value
client_langchain_agents = []
gen_server_kwargs = dict(temperature=temperature,
top_p=top_p,
top_k=top_k,
penalty_alpha=penalty_alpha,
num_beams=num_beams,
max_new_tokens=max_new_tokens,
min_new_tokens=min_new_tokens,
early_stopping=early_stopping,
max_time=max_time,
repetition_penalty=repetition_penalty,
num_return_sequences=num_return_sequences,
do_sample=do_sample,
chat=chat_client,
)
# account for gradio into gradio that handles prompting, avoid duplicating prompter prompt injection
if prompt_type in [None, '', PromptType.plain.name, PromptType.plain.value,
str(PromptType.plain.value)]:
# if our prompt is plain, assume either correct or gradio server knows different prompt type,
# so pass empty prompt_Type
gr_prompt_type = ''
gr_prompt_dict = ''
gr_prompt = prompt # already prepared prompt
gr_context = ''
gr_iinput = ''
else:
# if already have prompt_type that is not plain, None, or '', then already applied some prompting
# But assume server can handle prompting, and need to avoid double-up.
# Also assume server can do better job of using stopping.py to stop early, so avoid local prompting, let server handle
# So avoid "prompt" and let gradio server reconstruct from prompt_type we passed
# Note it's ok that prompter.get_response() has prompt+text, prompt=prompt passed,
# because just means extra processing and removal of prompt, but that has no human-bot prompting doesn't matter
# since those won't appear
gr_context = context
gr_prompt = instruction
gr_iinput = iinput
gr_prompt_type = prompt_type
gr_prompt_dict = prompt_dict
# ensure image in correct format
img_file = get_image_file(image_file, image_control, document_choice)
if img_file is not None and os.path.isfile(img_file):
from src.vision.utils_vision import img_to_base64
img_file = img_to_base64(img_file)
elif isinstance(img_file, str):
# assume already bytes
img_file = img_file
else:
img_file = None
client_kwargs = dict(instruction=gr_prompt if chat_client else '', # only for chat=True
iinput=gr_iinput, # only for chat=True
context=gr_context,
# streaming output is supported, loops over and outputs each generation in streaming mode
# but leave stream_output=False for simple input/output mode
stream_output=stream_output,
**gen_server_kwargs,
prompt_type=gr_prompt_type,
prompt_dict=gr_prompt_dict,
instruction_nochat=gr_prompt if not chat_client else '',
iinput_nochat=gr_iinput, # only for chat=False
langchain_mode=client_langchain_mode,
add_chat_history_to_context=client_add_chat_history_to_context,
chat_conversation=chat_conversation,
text_context_list=text_context_list,
chatbot_role=chatbot_role,
speaker=speaker,
tts_language=tts_language,
tts_speed=tts_speed,
langchain_action=client_langchain_action,
langchain_agents=client_langchain_agents,
top_k_docs=top_k_docs,
chunk=chunk,
chunk_size=chunk_size,
document_subset=DocumentSubset.Relevant.name,
document_choice=[DocumentChoice.ALL.value],
document_source_substrings=[],
document_source_substrings_op='and',
document_content_substrings=[],
document_content_substrings_op='and',
pre_prompt_query=pre_prompt_query,
prompt_query=prompt_query,
pre_prompt_summary=pre_prompt_summary,
prompt_summary=prompt_summary,
hyde_llm_prompt=hyde_llm_prompt,
system_prompt=system_prompt,
image_audio_loaders=image_audio_loaders,
pdf_loaders=pdf_loaders,
url_loaders=url_loaders,
jq_schema=jq_schema,
extract_frames=extract_frames,
llava_prompt=llava_prompt,
visible_models=visible_models,
h2ogpt_key=h2ogpt_key,
add_search_to_context=client_add_search_to_context,
docs_ordering_type=docs_ordering_type,
min_max_new_tokens=min_max_new_tokens,
max_input_tokens=max_input_tokens,
max_total_input_tokens=max_total_input_tokens,
docs_token_handling=docs_token_handling,
docs_joiner=docs_joiner,
hyde_level=hyde_level,
hyde_template=hyde_template,
hyde_show_only_final=hyde_show_only_final,
doc_json_mode=doc_json_mode,
metadata_in_context=metadata_in_context,
image_file=img_file,
image_control=None, # already stuffed into image_file
)
assert len(set(list(client_kwargs.keys())).symmetric_difference(eval_func_param_names)) == 0
api_name = '/submit_nochat_api' # NOTE: like submit_nochat but stable API for string dict passing
response = ''
text = ''
sources = []
strex = ''
if not stream_output:
res = gr_client.predict(str(dict(client_kwargs)), api_name=api_name)
res_dict = ast.literal_eval(res)
text = res_dict['response']
sources = res_dict['sources']
response = prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=sanitize_bot_response)
else:
new_stream = False # hanging for many chatbots
gr_stream_kwargs = dict(client_kwargs=client_kwargs,
api_name=api_name,
prompt=prompt, prompter=prompter,
sanitize_bot_response=sanitize_bot_response,
max_time=max_time,
is_public=is_public,
verbose=verbose)
if new_stream:
res_dict = yield from gr_client.stream(**gr_stream_kwargs)
else:
res_dict = yield from gr_client.simple_stream(**gr_stream_kwargs)
response = res_dict.get('response', '')
elif hf_client:
# quick sanity check to avoid long timeouts, just see if can reach server
requests.get(inference_server, timeout=int(os.getenv('REQUEST_TIMEOUT_FAST', '10')))
# HF inference server needs control over input tokens
where_from = "hf_client"
response = ''
sources = []
# prompt must include all human-bot like tokens, already added by prompt
# https://github.com/huggingface/text-generation-inference/tree/main/clients/python#types
terminate_response = prompter.terminate_response or []
stop_sequences = list(set(terminate_response + [prompter.PreResponse]))
stop_sequences = [x for x in stop_sequences if x]
gen_server_kwargs = dict(do_sample=do_sample,
max_new_tokens=max_new_tokens,
# best_of=None,
repetition_penalty=repetition_penalty,
return_full_text=False,
seed=SEED,
stop_sequences=stop_sequences,
temperature=temperature,
top_k=top_k,
top_p=top_p,
# truncate=False, # behaves oddly
# typical_p=top_p,
# watermark=False,
# decoder_input_details=False,
)
# work-around for timeout at constructor time, will be issue if multi-threading,
# so just do something reasonable or max_time if larger
# lower bound because client is re-used if multi-threading
hf_client.timeout = max(300, max_time)
if not stream_output:
text = hf_client.generate(prompt, **gen_server_kwargs).generated_text
response = prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=sanitize_bot_response)
else:
tgen0 = time.time()
text = ""
for responses in hf_client.generate_stream(prompt, **gen_server_kwargs):
if not responses.token.special:
# stop_sequences
text_chunk = responses.token.text
text += text_chunk
response = prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=sanitize_bot_response)
sources = []
yield dict(response=response, sources=sources, save_dict={}, llm_answers={},
response_no_refs=response, sources_str='', prompt_raw='')
time.sleep(0.01)
if time.time() - tgen0 > max_time:
if verbose:
print("Took too long for TGI: %s" % (time.time() - tgen0), flush=True)
break
else:
raise RuntimeError("Failed to get client: %s" % inference_server)
else:
raise RuntimeError("No such inference_server %s" % inference_server)
# only return yield with save_dict and prompt_raw here to keep streaming light
extra_dict.update(gen_server_kwargs)
extra_dict.update(dict(inference_server=inference_server, # changes in some cases
num_prompt_tokens=num_prompt_tokens,
t_generate=time.time() - t_generate,
ntokens=None,
prompt_type=prompt_type,
tokens_persecond=None,
))
save_dict.update(dict(prompt=prompt, output=text, where_from=where_from, extra_dict=extra_dict))
# if not streaming, only place yield should be done
yield dict(response=response, sources=sources, save_dict=save_dict, llm_answers={},
response_no_refs=response, sources_str='', prompt_raw=prompt)
return
else:
assert not inference_server, "inference_server=%s not supported" % inference_server
if isinstance(tokenizer, str):
# pipeline
if tokenizer == "summarization":
key = 'summary_text'
else:
raise RuntimeError("No such task type %s" % tokenizer)
# NOTE: uses max_length only
sources = []
response = model(prompt, max_length=max_new_tokens)[0][key]
yield dict(response=response, sources=sources, save_dict=save_dict,
llm_answers={},
response_no_refs=response, sources_str='', prompt_raw=prompt)
return
if 'mbart-' in base_model.lower():
assert src_lang is not None
tokenizer.src_lang = languages_covered()[src_lang]
stopping_criteria = get_stopping(prompt_type, prompt_dict, tokenizer, device, base_model,
model_max_length=model_max_length,
prompter=prompter,
truncation_generation=truncation_generation)
inputs = tokenizer(prompt, return_tensors="pt")
if debug and len(inputs["input_ids"]) > 0:
print('input_ids length', len(inputs["input_ids"][0]), flush=True)
input_ids = inputs["input_ids"].to(device)
# CRITICAL LIMIT else will fail
max_max_tokens = int(tokenizer.model_max_length)
max_input_tokens_default = max(0, int(max_max_tokens - min_new_tokens))
if max_input_tokens >= 0:
max_input_tokens = min(max_input_tokens_default, max_input_tokens)
else:
max_input_tokens = max_input_tokens_default
# NOTE: Don't limit up front due to max_new_tokens, let go up to max or reach max_max_tokens in stopping.py
assert isinstance(max_input_tokens, int), "Bad type for max_input_tokens=%s %s" % (
max_input_tokens, type(max_input_tokens))
input_ids = input_ids[:, -max_input_tokens:]
# required for falcon if multiple threads or asyncio accesses to model during generation
if use_cache is None:
use_cache = False if 'falcon' in base_model else True
if attention_sinks:
assert use_cache, "attention sinks requires use_cache=True"
bad_word_ids = [tokenizer.eos_token_id]
gen_config_kwargs = dict(num_beams=num_beams,
do_sample=do_sample,
repetition_penalty=float(repetition_penalty),
num_return_sequences=num_return_sequences,
renormalize_logits=True,
remove_invalid_values=True,
use_cache=use_cache,
max_new_tokens=max_new_tokens, # unsure if required here
)
if do_sample:
gen_config_kwargs.update(dict(temperature=float(temperature),
top_p=float(top_p),
top_k=top_k))
if penalty_alpha > 0:
gen_config_kwargs.update(dict(penalty_alpha=penalty_alpha))
if True:
# unclear impact, some odd things going on inside
# leads to:
# The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.
# Setting `pad_token_id` to `eos_token_id`:2 for open-end generation.
# or leads to:
# Using cls_token, but it is not set yet.
# Using mask_token, but it is not set yet.
# Using pad_token, but it is not set yet.
# Using sep_token, but it is not set yet.
token_ids = ['eos_token_id', 'pad_token_id', 'bos_token_id', 'cls_token_id', 'sep_token_id']
for token_id in token_ids:
if hasattr(tokenizer, token_id) and getattr(tokenizer, token_id) is not None:
gen_config_kwargs.update({token_id: getattr(tokenizer, token_id)})
generation_config = GenerationConfig(**gen_config_kwargs)
gen_kwargs = dict(input_ids=input_ids,
generation_config=generation_config,
return_dict_in_generate=True,
output_scores=True,
max_new_tokens=max_new_tokens, # prompt + new
min_new_tokens=min_new_tokens, # prompt + new
early_stopping=early_stopping, # False, True, "never"
max_time=max_time,
stopping_criteria=stopping_criteria,
)
if use_cache and attention_sinks:
from transformers import SinkCache
sink_dict['window_length'] = sink_dict.get('window_length', max_input_tokens)
sink_dict['num_sink_tokens'] = sink_dict.get('num_sink_tokens', 4)
cache = SinkCache(**sink_dict)
gen_kwargs.update(dict(past_key_values=cache))
if 'gpt2' in base_model.lower():
gen_kwargs.update(dict(bos_token_id=tokenizer.bos_token_id, pad_token_id=tokenizer.eos_token_id))
elif 'mbart-' in base_model.lower():
assert tgt_lang is not None
tgt_lang = languages_covered()[tgt_lang]
gen_kwargs.update(dict(forced_bos_token_id=tokenizer.lang_code_to_id[tgt_lang]))
else:
token_ids = ['eos_token_id', 'bos_token_id', 'pad_token_id']
for token_id in token_ids:
if hasattr(tokenizer, token_id) and getattr(tokenizer, token_id) is not None:
gen_kwargs.update({token_id: getattr(tokenizer, token_id)})
decoder_kwargs = dict(skip_special_tokens=True,
clean_up_tokenization_spaces=True)
decoder = functools.partial(tokenizer.decode,
**decoder_kwargs
)
with torch.no_grad():
have_lora_weights = lora_weights not in [no_lora_str, '', None]
context_class_cast = NullContext if device == 'cpu' or have_lora_weights or device == 'mps' else torch.autocast
if t5_type(base_model):
# issues when casting to float16, can mess up t5 model, e.g. only when not streaming, or other odd behaviors
context_class_cast = NullContext
with context_class_cast(device):
# protection for gradio not keeping track of closed users,
# else hit bitsandbytes lack of thread safety:
# https://github.com/h2oai/h2ogpt/issues/104
# but only makes sense if concurrency_count == 1
context_class = NullContext # if concurrency_count > 1 else filelock.FileLock
if verbose:
print('Pre-Generate: %s' % str(datetime.now()), flush=True)
decoded_output = ''
response = ''
with context_class("generate.lock"):
if verbose:
print('Generate: %s' % str(datetime.now()), flush=True)
always_use_streaming_method = True # to deal with complex parsing of prompt vs. generation due to odd tokenizing
if stream_output or always_use_streaming_method:
skip_prompt = True # True means first output excludes prompt
streamer = H2OTextIteratorStreamer(tokenizer, skip_prompt=skip_prompt, block=False,
**decoder_kwargs)
gen_kwargs.update(dict(streamer=streamer))
target = wrapped_partial(generate_with_exceptions, model.generate,
raise_generate_gpu_exceptions=raise_generate_gpu_exceptions,
**gen_kwargs)
bucket = queue.Queue()
thread = EThread(target=target, streamer=streamer, bucket=bucket)
thread.start()
ret = dict(response='', sources='', save_dict=dict(), llm_answers={},
response_no_refs='', sources_str='', prompt_raw=prompt)
outputs = ""
sources = []
tgen0 = time.time()
try:
for new_text in streamer:
if bucket.qsize() > 0 or thread.exc:
thread.join()
outputs += new_text
response = prompter.get_response(outputs, prompt=None,
only_new_text=True,
sanitize_bot_response=sanitize_bot_response)
ret = dict(response=response, sources=sources, save_dict=save_dict, llm_answers={},
response_no_refs=response, sources_str='', prompt_raw=prompt)
if stream_output:
yield ret
if time.time() - tgen0 > max_time:
if verbose:
print("Took too long for Torch: %s" % (time.time() - tgen0), flush=True)
break
if stream_output:
# will yield at end if required
# yield if anything left over as can happen (FIXME: Understand better)
yield ret
except BaseException:
# if any exception, raise that exception if was from thread, first
if thread.exc:
raise thread.exc
raise
finally:
# don't clear torch cache here, delays multi-generation, and bot(), all_bot(), and evaluate_nochat() do it
# in case no exception and didn't join with thread yet, then join
if not thread.exc:
thread.join()
# in case raise StopIteration or broke queue loop in streamer, but still have exception
if thread.exc:
raise thread.exc
decoded_output = outputs
ntokens = len(outputs) // 4 # hack for now
else:
# below length removal doesn't work in general, because encoding does not match internal of model generation
input_ids_len = gen_kwargs['input_ids'][0].shape[0]
try:
outputs = model.generate(**gen_kwargs)
finally:
pass
# don't clear torch cache here, delays multi-generation, and bot(), all_bot(), and evaluate_nochat() do it
# skip first IDs
ntokens = sum([len(s) - input_ids_len for s in outputs.sequences]) if save_dir else -1
outputs = [decoder(s[input_ids_len:]) for s in outputs.sequences]
sources = []
response = prompter.get_response(outputs, prompt=None,
only_new_text=True,
sanitize_bot_response=sanitize_bot_response)
if outputs and len(outputs) >= 1:
decoded_output = prompt + outputs[0]
# full return with save_dict and prompt_raw
# if not streaming, only place yield should be
extra_dict.update(gen_config_kwargs)
extra_dict.update(dict(num_prompt_tokens=num_prompt_tokens,
t_generate=time.time() - t_generate,
sources_str='',
ntokens=ntokens,
tokens_persecond=ntokens / (time.time() - t_generate),
))
save_dict.update(dict(prompt=prompt, output=decoded_output,
where_from="evaluate_%s" % str(stream_output),
extra_dict=extra_dict))
yield dict(response=response, sources=sources, save_dict=save_dict, llm_answers={},
response_no_refs=response, sources_str='', prompt_raw=prompt)
if torch.cuda.is_available() and device not in ['cpu', 'mps']:
torch.cuda.empty_cache()
if hasattr(model, 'memory') and hasattr(model.memory, 'reset'):
model.memory.reset()
if verbose:
print('Post-Generate: %s decoded_output: %s' % (
str(datetime.now()), len(decoded_output) if decoded_output else -1), flush=True)
def check_locals(**kwargs):
    """Assert that the caller passed every parameter evaluate() expects.

    Intended to be invoked as ``check_locals(**locals())`` from entry points
    (e.g. run_cli) so a drifted signature fails loudly instead of silently
    running with defaults.

    :raises AssertionError: listing the names that are absent from *kwargs*.
    """
    # These names are produced locally (e.g. by get_model), so their absence
    # in the caller's namespace is fine.
    can_skip_because_locally_generated = no_default_param_names + [
        # get_model:
        'reward_type'
    ]
    # Check both required name lists the same way, preserving the original
    # order (eval params first, then inputs kwargs) and error message format.
    for required_names in (eval_func_param_names, inputs_kwargs_list):
        absent = [name for name in required_names
                  if name not in can_skip_because_locally_generated and name not in kwargs]
        assert not absent, "Missing %s" % absent
# base_model names handled by non-HuggingFace backends (presumably GPT4All /
# llama.cpp style loaders rather than transformers — TODO confirm at load site);
# used e.g. in run_cli to choose between streaming-print and accumulate modes.
non_hf_types = ['gpt4all_llama', 'llama', 'gptj']
def clear_torch_cache(allow_skip=False):
    """Free cached CUDA memory and run the garbage collector.

    Behavior is controlled by env var CLEAR_CLEAR_TORCH:
      '0'            -> always skip clearing
      '1'            -> skip only when allow_skip=True
      anything else  -> always clear (default '2')
    Errors from torch are logged, not raised.
    """
    mode = os.getenv('CLEAR_CLEAR_TORCH', '2')
    # same logic as the original `allow_skip and ... == '1' or ... == '0'`,
    # with the operator precedence made explicit
    if mode == '0' or (allow_skip and mode == '1'):
        return
    try:
        import torch
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
            gc.collect()
    except RuntimeError as e:
        print("clear_torch_cache error: %s" % ''.join(traceback.format_tb(e.__traceback__)), flush=True)
class NullContext(threading.local):
    """No-op context manager, executes block without doing any additional processing.

    Used as a stand-in if a particular block of code is only sometimes
    used with a normal context manager (e.g. substituted for torch.device
    or a file lock elsewhere in this file).
    """
    def __init__(self, *args, **kwargs):
        # Accept and ignore any constructor arguments so this class is a
        # drop-in replacement for context managers that take arguments.
        pass
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Exceptions are not suppressed (implicitly returns None/falsy).
        self.finally_act()
    def finally_act(self):
        # Hook for subclasses; intentionally a no-op here.
        pass
def get_kwargs(func, exclude_names=None, **kwargs):
    """Filter **kwargs down to the parameters *func* actually accepts.

    Asserts that every parameter of *func* (minus *exclude_names*) was
    supplied in *kwargs*, then returns only the accepted key/value pairs.

    :param func: callable whose signature defines the accepted names
    :param exclude_names: names to drop from both the required set and result
    :raises AssertionError: if any required parameter is not in *kwargs*
    :return: dict of kwargs restricted to func's parameters
    """
    accepted = list(inspect.signature(func).parameters)
    not_supplied = [name for name in accepted if name not in kwargs]
    if exclude_names:
        for name in exclude_names:
            if name in not_supplied:
                not_supplied.remove(name)
            if name in accepted:
                accepted.remove(name)
    assert not not_supplied, "Missing %s" % not_supplied
    return {key: value for key, value in kwargs.items() if key in accepted}
def run_cli(  # for local function:
        base_model=None, lora_weights=None, inference_server=None, regenerate_clients=None, regenerate_gradio_clients=None,
        debug=None,
        examples=None, memory_restriction_level=None,
        # evaluate kwargs
        n_jobs=None, llamacpp_path=None, llamacpp_dict=None, exllama_dict=None, gptq_dict=None, attention_sinks=None,
        sink_dict=None, truncation_generation=None, hf_model_dict=None, load_exllama=None,
        use_pymupdf=None,
        use_unstructured_pdf=None,
        use_pypdf=None,
        enable_pdf_ocr=None,
        enable_pdf_doctr=None,
        enable_imagegen_high_sd=None,
        try_pdf_as_html=None,
        # for some evaluate args
        load_awq='',
        stream_output=None, async_output=None, num_async=None,
        prompt_type=None, prompt_dict=None, system_prompt=None,
        temperature=None, top_p=None, top_k=None, penalty_alpha=None, num_beams=None,
        max_new_tokens=None, min_new_tokens=None, early_stopping=None, max_time=None, repetition_penalty=None,
        num_return_sequences=None, do_sample=None, chat=None,
        langchain_mode=None, langchain_action=None, langchain_agents=None,
        document_subset=None, document_choice=None,
        document_source_substrings=None,
        document_source_substrings_op=None,
        document_content_substrings=None,
        document_content_substrings_op=None,
        top_k_docs=None, chunk=None, chunk_size=None,
        pre_prompt_query=None, prompt_query=None,
        pre_prompt_summary=None, prompt_summary=None, hyde_llm_prompt=None,
        image_audio_loaders=None,
        pdf_loaders=None,
        url_loaders=None,
        jq_schema=None,
        extract_frames=None,
        extract_frames0=None,
        llava_prompt=None,
        visible_models=None,
        h2ogpt_key=None,
        add_search_to_context=None,
        chat_conversation=None,
        text_context_list=None,
        docs_ordering_type=None,
        min_max_new_tokens=None,
        max_input_tokens=None,
        max_total_input_tokens=None,
        docs_token_handling=None,
        docs_joiner=None,
        hyde_level=None,
        hyde_template=None,
        hyde_show_only_final=None,
        hyde_show_intermediate_in_accordion=None,
        doc_json_mode=None,
        metadata_in_context=None,
        chatbot_role=None,
        speaker=None,
        tts_language=None,
        tts_speed=None,
        image_file=None,
        image_control=None,
        # for evaluate kwargs
        captions_model=None,
        caption_loader=None,
        doctr_loader=None,
        pix2struct_loader=None,
        llava_model=None,
        image_gen_loader=None,
        image_gen_loader_high=None,
        image_change_loader=None,
        asr_model=None,
        asr_loader=None,
        image_audio_loaders_options0=None,
        pdf_loaders_options0=None,
        url_loaders_options0=None,
        jq_schema0=None,
        keep_sources_in_context=None,
        gradio_errors_to_chatbot=None,
        allow_chat_system_prompt=None,
        src_lang=None, tgt_lang=None, concurrency_count=None, save_dir=None, sanitize_bot_response=None,
        model_state0=None,
        score_model_state0=None,
        max_max_new_tokens=None,
        is_public=None,
        max_max_time=None,
        raise_generate_gpu_exceptions=None, load_db_if_exists=None, use_llm_if_no_docs=None,
        my_db_state0=None, selection_docs_state0=None, dbs=None, langchain_modes=None, langchain_mode_paths=None,
        detect_user_path_changes_every_query=None,
        use_openai_embedding=None, use_openai_model=None,
        hf_embedding_model=None, migrate_embedding_model=None, auto_migrate_db=None,
        cut_distance=None,
        answer_with_sources=None,
        append_sources_to_answer=None,
        append_sources_to_chat=None,
        show_accordions=None,
        top_k_docs_max_show=None,
        show_link_in_sources=None,
        langchain_instruct_mode=None,
        add_chat_history_to_context=None,
        context=None, iinput=None,
        db_type=None, first_para=None, text_limit=None, verbose=None,
        gradio=None, cli=None,
        use_cache=None,
        auto_reduce_chunks=None, max_chunks=None, headsize=None,
        model_lock=None, force_langchain_evaluate=None,
        model_state_none=None,
        # unique to this function:
        cli_loop=None,
):
    """Interactive command-line driver (no web UI).

    Accepts the same parameter surface as evaluate() — everything is forwarded
    via ``locals()`` — then repeatedly reads an instruction from stdin, runs it
    through evaluate(), prints the (optionally streamed) response plus any
    sources, and finally returns the list of all generated responses.

    Typing "exit", or a single pass when *cli_loop* is falsy, ends the loop.
    When *add_chat_history_to_context* is set, each (instruction, response)
    pair is appended to *chat_conversation* for subsequent turns.
    """
    # avoid noisy command line outputs
    import warnings
    warnings.filterwarnings("ignore")
    import logging
    logging.getLogger("torch").setLevel(logging.ERROR)
    logging.getLogger("transformers").setLevel(logging.ERROR)
    # marks the non-gradio code path; forwarded to evaluate() via locals()
    # below — NOTE(review): confirm evaluate() actually reads from_ui
    from_ui = False
    # sanity check: every evaluate()/inputs kwarg must have been passed in
    check_locals(**locals())
    score_model = ""  # FIXME: For now, so user doesn't have to pass
    verifier_server = ""  # FIXME: For now, so user doesn't have to pass
    n_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0
    device = 'cpu' if n_gpus == 0 else 'cuda'
    # exactly one GPU: pin it as the default device; CPU or multi-GPU: no-op
    context_class = NullContext if n_gpus > 1 or n_gpus == 0 else torch.device
    with context_class(device):
        from functools import partial
        requests_state0 = {}
        roles_state0 = None
        # positional state args expected by evaluate() ahead of its kwargs
        args = (None, my_db_state0, selection_docs_state0, requests_state0, roles_state0)
        assert len(args) == len(input_args_list)
        # bind all non-per-prompt arguments of evaluate(); per-prompt values
        # are supplied positionally via eval_vars on each loop iteration
        fun = partial(evaluate,
                      *args,
                      **get_kwargs(evaluate, exclude_names=input_args_list + eval_func_param_names,
                                   **locals()))
        example1 = examples[-1]  # pick reference example; used as template of positional eval args
        all_generations = []
        if not context:
            context = ''
        if chat_conversation is None:
            chat_conversation = []
        while True:
            clear_torch_cache(allow_skip=True)
            instruction = input("\nEnter an instruction: ")
            if instruction == "exit":
                break
            # start from the reference example, then overwrite per-prompt slots
            eval_vars = copy.deepcopy(example1)
            eval_vars[eval_func_param_names.index('instruction')] = \
                eval_vars[eval_func_param_names.index('instruction_nochat')] = instruction
            eval_vars[eval_func_param_names.index('iinput')] = \
                eval_vars[eval_func_param_names.index('iinput_nochat')] = iinput
            eval_vars[eval_func_param_names.index('context')] = context
            # grab other parameters, like langchain_mode:
            # any run_cli() argument whose name matches an evaluate parameter
            # overrides the reference example's value
            for k in eval_func_param_names:
                if k in locals():
                    eval_vars[eval_func_param_names.index(k)] = locals()[k]
            gener = fun(*tuple(eval_vars))
            outr = ''
            res_old = ''
            for gen_output in gener:
                res = gen_output['response']
                sources = gen_output.get('sources', 'Failure of Generation')
                if base_model not in non_hf_types or base_model in ['llama']:
                    if not stream_output:
                        print(res)
                    else:
                        # then stream output for gradio that has full output each generation, so need here to show only new chars
                        diff = res[len(res_old):]
                        print(diff, end='', flush=True)
                        res_old = res
                    outr = res  # don't accumulate
                else:
                    outr += res  # just is one thing
                if sources:
                    # show sources at end after model itself had streamed to std rest of response
                    print('\n\n' + str(sources), flush=True)
            all_generations.append(outr + '\n')
            if not cli_loop:
                # single-shot mode: answer once, then return
                break
            if add_chat_history_to_context:
                # for CLI keep track of conversation
                chat_conversation.extend([[instruction, outr]])
    return all_generations
166,885 | from typing import List, Union, Any, Tuple, Optional
import requests
import torch
from langchain.docstore.document import Document
from langchain.document_loaders import ImageCaptionLoader
import numpy as np
from utils import get_device, clear_torch_cache, NullContext
from doctr.utils.common_types import AbstractFile
The provided code snippet includes necessary dependencies for implementing the `boxes_sort` function. Write a Python function `def boxes_sort(boxes)` to solve the following problem:
Sorts boxes from top-left to bottom-right. Params: boxes: [[x1, y1, x2, y2], [x1, y1, x2, y2], ...]
Here is the function:
def boxes_sort(boxes):
    """Return indices ordering *boxes* from top to bottom.

    Sorting is stable and keyed on each box's top edge (y1), so boxes on the
    same row keep their original relative order.

    :param boxes: [[x1, y1, x2, y2], [x1, y1, x2, y2], ...]
    :return: list of indices into *boxes* in sorted order
    """
    return sorted(range(len(boxes)), key=lambda idx: boxes[idx][1])
166,886 | from typing import List, Union, Any, Tuple, Optional
import requests
import torch
from langchain.docstore.document import Document
from langchain.document_loaders import ImageCaptionLoader
import numpy as np
from utils import get_device, clear_torch_cache, NullContext
from doctr.utils.common_types import AbstractFile
The provided code snippet includes necessary dependencies for implementing the `is_same_line` function. Write a Python function `def is_same_line(box1, box2)` to solve the following problem:
Params: box1: [x1, y1, x2, y2] box2: [x1, y1, x2, y2]
Here is the function:
def is_same_line(box1, box2):
    """Return True when the two boxes lie on the same text line.

    The boxes count as the same line when each box's vertical midpoint falls
    strictly inside the other box's vertical extent (mutual overlap test).

    Params:
        box1: [x1, y1, x2, y2]
        box2: [x1, y1, x2, y2]
    """
    box1_midy = (box1[1] + box1[3]) / 2
    box2_midy = (box2[1] + box2[3]) / 2
    # chained comparisons replace the original four-clause and-chain plus
    # the redundant `if cond: return True / else: return False` idiom
    return box2[1] < box1_midy < box2[3] and box1[1] < box2_midy < box1[3]
166,887 | from typing import List, Union, Any, Tuple, Optional
import requests
import torch
from langchain.docstore.document import Document
from langchain.document_loaders import ImageCaptionLoader
import numpy as np
from utils import get_device, clear_torch_cache, NullContext
from doctr.utils.common_types import AbstractFile
The provided code snippet includes necessary dependencies for implementing the `union_box` function. Write a Python function `def union_box(box1, box2)` to solve the following problem:
Params: box1: [x1, y1, x2, y2] box2: [x1, y1, x2, y2]
Here is the function:
def union_box(box1, box2):
    """Return the smallest axis-aligned box covering both input boxes.

    Params:
        box1: [x1, y1, x2, y2]
        box2: [x1, y1, x2, y2]
    :return: [x1, y1, x2, y2] bounding both inputs
    """
    left, top = min(box1[0], box2[0]), min(box1[1], box2[1])
    right, bottom = max(box1[2], box2[2]), max(box1[3], box2[3])
    return [left, top, right, bottom]
166,888 | from typing import List, Union, Any, Tuple, Optional
import requests
import torch
from langchain.docstore.document import Document
from langchain.document_loaders import ImageCaptionLoader
import numpy as np
from utils import get_device, clear_torch_cache, NullContext
from doctr.utils.common_types import AbstractFile
def space_layout(texts, boxes, threshold_show_spaces=8, threshold_char_width=0.02):
    """Render OCR words into a plain-text layout that preserves page geometry.

    Greedily groups *boxes* into lines (boxes whose vertical midpoints overlap
    the current first box), orders each line left-to-right, estimates one
    character's width from the longest line, then indents every word by its
    x-offset in character cells.  Wide gaps are minified to a " <N> " marker.

    :param texts: sequence of word strings, parallel to *boxes*
    :param boxes: [[x1, y1, x2, y2], ...] word bounding boxes
    :param threshold_show_spaces: gaps wider than this many character cells
        are rendered as " <N> " instead of a single space
    :param threshold_char_width: lower clamp for the estimated character
        width; pass 0.0 to disable the clamp (a zero width then falls back to 1)
    :return: single string; lines separated by newlines
    """
    line_boxes = []
    line_texts = []
    max_line_char_num = 0
    line_width = 0
    boxes = np.array(boxes)
    texts = np.array(texts)
    # Peel off one text line per iteration until all boxes are consumed.
    while len(boxes) > 0:
        box = boxes[0]
        mid = (boxes[:, 3] + boxes[:, 1]) / 2
        inline_boxes = np.logical_and(mid > box[1], mid < box[3])
        # BUG FIX: a zero-height reference box has mid == y1, so the strict
        # comparisons excluded the box itself and the loop never terminated.
        # The reference box is always part of its own line.
        inline_boxes[0] = True
        sorted_xs = np.argsort(boxes[inline_boxes][:, 0], axis=0)
        line_box = boxes[inline_boxes][sorted_xs]
        line_text = texts[inline_boxes][sorted_xs]
        boxes = boxes[~inline_boxes]
        texts = texts[~inline_boxes]
        line_boxes.append(line_box.tolist())
        line_texts.append(line_text.tolist())
        # Track the widest line (in characters) to calibrate char width below.
        if len(" ".join(line_texts[-1])) > max_line_char_num:
            max_line_char_num = len(" ".join(line_texts[-1]))
            line_width = np.array(line_boxes[-1])
            line_width = line_width[:, 2].max() - line_width[:, 0].min()
    char_width = (line_width / max_line_char_num) if max_line_char_num > 0 else 0
    if threshold_char_width == 0.0:
        if char_width == 0:
            char_width = 1
    else:
        # BUG FIX: the clamp was hard-coded to 0.02, silently ignoring the
        # threshold_char_width parameter (default unchanged, so default
        # behavior is identical).
        if char_width <= threshold_char_width:
            char_width = threshold_char_width
    space_line_texts = []
    for i, line_box in enumerate(line_boxes):
        space_line_text = ""
        for j, box in enumerate(line_box):
            # number of character cells between line start and this word,
            # minus what has already been emitted on this line (min 1 space)
            left_char_num = int(box[0] / char_width)
            left_char_num = max((left_char_num - len(space_line_text)), 1)
            if left_char_num > threshold_show_spaces:
                # minified layout: wide gaps become an explicit marker
                space_line_text += f" <{left_char_num}> "
            else:
                space_line_text += " "
            space_line_text += line_texts[i][j]
        space_line_texts.append(space_line_text + "\n")
    return "".join(space_line_texts)
166,889 | from typing import List, Union, Any, Tuple, Optional
import requests
import torch
from langchain.docstore.document import Document
from langchain.document_loaders import ImageCaptionLoader
import numpy as np
from utils import get_device, clear_torch_cache, NullContext
from doctr.utils.common_types import AbstractFile
The provided code snippet includes necessary dependencies for implementing the `read_pdf` function. Write a Python function `def read_pdf( file: AbstractFile, scale: float = 300 / 72, rgb_mode: bool = True, password: Optional[str] = None, **kwargs: Any, ) -> List[np.ndarray]` to solve the following problem:
Read a PDF file and convert it into an image in numpy format >>> from doctr.documents import read_pdf >>> doc = read_pdf("path/to/your/doc.pdf") Args: file: the path to the PDF file scale: rendering scale (1 corresponds to 72dpi) rgb_mode: if True, the output will be RGB, otherwise BGR password: a password to unlock the document, if encrypted kwargs: additional parameters to :meth:`pypdfium2.PdfPage.render` Returns: the list of pages decoded as numpy ndarray of shape H x W x C
Here is the function:
def read_pdf(
    file: AbstractFile,
    scale: float = 300 / 72,
    rgb_mode: bool = True,
    password: Optional[str] = None,
    **kwargs: Any,
) -> List[np.ndarray]:
    """Rasterize every page of a PDF into a numpy image array.

    >>> from doctr.documents import read_pdf
    >>> doc = read_pdf("path/to/your/doc.pdf")

    Args:
        file: the path to the PDF file
        scale: rendering scale (1 corresponds to 72dpi); default is 300dpi
        rgb_mode: if True, the output will be RGB, otherwise BGR
        password: a password to unlock the document, if encrypted
        kwargs: additional parameters to :meth:`pypdfium2.PdfPage.render`

    Returns:
        the list of pages decoded as numpy ndarray of shape H x W x C
    """
    # Rasterise pages to numpy ndarrays with pypdfium2 (imported lazily so the
    # module loads even when pypdfium2 is not installed)
    import pypdfium2 as pdfium
    document = pdfium.PdfDocument(file, password=password, autoclose=True)
    pages = []
    for page in document:
        rendered = page.render(scale=scale, rev_byteorder=rgb_mode, **kwargs)
        pages.append(rendered.to_numpy())
    return pages
166,890 | import ast
import contextlib
import functools
import gc
import getpass
import hashlib
import inspect
import json
import os
import pathlib
import pickle
import platform
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import tarfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from typing import Tuple, Callable, Dict
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
import filelock
import fire
import numpy as np
import pandas as pd
import requests
import uuid
import tabulate
from fire import inspectutils
from joblib import Parallel
from tqdm.auto import tqdm
from src.utils_procs import reulimit
from importlib.metadata import distribution, PackageNotFoundError
import distutils.spawn
import os
The provided code snippet includes necessary dependencies for implementing the `set_seed` function. Write a Python function `def set_seed(seed: int)` to solve the following problem:
Sets the seed of the entire notebook so results are the same every time we run. This is for REPRODUCIBILITY.
Here is the function:
def set_seed(seed: int) -> "np.random.RandomState":
    """
    Sets the seed of the entire notebook so results are the same every time we run.
    This is for REPRODUCIBILITY.

    Seeds numpy's global RNG, Python's `random`, and torch (CPU + CUDA), and
    returns an independent numpy RandomState seeded with the same value.
    """
    import torch
    np.random.seed(seed)
    # independent generator handed back to the caller, decoupled from numpy's
    # global RNG state seeded above
    random_state = np.random.RandomState(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    # NOTE(review): seeds only the current CUDA device (no-op without CUDA);
    # torch.cuda.manual_seed_all would be needed for multi-GPU determinism
    torch.cuda.manual_seed(seed)
    # force deterministic cuDNN kernels and disable autotuning benchmark mode
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # NOTE(review): setting PYTHONHASHSEED here affects only subprocesses;
    # the already-running interpreter's hash randomization is unchanged
    os.environ['PYTHONHASHSEED'] = str(seed)
    return random_state
166,891 | import ast
import contextlib
import functools
import gc
import getpass
import hashlib
import inspect
import json
import os
import pathlib
import pickle
import platform
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import tarfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from typing import Tuple, Callable, Dict
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
import filelock
import fire
import numpy as np
import pandas as pd
import requests
import uuid
import tabulate
from fire import inspectutils
from joblib import Parallel
from tqdm.auto import tqdm
from src.utils_procs import reulimit
def _tar_data(root_dirs=None, tar_file=None, base_dir='./'):
    """Archive every file under `root_dirs` into a gzipped tar.

    :param root_dirs: directory or list of directories to archive (a bare str is wrapped)
    :param tar_file: output path; auto-named from timestamp + HF_HOSTNAME when None
    :param base_dir: base used to compute the relative paths stored in the archive
    :return: (tar_file, tar_file)
    """
    if isinstance(root_dirs, str):
        root_dirs = [root_dirs]
    if tar_file is None:
        # Timestamps contain ' ' and ':' which are awkward in file names.
        datetime_str = str(datetime.now()).replace(" ", "_").replace(":", "_")
        host_name = os.getenv('HF_HOSTNAME', 'emptyhost')
        tar_file = "data_%s_%s.tar.gz" % (datetime_str, host_name)
    assert root_dirs is not None
    base_path = os.path.dirname(tar_file)
    if not os.path.isdir(base_path) and os.path.dirname(tar_file):
        # `makedirs` is a project helper defined elsewhere in this module;
        # presumably it returns the (possibly relocated) directory it created — TODO confirm.
        base_path = makedirs(base_path, exist_ok=True, tmp_ok=True, use_base=True)
        tar_file = os.path.join(base_path, os.path.basename(tar_file))
    with tarfile.open(tar_file, "w:gz") as expt_tar:
        for root_dir in root_dirs:
            if root_dir is None:
                continue
            for root, d, files in os.walk(root_dir):
                for file in files:
                    file_to_archive = os.path.join(root, file)
                    assert os.path.exists(file_to_archive)
                    # Store paths relative to base_dir so the archive unpacks in place.
                    path_to_archive = os.path.relpath(file_to_archive, base_dir)
                    expt_tar.add(name=file_to_archive, arcname=path_to_archive)
    return tar_file, tar_file
from importlib.metadata import distribution, PackageNotFoundError
import distutils.spawn
import os
def tar_data(root_dirs=None, tar_file=None, base_dir='./', fail_any_exception=False):
    """Tar up `root_dirs` into `tar_file`, logging (and optionally raising) on failure.

    :param root_dirs: directory or list of directories to archive
    :param tar_file: output archive path (auto-generated when None)
    :param base_dir: base used to compute relative paths inside the archive
    :param fail_any_exception: when True, re-raise any archiving exception;
        when False, swallow it after logging (best-effort archiving)
    :return: (tar_file, tar_file) on success, None on a swallowed failure
    """
    try:
        return _tar_data(tar_file=tar_file, base_dir=base_dir, root_dirs=root_dirs)
    except Exception as e:
        traceback.print_exc()
        print('Exception in tar archiving: %s' % str(e))
        # BUG FIX: the original raised when fail_any_exception was False —
        # the exact opposite of what the flag name promises.
        if fail_any_exception:
            raise
166,892 | import ast
import contextlib
import functools
import gc
import getpass
import hashlib
import inspect
import json
import os
import pathlib
import pickle
import platform
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import tarfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from typing import Tuple, Callable, Dict
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
import filelock
import fire
import numpy as np
import pandas as pd
import requests
import uuid
import tabulate
from fire import inspectutils
from joblib import Parallel
from tqdm.auto import tqdm
from src.utils_procs import reulimit
from importlib.metadata import distribution, PackageNotFoundError
import distutils.spawn
import os
def import_matplotlib():
    """Import matplotlib with the non-interactive 'agg' backend and pre-load
    pandas/numpy submodules so a later os.fork() cannot deadlock inside dlopen."""
    import matplotlib
    matplotlib.use('agg')
    # KEEP THESE HERE! START
    import matplotlib.pyplot as plt
    import pandas as pd
    # to avoid dlopen deadlock in fork
    import pandas.core.computation.expressions as pd_expressions
    import pandas.core.algorithms as pd_algorithms
    import pandas.core.common as pd_com
    import numpy as np
    # KEEP THESE HERE! END
166,893 | import ast
import contextlib
import functools
import gc
import getpass
import hashlib
import inspect
import json
import os
import pathlib
import pickle
import platform
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import tarfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from typing import Tuple, Callable, Dict
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
import filelock
import fire
import numpy as np
import pandas as pd
import requests
import uuid
import tabulate
from fire import inspectutils
from joblib import Parallel
from tqdm.auto import tqdm
from src.utils_procs import reulimit
def get_device(n_gpus=None):
    """Best available torch device string: 'cuda', 'mps', or 'cpu'.

    :param n_gpus: pass 0 to forbid CUDA even when it is available
    """
    import torch
    if n_gpus != 0 and torch.cuda.is_available():
        return "cuda"
    if torch.backends.mps.is_built():
        return "mps"
    return "cpu"
def cuda_vis_check(total_gpus):
    """Reconcile a raw GPU count with the CUDA_VISIBLE_DEVICES mask.

    Stolen from Jon's h2o4gpu utils.

    :param total_gpus: physical GPU count
    :return: (effective count, list of visible device indices)
    """
    raw = os.getenv("CUDA_VISIBLE_DEVICES")
    if raw is None:
        # No mask at all: every GPU is visible.
        return total_gpus, list(range(0, total_gpus))
    import re
    # Only digits/commas matter for the "is anything actually visible?" check.
    sanitized = re.sub("[^0-9,]", "", "".join(raw.split()))
    if len(sanitized) == 0:
        # Mask set but empty: GPUs explicitly hidden.
        return 0, []
    capped_total = min(total_gpus, raw.count(",") + 1)
    visible = [int(tok) for tok in raw.split(",")]
    return capped_total, visible
from importlib.metadata import distribution, PackageNotFoundError
import distutils.spawn
import os
def get_ngpus_vis(raise_if_exception=True):
    """Number of visible GPUs: counts `nvidia-smi -L` output lines, falls back
    to torch when nvidia-smi is unavailable, then applies the
    CUDA_VISIBLE_DEVICES mask via cuda_vis_check.

    :param raise_if_exception: re-raise when nvidia-smi exceeds the timeout
    :return: int count of usable GPUs
    """
    ngpus_vis1 = None
    shell = False
    if shell:
        cmd = "nvidia-smi -L 2> /dev/null"
    else:
        cmd = ["nvidia-smi", "-L"]
    try:
        timeout = 5 * 3
        o = subprocess.check_output(cmd, shell=shell, timeout=timeout)
        lines = o.decode("utf-8").splitlines()
        ngpus_vis1 = 0
        for line in lines:
            # Each listed GPU is one line; skip NVML failure noise.
            if 'Failed to initialize NVML' not in line:
                ngpus_vis1 += 1
    except (FileNotFoundError, subprocess.CalledProcessError, OSError):
        # GPU systems might not have nvidia-smi, so can't fail
        pass
    except subprocess.TimeoutExpired as e:
        print('Failed get_ngpus_vis: %s' % str(e))
        if raise_if_exception:
            raise
    if ngpus_vis1 is None:
        # nvidia-smi gave no answer at all: ask torch instead.
        import torch
        if get_device() == 'cuda':
            ngpus_vis1 = torch.cuda.device_count() if torch.cuda.is_available() else 0
        else:
            ngpus_vis1 = 0
    ngpus_vis1, which_gpus = cuda_vis_check(ngpus_vis1)
    return ngpus_vis1
166,894 | import ast
import contextlib
import functools
import gc
import getpass
import hashlib
import inspect
import json
import os
import pathlib
import pickle
import platform
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import tarfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from typing import Tuple, Callable, Dict
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
import filelock
import fire
import numpy as np
import pandas as pd
import requests
import uuid
import tabulate
from fire import inspectutils
from joblib import Parallel
from tqdm.auto import tqdm
from src.utils_procs import reulimit
from importlib.metadata import distribution, PackageNotFoundError
import distutils.spawn
import os
def get_mem_gpus(raise_if_exception=True, ngpus=None):
    """Total/used/free GPU framebuffer memory in bytes, summed over all GPUs
    as reported by `nvidia-smi -q` (zeros when nvidia-smi is unavailable).

    :param raise_if_exception: re-raise when nvidia-smi exceeds the timeout
    :param ngpus: pass 0 to skip the query entirely
    :return: (total_bytes, used_bytes, free_bytes)
    """
    total_b = 0
    used_b = 0
    free_b = 0
    if ngpus == 0:
        return total_b, used_b, free_b
    try:
        cmd = "nvidia-smi -q 2> /dev/null | grep -A 3 'FB Memory Usage'"
        raw = subprocess.check_output(cmd, shell=True, timeout=15)
        for line in raw.decode("utf-8").splitlines():
            # nvidia-smi reports MiB; convert to bytes.
            if 'Total' in line:
                total_b += int(line.split()[2]) * 1024 ** 2
            if 'Used' in line:
                used_b += int(line.split()[2]) * 1024 ** 2
            if 'Free' in line:
                free_b += int(line.split()[2]) * 1024 ** 2
    except (FileNotFoundError, subprocess.CalledProcessError, OSError):
        # GPU systems might not have nvidia-smi, so can't fail
        pass
    except subprocess.TimeoutExpired as e:
        print('Failed get_mem_gpus: %s' % str(e))
        if raise_if_exception:
            raise
    return total_b, used_b, free_b
166,895 | import ast
import contextlib
import functools
import gc
import getpass
import hashlib
import inspect
import json
import os
import pathlib
import pickle
import platform
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import tarfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from typing import Tuple, Callable, Dict
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
import filelock
import fire
import numpy as np
import pandas as pd
import requests
import uuid
import tabulate
from fire import inspectutils
from joblib import Parallel
from tqdm.auto import tqdm
from src.utils_procs import reulimit
class ForkContext(threading.local):
    """
    Set context for forking
    Ensures state is returned once done

    Stashes args/kwargs in the module-global `forkdatacontext` (defined
    elsewhere in this file) so a forked child can read them via
    copy-on-write memory instead of pickling them.
    """
    def __init__(self, args=None, kwargs=None, forkdata_capable=True):
        """
        :param args:
        :param kwargs:
        :param forkdata_capable: whether fork is forkdata capable and will use copy-on-write forking of args/kwargs
        """
        self.forkdata_capable = forkdata_capable
        if self.forkdata_capable:
            self.has_args = args is not None
            self.has_kwargs = kwargs is not None
            # Publish into the shared global BEFORE the fork happens.
            forkdatacontext.args = args
            forkdatacontext.kwargs = kwargs
        else:
            self.has_args = False
            self.has_kwargs = False
    def __enter__(self):
        try:
            # flush all outputs so doesn't happen during fork -- don't print/log inside ForkContext contexts!
            sys.stdout.flush()
            sys.stderr.flush()
        except BaseException as e:
            # exit not called if exception, and don't want to leave forkdatacontext filled in that case
            print("ForkContext failure on enter: %s" % str(e))
            self.finally_act()
            raise
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.finally_act()
    def finally_act(self):
        """
        Done when exception hit or exit is reached in context
        first reset forkdatacontext as crucial to have reset even if later 2 calls fail
        :return: None
        """
        if self.forkdata_capable and (self.has_args or self.has_kwargs):
            forkdatacontext._reset()
def _traced_func(func, *args, **kwargs):
    # Runs in the forked child: re-hydrate the real func/args/kwargs that
    # ForkContext stashed in the module-global forkdatacontext before the fork.
    func, args, kwargs = forkdatacontext.get_args_kwargs_for_traced_func(func, args, kwargs)
    return func(*args, **kwargs)
from importlib.metadata import distribution, PackageNotFoundError
import distutils.spawn
import os
def call_subprocess_onetask(func, args=None, kwargs=None):
    """Run `func(*args, **kwargs)` once in a forked child process and return its result.

    On Darwin/Windows (no safe fork semantics) the call happens in-process
    instead.  Arguments reach the child through ForkContext's copy-on-write
    globals, not by pickling, so large objects are cheap to pass.
    """
    if platform.system() in ['Darwin', 'Windows']:
        return func(*args, **kwargs)
    if isinstance(args, list):
        args = tuple(args)
    if args is None:
        args = ()
    if kwargs is None:
        kwargs = {}
    # Prepend func itself: _traced_func recovers it from forkdatacontext.
    args = list(args)
    args = [func] + args
    args = tuple(args)
    with ForkContext(args=args, kwargs=kwargs):
        # Real payload now lives in forkdatacontext; submit only a placeholder.
        args = (None,)
        kwargs = {}
        with ProcessPoolExecutor(max_workers=1) as executor:
            future = executor.submit(_traced_func, *args, **kwargs)
            return future.result()
166,896 | import ast
import contextlib
import functools
import gc
import getpass
import hashlib
import inspect
import json
import os
import pathlib
import pickle
import platform
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import tarfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from typing import Tuple, Callable, Dict
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
import filelock
import fire
import numpy as np
import pandas as pd
import requests
import uuid
import tabulate
from fire import inspectutils
from joblib import Parallel
from tqdm.auto import tqdm
from src.utils_procs import reulimit
from importlib.metadata import distribution, PackageNotFoundError
import distutils.spawn
import os
def start_faulthandler():
    """Enable faulthandler so crashes dump all thread stacks; additionally
    register SIGUSR1 to dump stacks on demand without quitting or core-dumping.

    If more than one fork tries to write at the same time the output can look
    corrupted.  SIGUSR1 is also referenced in h2oai/__init__.py.
    """
    import faulthandler
    faulthandler.enable()
    # register() is not available on every platform (e.g. Windows builds).
    if hasattr(faulthandler, 'register'):
        import signal
        faulthandler.register(signal.SIGUSR1)
166,897 | import ast
import contextlib
import functools
import gc
import getpass
import hashlib
import inspect
import json
import os
import pathlib
import pickle
import platform
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import tarfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from typing import Tuple, Callable, Dict
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
import filelock
import fire
import numpy as np
import pandas as pd
import requests
import uuid
import tabulate
from fire import inspectutils
from joblib import Parallel
from tqdm.auto import tqdm
from src.utils_procs import reulimit
from importlib.metadata import distribution, PackageNotFoundError
import distutils.spawn
import os
def get_local_ip():
    """Best-effort local IP address; falls back to 127.0.0.1."""
    import socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # UDP connect sends no packets; it merely selects the outbound
        # interface, so the target does not need to be reachable.
        sock.connect(('10.255.255.255', 1))
        return sock.getsockname()[0]
    except Exception:
        return '127.0.0.1'
    finally:
        sock.close()
166,898 | import ast
import contextlib
import functools
import gc
import getpass
import hashlib
import inspect
import json
import os
import pathlib
import pickle
import platform
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import tarfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from typing import Tuple, Callable, Dict
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
import filelock
import fire
import numpy as np
import pandas as pd
import requests
import uuid
import tabulate
from fire import inspectutils
from joblib import Parallel
from tqdm.auto import tqdm
from src.utils_procs import reulimit
from importlib.metadata import distribution, PackageNotFoundError
import distutils.spawn
import os
The provided code snippet includes necessary dependencies for implementing the `deepcopy_by_pickle_object` function. Write a Python function `def deepcopy_by_pickle_object(object)` to solve the following problem:
Faster deepcopy, can only work on things that are picklable. Naive Deepcopy is more general. Same method as for class Individual :param object: :return:
Here is the function:
def deepcopy_by_pickle_object(object):
    """
    Faster deepcopy, can only work on things that are picklable. Naive Deepcopy is more general.
    Same method as for class Individual
    :param object: any picklable object
    :return: a deep copy of `object`
    :raises: pickle errors for unpicklable inputs (e.g. lambdas, open files)
    """
    gc.disable()
    try:
        # Highest pickle protocol (-1) round-trip is much faster than copy.deepcopy.
        new_object = pickle.loads(pickle.dumps(object, -1))
    finally:
        # BUG FIX: without finally, a pickling error left gc disabled process-wide.
        gc.enable()
    return new_object
166,899 | import ast
import contextlib
import functools
import gc
import getpass
import hashlib
import inspect
import json
import os
import pathlib
import pickle
import platform
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import tarfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from typing import Tuple, Callable, Dict
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
import filelock
import fire
import numpy as np
import pandas as pd
import requests
import uuid
import tabulate
from fire import inspectutils
from joblib import Parallel
from tqdm.auto import tqdm
from src.utils_procs import reulimit
from importlib.metadata import distribution, PackageNotFoundError
import distutils.spawn
import os
def fix_json(s):
    """Best-effort repair of truncated or malformed JSON text.

    Escapes raw newlines inside strings, closes an unterminated string, and
    appends any missing closing braces/brackets.  Returns the parsed object,
    or None when the input cannot be repaired (e.g. a mismatched closer).
    """
    # Fast path: already valid JSON.
    try:
        return json.loads(s)
    except json.JSONDecodeError:
        pass
    repaired = []
    open_stack = []          # expected closing characters, innermost last
    in_string = False
    prev_escape = False
    for ch in s:
        if in_string:
            if ch == '"' and not prev_escape:
                in_string = False
            elif ch == '\n' and not prev_escape:
                ch = '\\n'   # replace a raw newline with its escape sequence
            elif ch == '\\':
                prev_escape = not prev_escape
            else:
                prev_escape = False
        else:
            if ch == '"':
                in_string = True
                prev_escape = False
            elif ch == '{':
                open_stack.append('}')
            elif ch == '[':
                open_stack.append(']')
            elif ch == '}' or ch == ']':
                if not open_stack or open_stack[-1] != ch:
                    # Mismatched closing character; the input is malformed.
                    return None
                open_stack.pop()
        repaired.append(ch)
    # Still inside a string at the end: terminate it.
    if in_string:
        repaired.append('"')
    # Close remaining open structures in reverse order of opening.
    while open_stack:
        repaired.append(open_stack.pop())
    try:
        return json.loads(''.join(repaired))
    except json.JSONDecodeError:
        # Even the repaired text does not parse: give up.
        return None
166,900 | import ast
import contextlib
import functools
import gc
import getpass
import hashlib
import inspect
import json
import os
import pathlib
import pickle
import platform
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import tarfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from typing import Tuple, Callable, Dict
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
import filelock
import fire
import numpy as np
import pandas as pd
import requests
import uuid
import tabulate
from fire import inspectutils
from joblib import Parallel
from tqdm.auto import tqdm
from src.utils_procs import reulimit
from importlib.metadata import distribution, PackageNotFoundError
import distutils.spawn
import os
def wrap_in_try_except(code):
    """Rewrite `code` so its entire body (plus an injected `import traceback`)
    runs inside try/except Exception that prints the traceback instead of
    letting it propagate.  Returns the transformed source string."""
    tree = ast.parse("import traceback\n" + code)
    # Handler body built by parsing the one-liner rather than hand-assembling nodes.
    printer_stmts = ast.parse("traceback.print_exc()").body
    handler = ast.ExceptHandler(
        type=ast.Name(id="Exception", ctx=ast.Load()),
        name=None,
        body=printer_stmts,
    )
    guarded = ast.Try(
        body=tree.body,
        handlers=[handler],
        orelse=[],
        finalbody=[],
    )
    tree.body = [guarded]
    return ast.unparse(tree)
166,901 | import ast
import contextlib
import functools
import gc
import getpass
import hashlib
import inspect
import json
import os
import pathlib
import pickle
import platform
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import tarfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from typing import Tuple, Callable, Dict
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
import filelock
import fire
import numpy as np
import pandas as pd
import requests
import uuid
import tabulate
from fire import inspectutils
from joblib import Parallel
from tqdm.auto import tqdm
from src.utils_procs import reulimit
from importlib.metadata import distribution, PackageNotFoundError
import distutils.spawn
def enqueue_output(file, queue):
    """Pump lines from a binary pipe into `queue` until EOF, then close the pipe.

    Intended to run in a worker thread so the parent can poll a subprocess's
    stdout/stderr without blocking (see read_popen_pipes below).

    NOTE(review): the body was missing from the original source; this is the
    canonical reader-thread implementation for that usage.
    """
    for line in iter(file.readline, b''):
        queue.put(line)
    file.close()
import os
def read_popen_pipes(p):
    """Yield (stdout_line, stderr_line) pairs from live subprocess `p` until it
    exits and both pipes drain.  Either element may be '' when that pipe had
    no data at poll time.

    NOTE(review): this polls get_nowait() in a tight loop, so it busy-waits
    while the child is quiet.
    """
    with ThreadPoolExecutor(2) as pool:
        q_stdout, q_stderr = Queue(), Queue()
        # One reader thread per pipe; enqueue_output pumps lines into the queues.
        pool.submit(enqueue_output, p.stdout, q_stdout)
        pool.submit(enqueue_output, p.stderr, q_stderr)
        while True:
            # Done only when the child has exited AND both queues are drained.
            if p.poll() is not None and q_stdout.empty() and q_stderr.empty():
                break
            out_line = err_line = ''
            try:
                out_line = q_stdout.get_nowait()
            except Empty:
                pass
            try:
                err_line = q_stderr.get_nowait()
            except Empty:
                pass
            yield out_line, err_line
166,902 | import ast
import contextlib
import functools
import gc
import getpass
import hashlib
import inspect
import json
import os
import pathlib
import pickle
import platform
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import tarfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from typing import Tuple, Callable, Dict
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
import filelock
import fire
import numpy as np
import pandas as pd
import requests
import uuid
import tabulate
from fire import inspectutils
from joblib import Parallel
from tqdm.auto import tqdm
from src.utils_procs import reulimit
from importlib.metadata import distribution, PackageNotFoundError
import distutils.spawn
import os
def start_process(cmd):
    """Run `cmd` as a script under a fresh unbuffered interactive Python,
    streaming the child's stdout to ours byte-by-byte.

    :param cmd: path of the script/argument handed to the interpreter
    """
    # BUG FIX: the interpreter and its flags must be separate argv entries;
    # the original passed "python -i -q -u" as one (unfindable) executable.
    full_cmd = [sys.executable, "-i", "-q", "-u", cmd]
    process = subprocess.Popen(full_cmd, stdout=subprocess.PIPE)
    for chunk in iter(lambda: process.stdout.read(1), b''):
        # BUG FIX: the pipe yields bytes; decode before writing to text stdout.
        sys.stdout.write(chunk.decode(errors="replace"))
    # Reap the child to avoid leaving a zombie process behind.
    process.wait()
166,903 | import ast
import contextlib
import functools
import gc
import getpass
import hashlib
import inspect
import json
import os
import pathlib
import pickle
import platform
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import tarfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from typing import Tuple, Callable, Dict
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
import filelock
import fire
import numpy as np
import pandas as pd
import requests
import uuid
import tabulate
from fire import inspectutils
from joblib import Parallel
from tqdm.auto import tqdm
from src.utils_procs import reulimit
from importlib.metadata import distribution, PackageNotFoundError
import distutils.spawn
import os
def undo_reverse_ucurve_list(lst):
    """Invert the 'reverse u-curve' ordering: re-interleave the reversed back
    half with the front half, taking the back-half element first at each step.

    :param lst: the u-curve-ordered list
    :return: a new list in the restored order (same object for length 1)
    """
    n = len(lst)
    if n == 0:
        return []
    if n == 1:
        return lst
    if n == 2:
        return [lst[1], lst[0]]
    mid = n // 2
    front = lst[:mid]
    back = lst[mid:][::-1]
    merged = []
    for back_item, front_item in zip(back, front):
        merged.extend((back_item, front_item))
    # Odd length: the reversed back half has one leftover element.
    if n % 2 != 0:
        merged.append(back[-1])
    return merged
166,904 | import os
from functools import wraps
import psutil
def get_all_rlimit(pid=None):
    """Return {(name, resource_id): (soft, hard)} for every known rlimit of
    process `pid` (the current process by default).

    Uses the module-level parallel lists `rlims_str`/`rlims` of resource names
    and psutil RLIMIT_* ids.
    """
    if pid is None:
        pid = os.getpid()
    ps = psfunc(psutil.Process, pid)
    result = {}
    for rlim_str, rlim in zip(rlims_str, rlims):
        # BUG FIX: the original tested the whole `rlims` list (which can never
        # be None inside the loop) instead of the individual resource id.
        if rlim is None:
            continue
        result[(rlim_str, rlim)] = rlimitproc(ps, rlim)
    return result
# Default caps substituted when a hard limit reports -1 ("unlimited").
limit_nofile = 131071
limit_nproc = 16384
def psfunc(func, *args, **kwargs):
    """
    Safely call a psutil function, returning None when it fails in an
    ignorable way.  psutil reads /proc entries that can vanish at any moment,
    and it does not shield callers from the resulting direct or cascaded
    errors inside the package.
    :param func: psutil function to use
    :param args: args
    :param kwargs: kwargs
    :return: function return value, or None on a swallowed error
    """
    try:
        return func(*args, **kwargs)
    except (psutil.NoSuchProcess, psutil.AccessDenied, FileNotFoundError, OSError, TypeError, AttributeError):
        # /proc entry disappeared mid-call or psutil hiccuped: no answer.
        return None
    except Exception:
        # Unexpected error: only escalate when HARD_ASSERTS debugging is on.
        if os.environ.get('HARD_ASSERTS'):
            raise
        return None
def reulimit(pid=None, verbose=False):
    """Raise each soft rlimit of `pid` (default: current process) up to its
    hard limit (Linux only).  NOFILE/NPROC are additionally capped by the
    module defaults limit_nofile/limit_nproc when the hard limit reports -1
    ("unlimited").  RLIMIT_CORE is intentionally left untouched."""
    from sys import platform
    if not (platform == "linux" or platform == "linux2"):
        return
    if pid is None:
        pid = os.getpid()
    ps = psfunc(psutil.Process, pid)
    ulimits_dict = get_all_rlimit()
    for k, v in zip(ulimits_dict.keys(), ulimits_dict.values()):
        # k is (name, resource_id); v is the (soft, hard) pair.
        if k[1] == psutil.RLIMIT_CORE:
            continue
        if verbose:
            print("rlimit %s of %s" % (str(k[0]), str(v[0])))
        if isinstance(v, tuple) and len(v) == 2:
            newlimits = list(v)
            # set soft to hard limit
            if newlimits[0] != newlimits[1]:
                if k[1] == psutil.RLIMIT_NOFILE:
                    # -1 means "unlimited": substitute the module default cap.
                    hard_limit = newlimits[1] if newlimits[1] != -1 else limit_nofile
                    newlimits[0] = max(newlimits[0], min(limit_nofile, hard_limit))
                elif k[1] == psutil.RLIMIT_NPROC:
                    hard_limit = newlimits[1] if newlimits[1] != -1 else limit_nproc
                    newlimits[0] = max(newlimits[0], min(limit_nproc, hard_limit))
                else:
                    newlimits[0] = newlimits[1]
                try:
                    ps.rlimit(k[1], limits=tuple(newlimits))
                    if verbose:
                        print("Set rlimit %s of %s -> %s" % (str(k[0]), str(v[0]), str(newlimits[0])))
                except (TypeError, AttributeError, psutil.AccessDenied):
                    print("Could not set desired rlimit %s of %s -> %s" % (
                        str(k[0]), str(v[0]), str(newlimits[0])))
                except (FileNotFoundError, OSError, psutil.NoSuchProcess):
                    pass
                except Exception as e:
                    print("Couldn't set ulimit %s" % str(e))
                    if os.environ.get('HARD_ASSERTS'):
                        raise
    return
166,905 | import os
from functools import wraps
import psutil
def rlimitproc(pp, rlim):
    """(soft, hard) limits for resource `rlim` of psutil process `pp`;
    (-1, -1) when the process is gone or the resource is unusable."""
    try:
        return pp.rlimit(rlim)
    except (psutil.NoSuchProcess, psutil.AccessDenied, FileNotFoundError, OSError, TypeError, AttributeError):
        # Process vanished or psutil misbehaved: fall through to the sentinel.
        pass
    except ValueError as e:
        # Platform/kernel does not know this resource id.
        if 'invalid resource specified' not in str(e):
            raise
        print("rlimitproc exception for rlim %s: %s" % (rlim, str(e)))
    except Exception as e:
        print("rlimitproc exception: rlim %s: %s" % (rlim, str(e)))
        if os.environ.get('HARD_ASSERTS'):
            raise
    return -1, -1
# Default NPROC cap substituted when a hard limit reports -1 ("unlimited").
limit_nproc = 16384
def psfunc(func, *args, **kwargs):
    """
    Safely ask for psutil function call
    psutil accesses /proc entries that can random disappear, and psutil does not have sufficient protection
    for user against various errors either direct or a cascade within the package.
    :param func: psutil function to use
    :param args: args
    :param kwargs: kwargs
    :return: function return value, or None when an ignorable error was swallowed
    """
    try:
        return func(*args, **kwargs)
    except (psutil.NoSuchProcess, psutil.AccessDenied, FileNotFoundError, OSError, TypeError, AttributeError):
        # /proc entry vanished mid-call or psutil cascaded an internal error.
        pass
    except Exception as e:
        # Unexpected error: only escalate when HARD_ASSERTS debugging is on.
        if os.environ.get('HARD_ASSERTS'):
            raise
def get_nproc_limit(pid=None):
    """(soft, hard) RLIMIT_NPROC for `pid` (default: this process); any -1
    ("unlimited"/unknown) entry is replaced by the module default limit_nproc."""
    if pid is None:
        pid = os.getpid()
    proc = psfunc(psutil.Process, pid)
    soft, hard = rlimitproc(proc, psutil.RLIMIT_NPROC) if proc is not None else (-1, -1)
    if soft == -1:
        soft = limit_nproc
    if hard == -1:
        hard = limit_nproc
    return (soft, hard)
166,906 | import os
from functools import wraps
import psutil
def psfunc(func, *args, **kwargs):
    """
    Safely ask for psutil function call
    psutil accesses /proc entries that can random disappear, and psutil does not have sufficient protection
    for user against various errors either direct or a cascade within the package.
    :param func: psutil function to use
    :param args: args
    :param kwargs: kwargs
    :return: function return value, or None when an ignorable error was swallowed
    """
    try:
        return func(*args, **kwargs)
    except (psutil.NoSuchProcess, psutil.AccessDenied, FileNotFoundError, OSError, TypeError, AttributeError):
        # /proc entry vanished mid-call or psutil cascaded an internal error.
        pass
    except Exception as e:
        # Unexpected error: only escalate when HARD_ASSERTS debugging is on.
        if os.environ.get('HARD_ASSERTS'):
            raise
The provided code snippet includes necessary dependencies for implementing the `wrap_psutil` function. Write a Python function `def wrap_psutil(func)` to solve the following problem:
Decorate a function that uses psutil in case of ignorable exception
Here is the function:
def wrap_psutil(func):
    """Decorate a psutil-using function so ignorable psutil/OS errors are
    swallowed via psfunc (the call then yields None)."""
    @wraps(func)
    def wrapped(*args, **kwargs):
        return psfunc(func, *args, **kwargs)
    return wrapped
166,907 | import os
from functools import wraps
import psutil
def psfunc(func, *args, **kwargs):
    """
    Safely ask for psutil function call
    psutil accesses /proc entries that can random disappear, and psutil does not have sufficient protection
    for user against various errors either direct or a cascade within the package.
    :param func: psutil function to use
    :param args: args
    :param kwargs: kwargs
    :return: function return value, or None when an ignorable error was swallowed
    """
    try:
        return func(*args, **kwargs)
    except (psutil.NoSuchProcess, psutil.AccessDenied, FileNotFoundError, OSError, TypeError, AttributeError):
        # /proc entry vanished mid-call or psutil cascaded an internal error.
        pass
    except Exception as e:
        # Unexpected error: only escalate when HARD_ASSERTS debugging is on.
        if os.environ.get('HARD_ASSERTS'):
            raise
def psfunc_list(func, *args, **kwargs):
    """Like psfunc, but a swallowed error yields [] instead of None, so the
    result is always safe to iterate."""
    result = psfunc(func, *args, **kwargs)
    return [] if result is None else result
166,908 | import os
from functools import wraps
import psutil
The provided code snippet includes necessary dependencies for implementing the `psattr` function. Write a Python function `def psattr(obj, attr)` to solve the following problem:
Safely ask for an attributes value for psutil psutil accesses /proc entries that can random disappear, and psutil does not have sufficient protection for user against various errors either direct or a cascade within the package. :param obj: psutil object with attributes :param attr: attribute name to get :return: attribute value
Here is the function:
def psattr(obj, attr):
    """
    Safely read attribute `attr` from a psutil object.
    psutil accesses /proc entries that can random disappear, and psutil does not have sufficient protection
    for user against various errors either direct or a cascade within the package.
    :param obj: psutil object with attributes
    :param attr: attribute name to get
    :return: attribute value, or None when an ignorable error was swallowed
    """
    try:
        return getattr(obj, attr)
    except (psutil.NoSuchProcess, psutil.AccessDenied, FileNotFoundError, OSError, TypeError, AttributeError):
        # Process vanished or the attribute access cascaded an internal error.
        return None
    except Exception:
        # Unexpected error: only escalate when HARD_ASSERTS debugging is on.
        if os.environ.get('HARD_ASSERTS'):
            raise
        return None
166,909 | import os
from functools import wraps
import psutil
def rlimitproc(pp, rlim):
    """Return (soft, hard) limits for resource `rlim` of psutil process `pp`;
    (-1, -1) when the process is gone or the resource is unusable."""
    try:
        return pp.rlimit(rlim)
    except (psutil.NoSuchProcess, psutil.AccessDenied, FileNotFoundError, OSError, TypeError, AttributeError):
        # Process vanished or psutil misbehaved: fall through to the sentinel.
        pass
    except ValueError as e:
        # Platform/kernel does not know this resource id.
        if 'invalid resource specified' in str(e):
            print("rlimitproc exception for rlim %s: %s" % (rlim, str(e)))
        else:
            raise
    except Exception as e:
        print("rlimitproc exception: rlim %s: %s" % (rlim, str(e)))
        if os.environ.get('HARD_ASSERTS'):
            raise
        pass
    return -1, -1
# Default NOFILE cap substituted when a hard limit reports -1 ("unlimited").
limit_nofile = 131071
def psfunc(func, *args, **kwargs):
    """
    Safely ask for psutil function call
    psutil accesses /proc entries that can random disappear, and psutil does not have sufficient protection
    for user against various errors either direct or a cascade within the package.
    :param func: psutil function to use
    :param args: args
    :param kwargs: kwargs
    :return: function return value, or None when an ignorable error was swallowed
    """
    try:
        return func(*args, **kwargs)
    except (psutil.NoSuchProcess, psutil.AccessDenied, FileNotFoundError, OSError, TypeError, AttributeError):
        # /proc entry vanished mid-call or psutil cascaded an internal error.
        pass
    except Exception as e:
        # Unexpected error: only escalate when HARD_ASSERTS debugging is on.
        if os.environ.get('HARD_ASSERTS'):
            raise
def get_file_limit(pid=None):
    """(soft, hard) RLIMIT_NOFILE for `pid` (default: this process); any -1
    ("unlimited"/unknown) entry is replaced by the module default limit_nofile."""
    if pid is None:
        pid = os.getpid()
    proc = psfunc(psutil.Process, pid)
    soft, hard = rlimitproc(proc, psutil.RLIMIT_NOFILE) if proc is not None else (-1, -1)
    if soft == -1:
        soft = limit_nofile
    if hard == -1:
        hard = limit_nofile
    return (soft, hard)
166,910 | import sys
import os
import traceback
def protect_stream(stream_name):
    """Replace sys.<stream_name> with a FinalizeStream-wrapped StreamProxy.

    :param stream_name: 'stdout' or 'stderr'
    :raises ValueError: for any other stream name
    """
    if stream_name not in ("stdout", "stderr"):
        raise ValueError("Unsupported stream name. Choose 'stdout' or 'stderr'.")
    original = getattr(sys, stream_name)
    setattr(sys, stream_name, FinalizeStream(StreamProxy(original)))
def protect_stdout_stderr():
    """Wrap sys.stdout and sys.stderr in the protective proxy pair; call once
    at application startup."""
    # Protect both stdout and stderr at the start of your application
    protect_stream("stdout")
    protect_stream("stderr")
166,911 | import textwrap
import re
from src.utils import flatten_list, have_emoji, have_langid
def setup_nltk():
    """Download the NLTK 'punkt' sentence-tokenizer data (requires network)."""
    import nltk  # we'll use this to split into sentences
    nltk.download("punkt")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.