diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/__init__.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd903f4bd790b6af45f056f3673e42168e969fa7 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/context.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57727ca45c1c30f3ad90552f3be19af6e68743e2 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/context.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/iterator.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/iterator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5ead8eb2bdc408d227a917034b25d0eb64b4d42 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/iterator.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__init__.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/__init__.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68a80b6e2198e71fa8c26236615914298d91faa5 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/__init__.cpython-310.pyc differ diff 
--git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/transform_polars.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/transform_polars.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73dcae910e6df19a2650751dfb40c26cfd41e302 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/transform_polars.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/transform_pyarrow.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/transform_pyarrow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1792d9280ad8e66e1867866d101cf1ee64c14f4f Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/transform_pyarrow.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/transform_polars.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/transform_polars.py new file mode 100644 index 0000000000000000000000000000000000000000..93c615f55d21e2bb4f31dfdf419ab12189e8d998 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/transform_polars.py @@ -0,0 +1,40 @@ +from typing import TYPE_CHECKING, List + +try: + import pyarrow +except ImportError: + pyarrow = None + + +if TYPE_CHECKING: + from ray.data._internal.planner.exchange.sort_task_spec import SortKey + +pl = None + + +def check_polars_installed(): + try: + global pl + import polars as pl + except ImportError: + raise ImportError( + "polars not installed. 
Install with `pip install polars` or set " + "`DataContext.use_polars = False` to fall back to pyarrow" + ) + + +def sort(table: "pyarrow.Table", sort_key: "SortKey") -> "pyarrow.Table": + check_polars_installed() + df = pl.from_arrow(table) + return df.sort(sort_key.get_columns(), reverse=sort_key.get_descending()).to_arrow() + + +def concat_and_sort( + blocks: List["pyarrow.Table"], sort_key: "SortKey" +) -> "pyarrow.Table": + check_polars_installed() + blocks = [pl.from_arrow(block) for block in blocks] + df = pl.concat(blocks).sort( + sort_key.get_columns(), reverse=sort_key.get_descending() + ) + return df.to_arrow() diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/transform_pyarrow.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/transform_pyarrow.py new file mode 100644 index 0000000000000000000000000000000000000000..a71a1eae6f61fe962cda9ddcccf6e400ab9c7623 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/transform_pyarrow.py @@ -0,0 +1,497 @@ +from typing import TYPE_CHECKING, List, Union + +import numpy as np +from packaging.version import parse as parse_version + +from ray._private.utils import _get_pyarrow_version +from ray.air.util.tensor_extensions.arrow import ( + INT32_OVERFLOW_THRESHOLD, + MIN_PYARROW_VERSION_CHUNKED_ARRAY_TO_NUMPY_ZERO_COPY_ONLY, + PYARROW_VERSION, +) + +try: + import pyarrow +except ImportError: + pyarrow = None + + +if TYPE_CHECKING: + from ray.data._internal.planner.exchange.sort_task_spec import SortKey + + +def sort(table: "pyarrow.Table", sort_key: "SortKey") -> "pyarrow.Table": + import pyarrow.compute as pac + + indices = pac.sort_indices(table, sort_keys=sort_key.to_arrow_sort_args()) + return take_table(table, indices) + + +def take_table( + table: "pyarrow.Table", + indices: Union[List[int], "pyarrow.Array", "pyarrow.ChunkedArray"], +) -> "pyarrow.Table": + """Select rows from the table. 
+ + This method is an alternative to pyarrow.Table.take(), which breaks for + extension arrays. This is exposed as a static method for easier use on + intermediate tables, not underlying an ArrowBlockAccessor. + """ + from ray.air.util.transform_pyarrow import ( + _concatenate_extension_column, + _is_column_extension_type, + ) + + if any(_is_column_extension_type(col) for col in table.columns): + new_cols = [] + for col in table.columns: + if _is_column_extension_type(col) and col.num_chunks > 1: + # .take() will concatenate internally, which currently breaks for + # extension arrays. + col = _concatenate_extension_column(col) + new_cols.append(col.take(indices)) + table = pyarrow.Table.from_arrays(new_cols, schema=table.schema) + else: + table = table.take(indices) + return table + + +def unify_schemas( + schemas: List["pyarrow.Schema"], +) -> "pyarrow.Schema": + """Version of `pyarrow.unify_schemas()` which also handles checks for + variable-shaped tensors in the given schemas. + + This function scans all input schemas to identify columns that contain + variable-shaped tensors or objects. For tensor columns, it ensures the + use of appropriate tensor types (including variable-shaped tensor types). + For object columns, it uses a specific object type to accommodate any + objects present. Additionally, it handles columns with null-typed lists + by determining their actual types from the given schemas. + + Currently, it disallows the concatenation of tensor columns and + pickled object columsn for performance reasons. + """ + import pyarrow as pa + + from ray.air.util.object_extensions.arrow import ArrowPythonObjectType + from ray.air.util.tensor_extensions.arrow import ( + ArrowTensorType, + ArrowVariableShapedTensorType, + ) + + schemas_to_unify = [] + schema_field_overrides = {} + + # Rollup columns with opaque (null-typed) lists, to override types in + # the following for-loop. 
+ cols_with_null_list = set() + + all_columns = set() + for schema in schemas: + for col_name in schema.names: + col_type = schema.field(col_name).type + if pa.types.is_list(col_type) and pa.types.is_null(col_type.value_type): + cols_with_null_list.add(col_name) + all_columns.add(col_name) + + from ray.air.util.tensor_extensions.arrow import ( + get_arrow_extension_fixed_shape_tensor_types, + get_arrow_extension_tensor_types, + ) + + arrow_tensor_types = get_arrow_extension_tensor_types() + arrow_fixed_shape_tensor_types = get_arrow_extension_fixed_shape_tensor_types() + + columns_with_objects = set() + columns_with_tensor_array = set() + for col_name in all_columns: + for s in schemas: + indices = s.get_all_field_indices(col_name) + if len(indices) > 1: + # This is broken for Pandas blocks and broken with the logic here + raise ValueError( + f"Schema {s} has multiple fields with the same name: {col_name}" + ) + elif len(indices) == 0: + continue + if isinstance(s.field(col_name).type, ArrowPythonObjectType): + columns_with_objects.add(col_name) + if isinstance(s.field(col_name).type, arrow_tensor_types): + columns_with_tensor_array.add(col_name) + + if len(columns_with_objects.intersection(columns_with_tensor_array)) > 0: + # This is supportable if we use object type, but it will be expensive + raise ValueError( + "Found columns with both objects and tensors: " + f"{columns_with_tensor_array.intersection(columns_with_objects)}" + ) + for col_name in columns_with_tensor_array: + tensor_array_types = [ + s.field(col_name).type + for s in schemas + if isinstance(s.field(col_name).type, arrow_tensor_types) + ] + + if ArrowTensorType._need_variable_shaped_tensor_array(tensor_array_types): + if isinstance(tensor_array_types[0], ArrowVariableShapedTensorType): + new_type = tensor_array_types[0] + elif isinstance(tensor_array_types[0], arrow_fixed_shape_tensor_types): + new_type = ArrowVariableShapedTensorType( + dtype=tensor_array_types[0].scalar_type, + 
ndim=len(tensor_array_types[0].shape), + ) + else: + raise ValueError( + "Detected need for variable shaped tensor representation, " + f"but schema is not ArrayTensorType: {tensor_array_types[0]}" + ) + schema_field_overrides[col_name] = new_type + + for col_name in columns_with_objects: + schema_field_overrides[col_name] = ArrowPythonObjectType() + + if cols_with_null_list: + # For each opaque list column, iterate through all schemas until we find + # a valid value_type that can be used to override the column types in + # the following for-loop. + for col_name in cols_with_null_list: + for schema in schemas: + col_type = schema.field(col_name).type + if not pa.types.is_list(col_type) or not pa.types.is_null( + col_type.value_type + ): + schema_field_overrides[col_name] = col_type + break + + if schema_field_overrides: + # Go through all schemas and update the types of columns from the above loop. + for schema in schemas: + for col_name, col_new_type in schema_field_overrides.items(): + var_shaped_col = schema.field(col_name).with_type(col_new_type) + col_idx = schema.get_field_index(col_name) + schema = schema.set(col_idx, var_shaped_col) + schemas_to_unify.append(schema) + else: + schemas_to_unify = schemas + # Let Arrow unify the schema of non-tensor extension type columns. + return pyarrow.unify_schemas(schemas_to_unify) + + +def _concatenate_chunked_arrays(arrs: "pyarrow.ChunkedArray") -> "pyarrow.ChunkedArray": + """ + Concatenate provided chunked arrays into a single chunked array. + """ + from ray.data.extensions import get_arrow_extension_tensor_types + + tensor_types = get_arrow_extension_tensor_types() + + # Single flat list of chunks across all chunked arrays. + chunks = [] + type_ = None + for arr in arrs: + if type_ is None: + type_ = arr.type + else: + if isinstance(type_, tensor_types): + raise ValueError( + "_concatenate_chunked_arrays should only be used on non-tensor " + f"extension types, but got a chunked array of type {type_}." 
+ ) + assert type_ == arr.type, f"Types mismatch: {type_} != {arr.type}" + # Add chunks for this chunked array to flat chunk list. + chunks.extend(arr.chunks) + # Construct chunked array on flat list of chunks. + return pyarrow.chunked_array(chunks, type=type_) + + +def concat(blocks: List["pyarrow.Table"]) -> "pyarrow.Table": + """Concatenate provided Arrow Tables into a single Arrow Table. This has special + handling for extension types that pyarrow.concat_tables does not yet support. + """ + import pyarrow as pa + + from ray.air.util.tensor_extensions.arrow import ArrowConversionError + from ray.data.extensions import ( + ArrowPythonObjectArray, + ArrowPythonObjectType, + ArrowTensorArray, + get_arrow_extension_tensor_types, + ) + + tensor_types = get_arrow_extension_tensor_types() + + if not blocks: + # Short-circuit on empty list of blocks. + return blocks + + if len(blocks) == 1: + return blocks[0] + + # Rollup columns with opaque (null-typed) lists, to process in following for-loop. + cols_with_null_list = set() + for b in blocks: + for col_name in b.schema.names: + col_type = b.schema.field(col_name).type + if pa.types.is_list(col_type) and pa.types.is_null(col_type.value_type): + cols_with_null_list.add(col_name) + + # If the result contains pyarrow schemas, unify them + schemas_to_unify = [b.schema for b in blocks] + try: + schema = unify_schemas(schemas_to_unify) + except Exception as e: + raise ArrowConversionError(str(blocks)) from e + + if ( + any(isinstance(type_, pa.ExtensionType) for type_ in schema.types) + or cols_with_null_list + ): + # Custom handling for extension array columns. + cols = [] + for col_name in schema.names: + col_chunked_arrays = [] + for block in blocks: + col_chunked_arrays.append(block.column(col_name)) + + if isinstance(schema.field(col_name).type, tensor_types): + # For our tensor extension types, manually construct a chunked array + # containing chunks from all blocks. 
This is to handle + # homogeneous-shaped block columns having different shapes across + # blocks: if tensor element shapes differ across blocks, a + # variable-shaped tensor array will be returned. + col = ArrowTensorArray._chunk_tensor_arrays( + [chunk for ca in col_chunked_arrays for chunk in ca.chunks] + ) + elif isinstance(schema.field(col_name).type, ArrowPythonObjectType): + chunks_to_concat = [] + # Cast everything to objects if concatenated with an object column + for ca in col_chunked_arrays: + for chunk in ca.chunks: + if isinstance(ca.type, ArrowPythonObjectType): + chunks_to_concat.append(chunk) + else: + chunks_to_concat.append( + ArrowPythonObjectArray.from_objects(chunk.to_pylist()) + ) + col = pa.chunked_array(chunks_to_concat) + else: + if col_name in cols_with_null_list: + # For each opaque list column, iterate through all schemas until + # we find a valid value_type that can be used to override the + # column types in the following for-loop. + scalar_type = None + for arr in col_chunked_arrays: + if not pa.types.is_list(arr.type) or not pa.types.is_null( + arr.type.value_type + ): + scalar_type = arr.type + break + + if scalar_type is not None: + for c_idx in range(len(col_chunked_arrays)): + c = col_chunked_arrays[c_idx] + if pa.types.is_list(c.type) and pa.types.is_null( + c.type.value_type + ): + if pa.types.is_list(scalar_type): + # If we are dealing with a list input, + # cast the array to the scalar_type found above. + col_chunked_arrays[c_idx] = c.cast(scalar_type) + else: + # If we are dealing with a single value, construct + # a new array with null values filled. + col_chunked_arrays[c_idx] = pa.chunked_array( + [pa.nulls(c.length(), type=scalar_type)] + ) + + col = _concatenate_chunked_arrays(col_chunked_arrays) + cols.append(col) + + # Build the concatenated table. + table = pyarrow.Table.from_arrays(cols, schema=schema) + # Validate table schema (this is a cheap check by default). 
+ table.validate() + else: + # No extension array columns, so use built-in pyarrow.concat_tables. + if parse_version(_get_pyarrow_version()) >= parse_version("14.0.0"): + # `promote` was superseded by `promote_options='default'` in Arrow 14. To + # prevent `FutureWarning`s, we manually check the Arrow version and use the + # appropriate parameter. + table = pyarrow.concat_tables(blocks, promote_options="default") + else: + table = pyarrow.concat_tables(blocks, promote=True) + return table + + +def concat_and_sort( + blocks: List["pyarrow.Table"], sort_key: "SortKey" +) -> "pyarrow.Table": + import pyarrow.compute as pac + + ret = concat(blocks) + indices = pac.sort_indices(ret, sort_keys=sort_key.to_arrow_sort_args()) + return take_table(ret, indices) + + +def to_numpy( + array: Union["pyarrow.Array", "pyarrow.ChunkedArray"], + *, + zero_copy_only: bool = True, +) -> np.ndarray: + """Wrapper for `Array`s and `ChunkedArray`s `to_numpy` API, + handling API divergence b/w Arrow versions""" + + import pyarrow as pa + + if isinstance(array, pa.Array): + return array.to_numpy(zero_copy_only=zero_copy_only) + elif isinstance(array, pa.ChunkedArray): + if PYARROW_VERSION >= MIN_PYARROW_VERSION_CHUNKED_ARRAY_TO_NUMPY_ZERO_COPY_ONLY: + return array.to_numpy(zero_copy_only=zero_copy_only) + else: + return array.to_numpy() + else: + raise ValueError( + f"Either of `Array` or `ChunkedArray` was expected, got {type(array)}" + ) + + +def combine_chunks(table: "pyarrow.Table") -> "pyarrow.Table": + """This is counterpart for Pyarrow's `Table.combine_chunks` that's using + extended `ChunkedArray` combination protocol. 
+ + For more details check out `combine_chunked_array` py-doc + """ + + new_column_values_arrays = [] + + for col in table.columns: + new_column_values_arrays.append(combine_chunked_array(col)) + + return pyarrow.Table.from_arrays(new_column_values_arrays, schema=table.schema) + + +def combine_chunked_array( + array: "pyarrow.ChunkedArray", +) -> Union["pyarrow.Array", "pyarrow.ChunkedArray"]: + """This is counterpart for Pyarrow's `ChunkedArray.combine_chunks` that additionally + + 1. Handles `ExtensionType`s (like ArrowTensorType, ArrowTensorTypeV2, + ArrowPythonObjectType, etc) + + 2. Making sure `ChunkedArray`s comprising provided `Table` are combined + safely, ie avoiding overflows of Arrow's internal offsets (using int32 for + most of its native types, other than "large" kind). + + For more details check py-doc of `_try_combine_chunks_safe` method. + """ + + import pyarrow as pa + + from ray.air.util.transform_pyarrow import ( + _concatenate_extension_column, + _is_column_extension_type, + ) + + assert isinstance( + array, pa.ChunkedArray + ), f"Expected `ChunkedArray`, got {type(array)}" + + if _is_column_extension_type(array): + # Arrow `ExtensionArray`s can't be concatenated via `combine_chunks`, + # hence require manual concatenation + return _concatenate_extension_column(array) + elif len(array.chunks) == 0: + # NOTE: In case there's no chunks, we need to explicitly create + # an empty array since calling into `combine_chunks` would fail + # due to it expecting at least 1 chunk to be present + return pa.array([], type=array.type) + else: + return _try_combine_chunks_safe(array) + + +def _try_combine_chunks_safe( + array: "pyarrow.ChunkedArray", max_chunk_size=INT32_OVERFLOW_THRESHOLD +) -> Union["pyarrow.Array", "pyarrow.ChunkedArray"]: + """This method provides a safe way of combining `ChunkedArray`s exceeding 2 GiB + in size, which aren't using "large_*" types (and therefore relying on int32 + offsets). 
+ + When handling provided `ChunkedArray` this method will be either + + - Relying on PyArrow's default `combine_chunks` (therefore returning single + contiguous `Array`) in cases when + - Array's total size is < 2 GiB + - Array's underlying type is of "large" kind (ie using one of the + `large_*` type family) + - Safely combining subsets of tasks such that resulting `Array`s to not + exceed 2 GiB in size (therefore returning another `ChunkedArray` albeit + with potentially smaller number of chunks that have resulted from clumping + the original ones) + + Returns: + - pa.Array if it's possible to combine provided pa.ChunkedArray into single + contiguous array + - pa.ChunkedArray (albeit with chunks re-combined) if it's not possible to + produce single pa.Array + """ + + import pyarrow as pa + + from ray.air.util.transform_pyarrow import _is_column_extension_type + + assert not _is_column_extension_type( + array + ), f"Arrow `ExtensionType`s are not accepted (got {array.type})" + + int64_type_predicates = [ + pa.types.is_large_list, + pa.types.is_large_string, + pa.types.is_large_binary, + pa.types.is_large_unicode, + ] + + if array.nbytes < max_chunk_size or any( + p(array.type) for p in int64_type_predicates + ): + # It's safe to combine provided `ChunkedArray` in either of 2 cases: + # - It's cumulative size is < 2 GiB + # - It's of 'large' kind (ie one using int64 offsets internally) + return array.combine_chunks() + + # In this case it's actually *NOT* safe to try to directly combine + # Arrow's `ChunkedArray` and is impossible to produce single, contiguous + # `Array` since + # - It's estimated to hold > 2 GiB + # - Its type is not of the "large" kind (and hence is using int32 + # offsets internally, which would overflow) + # + # In this case instead of combining into single contiguous array, we + # instead just "clump" existing chunks into bigger ones, but no bigger + # than 2 GiB each. 
+ # + # NOTE: This branch actually returns `ChunkedArray` and not an `Array` + + # To stay under 2 GiB limit we are slicing provided list of chunks into + # slices no larger than 2 GiB (as compared to just directly using `concat_arrays`) + slices = [] + + cur_slice_start = 0 + cur_slice_size_bytes = 0 + + for i, chunk in enumerate(array.chunks): + chunk_size = chunk.nbytes + + if cur_slice_size_bytes + chunk_size > max_chunk_size: + slices.append(array.chunks[cur_slice_start:i]) + + cur_slice_start = i + cur_slice_size_bytes = 0 + + cur_slice_size_bytes += chunk_size + + # Add remaining chunks as last slice + slices.append(array.chunks[cur_slice_start:]) + + return pa.chunked_array([pa.concat_arrays(s) for s in slices]) diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/__init__.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/plan.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/plan.py new file mode 100644 index 0000000000000000000000000000000000000000..e21afc30b2375dcd088125ea7aaf2d7f1c1b2b50 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/plan.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING + +from .operator import Operator + +if TYPE_CHECKING: + from ray.data import DataContext + + +class Plan: + """Abstract class for logical/physical execution plans. + + This plan should hold an operator representing the plan DAG and any auxiliary data + that's useful for plan optimization or execution. 
+ """ + + def __init__(self, context: "DataContext"): + self._context = context + + @property + def dag(self) -> Operator: + raise NotImplementedError + + @property + def context(self) -> Operator: + return self._context diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/__init__.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddc3cffcd72839c89b4cbdc140e00e8b95c41d44 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/__init__.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/all_to_all_operator.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/all_to_all_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c38a6cb845cefb57b3b6568f74edc7f609ea4d75 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/all_to_all_operator.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/from_operators.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/from_operators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20e2522671c0f0bffb75245797d4468e9b73d235 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/from_operators.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/map_operator.cpython-310.pyc 
b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/map_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a73698baf7c1f009bbdf29e00a6b4e202025504 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/map_operator.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/write_operator.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/write_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c243a682f0c2b28a437b05c22b24ebc25a63d906 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/write_operator.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/optimizers.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/optimizers.py new file mode 100644 index 0000000000000000000000000000000000000000..a7c2b68c06feac94761695904001d2ead6a0bde4 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/optimizers.py @@ -0,0 +1,94 @@ +from typing import List, Optional, Type + +from ray.data._internal.logical.interfaces import ( + LogicalPlan, + Optimizer, + PhysicalPlan, + Rule, +) +from ray.data._internal.logical.rules.inherit_batch_format import InheritBatchFormatRule +from ray.data._internal.logical.rules.inherit_target_max_block_size import ( + InheritTargetMaxBlockSizeRule, +) +from ray.data._internal.logical.rules.operator_fusion import OperatorFusionRule +from ray.data._internal.logical.rules.randomize_blocks import ReorderRandomizeBlocksRule +from ray.data._internal.logical.rules.set_read_parallelism import SetReadParallelismRule +from ray.data._internal.logical.rules.zero_copy_map_fusion import ( + 
EliminateBuildOutputBlocks, +) +from ray.data._internal.planner.planner import Planner +from ray.util.annotations import DeveloperAPI + +_LOGICAL_RULES = [ + ReorderRandomizeBlocksRule, + InheritBatchFormatRule, +] + +_PHYSICAL_RULES = [ + InheritTargetMaxBlockSizeRule, + SetReadParallelismRule, + OperatorFusionRule, + EliminateBuildOutputBlocks, +] + + +@DeveloperAPI +def register_logical_rule(cls: Type[Rule], insert_index: Optional[int] = None): + if cls in _LOGICAL_RULES: + return + + if insert_index is None: + _LOGICAL_RULES.append(cls) + else: + _LOGICAL_RULES.insert(insert_index, cls) + + +@DeveloperAPI +def get_logical_rules() -> List[Type[Rule]]: + return list(_LOGICAL_RULES) + + +@DeveloperAPI +def register_physical_rule(cls: Type[Rule], insert_index: Optional[int] = None): + if cls in _PHYSICAL_RULES: + return + + if insert_index is None: + _PHYSICAL_RULES.append(cls) + else: + _PHYSICAL_RULES.insert(insert_index, cls) + + +@DeveloperAPI +def get_physical_rules() -> List[Type[Rule]]: + return list(_PHYSICAL_RULES) + + +class LogicalOptimizer(Optimizer): + """The optimizer for logical operators.""" + + @property + def rules(self) -> List[Rule]: + return [rule_cls() for rule_cls in _LOGICAL_RULES] + + +class PhysicalOptimizer(Optimizer): + """The optimizer for physical operators.""" + + @property + def rules(self) -> List[Rule]: + return [rule_cls() for rule_cls in _PHYSICAL_RULES] + + +def get_execution_plan(logical_plan: LogicalPlan) -> PhysicalPlan: + """Get the physical execution plan for the provided logical plan. + + This process has 3 steps: + (1) logical optimization: optimize logical operators. + (2) planning: convert logical to physical operators. + (3) physical optimization: optimize physical operators. 
+ """ + optimized_logical_plan = LogicalOptimizer().optimize(logical_plan) + logical_plan._dag = optimized_logical_plan.dag + physical_plan = Planner().plan(optimized_logical_plan) + return PhysicalOptimizer().optimize(physical_plan) diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/util.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/util.py new file mode 100644 index 0000000000000000000000000000000000000000..af6f2420a2696f2be39350044c2b15dda886d74a --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/util.py @@ -0,0 +1,112 @@ +import json +import re +import threading +from typing import Dict + +from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag +from ray.data._internal.logical.interfaces import LogicalOperator +from ray.data._internal.logical.operators.map_operator import AbstractUDFMap +from ray.data._internal.logical.operators.read_operator import Read +from ray.data._internal.logical.operators.write_operator import Write + +# The dictionary for the operator name and count. +_recorded_operators = dict() +_recorded_operators_lock = threading.Lock() + +# The white list of operator names allowed to be recorded. 
+_op_name_white_list = [ + # Read + "ReadBigQuery", + "ReadRange", + "ReadMongo", + "ReadParquet", + "ReadParquetBulk", + "ReadImage", + "ReadJSON", + "ReadCSV", + "ReadText", + "ReadNumpy", + "ReadTFRecord", + "ReadBinary", + "ReadTorch", + "ReadAvro", + "ReadWebDataset", + "ReadSQL", + "ReadDatabricksUC", + "ReadLance", + "ReadHuggingFace", + "ReadCustom", + # From + "FromArrow", + "FromItems", + "FromNumpy", + "FromPandas", + # Write + "WriteBigQuery", + "WriteParquet", + "WriteJSON", + "WriteCSV", + "WriteTFRecord", + "WriteNumpy", + "WriteMongo", + "WriteWebDataset", + "WriteSQL", + "WriteCustom", + # Map + "Map", + "MapBatches", + "Filter", + "FlatMap", + # All-to-all + "RandomizeBlockOrder", + "RandomShuffle", + "Repartition", + "Sort", + "Aggregate", + # N-ary + "Zip", + "Union", +] + + +def record_operators_usage(op: LogicalOperator): + """Record logical operator usage with Ray telemetry.""" + ops_dict = dict() + _collect_operators_to_dict(op, ops_dict) + ops_json_str = "" + with _recorded_operators_lock: + for op, count in ops_dict.items(): + _recorded_operators.setdefault(op, 0) + _recorded_operators[op] += count + ops_json_str = json.dumps(_recorded_operators) + + record_extra_usage_tag(TagKey.DATA_LOGICAL_OPS, ops_json_str) + + +def _collect_operators_to_dict(op: LogicalOperator, ops_dict: Dict[str, int]): + """Collect the logical operator name and count into `ops_dict`.""" + for child in op.input_dependencies: + _collect_operators_to_dict(child, ops_dict) + + op_name = op.name + + # Check read and write operator, and anonymize user-defined data source. + if isinstance(op, Read): + op_name = f"Read{op._datasource.get_name()}" + if op_name not in _op_name_white_list: + op_name = "ReadCustom" + elif isinstance(op, Write): + op_name = f"Write{op._datasink_or_legacy_datasource.get_name()}" + if op_name not in _op_name_white_list: + op_name = "WriteCustom" + elif isinstance(op, AbstractUDFMap): + # Remove the function name from the map operator name. 
+ # E.g., Map() -> Map + op_name = re.sub("\\(.*\\)$", "", op_name) + + # Anonymize any operator name if not in white list. + if op_name not in _op_name_white_list: + op_name = "Unknown" + + ops_dict.setdefault(op_name, 0) + ops_dict[op_name] += 1 diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__init__.py b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2e83a6bd916b838cd33a78b7be356a9556c3d760 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__init__.py @@ -0,0 +1,58 @@ +from ray.data._internal.datasource.sql_datasource import Connection +from ray.data.datasource.datasink import Datasink, DummyOutputDatasink +from ray.data.datasource.datasource import ( + Datasource, + RandomIntRowDatasource, + Reader, + ReadTask, +) +from ray.data.datasource.file_based_datasource import ( + FileBasedDatasource, + _S3FileSystemWrapper, +) +from ray.data.datasource.file_datasink import ( + BlockBasedFileDatasink, + RowBasedFileDatasink, +) +from ray.data.datasource.file_meta_provider import ( + BaseFileMetadataProvider, + DefaultFileMetadataProvider, + FastFileMetadataProvider, + FileMetadataProvider, +) +from ray.data.datasource.filename_provider import FilenameProvider +from ray.data.datasource.parquet_meta_provider import ParquetMetadataProvider +from ray.data.datasource.partitioning import ( + Partitioning, + PartitionStyle, + PathPartitionFilter, + PathPartitionParser, +) + +# Note: HuggingFaceDatasource should NOT be imported here, because +# we want to only import the Hugging Face datasets library when we use +# ray.data.from_huggingface() or HuggingFaceDatasource() directly. 
+__all__ = [ + "BaseFileMetadataProvider", + "BlockBasedFileDatasink", + "Connection", + "Datasink", + "Datasource", + "DeltaSharingDatasource", + "DefaultFileMetadataProvider", + "DummyOutputDatasink", + "FastFileMetadataProvider", + "FileBasedDatasource", + "FileMetadataProvider", + "FilenameProvider", + "ParquetMetadataProvider", + "PartitionStyle", + "PathPartitionFilter", + "PathPartitionParser", + "Partitioning", + "RandomIntRowDatasource", + "ReadTask", + "Reader", + "RowBasedFileDatasink", + "_S3FileSystemWrapper", +] diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/__init__.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6e07bb3324a769cba745cb35f2fa9053fef47c5 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/__init__.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/datasink.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/datasink.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e51882c72960ade650a9b343c0a5d7eb83a3ca2 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/datasink.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/datasource.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/datasource.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c78bc802ff5267c8799a90a7f18a2a464491de00 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/datasource.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_based_datasource.cpython-310.pyc 
b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_based_datasource.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f14af6c47db9ed899f1408abd2223108a8495923 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_based_datasource.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_datasink.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_datasink.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d24a2e5b5921063654c6e5b0afa636913127ad2f Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_datasink.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_meta_provider.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_meta_provider.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89018268ee3e0071661caf2c45de119ad99be028 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_meta_provider.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/filename_provider.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/filename_provider.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1368695181330ae3a2d742acda2e0150174651f8 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/filename_provider.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/parquet_meta_provider.cpython-310.pyc 
b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/parquet_meta_provider.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67e47b50b67e6a8440dee4439258d10ef318b768 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/parquet_meta_provider.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/partitioning.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/partitioning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbe9f909d714ab4d1dd43a0799d760fd18a38b71 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/partitioning.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/path_util.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/path_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..390fa0ab1a04cb21288d9e429a94aad0790d7bee Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/path_util.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/datasink.py b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/datasink.py new file mode 100644 index 0000000000000000000000000000000000000000..fe4d4cf4ef9a308a90ebeb1e6494536ac66ca1a4 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/datasink.py @@ -0,0 +1,198 @@ +import logging +from dataclasses import dataclass, fields +from typing import Iterable, List, Optional + +import ray +from ray.data._internal.execution.interfaces import TaskContext +from ray.data.block import Block, BlockAccessor +from ray.util.annotations import DeveloperAPI + +logger = logging.getLogger(__name__) + + +@dataclass +@DeveloperAPI 
+class WriteResult: + """Result of a write operation, containing stats/metrics + on the written data. + + Attributes: + total_num_rows: The total number of rows written. + total_size_bytes: The total size of the written data in bytes. + """ + + num_rows: int = 0 + size_bytes: int = 0 + + @staticmethod + def aggregate_write_results(write_results: List["WriteResult"]) -> "WriteResult": + """Aggregate a list of write results. + + Args: + write_results: A list of write results. + + Returns: + A single write result that aggregates the input results. + """ + total_num_rows = 0 + total_size_bytes = 0 + + for write_result in write_results: + total_num_rows += write_result.num_rows + total_size_bytes += write_result.size_bytes + + return WriteResult( + num_rows=total_num_rows, + size_bytes=total_size_bytes, + ) + + +@DeveloperAPI +class Datasink: + """Interface for defining write-related logic. + + If you want to write data to something that isn't built-in, subclass this class + and call :meth:`~ray.data.Dataset.write_datasink`. + """ + + def on_write_start(self) -> None: + """Callback for when a write job starts. + + Use this method to perform setup for write tasks. For example, creating a + staging bucket in S3. + """ + pass + + def write( + self, + blocks: Iterable[Block], + ctx: TaskContext, + ) -> None: + """Write blocks. This is used by a single write task. + + Args: + blocks: Generator of data blocks. + ctx: ``TaskContext`` for the write task. + """ + raise NotImplementedError + + def on_write_complete(self, write_result_blocks: List[Block]) -> WriteResult: + """Callback for when a write job completes. + + This can be used to "commit" a write output. This method must + succeed prior to ``write_datasink()`` returning to the user. If this + method fails, then ``on_write_failed()`` is called. + + Args: + write_result_blocks: The blocks resulting from executing + the Write operator, containing write results and stats. 
+ Returns: + A ``WriteResult`` object containing the aggregated stats of all + the input write results. + """ + write_results = [ + result["write_result"].iloc[0] for result in write_result_blocks + ] + aggregated_write_results = WriteResult.aggregate_write_results(write_results) + + aggregated_results_str = "" + for k in fields(aggregated_write_results.__class__): + v = getattr(aggregated_write_results, k.name) + aggregated_results_str += f"\t- {k.name}: {v}\n" + + logger.info( + f"Write operation succeeded. Aggregated write results:\n" + f"{aggregated_results_str}" + ) + return aggregated_write_results + + def on_write_failed(self, error: Exception) -> None: + """Callback for when a write job fails. + + This is called on a best-effort basis on write failures. + + Args: + error: The first error encountered. + """ + pass + + def get_name(self) -> str: + """Return a human-readable name for this datasink. + + This is used as the names of the write tasks. + """ + name = type(self).__name__ + datasink_suffix = "Datasink" + if name.startswith("_"): + name = name[1:] + if name.endswith(datasink_suffix): + name = name[: -len(datasink_suffix)] + return name + + @property + def supports_distributed_writes(self) -> bool: + """If ``False``, only launch write tasks on the driver's node.""" + return True + + @property + def num_rows_per_write(self) -> Optional[int]: + """The target number of rows to pass to each :meth:`~ray.data.Datasink.write` call. + + If ``None``, Ray Data passes a system-chosen number of rows. + """ + return None + + +@DeveloperAPI +class DummyOutputDatasink(Datasink): + """An example implementation of a writable datasource for testing. + Examples: + >>> import ray + >>> from ray.data.datasource import DummyOutputDatasink + >>> output = DummyOutputDatasink() + >>> ray.data.range(10).write_datasink(output) + >>> assert output.num_ok == 1 + """ + + def __init__(self): + ctx = ray.data.DataContext.get_current() + + # Setup a dummy actor to send the data. 
In a real datasource, write + # tasks would send data to an external system instead of a Ray actor. + @ray.remote(scheduling_strategy=ctx.scheduling_strategy) + class DataSink: + def __init__(self): + self.rows_written = 0 + self.enabled = True + + def write(self, block: Block) -> None: + block = BlockAccessor.for_block(block) + self.rows_written += block.num_rows() + + def get_rows_written(self): + return self.rows_written + + self.data_sink = DataSink.remote() + self.num_ok = 0 + self.num_failed = 0 + self.enabled = True + + def write( + self, + blocks: Iterable[Block], + ctx: TaskContext, + ) -> None: + tasks = [] + if not self.enabled: + raise ValueError("disabled") + for b in blocks: + tasks.append(self.data_sink.write.remote(b)) + ray.get(tasks) + + def on_write_complete(self, write_result_blocks: List[Block]) -> WriteResult: + self.num_ok += 1 + aggregated_results = super().on_write_complete(write_result_blocks) + return aggregated_results + + def on_write_failed(self, error: Exception) -> None: + self.num_failed += 1 diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/datasource.py b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/datasource.py new file mode 100644 index 0000000000000000000000000000000000000000..c09460d9bdf99ee08b793cffb0850dfbfe952367 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/datasource.py @@ -0,0 +1,243 @@ +from typing import Callable, Iterable, List, Optional + +import numpy as np + +from ray.data._internal.util import _check_pyarrow_version +from ray.data.block import Block, BlockMetadata +from ray.util.annotations import Deprecated, DeveloperAPI, PublicAPI + + +@PublicAPI +class Datasource: + """Interface for defining a custom :class:`~ray.data.Dataset` datasource. + + To read a datasource into a dataset, use :meth:`~ray.data.read_datasource`. 
+ """ # noqa: E501 + + @Deprecated + def create_reader(self, **read_args) -> "Reader": + """ + Deprecated: Implement :meth:`~ray.data.Datasource.get_read_tasks` and + :meth:`~ray.data.Datasource.estimate_inmemory_data_size` instead. + """ + return _LegacyDatasourceReader(self, **read_args) + + @Deprecated + def prepare_read(self, parallelism: int, **read_args) -> List["ReadTask"]: + """ + Deprecated: Implement :meth:`~ray.data.Datasource.get_read_tasks` and + :meth:`~ray.data.Datasource.estimate_inmemory_data_size` instead. + """ + raise NotImplementedError + + def get_name(self) -> str: + """Return a human-readable name for this datasource. + This will be used as the names of the read tasks. + """ + name = type(self).__name__ + datasource_suffix = "Datasource" + if name.endswith(datasource_suffix): + name = name[: -len(datasource_suffix)] + return name + + def estimate_inmemory_data_size(self) -> Optional[int]: + """Return an estimate of the in-memory data size, or None if unknown. + + Note that the in-memory data size may be larger than the on-disk data size. + """ + raise NotImplementedError + + def get_read_tasks(self, parallelism: int) -> List["ReadTask"]: + """Execute the read and return read tasks. + + Args: + parallelism: The requested read parallelism. The number of read + tasks should equal to this value if possible. + + Returns: + A list of read tasks that can be executed to read blocks from the + datasource in parallel. 
+ """ + raise NotImplementedError + + @property + def should_create_reader(self) -> bool: + has_implemented_get_read_tasks = ( + type(self).get_read_tasks is not Datasource.get_read_tasks + ) + has_implemented_estimate_inmemory_data_size = ( + type(self).estimate_inmemory_data_size + is not Datasource.estimate_inmemory_data_size + ) + return ( + not has_implemented_get_read_tasks + or not has_implemented_estimate_inmemory_data_size + ) + + @property + def supports_distributed_reads(self) -> bool: + """If ``False``, only launch read tasks on the driver's node.""" + return True + + +@Deprecated +class Reader: + """A bound read operation for a :class:`~ray.data.Datasource`. + + This is a stateful class so that reads can be prepared in multiple stages. + For example, it is useful for :class:`Datasets ` to know the + in-memory size of the read prior to executing it. + """ + + def estimate_inmemory_data_size(self) -> Optional[int]: + """Return an estimate of the in-memory data size, or None if unknown. + + Note that the in-memory data size may be larger than the on-disk data size. + """ + raise NotImplementedError + + def get_read_tasks(self, parallelism: int) -> List["ReadTask"]: + """Execute the read and return read tasks. + + Args: + parallelism: The requested read parallelism. The number of read + tasks should equal to this value if possible. + read_args: Additional kwargs to pass to the datasource impl. + + Returns: + A list of read tasks that can be executed to read blocks from the + datasource in parallel. 
+ """ + raise NotImplementedError + + +class _LegacyDatasourceReader(Reader): + def __init__(self, datasource: Datasource, **read_args): + self._datasource = datasource + self._read_args = read_args + + def estimate_inmemory_data_size(self) -> Optional[int]: + return None + + def get_read_tasks(self, parallelism: int) -> List["ReadTask"]: + return self._datasource.prepare_read(parallelism, **self._read_args) + + +@DeveloperAPI +class ReadTask(Callable[[], Iterable[Block]]): + """A function used to read blocks from the :class:`~ray.data.Dataset`. + + Read tasks are generated by :meth:`~ray.data.Datasource.get_read_tasks`, + and return a list of ``ray.data.Block`` when called. Initial metadata about the read + operation can be retrieved via the ``metadata`` attribute prior to executing the + read. Final metadata is returned after the read along with the blocks. + + Ray will execute read tasks in remote functions to parallelize execution. + Note that the number of blocks returned can vary at runtime. For example, + if a task is reading a single large file it can return multiple blocks to + avoid running out of memory during the read. + + The initial metadata should reflect all the blocks returned by the read, + e.g., if the metadata says ``num_rows=1000``, the read can return a single + block of 1000 rows, or multiple blocks with 1000 rows altogether. + + The final metadata (returned with the actual block) reflects the exact + contents of the block itself. + """ + + def __init__(self, read_fn: Callable[[], Iterable[Block]], metadata: BlockMetadata): + self._metadata = metadata + self._read_fn = read_fn + + @property + def metadata(self) -> BlockMetadata: + return self._metadata + + @property + def read_fn(self) -> Callable[[], Iterable[Block]]: + return self._read_fn + + def __call__(self) -> Iterable[Block]: + result = self._read_fn() + if not hasattr(result, "__iter__"): + DeprecationWarning( + "Read function must return Iterable[Block], got {}. 
" + "Probably you need to return `[block]` instead of " + "`block`.".format(result) + ) + yield from result + + +@DeveloperAPI +class RandomIntRowDatasource(Datasource): + """An example datasource that generates rows with random int64 columns. + + Examples: + >>> import ray + >>> from ray.data.datasource import RandomIntRowDatasource + >>> source = RandomIntRowDatasource() # doctest: +SKIP + >>> ray.data.read_datasource( # doctest: +SKIP + ... source, n=10, num_columns=2).take() + {'c_0': 1717767200176864416, 'c_1': 999657309586757214} + {'c_0': 4983608804013926748, 'c_1': 1160140066899844087} + """ + + def __init__(self, n: int, num_columns: int): + self._n = n + self._num_columns = num_columns + + def estimate_inmemory_data_size(self) -> Optional[int]: + return self._n * self._num_columns * 8 + + def get_read_tasks( + self, + parallelism: int, + ) -> List[ReadTask]: + _check_pyarrow_version() + import pyarrow + + read_tasks: List[ReadTask] = [] + n = self._n + num_columns = self._num_columns + block_size = max(1, n // parallelism) + + def make_block(count: int, num_columns: int) -> Block: + return pyarrow.Table.from_arrays( + np.random.randint( + np.iinfo(np.int64).max, size=(num_columns, count), dtype=np.int64 + ), + names=[f"c_{i}" for i in range(num_columns)], + ) + + schema = pyarrow.Table.from_pydict( + {f"c_{i}": [0] for i in range(num_columns)} + ).schema + + i = 0 + while i < n: + count = min(block_size, n - i) + meta = BlockMetadata( + num_rows=count, + size_bytes=8 * count * num_columns, + schema=schema, + input_files=None, + exec_stats=None, + ) + read_tasks.append( + ReadTask( + lambda count=count, num_columns=num_columns: [ + make_block(count, num_columns) + ], + meta, + ) + ) + i += block_size + + return read_tasks + + def get_name(self) -> str: + """Return a human-readable name for this datasource. + This will be used as the names of the read tasks. + Note: overrides the base `Datasource` method. 
+ """ + return "RandomInt" diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_based_datasource.py b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_based_datasource.py new file mode 100644 index 0000000000000000000000000000000000000000..f7215a78079c681d1141197f886c5548b2a3180b --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_based_datasource.py @@ -0,0 +1,533 @@ +import io +import logging +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + Literal, + Optional, + Union, +) + +import numpy as np + +import ray +from ray.data._internal.util import ( + _check_pyarrow_version, + _is_local_scheme, + call_with_retry, + make_async_gen, +) +from ray.data.block import Block, BlockAccessor +from ray.data.context import DataContext +from ray.data.datasource.datasource import Datasource, ReadTask +from ray.data.datasource.file_meta_provider import ( + BaseFileMetadataProvider, + DefaultFileMetadataProvider, +) +from ray.data.datasource.partitioning import ( + Partitioning, + PathPartitionFilter, + PathPartitionParser, +) +from ray.data.datasource.path_util import ( + _has_file_extension, + _resolve_paths_and_filesystem, +) +from ray.util.annotations import DeveloperAPI + +if TYPE_CHECKING: + import pandas as pd + import pyarrow + + +logger = logging.getLogger(__name__) + + +# We should parallelize file size fetch operations beyond this threshold. +FILE_SIZE_FETCH_PARALLELIZATION_THRESHOLD = 16 + +# 16 file size fetches from S3 takes ~1.5 seconds with Arrow's S3FileSystem. +PATHS_PER_FILE_SIZE_FETCH_TASK = 16 + +# The max retry backoff in seconds for opening file. +OPEN_FILE_RETRY_MAX_BACKOFF_SECONDS = 32 + +# The max number of attempts for opening file. +OPEN_FILE_MAX_ATTEMPTS = 10 + + +@DeveloperAPI +class FileBasedDatasource(Datasource): + """File-based datasource for reading files. + + Don't use this class directly. 
Instead, subclass it and implement `_read_stream()`. + """ + + # If `_WRITE_FILE_PER_ROW` is `True`, this datasource calls `_write_row` and writes + # each row to a file. Otherwise, this datasource calls `_write_block` and writes + # each block to a file. + _WRITE_FILE_PER_ROW = False + _FILE_EXTENSIONS: Optional[Union[str, List[str]]] = None + # Number of threads for concurrent reading within each read task. + # If zero or negative, reading will be performed in the main thread. + _NUM_THREADS_PER_TASK = 0 + + def __init__( + self, + paths: Union[str, List[str]], + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + schema: Optional[Union[type, "pyarrow.lib.Schema"]] = None, + open_stream_args: Optional[Dict[str, Any]] = None, + meta_provider: BaseFileMetadataProvider = DefaultFileMetadataProvider(), + partition_filter: PathPartitionFilter = None, + partitioning: Partitioning = None, + ignore_missing_paths: bool = False, + shuffle: Union[Literal["files"], None] = None, + include_paths: bool = False, + file_extensions: Optional[List[str]] = None, + ): + _check_pyarrow_version() + + self._supports_distributed_reads = not _is_local_scheme(paths) + if not self._supports_distributed_reads and ray.util.client.ray.is_connected(): + raise ValueError( + "Because you're using Ray Client, read tasks scheduled on the Ray " + "cluster can't access your local files. To fix this issue, store " + "files in cloud storage or a distributed filesystem like NFS." 
+ ) + + self._schema = schema + self._open_stream_args = open_stream_args + self._meta_provider = meta_provider + self._partition_filter = partition_filter + self._partitioning = partitioning + self._ignore_missing_paths = ignore_missing_paths + self._include_paths = include_paths + paths, self._filesystem = _resolve_paths_and_filesystem(paths, filesystem) + paths, file_sizes = map( + list, + zip( + *meta_provider.expand_paths( + paths, + self._filesystem, + partitioning, + ignore_missing_paths=ignore_missing_paths, + ) + ), + ) + + if ignore_missing_paths and len(paths) == 0: + raise ValueError( + "None of the provided paths exist. " + "The 'ignore_missing_paths' field is set to True." + ) + + if self._partition_filter is not None: + # Use partition filter to skip files which are not needed. + path_to_size = dict(zip(paths, file_sizes)) + paths = self._partition_filter(paths) + file_sizes = [path_to_size[p] for p in paths] + if len(paths) == 0: + raise ValueError( + "No input files found to read. Please double check that " + "'partition_filter' field is set properly." + ) + + if file_extensions is not None: + path_to_size = dict(zip(paths, file_sizes)) + paths = [p for p in paths if _has_file_extension(p, file_extensions)] + file_sizes = [path_to_size[p] for p in paths] + if len(paths) == 0: + raise ValueError( + "No input files found to read with the following file extensions: " + f"{file_extensions}. Please double check that " + "'file_extensions' field is set properly." + ) + + _validate_shuffle_arg(shuffle) + self._file_metadata_shuffler = None + if shuffle == "files": + self._file_metadata_shuffler = np.random.default_rng() + + # Read tasks serialize `FileBasedDatasource` instances, and the list of paths + # can be large. To avoid slow serialization speeds, we store a reference to + # the paths rather than the paths themselves. 
+ self._paths_ref = ray.put(paths) + self._file_sizes_ref = ray.put(file_sizes) + + def _paths(self) -> List[str]: + return ray.get(self._paths_ref) + + def _file_sizes(self) -> List[float]: + return ray.get(self._file_sizes_ref) + + def estimate_inmemory_data_size(self) -> Optional[int]: + total_size = 0 + for sz in self._file_sizes(): + if sz is not None: + total_size += sz + return total_size + + def get_read_tasks(self, parallelism: int) -> List[ReadTask]: + import numpy as np + + ctx = DataContext.get_current() + open_stream_args = self._open_stream_args + partitioning = self._partitioning + + paths = self._paths() + file_sizes = self._file_sizes() + + if self._file_metadata_shuffler is not None: + files_metadata = list(zip(paths, file_sizes)) + shuffled_files_metadata = [ + files_metadata[i] + for i in self._file_metadata_shuffler.permutation(len(files_metadata)) + ] + paths, file_sizes = list(map(list, zip(*shuffled_files_metadata))) + + read_stream = self._read_stream + filesystem = _wrap_s3_serialization_workaround(self._filesystem) + + if open_stream_args is None: + open_stream_args = {} + + open_input_source = self._open_input_source + + def read_files( + read_paths: Iterable[str], + ) -> Iterable[Block]: + nonlocal filesystem, open_stream_args, partitioning + + DataContext._set_current(ctx) + fs = _unwrap_s3_serialization_workaround(filesystem) + for read_path in read_paths: + partitions: Dict[str, str] = {} + if partitioning is not None: + parse = PathPartitionParser(partitioning) + partitions = parse(read_path) + + with _open_file_with_retry( + read_path, + lambda read_path=read_path: open_input_source( + fs, read_path, **open_stream_args + ), + ) as f: + for block in read_stream(f, read_path): + if partitions: + block = _add_partitions(block, partitions) + if self._include_paths: + block_accessor = BlockAccessor.for_block(block) + block = block_accessor.append_column( + "path", [read_path] * block_accessor.num_rows() + ) + yield block + + def 
create_read_task_fn(read_paths, num_threads): + def read_task_fn(): + nonlocal num_threads, read_paths + + # TODO: We should refactor the code so that we can get the results in + # order even when using multiple threads. + if ctx.execution_options.preserve_order: + num_threads = 0 + + if num_threads > 0: + if len(read_paths) < num_threads: + num_threads = len(read_paths) + + logger.debug( + f"Reading {len(read_paths)} files with {num_threads} threads." + ) + + yield from make_async_gen( + iter(read_paths), + read_files, + num_workers=num_threads, + ) + else: + logger.debug(f"Reading {len(read_paths)} files.") + yield from read_files(read_paths) + + return read_task_fn + + # fix https://github.com/ray-project/ray/issues/24296 + parallelism = min(parallelism, len(paths)) + + read_tasks = [] + split_paths = np.array_split(paths, parallelism) + split_file_sizes = np.array_split(file_sizes, parallelism) + + for read_paths, file_sizes in zip(split_paths, split_file_sizes): + if len(read_paths) <= 0: + continue + + meta = self._meta_provider( + read_paths, + self._schema, + rows_per_file=self._rows_per_file(), + file_sizes=file_sizes, + ) + + read_task_fn = create_read_task_fn(read_paths, self._NUM_THREADS_PER_TASK) + + read_task = ReadTask(read_task_fn, meta) + + read_tasks.append(read_task) + + return read_tasks + + def _open_input_source( + self, + filesystem: "pyarrow.fs.FileSystem", + path: str, + **open_args, + ) -> "pyarrow.NativeFile": + """Opens a source path for reading and returns the associated Arrow NativeFile. + + The default implementation opens the source path as a sequential input stream, + using ctx.streaming_read_buffer_size as the buffer size if none is given by the + caller. + + Implementations that do not support streaming reads (e.g. that require random + access) should override this method. 
+ """ + import pyarrow as pa + from pyarrow.fs import HadoopFileSystem + + ctx = DataContext.get_current() + + compression = open_args.get("compression", None) + if compression is None: + try: + # If no compression manually given, try to detect + # compression codec from path. + compression = pa.Codec.detect(path).name + except (ValueError, TypeError): + # Arrow's compression inference on the file path + # doesn't work for Snappy, so we double-check ourselves. + import pathlib + + suffix = pathlib.Path(path).suffix + if suffix and suffix[1:] == "snappy": + compression = "snappy" + else: + compression = None + + buffer_size = open_args.pop("buffer_size", None) + if buffer_size is None: + buffer_size = ctx.streaming_read_buffer_size + + if compression == "snappy": + # Arrow doesn't support streaming Snappy decompression since the canonical + # C++ Snappy library doesn't natively support streaming decompression. We + # works around this by manually decompressing the file with python-snappy. + open_args["compression"] = None + else: + open_args["compression"] = compression + + file = call_with_retry( + lambda: filesystem.open_input_stream( + path, buffer_size=buffer_size, **open_args + ), + description=f"open file {path}", + match=ctx.retried_io_errors, + ) + + if compression == "snappy": + import snappy + + stream = io.BytesIO() + if isinstance(filesystem, HadoopFileSystem): + snappy.hadoop_snappy.stream_decompress(src=file, dst=stream) + else: + snappy.stream_decompress(src=file, dst=stream) + stream.seek(0) + + file = pa.PythonFile(stream, mode="r") + + return file + + def _rows_per_file(self): + """Returns the number of rows per file, or None if unknown.""" + return None + + def _read_stream(self, f: "pyarrow.NativeFile", path: str) -> Iterator[Block]: + """Streaming read a single file. + + This method should be implemented by subclasses. + """ + raise NotImplementedError( + "Subclasses of FileBasedDatasource must implement _read_stream()." 
+ ) + + @property + def supports_distributed_reads(self) -> bool: + return self._supports_distributed_reads + + +def _add_partitions( + data: Union["pyarrow.Table", "pd.DataFrame"], partitions: Dict[str, Any] +) -> Union["pyarrow.Table", "pd.DataFrame"]: + import pandas as pd + import pyarrow as pa + + assert isinstance(data, (pa.Table, pd.DataFrame)) + if isinstance(data, pa.Table): + return _add_partitions_to_table(data, partitions) + if isinstance(data, pd.DataFrame): + return _add_partitions_to_dataframe(data, partitions) + + +def _add_partitions_to_table( + table: "pyarrow.Table", partitions: Dict[str, Any] +) -> "pyarrow.Table": + import pyarrow as pa + import pyarrow.compute as pc + + column_names = set(table.column_names) + for field, value in partitions.items(): + column = pa.array([value] * len(table)) + if field in column_names: + # TODO: Handle cast error. + column_type = table.schema.field(field).type + column = column.cast(column_type) + + values_are_equal = pc.all(pc.equal(column, table[field])) + values_are_equal = values_are_equal.as_py() + + if not values_are_equal: + raise ValueError( + f"Partition column {field} exists in table data, but partition " + f"value '{value}' is different from in-data values: " + f"{table[field].unique().to_pylist()}." + ) + + i = table.schema.get_field_index(field) + table = table.set_column(i, field, column) + else: + table = table.append_column(field, column) + + return table + + +def _add_partitions_to_dataframe( + df: "pd.DataFrame", partitions: Dict[str, Any] +) -> "pd.DataFrame": + import pandas as pd + + for field, value in partitions.items(): + column = pd.Series(data=[value] * len(df), name=field) + + if field in df: + column = column.astype(df[field].dtype) + mask = df[field].notna() + if not df[field][mask].equals(column[mask]): + raise ValueError( + f"Partition column {field} exists in table data, but partition " + f"value '{value}' is different from in-data values: " + f"{list(df[field].unique())}." 
+ ) + + df[field] = column + + return df + + +def _wrap_s3_serialization_workaround(filesystem: "pyarrow.fs.FileSystem"): + # This is needed because pa.fs.S3FileSystem assumes pa.fs is already + # imported before deserialization. See #17085. + import pyarrow as pa + import pyarrow.fs + + if isinstance(filesystem, pa.fs.S3FileSystem): + return _S3FileSystemWrapper(filesystem) + return filesystem + + +def _unwrap_s3_serialization_workaround( + filesystem: Union["pyarrow.fs.FileSystem", "_S3FileSystemWrapper"] +): + if isinstance(filesystem, _S3FileSystemWrapper): + return filesystem.unwrap() + else: + return filesystem + + +class _S3FileSystemWrapper: + def __init__(self, fs: "pyarrow.fs.S3FileSystem"): + self._fs = fs + + def unwrap(self): + return self._fs + + @classmethod + def _reconstruct(cls, fs_reconstruct, fs_args): + # Implicitly trigger S3 subsystem initialization by importing + # pyarrow.fs. + import pyarrow.fs # noqa: F401 + + return cls(fs_reconstruct(*fs_args)) + + def __reduce__(self): + return _S3FileSystemWrapper._reconstruct, self._fs.__reduce__() + + +def _wrap_arrow_serialization_workaround(kwargs: dict) -> dict: + if "filesystem" in kwargs: + kwargs["filesystem"] = _wrap_s3_serialization_workaround(kwargs["filesystem"]) + + return kwargs + + +def _unwrap_arrow_serialization_workaround(kwargs: dict) -> dict: + if isinstance(kwargs.get("filesystem"), _S3FileSystemWrapper): + kwargs["filesystem"] = kwargs["filesystem"].unwrap() + return kwargs + + +def _resolve_kwargs( + kwargs_fn: Callable[[], Dict[str, Any]], **kwargs +) -> Dict[str, Any]: + if kwargs_fn: + kwarg_overrides = kwargs_fn() + kwargs.update(kwarg_overrides) + return kwargs + + +def _open_file_with_retry( + file_path: str, + open_file: Callable[[], "pyarrow.NativeFile"], +) -> "pyarrow.NativeFile": + """Open file with an exponential backoff retry strategy. + + This is to avoid transient task failure with remote storage (such as S3), + when the remote storage throttles the requests. 
+ """ + if OPEN_FILE_MAX_ATTEMPTS < 1: + raise ValueError( + "OPEN_FILE_MAX_ATTEMPTS cannot be negative or 0. Get: " + f"{OPEN_FILE_MAX_ATTEMPTS}" + ) + + return call_with_retry( + open_file, + description=f"open file {file_path}", + match=DataContext.get_current().retried_io_errors, + max_attempts=OPEN_FILE_MAX_ATTEMPTS, + max_backoff_s=OPEN_FILE_RETRY_MAX_BACKOFF_SECONDS, + ) + + +def _validate_shuffle_arg(shuffle: Optional[str]) -> None: + if shuffle not in [None, "files"]: + raise ValueError( + f"Invalid value for 'shuffle': {shuffle}. " + "Valid values are None, 'files'." + ) diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_datasink.py b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_datasink.py new file mode 100644 index 0000000000000000000000000000000000000000..79d106f39ba3d58b355f6ced08e384d25cde00ab --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_datasink.py @@ -0,0 +1,262 @@ +import logging +import posixpath +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional +from urllib.parse import urlparse + +from ray._private.utils import _add_creatable_buckets_param_if_s3_uri +from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder +from ray.data._internal.execution.interfaces import TaskContext +from ray.data._internal.util import _is_local_scheme, call_with_retry +from ray.data.block import Block, BlockAccessor +from ray.data.context import DataContext +from ray.data.datasource.datasink import Datasink, WriteResult +from ray.data.datasource.filename_provider import ( + FilenameProvider, + _DefaultFilenameProvider, +) +from ray.data.datasource.path_util import _resolve_paths_and_filesystem +from ray.util.annotations import DeveloperAPI + +if TYPE_CHECKING: + import pyarrow + +logger = logging.getLogger(__name__) + + +WRITE_FILE_MAX_ATTEMPTS = 10 +WRITE_FILE_RETRY_MAX_BACKOFF_SECONDS = 32 + + +class _FileDatasink(Datasink): + def __init__( + self, 
+ path: str, + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + try_create_dir: bool = True, + open_stream_args: Optional[Dict[str, Any]] = None, + filename_provider: Optional[FilenameProvider] = None, + dataset_uuid: Optional[str] = None, + file_format: Optional[str] = None, + ): + """Initialize this datasink. + + Args: + path: The folder to write files to. + filesystem: The filesystem to write files to. If not provided, the + filesystem is inferred from the path. + try_create_dir: Whether to create the directory to write files to. + open_stream_args: Arguments to pass to ``filesystem.open_output_stream``. + filename_provider: A :class:`ray.data.datasource.FilenameProvider` that + generates filenames for each row or block. + dataset_uuid: The UUID of the dataset being written. If specified, it's + included in the filename. + file_format: The file extension. If specified, files are written with this + extension. + """ + if open_stream_args is None: + open_stream_args = {} + + if filename_provider is None: + filename_provider = _DefaultFilenameProvider( + dataset_uuid=dataset_uuid, file_format=file_format + ) + + self.unresolved_path = path + paths, self.filesystem = _resolve_paths_and_filesystem(path, filesystem) + assert len(paths) == 1, len(paths) + self.path = paths[0] + + self.try_create_dir = try_create_dir + self.open_stream_args = open_stream_args + self.filename_provider = filename_provider + self.dataset_uuid = dataset_uuid + self.file_format = file_format + + self.has_created_dir = False + + def open_output_stream(self, path: str) -> "pyarrow.NativeFile": + return self.filesystem.open_output_stream(path, **self.open_stream_args) + + def on_write_start(self) -> None: + """Create a directory to write files to. + + If ``try_create_dir`` is ``False``, this method is a no-op. + """ + from pyarrow.fs import FileType + + # We should skip creating directories in s3 unless the user specifically + # overrides this behavior. 
PyArrow's s3fs implementation for create_dir + # will attempt to check if the parent directory exists before trying to + # create the directory (with recursive=True it will try to do this to + # all of the directories until the root of the bucket). An IAM Policy that + # restricts access to a subset of prefixes within the bucket might cause + # the creation of the directory to fail even if the permissions should + # allow the data can be written to the specified path. For example if a + # a policy only allows users to write blobs prefixed with s3://bucket/foo + # a call to create_dir for s3://bucket/foo/bar will fail even though it + # should not. + parsed_uri = urlparse(self.path) + is_s3_uri = parsed_uri.scheme == "s3" + skip_create_dir_for_s3 = ( + is_s3_uri and not DataContext.get_current().s3_try_create_dir + ) + + if self.try_create_dir and not skip_create_dir_for_s3: + if self.filesystem.get_file_info(self.path).type is FileType.NotFound: + # Arrow's S3FileSystem doesn't allow creating buckets by default, so we + # add a query arg enabling bucket creation if an S3 URI is provided. + tmp = _add_creatable_buckets_param_if_s3_uri(self.path) + self.filesystem.create_dir(tmp, recursive=True) + self.has_created_dir = True + + def write( + self, + blocks: Iterable[Block], + ctx: TaskContext, + ) -> None: + builder = DelegatingBlockBuilder() + for block in blocks: + builder.add_block(block) + block = builder.build() + block_accessor = BlockAccessor.for_block(block) + + if block_accessor.num_rows() == 0: + logger.warning(f"Skipped writing empty block to {self.path}") + return + + self.write_block(block_accessor, 0, ctx) + + def write_block(self, block: BlockAccessor, block_index: int, ctx: TaskContext): + raise NotImplementedError + + def on_write_complete(self, write_result_blocks: List[Block]) -> WriteResult: + aggregated_results = super().on_write_complete(write_result_blocks) + + # If no rows were written, we can delete the directory. 
+ if self.has_created_dir and aggregated_results.num_rows == 0: + self.filesystem.delete_dir(self.path) + return aggregated_results + + @property + def supports_distributed_writes(self) -> bool: + return not _is_local_scheme(self.unresolved_path) + + +@DeveloperAPI +class RowBasedFileDatasink(_FileDatasink): + """A datasink that writes one row to each file. + + Subclasses must implement ``write_row_to_file`` and call the superclass constructor. + + Examples: + .. testcode:: + + import io + from typing import Any, Dict + + import pyarrow + from PIL import Image + + from ray.data.datasource import RowBasedFileDatasink + + class ImageDatasink(RowBasedFileDatasink): + def __init__(self, path: str, *, column: str, file_format: str = "png"): + super().__init__(path, file_format=file_format) + self._file_format = file_format + self._column = column + + def write_row_to_file(self, row: Dict[str, Any], file: "pyarrow.NativeFile"): + image = Image.fromarray(row[self._column]) + buffer = io.BytesIO() + image.save(buffer, format=self._file_format) + file.write(buffer.getvalue()) + """ # noqa: E501 + + def write_row_to_file(self, row: Dict[str, Any], file: "pyarrow.NativeFile"): + """Write a row to a file. + + Args: + row: The row to write. + file: The file to write the row to. 
+ """ + raise NotImplementedError + + def write_block(self, block: BlockAccessor, block_index: int, ctx: TaskContext): + for row_index, row in enumerate(block.iter_rows(public_row_format=False)): + filename = self.filename_provider.get_filename_for_row( + row, ctx.task_idx, block_index, row_index + ) + write_path = posixpath.join(self.path, filename) + + def write_row_to_path(): + with self.open_output_stream(write_path) as file: + self.write_row_to_file(row, file) + + logger.debug(f"Writing {write_path} file.") + call_with_retry( + write_row_to_path, + description=f"write '{write_path}'", + match=DataContext.get_current().retried_io_errors, + max_attempts=WRITE_FILE_MAX_ATTEMPTS, + max_backoff_s=WRITE_FILE_RETRY_MAX_BACKOFF_SECONDS, + ) + + +@DeveloperAPI +class BlockBasedFileDatasink(_FileDatasink): + """A datasink that writes multiple rows to each file. + + Subclasses must implement ``write_block_to_file`` and call the superclass + constructor. + + Examples: + .. testcode:: + + class CSVDatasink(BlockBasedFileDatasink): + def __init__(self, path: str): + super().__init__(path, file_format="csv") + + def write_block_to_file(self, block: BlockAccessor, file: "pyarrow.NativeFile"): + from pyarrow import csv + csv.write_csv(block.to_arrow(), file) + """ # noqa: E501 + + def __init__( + self, path, *, num_rows_per_file: Optional[int] = None, **file_datasink_kwargs + ): + super().__init__(path, **file_datasink_kwargs) + + self._num_rows_per_file = num_rows_per_file + + def write_block_to_file(self, block: BlockAccessor, file: "pyarrow.NativeFile"): + """Write a block of data to a file. + + Args: + block: The block to write. + file: The file to write the block to. 
+ """ + raise NotImplementedError + + def write_block(self, block: BlockAccessor, block_index: int, ctx: TaskContext): + filename = self.filename_provider.get_filename_for_block( + block, ctx.task_idx, block_index + ) + write_path = posixpath.join(self.path, filename) + + def write_block_to_path(): + with self.open_output_stream(write_path) as file: + self.write_block_to_file(block, file) + + logger.debug(f"Writing {write_path} file.") + call_with_retry( + write_block_to_path, + description=f"write '{write_path}'", + match=DataContext.get_current().retried_io_errors, + max_attempts=WRITE_FILE_MAX_ATTEMPTS, + max_backoff_s=WRITE_FILE_RETRY_MAX_BACKOFF_SECONDS, + ) + + @property + def num_rows_per_write(self) -> Optional[int]: + return self._num_rows_per_file diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_meta_provider.py b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_meta_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..c6654e9e2708f32ee50577ac186c84961a9e7396 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_meta_provider.py @@ -0,0 +1,484 @@ +import itertools +import logging +import os +import pathlib +import re +from typing import ( + TYPE_CHECKING, + Callable, + Iterator, + List, + Optional, + Tuple, + TypeVar, + Union, +) + +import numpy as np + +import ray +from ray.data._internal.progress_bar import ProgressBar +from ray.data._internal.remote_fn import cached_remote_fn +from ray.data._internal.util import call_with_retry +from ray.data.block import BlockMetadata +from ray.data.datasource.partitioning import Partitioning +from ray.util.annotations import DeveloperAPI + +if TYPE_CHECKING: + import pyarrow + + +logger = logging.getLogger(__name__) + + +@DeveloperAPI +class FileMetadataProvider: + """Abstract callable that provides metadata for the files of a single dataset block. 
+ + Current subclasses: + - :class:`BaseFileMetadataProvider` + - :class:`ParquetMetadataProvider` + """ + + def _get_block_metadata( + self, + paths: List[str], + schema: Optional[Union[type, "pyarrow.lib.Schema"]], + **kwargs, + ) -> BlockMetadata: + """Resolves and returns block metadata for files in the given paths. + + All file paths provided should belong to a single dataset block. + + Args: + paths: The file paths for a single dataset block. + schema: The user-provided or inferred schema for the given paths, + if any. + + Returns: + BlockMetadata aggregated across the given paths. + """ + raise NotImplementedError + + def __call__( + self, + paths: List[str], + schema: Optional[Union[type, "pyarrow.lib.Schema"]], + **kwargs, + ) -> BlockMetadata: + return self._get_block_metadata(paths, schema, **kwargs) + + +@DeveloperAPI +class BaseFileMetadataProvider(FileMetadataProvider): + """Abstract callable that provides metadata for + :class:`~ray.data.datasource.file_based_datasource.FileBasedDatasource` + implementations that reuse the base :meth:`~ray.data.Datasource.prepare_read` + method. + + Also supports file and file size discovery in input directory paths. + + Current subclasses: + - :class:`DefaultFileMetadataProvider` + """ + + def _get_block_metadata( + self, + paths: List[str], + schema: Optional[Union[type, "pyarrow.lib.Schema"]], + *, + rows_per_file: Optional[int], + file_sizes: List[Optional[int]], + ) -> BlockMetadata: + """Resolves and returns block metadata for files of a single dataset block. + + Args: + paths: The file paths for a single dataset block. These + paths will always be a subset of those previously returned from + :meth:`.expand_paths`. + schema: The user-provided or inferred schema for the given file + paths, if any. + rows_per_file: The fixed number of rows per input file, or None. 
+ file_sizes: Optional file size per input file previously returned + from :meth:`.expand_paths`, where `file_sizes[i]` holds the size of + the file at `paths[i]`. + + Returns: + BlockMetadata aggregated across the given file paths. + """ + raise NotImplementedError + + def expand_paths( + self, + paths: List[str], + filesystem: Optional["pyarrow.fs.FileSystem"], + partitioning: Optional[Partitioning] = None, + ignore_missing_paths: bool = False, + ) -> Iterator[Tuple[str, int]]: + """Expands all paths into concrete file paths by walking directories. + + Also returns a sidecar of file sizes. + + The input paths must be normalized for compatibility with the input + filesystem prior to invocation. + + Args: + paths: A list of file and/or directory paths compatible with the + given filesystem. + filesystem: The filesystem implementation that should be used for + expanding all paths and reading their files. + ignore_missing_paths: If True, ignores any file paths in ``paths`` that + are not found. Defaults to False. + + Returns: + An iterator of `(file_path, file_size)` pairs. None may be returned for the + file size if it is either unknown or will be fetched later by + `_get_block_metadata()`, but the length of + both lists must be equal. + """ + raise NotImplementedError + + +@DeveloperAPI +class DefaultFileMetadataProvider(BaseFileMetadataProvider): + """Default metadata provider for + :class:`~ray.data.datasource.file_based_datasource.FileBasedDatasource` + implementations that reuse the base `prepare_read` method. + + Calculates block size in bytes as the sum of its constituent file sizes, + and assumes a fixed number of rows per file. 
+ """ + + def _get_block_metadata( + self, + paths: List[str], + schema: Optional[Union[type, "pyarrow.lib.Schema"]], + *, + rows_per_file: Optional[int], + file_sizes: List[Optional[int]], + ) -> BlockMetadata: + if rows_per_file is None: + num_rows = None + else: + num_rows = len(paths) * rows_per_file + return BlockMetadata( + num_rows=num_rows, + size_bytes=None if None in file_sizes else int(sum(file_sizes)), + schema=schema, + input_files=paths, + exec_stats=None, + ) # Exec stats filled in later. + + def expand_paths( + self, + paths: List[str], + filesystem: "pyarrow.fs.FileSystem", + partitioning: Optional[Partitioning] = None, + ignore_missing_paths: bool = False, + ) -> Iterator[Tuple[str, int]]: + yield from _expand_paths(paths, filesystem, partitioning, ignore_missing_paths) + + +@DeveloperAPI +class FastFileMetadataProvider(DefaultFileMetadataProvider): + """Fast Metadata provider for + :class:`~ray.data.datasource.file_based_datasource.FileBasedDatasource` + implementations. + + Offers improved performance vs. + :class:`DefaultFileMetadataProvider` + by skipping directory path expansion and file size collection. + While this performance improvement may be negligible for local filesystems, + it can be substantial for cloud storage service providers. + + This should only be used when all input paths exist and are known to be files. + """ + + def expand_paths( + self, + paths: List[str], + filesystem: "pyarrow.fs.FileSystem", + partitioning: Optional[Partitioning] = None, + ignore_missing_paths: bool = False, + ) -> Iterator[Tuple[str, int]]: + if ignore_missing_paths: + raise ValueError( + "`ignore_missing_paths` cannot be set when used with " + "`FastFileMetadataProvider`. All paths must exist when " + "using `FastFileMetadataProvider`." + ) + + logger.warning( + f"Skipping expansion of {len(paths)} path(s). 
If your paths contain " + f"directories or if file size collection is required, try rerunning this " + f"read with `meta_provider=DefaultFileMetadataProvider()`." + ) + + yield from zip(paths, itertools.repeat(None, len(paths))) + + +def _handle_read_os_error(error: OSError, paths: Union[str, List[str]]) -> str: + # NOTE: this is not comprehensive yet, and should be extended as more errors arise. + # NOTE: The latter patterns are raised in Arrow 10+, while the former is raised in + # Arrow < 10. + aws_error_pattern = ( + r"^(?:(.*)AWS Error \[code \d+\]: No response body\.(.*))|" + r"(?:(.*)AWS Error UNKNOWN \(HTTP status 400\) during HeadObject operation: " + r"No response body\.(.*))|" + r"(?:(.*)AWS Error ACCESS_DENIED during HeadObject operation: No response " + r"body\.(.*))$" + ) + if re.match(aws_error_pattern, str(error)): + # Specially handle AWS error when reading files, to give a clearer error + # message to avoid confusing users. The real issue is most likely that the AWS + # S3 file credentials have not been properly configured yet. + if isinstance(paths, str): + # Quote to highlight single file path in error message for better + # readability. List of file paths will be shown up as ['foo', 'boo'], + # so only quote single file path here. + paths = f'"{paths}"' + raise OSError( + ( + f"Failing to read AWS S3 file(s): {paths}. " + "Please check that file exists and has properly configured access. " + "You can also run AWS CLI command to get more detailed error message " + "(e.g., aws s3 ls ). " + "See https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/index.html " # noqa + "and https://docs.ray.io/en/latest/data/creating-datasets.html#reading-from-remote-storage " # noqa + "for more information." 
+ ) + ) + else: + raise error + + +def _expand_paths( + paths: List[str], + filesystem: "pyarrow.fs.FileSystem", + partitioning: Optional[Partitioning], + ignore_missing_paths: bool = False, +) -> Iterator[Tuple[str, int]]: + """Get the file sizes for all provided file paths.""" + from pyarrow.fs import LocalFileSystem + + from ray.data.datasource.file_based_datasource import ( + FILE_SIZE_FETCH_PARALLELIZATION_THRESHOLD, + ) + from ray.data.datasource.path_util import _unwrap_protocol + + # We break down our processing paths into a few key cases: + # 1. If len(paths) < threshold, fetch the file info for the individual files/paths + # serially. + # 2. If all paths are contained under the same parent directory (or base directory, + # if using partitioning), fetch all file infos at this prefix and filter to the + # provided paths on the client; this should be a single file info request. + # 3. If more than threshold requests required, parallelize them via Ray tasks. + # 1. Small # of paths case. + if ( + len(paths) < FILE_SIZE_FETCH_PARALLELIZATION_THRESHOLD + # Local file systems are very fast to hit. + or isinstance(filesystem, LocalFileSystem) + ): + yield from _get_file_infos_serial(paths, filesystem, ignore_missing_paths) + else: + # 2. Common path prefix case. + # Get longest common path of all paths. + common_path = os.path.commonpath(paths) + # If parent directory (or base directory, if using partitioning) is common to + # all paths, fetch all file infos at that prefix and filter the response to the + # provided paths. + if ( + partitioning is not None + and common_path == _unwrap_protocol(partitioning.base_dir) + ) or all(str(pathlib.Path(path).parent) == common_path for path in paths): + yield from _get_file_infos_common_path_prefix( + paths, common_path, filesystem, ignore_missing_paths + ) + # 3. Parallelization case. + else: + # Parallelize requests via Ray tasks. 
+ yield from _get_file_infos_parallel(paths, filesystem, ignore_missing_paths) + + +def _get_file_infos_serial( + paths: List[str], + filesystem: "pyarrow.fs.FileSystem", + ignore_missing_paths: bool = False, +) -> Iterator[Tuple[str, int]]: + for path in paths: + yield from _get_file_infos(path, filesystem, ignore_missing_paths) + + +def _get_file_infos_common_path_prefix( + paths: List[str], + common_path: str, + filesystem: "pyarrow.fs.FileSystem", + ignore_missing_paths: bool = False, +) -> Iterator[Tuple[str, int]]: + path_to_size = {path: None for path in paths} + for path, file_size in _get_file_infos( + common_path, filesystem, ignore_missing_paths + ): + if path in path_to_size: + path_to_size[path] = file_size + + # Check if all `paths` have file size metadata. + # If any of paths has no file size, fall back to get files metadata in parallel. + # This can happen when path is a directory, but not a file. + have_missing_path = False + for path in paths: + if path_to_size[path] is None: + logger.debug( + f"Finding path {path} not have file size metadata. " + "Fall back to get files metadata in parallel for all paths." + ) + have_missing_path = True + break + + if have_missing_path: + # Parallelize requests via Ray tasks. + yield from _get_file_infos_parallel(paths, filesystem, ignore_missing_paths) + else: + # Iterate over `paths` to yield each path in original order. + # NOTE: do not iterate over `path_to_size` because the dictionary skips + # duplicated path, while `paths` might contain duplicated path if one wants + # to read same file multiple times. 
+ for path in paths: + yield path, path_to_size[path] + + +def _get_file_infos_parallel( + paths: List[str], + filesystem: "pyarrow.fs.FileSystem", + ignore_missing_paths: bool = False, +) -> Iterator[Tuple[str, int]]: + from ray.data.datasource.file_based_datasource import ( + PATHS_PER_FILE_SIZE_FETCH_TASK, + _unwrap_s3_serialization_workaround, + _wrap_s3_serialization_workaround, + ) + + logger.warning( + f"Expanding {len(paths)} path(s). This may be a HIGH LATENCY " + f"operation on some cloud storage services. Moving all the " + "paths to a common parent directory will lead to faster " + "metadata fetching." + ) + + # Capture the filesystem in the fetcher func closure, but wrap it in our + # serialization workaround to make sure that the pickle roundtrip works as expected. + filesystem = _wrap_s3_serialization_workaround(filesystem) + + def _file_infos_fetcher(paths: List[str]) -> List[Tuple[str, int]]: + fs = _unwrap_s3_serialization_workaround(filesystem) + return list( + itertools.chain.from_iterable( + _get_file_infos(path, fs, ignore_missing_paths) for path in paths + ) + ) + + yield from _fetch_metadata_parallel( + paths, _file_infos_fetcher, PATHS_PER_FILE_SIZE_FETCH_TASK + ) + + +Uri = TypeVar("Uri") +Meta = TypeVar("Meta") + + +def _fetch_metadata_parallel( + uris: List[Uri], + fetch_func: Callable[[List[Uri]], List[Meta]], + desired_uris_per_task: int, + **ray_remote_args, +) -> Iterator[Meta]: + """Fetch file metadata in parallel using Ray tasks.""" + remote_fetch_func = cached_remote_fn(fetch_func) + if ray_remote_args: + remote_fetch_func = remote_fetch_func.options(**ray_remote_args) + # Choose a parallelism that results in a # of metadata fetches per task that + # dominates the Ray task overhead while ensuring good parallelism. + # Always launch at least 2 parallel fetch tasks. 
+ parallelism = max(len(uris) // desired_uris_per_task, 2) + metadata_fetch_bar = ProgressBar( + "Metadata Fetch Progress", total=parallelism, unit="task" + ) + fetch_tasks = [] + for uri_chunk in np.array_split(uris, parallelism): + if len(uri_chunk) == 0: + continue + fetch_tasks.append(remote_fetch_func.remote(uri_chunk)) + results = metadata_fetch_bar.fetch_until_complete(fetch_tasks) + yield from itertools.chain.from_iterable(results) + + +def _get_file_infos( + path: str, filesystem: "pyarrow.fs.FileSystem", ignore_missing_path: bool = False +) -> List[Tuple[str, int]]: + """Get the file info for all files at or under the provided path.""" + from pyarrow.fs import FileType + + file_infos = [] + try: + ctx = ray.data.DataContext.get_current() + file_info = call_with_retry( + lambda: filesystem.get_file_info(path), + description="get file info", + match=ctx.retried_io_errors, + ) + except OSError as e: + _handle_read_os_error(e, path) + if file_info.type == FileType.Directory: + for file_path, file_size in _expand_directory(path, filesystem): + file_infos.append((file_path, file_size)) + elif file_info.type == FileType.File: + file_infos.append((path, file_info.size)) + elif file_info.type == FileType.NotFound and ignore_missing_path: + pass + else: + raise FileNotFoundError(path) + + return file_infos + + +def _expand_directory( + path: str, + filesystem: "pyarrow.fs.FileSystem", + exclude_prefixes: Optional[List[str]] = None, + ignore_missing_path: bool = False, +) -> List[Tuple[str, int]]: + """ + Expand the provided directory path to a list of file paths. + + Args: + path: The directory path to expand. + filesystem: The filesystem implementation that should be used for + reading these files. + exclude_prefixes: The file relative path prefixes that should be + excluded from the returned file set. Default excluded prefixes are + "." and "_". + + Returns: + An iterator of (file_path, file_size) tuples. 
+ """ + if exclude_prefixes is None: + exclude_prefixes = [".", "_"] + + from pyarrow.fs import FileSelector + + selector = FileSelector(path, recursive=True, allow_not_found=ignore_missing_path) + files = filesystem.get_file_info(selector) + base_path = selector.base_dir + out = [] + for file_ in files: + if not file_.is_file: + continue + file_path = file_.path + if not file_path.startswith(base_path): + continue + relative = file_path[len(base_path) :] + if any(relative.startswith(prefix) for prefix in exclude_prefixes): + continue + out.append((file_path, file_.size)) + # We sort the paths to guarantee a stable order. + return sorted(out) diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/filename_provider.py b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/filename_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..592db59a2fe52c8dcdac60a7de33fb531304d5a8 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/filename_provider.py @@ -0,0 +1,122 @@ +from typing import Any, Dict, Optional + +from ray.data.block import Block +from ray.util.annotations import PublicAPI + + +@PublicAPI(stability="alpha") +class FilenameProvider: + """Generates filenames when you write a :class:`~ray.data.Dataset`. + + Use this class to customize the filenames used when writing a Dataset. + + Some methods write each row to a separate file, while others write each block to a + separate file. For example, :meth:`ray.data.Dataset.write_images` writes individual + rows, and :func:`ray.data.Dataset.write_parquet` writes blocks of data. For more + information about blocks, see :ref:`Data internals `. + + If you're writing each row to a separate file, implement + :meth:`~FilenameProvider.get_filename_for_row`. Otherwise, implement + :meth:`~FilenameProvider.get_filename_for_block`. + + Example: + + This snippet shows you how to encode labels in written files. 
For example, if + `"cat"` is a label, you might write a file named `cat_000000_000000_000000.png`. + + .. testcode:: + + import ray + from ray.data.datasource import FilenameProvider + + class ImageFilenameProvider(FilenameProvider): + + def __init__(self, file_format: str): + self.file_format = file_format + + def get_filename_for_row(self, row, task_index, block_index, row_index): + return ( + f"{row['label']}_{task_index:06}_{block_index:06}" + f"_{row_index:06}.{self.file_format}" + ) + + ds = ray.data.read_parquet("s3://anonymous@ray-example-data/images.parquet") + ds.write_images( + "/tmp/results", + column="image", + filename_provider=ImageFilenameProvider("png") + ) + """ # noqa: E501 + + def get_filename_for_block( + self, block: Block, task_index: int, block_index: int + ) -> str: + """Generate a filename for a block of data. + + .. note:: + Filenames must be unique and deterministic for a given task and block index. + + A block consists of multiple rows and corresponds to a single output file. + Each task might produce a different number of blocks. + + Args: + block: The block that will be written to a file. + task_index: The index of the the write task. + block_index: The index of the block *within* the write task. + """ + raise NotImplementedError + + def get_filename_for_row( + self, row: Dict[str, Any], task_index: int, block_index: int, row_index: int + ) -> str: + """Generate a filename for a row. + + .. note:: + Filenames must be unique and deterministic for a given task, block, and row + index. + + A block consists of multiple rows, and each row corresponds to a single + output file. Each task might produce a different number of blocks, and each + block might contain a different number of rows. + + .. tip:: + If you require a contiguous row index into the global dataset, use + :meth:`~ray.data.Dataset.iter_rows`. This method is single-threaded and + isn't recommended for large datasets. + + Args: + row: The row that will be written to a file. 
+ task_index: The index of the the write task. + block_index: The index of the block *within* the write task. + row_index: The index of the row *within* the block. + """ + raise NotImplementedError + + +class _DefaultFilenameProvider(FilenameProvider): + def __init__( + self, dataset_uuid: Optional[str] = None, file_format: Optional[str] = None + ): + self._dataset_uuid = dataset_uuid + self._file_format = file_format + + def get_filename_for_block( + self, block: Block, task_index: int, block_index: int + ) -> str: + file_id = f"{task_index:06}_{block_index:06}" + return self._generate_filename(file_id) + + def get_filename_for_row( + self, row: Dict[str, Any], task_index: int, block_index: int, row_index: int + ) -> str: + file_id = f"{task_index:06}_{block_index:06}_{row_index:06}" + return self._generate_filename(file_id) + + def _generate_filename(self, file_id: str) -> str: + filename = "" + if self._dataset_uuid is not None: + filename += f"{self._dataset_uuid}_" + filename += file_id + if self._file_format is not None: + filename += f".{self._file_format}" + return filename diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/parquet_meta_provider.py b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/parquet_meta_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..f43272dec77900a05fcd17559e87f130dbc7950b --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/parquet_meta_provider.py @@ -0,0 +1,252 @@ +from typing import TYPE_CHECKING, List, Optional, Union + +import ray.cloudpickle as cloudpickle +from ray.data._internal.util import call_with_retry +from ray.data.block import BlockMetadata +from ray.data.datasource.file_meta_provider import ( + FileMetadataProvider, + _fetch_metadata_parallel, +) +from ray.util.annotations import DeveloperAPI + +if TYPE_CHECKING: + import pyarrow + + from ray.data._internal.datasource.parquet_datasource import SerializedFragment + + 
+FRAGMENTS_PER_META_FETCH = 6 +PARALLELIZE_META_FETCH_THRESHOLD = 24 + +# The application-level exceptions to retry for metadata prefetching task. +# Default to retry on access denied and read timeout errors because AWS S3 would throw +# these transient errors when load is too high. +RETRY_EXCEPTIONS_FOR_META_FETCH_TASK = ["AWS Error ACCESS_DENIED", "Timeout"] +# Maximum number of retries for metadata prefetching task due to transient errors. +RETRY_MAX_ATTEMPTS_FOR_META_FETCH_TASK = 32 +# Maximum retry back-off interval in seconds for failed metadata prefetching task. +RETRY_MAX_BACKOFF_S_FOR_META_FETCH_TASK = 64 + + +class _ParquetFileFragmentMetaData: + """Class to store metadata of a Parquet file fragment. This includes + all attributes from `pyarrow.parquet.FileMetaData` except for `schema`, + which is stored in `self.schema_pickled` as a pickled object from + `cloudpickle.loads()`, used in deduplicating schemas across multiple fragments.""" + + def __init__(self, fragment_metadata: "pyarrow.parquet.FileMetaData"): + self.created_by = fragment_metadata.created_by + self.format_version = fragment_metadata.format_version + self.num_columns = fragment_metadata.num_columns + self.num_row_groups = fragment_metadata.num_row_groups + self.num_rows = fragment_metadata.num_rows + self.serialized_size = fragment_metadata.serialized_size + # This is a pickled schema object, to be set later with + # `self.set_schema_pickled()`. To get the underlying schema, use + # `cloudpickle.loads(self.schema_pickled)`. + self.schema_pickled = None + + # Calculate the total byte size of the file fragment using the original + # object, as it is not possible to access row groups from this class. 
+ self.total_byte_size = 0 + for row_group_idx in range(fragment_metadata.num_row_groups): + row_group_metadata = fragment_metadata.row_group(row_group_idx) + self.total_byte_size += row_group_metadata.total_byte_size + + def set_schema_pickled(self, schema_pickled: bytes): + """Note: to get the underlying schema, use + `cloudpickle.loads(self.schema_pickled)`.""" + self.schema_pickled = schema_pickled + + +@DeveloperAPI +class ParquetMetadataProvider(FileMetadataProvider): + """Provides block metadata for Arrow Parquet file fragments.""" + + def _get_block_metadata( + self, + paths: List[str], + schema: Optional[Union[type, "pyarrow.lib.Schema"]], + *, + num_fragments: int, + prefetched_metadata: Optional[List["_ParquetFileFragmentMetaData"]], + ) -> BlockMetadata: + """Resolves and returns block metadata for files of a single dataset block. + + Args: + paths: The file paths for a single dataset block. + schema: The user-provided or inferred schema for the given file + paths, if any. + num_fragments: The number of Parquet file fragments derived from the input + file paths. + prefetched_metadata: Metadata previously returned from + `prefetch_file_metadata()` for each file fragment, where + `prefetched_metadata[i]` contains the metadata for `fragments[i]`. + + Returns: + BlockMetadata aggregated across the given file paths. + """ + if ( + prefetched_metadata is not None + and len(prefetched_metadata) == num_fragments + and all(m is not None for m in prefetched_metadata) + ): + # Fragment metadata was available, construct a normal + # BlockMetadata. + block_metadata = BlockMetadata( + num_rows=sum(m.num_rows for m in prefetched_metadata), + size_bytes=sum(m.total_byte_size for m in prefetched_metadata), + schema=schema, + input_files=paths, + exec_stats=None, + ) # Exec stats filled in later. + else: + # Fragment metadata was not available, construct an empty + # BlockMetadata. 
+ block_metadata = BlockMetadata( + num_rows=None, + size_bytes=None, + schema=schema, + input_files=paths, + exec_stats=None, + ) + return block_metadata + + def prefetch_file_metadata( + self, + fragments: List["pyarrow.dataset.ParquetFileFragment"], + **ray_remote_args, + ) -> Optional[List[_ParquetFileFragmentMetaData]]: + """Pre-fetches file metadata for all Parquet file fragments in a single batch. + + Subsets of the metadata returned will be provided as input to subsequent calls + to ``_get_block_metadata`` together with their corresponding Parquet file + fragments. + + Args: + fragments: The Parquet file fragments to fetch metadata for. + + Returns: + Metadata resolved for each input file fragment, or `None`. Metadata + must be returned in the same order as all input file fragments, such + that `metadata[i]` always contains the metadata for `fragments[i]`. + """ + from ray.data._internal.datasource.parquet_datasource import SerializedFragment + + if len(fragments) > PARALLELIZE_META_FETCH_THRESHOLD: + # Wrap Parquet fragments in serialization workaround. + fragments = [SerializedFragment(fragment) for fragment in fragments] + # Fetch Parquet metadata in parallel using Ray tasks. + + def fetch_func(fragments): + return _fetch_metadata_serialization_wrapper( + fragments, + # Ensure that retry settings are propagated to remote tasks. 
+ retry_match=RETRY_EXCEPTIONS_FOR_META_FETCH_TASK, + retry_max_attempts=RETRY_MAX_ATTEMPTS_FOR_META_FETCH_TASK, + retry_max_interval=RETRY_MAX_BACKOFF_S_FOR_META_FETCH_TASK, + ) + + raw_metadata = list( + _fetch_metadata_parallel( + fragments, + fetch_func, + FRAGMENTS_PER_META_FETCH, + **ray_remote_args, + ) + ) + else: + raw_metadata = _fetch_metadata(fragments) + + return _dedupe_metadata(raw_metadata) + + +def _fetch_metadata_serialization_wrapper( + fragments: List["SerializedFragment"], + retry_match: Optional[List[str]], + retry_max_attempts: int, + retry_max_interval: int, +) -> List["pyarrow.parquet.FileMetaData"]: + from ray.data._internal.datasource.parquet_datasource import ( + _deserialize_fragments_with_retry, + ) + + deserialized_fragments = _deserialize_fragments_with_retry(fragments) + try: + metadata = call_with_retry( + lambda: _fetch_metadata(deserialized_fragments), + description="fetch metdata", + match=retry_match, + max_attempts=retry_max_attempts, + max_backoff_s=retry_max_interval, + ) + except OSError as e: + raise RuntimeError( + f"Exceeded maximum number of attempts ({retry_max_attempts}) to retry " + "metadata fetching task. Metadata fetching tasks can fail due to transient " + "errors like rate limiting.\n" + "\n" + "To increase the maximum number of attempts, configure " + "`RETRY_MAX_ATTEMPTS_FOR_META_FETCH_TASK`. For example:\n" + "```\n" + "ray.data._internal.datasource.parquet_datasource.RETRY_MAX_ATTEMPTS_FOR_META_FETCH_TASK = 64\n" # noqa: E501 + "```\n" + "To increase the maximum retry backoff interval, configure " + "`RETRY_MAX_BACKOFF_S_FOR_META_FETCH_TASK`. For example:\n" + "```\n" + "ray.data._internal.datasource.parquet_datasource.RETRY_MAX_BACKOFF_S_FOR_META_FETCH_TASK = 128\n" # noqa: E501 + "```\n" + "If the error continues to occur, you can also try decresasing the " + "concurency of metadata fetching tasks by setting " + "`NUM_CPUS_FOR_META_FETCH_TASK` to a larger value. 
For example:\n" + "```\n" + "ray.data._internal.datasource.parquet_datasource.NUM_CPUS_FOR_META_FETCH_TASK = 4.\n" # noqa: E501 + "```\n" + "To change which exceptions to retry on, set " + "`RETRY_EXCEPTIONS_FOR_META_FETCH_TASK` to a list of error messages. For " + "example:\n" + "```\n" + 'ray.data._internal.datasource.parquet_datasource.RETRY_EXCEPTIONS_FOR_META_FETCH_TASK = ["AWS Error ACCESS_DENIED", "Timeout"]\n' # noqa: E501 + "```" + ) from e + return metadata + + +def _fetch_metadata( + fragments: List["pyarrow.dataset.ParquetFileFragment"], +) -> List["pyarrow.parquet.FileMetaData"]: + fragment_metadata = [] + for f in fragments: + try: + fragment_metadata.append(f.metadata) + except AttributeError: + break + return fragment_metadata + + +def _dedupe_metadata( + raw_metadatas: List["pyarrow.parquet.FileMetaData"], +) -> List[_ParquetFileFragmentMetaData]: + """For datasets with a large number of columns, the FileMetaData + (in particular the schema) can be very large. We can reduce the + memory usage by only keeping unique schema objects across all + file fragments. 
This method deduplicates the schemas and returns + a list of `_ParquetFileFragmentMetaData` objects.""" + schema_to_id = {} # schema_id -> serialized_schema + id_to_schema = {} # serialized_schema -> schema_id + stripped_metadatas = [] + for fragment_metadata in raw_metadatas: + stripped_md = _ParquetFileFragmentMetaData(fragment_metadata) + + schema_ser = cloudpickle.dumps(fragment_metadata.schema.to_arrow_schema()) + if schema_ser not in schema_to_id: + schema_id = len(schema_to_id) + schema_to_id[schema_ser] = schema_id + id_to_schema[schema_id] = schema_ser + stripped_md.set_schema_pickled(schema_ser) + else: + schema_id = schema_to_id.get(schema_ser) + existing_schema_ser = id_to_schema[schema_id] + stripped_md.set_schema_pickled(existing_schema_ser) + stripped_metadatas.append(stripped_md) + return stripped_metadatas diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/partitioning.py b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/partitioning.py new file mode 100644 index 0000000000000000000000000000000000000000..2d83fe6b67de0ad7a24a2a0cbc533c9d49cd06ac --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/partitioning.py @@ -0,0 +1,456 @@ +import posixpath +from dataclasses import dataclass +from enum import Enum +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union + +from ray.util.annotations import DeveloperAPI, PublicAPI + +if TYPE_CHECKING: + import pyarrow + + +PartitionDataType = Type[Union[int, float, str, bool]] + + +@DeveloperAPI +class PartitionStyle(str, Enum): + """Supported dataset partition styles. + + Inherits from `str` to simplify plain text serialization/deserialization. + + Examples: + >>> # Serialize to JSON text. + >>> json.dumps(PartitionStyle.HIVE) # doctest: +SKIP + '"hive"' + + >>> # Deserialize from JSON text. 
@DeveloperAPI
@dataclass
class Partitioning:
    """Partition scheme used to describe path-based partitions.

    Path-based partition formats embed all partition keys and values directly
    in their dataset file paths.

    For example, to read a dataset with Hive-style partitions:

    >>> import ray
    >>> from ray.data.datasource.partitioning import Partitioning
    >>> ds = ray.data.read_csv(
    ...     "s3://anonymous@ray-example-data/iris.csv",
    ...     partitioning=Partitioning("hive"),
    ... )

    For files arranged in per-class directories such as
    ``root/dog/dog_0.jpeg`` and ``root/cat/cat_0.jpeg``, use directory-based
    partitioning instead:

    >>> import ray
    >>> from ray.data.datasource.partitioning import Partitioning
    >>> root = "s3://anonymous@air-example-data/cifar-10/images"
    >>> partitioning = Partitioning("dir", field_names=["class"], base_dir=root)
    >>> ds = ray.data.read_images(root, partitioning=partitioning)
    """

    #: The partition style - may be either HIVE or DIRECTORY.
    style: PartitionStyle
    #: "/"-delimited base directory that all partitioned paths should exist
    #: under (exclusive). File paths outside of, or at the first level of,
    #: this directory are considered unpartitioned. Specify `None` or an
    #: empty string to search for partitions in all file path directories.
    base_dir: Optional[str] = None
    #: The partition key field names (i.e. column names for tabular
    #: datasets). When non-empty, the order and length of partition key field
    #: names must match the order and length of partition values. Required
    #: when parsing DIRECTORY partitioned paths or generating HIVE
    #: partitioned paths.
    field_names: Optional[List[str]] = None
    #: A dictionary that maps partition key names to their desired data type.
    #: If not provided, the data type defaults to string.
    field_types: Optional[Dict[str, PartitionDataType]] = None
    #: Filesystem that will be used for partition path file I/O.
    filesystem: Optional["pyarrow.fs.FileSystem"] = None

    def __post_init__(self):
        # Normalize unset optionals to concrete empty values.
        if self.base_dir is None:
            self.base_dir = ""
        if self.field_types is None:
            self.field_types = {}
        # Lazily-computed caches, both filled by `_normalize_base_dir()`.
        self._normalized_base_dir = None
        self._resolved_filesystem = None

    @property
    def normalized_base_dir(self) -> str:
        """The base directory normalized for compatibility with a filesystem."""
        if self._normalized_base_dir is None:
            self._normalize_base_dir()
        return self._normalized_base_dir

    @property
    def resolved_filesystem(self) -> "pyarrow.fs.FileSystem":
        """The filesystem resolved for compatibility with the base directory."""
        if self._resolved_filesystem is None:
            self._normalize_base_dir()
        return self._resolved_filesystem

    def _normalize_base_dir(self):
        """Resolve the filesystem and normalize `base_dir` against it.

        Called once a filesystem has been resolved, so that the base directory
        is correctly discovered at the root of all partitioned file paths.
        """
        from ray.data.datasource.path_util import _resolve_paths_and_filesystem

        paths, self._resolved_filesystem = _resolve_paths_and_filesystem(
            self.base_dir,
            self.filesystem,
        )
        assert (
            len(paths) == 1
        ), f"Expected 1 normalized base directory, but found {len(paths)}"
        base = paths[0]
        # Ensure a trailing "/" so the base dir prefix-matches partitioned
        # paths exactly at a directory boundary.
        if base and not base.endswith("/"):
            base += "/"
        self._normalized_base_dir = base
@DeveloperAPI
class PathPartitionParser:
    """Partition parser for path-based partition formats.

    Path-based partition formats embed all partition keys and values directly
    in their dataset file paths.

    Two path partition formats are currently supported - `HIVE` and
    `DIRECTORY`.

    For `HIVE` partitioning, all partition directories under the base
    directory are discovered based on `{key1}={value1}/{key2}={value2}`
    naming conventions. Key/value pairs do not need to be presented in the
    same order across all paths. Directory names nested under the base
    directory that don't follow this naming condition are considered
    unpartitioned. If a partition filter is defined, it is called with an
    empty input dictionary for each unpartitioned file.

    For `DIRECTORY` partitioning, all directories under the base directory
    are interpreted as partition values of the form `{value1}/{value2}`,
    matched positionally against a required ordered list of partition field
    names. Files stored directly in the base directory are considered
    unpartitioned, and the partition filter (if any) receives an empty
    dictionary for them. For example, if the base directory is `"foo"`, then
    `"foo.csv"` and `"foo/bar.csv"` are unpartitioned files but
    `"foo/bar/baz.csv"` is associated with partition `"bar"`. If the base
    directory is undefined, then `"foo.csv"` is unpartitioned,
    `"foo/bar.csv"` is associated with partition `"foo"`, and
    `"foo/bar/baz.csv"` with partition `("foo", "bar")`.
    """

    @staticmethod
    def of(
        style: PartitionStyle = PartitionStyle.HIVE,
        base_dir: Optional[str] = None,
        field_names: Optional[List[str]] = None,
        field_types: Optional[Dict[str, PartitionDataType]] = None,
        filesystem: Optional["pyarrow.fs.FileSystem"] = None,
    ) -> "PathPartitionParser":
        """Create a path-based partition parser from a flattened argument list.

        Args:
            style: The partition style - may be either HIVE or DIRECTORY.
            base_dir: "/"-delimited base directory to start searching for
                partitions (exclusive). File paths outside of this directory
                are considered unpartitioned. `None` or an empty string
                searches all file path directories.
            field_names: The partition key names. Required for DIRECTORY
                partitioning; optional for HIVE partitioning. When non-empty,
                the order and length must match the partition directories
                discovered. The names need not exist in the dataset schema.
            field_types: Maps partition key names to their desired data type;
                defaults to string when omitted.
            filesystem: Filesystem used for partition path file I/O.

        Returns:
            The new path-based partition parser.
        """
        return PathPartitionParser(
            Partitioning(style, base_dir, field_names, field_types, filesystem)
        )

    def __init__(self, partitioning: Partitioning):
        """Create a path-based partition parser.

        Args:
            partitioning: The path-based partition scheme. Parsing starts
                from this scheme's base directory; paths outside it are
                considered unpartitioned. Field names are required for
                DIRECTORY partitioning and optional for HIVE partitioning.
        """
        style = partitioning.style
        if style == PartitionStyle.DIRECTORY and not partitioning.field_names:
            raise ValueError(
                "Directory partitioning requires a corresponding list of "
                "partition key field names. Please retry your request with one "
                "or more field names specified."
            )
        # Dispatch table from partition style to the matching parse method.
        parsers = {
            PartitionStyle.HIVE: self._parse_hive_path,
            PartitionStyle.DIRECTORY: self._parse_dir_path,
        }
        self._parser_fn: Callable[[str], Dict[str, str]] = parsers.get(style)
        if self._parser_fn is None:
            raise ValueError(
                f"Unsupported partition style: {style}. "
                f"Supported styles: {parsers.keys()}"
            )
        self._scheme = partitioning

    def __call__(self, path: str) -> Dict[str, str]:
        """Parse partition keys and values from a single file path.

        Args:
            path: Input file path to parse.

        Returns:
            Dictionary mapping partition keys to values from the input file
            path; an empty dictionary for unpartitioned files.
        """
        dir_path = self._dir_path_trim_base(path)
        if dir_path is None:
            return {}
        partitions: Dict[str, str] = self._parser_fn(dir_path)
        # Cast raw string values to the user-requested types, where given.
        for field, data_type in self._scheme.field_types.items():
            partitions[field] = _cast_value(partitions[field], data_type)
        return partitions

    @property
    def scheme(self) -> Partitioning:
        """The partitioning for this parser."""
        return self._scheme

    def _dir_path_trim_base(self, path: str) -> Optional[str]:
        """Strip the normalized base directory and return the directory path.

        Returns None if the path does not start with the normalized base
        directory; simply returns the directory path if the base directory is
        undefined (empty).
        """
        base = self._scheme.normalized_base_dir
        if not path.startswith(base):
            return None
        return posixpath.dirname(path[len(base):])

    def _parse_hive_path(self, dir_path: str) -> Dict[str, str]:
        """Hive partition path parser.

        Maps a hive-style path of the form "{key1}={value1}/{key2}={value2}"
        to a key->value dictionary; unpartitioned files map to {}.
        """
        # Only directory components with exactly one "=" count as partitions.
        kv_pairs = [
            d.split("=") for d in dir_path.split("/") if d and d.count("=") == 1
        ]
        field_names = self._scheme.field_names
        if field_names and kv_pairs:
            if len(kv_pairs) != len(field_names):
                raise ValueError(
                    f"Expected {len(field_names)} partition value(s) but found "
                    f"{len(kv_pairs)}: {kv_pairs}."
                )
            # Keys must appear in the user-specified order.
            for expected, (found, _) in zip(field_names, kv_pairs):
                if found != expected:
                    raise ValueError(
                        f"Expected partition key {expected} but found "
                        f"{found}"
                    )
        return dict(kv_pairs)

    def _parse_dir_path(self, dir_path: str) -> Dict[str, str]:
        """Directory partition path parser.

        Maps a path of the form "{value1}/{value2}" to a key->value
        dictionary using the scheme's ordered field names; unpartitioned
        files map to {}.
        """
        values = [d for d in dir_path.split("/") if d]
        field_names = self._scheme.field_names

        if values and len(values) != len(field_names):
            raise ValueError(
                f"Expected {len(field_names)} partition value(s) but found "
                f"{len(values)}: {values}."
            )
        if not values:
            return {}
        # Skip placeholder (None) field names, matching values positionally.
        return {
            field: value
            for field, value in zip(field_names, values)
            if field is not None
        }
+ """ + + @staticmethod + def of( + filter_fn: Callable[[Dict[str, str]], bool], + style: PartitionStyle = PartitionStyle.HIVE, + base_dir: Optional[str] = None, + field_names: Optional[List[str]] = None, + field_types: Optional[Dict[str, PartitionDataType]] = None, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + ) -> "PathPartitionFilter": + """Creates a path-based partition filter using a flattened argument list. + + Args: + filter_fn: Callback used to filter partitions. Takes a dictionary mapping + partition keys to values as input. Unpartitioned files are denoted with + an empty input dictionary. Returns `True` to read a file for that + partition or `False` to skip it. Partition keys and values are always + strings read from the filesystem path. For example, this removes all + unpartitioned files: + + .. code:: python + + lambda d: True if d else False + + This raises an assertion error for any unpartitioned file found: + + .. code:: python + + def do_assert(val, msg): + assert val, msg + + lambda d: do_assert(d, "Expected all files to be partitioned!") + + And this only reads files from January, 2022 partitions: + + .. code:: python + + lambda d: d["month"] == "January" and d["year"] == "2022" + + style: The partition style - may be either HIVE or DIRECTORY. + base_dir: "/"-delimited base directory to start searching for partitions + (exclusive). File paths outside of this directory will be considered + unpartitioned. Specify `None` or an empty string to search for + partitions in all file path directories. + field_names: The partition key names. Required for DIRECTORY partitioning. + Optional for HIVE partitioning. When non-empty, the order and length of + partition key field names must match the order and length of partition + directories discovered. Partition key field names are not required to + exist in the dataset schema. + field_types: A dictionary that maps partition key names to their desired + data type. 
If not provided, the data type defaults to string. + filesystem: Filesystem that will be used for partition path file I/O. + + Returns: + The new path-based partition filter. + """ + scheme = Partitioning(style, base_dir, field_names, field_types, filesystem) + path_partition_parser = PathPartitionParser(scheme) + return PathPartitionFilter(path_partition_parser, filter_fn) + + def __init__( + self, + path_partition_parser: PathPartitionParser, + filter_fn: Callable[[Dict[str, str]], bool], + ): + """Creates a new path-based partition filter based on a parser. + + Args: + path_partition_parser: The path-based partition parser. + filter_fn: Callback used to filter partitions. Takes a dictionary mapping + partition keys to values as input. Unpartitioned files are denoted with + an empty input dictionary. Returns `True` to read a file for that + partition or `False` to skip it. Partition keys and values are always + strings read from the filesystem path. For example, this removes all + unpartitioned files: + ``lambda d: True if d else False`` + This raises an assertion error for any unpartitioned file found: + ``lambda d: assert d, "Expected all files to be partitioned!"`` + And this only reads files from January, 2022 partitions: + ``lambda d: d["month"] == "January" and d["year"] == "2022"`` + """ + self._parser = path_partition_parser + self._filter_fn = filter_fn + + def __call__(self, paths: List[str]) -> List[str]: + """Returns all paths that pass this partition scheme's partition filter. + + If no partition filter is set, then returns all input paths. If a base + directory is set, then only paths under this base directory will be parsed + for partitions. All paths outside of this base directory will automatically + be considered unpartitioned, and passed into the filter function as empty + dictionaries. + + Also normalizes the partition base directory for compatibility with the + given filesystem before applying the filter. 
+ + Args: + paths: Paths to pass through the partition filter function. All + paths should be normalized for compatibility with the given + filesystem. + Returns: + List of paths that pass the partition filter, or all paths if no + partition filter is defined. + """ + filtered_paths = paths + if self._filter_fn is not None: + filtered_paths = [ + path for path in paths if self._filter_fn(self._parser(path)) + ] + return filtered_paths + + @property + def parser(self) -> PathPartitionParser: + """Returns the path partition parser for this filter.""" + return self._parser + + +def _cast_value(value: str, data_type: PartitionDataType) -> Any: + if data_type is int: + return int(value) + elif data_type is float: + return float(value) + elif data_type is bool: + return value.lower() == "true" + else: + return value diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/datasource/path_util.py b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/path_util.py new file mode 100644 index 0000000000000000000000000000000000000000..ba446af5b0955cac26af17cfa3843a04a7accea6 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/datasource/path_util.py @@ -0,0 +1,205 @@ +import pathlib +import sys +import urllib +from typing import TYPE_CHECKING, List, Optional, Tuple, Union + +from ray.data._internal.util import _resolve_custom_scheme + +if TYPE_CHECKING: + import pyarrow + + +def _has_file_extension(path: str, extensions: Optional[List[str]]) -> bool: + """Check if a path has a file extension in the provided list. + + Examples: + >>> _has_file_extension("foo.csv", ["csv"]) + True + >>> _has_file_extension("foo.csv", ["json", "jsonl"]) + False + >>> _has_file_extension("foo.csv", None) + True + + Args: + path: The path to check. + extensions: A list of extensions to check against. If `None`, any extension is + considered valid. 
+ """ + assert extensions is None or isinstance(extensions, list), type(extensions) + + if extensions is None: + return True + + # `Path.suffixes` contain leading dots. The user-specified extensions don't. + extensions = [f".{ext.lower()}" for ext in extensions] + suffixes = [suffix.lower() for suffix in pathlib.Path(path).suffixes] + return any(ext in suffixes for ext in extensions) + + +def _resolve_paths_and_filesystem( + paths: Union[str, List[str]], + filesystem: "pyarrow.fs.FileSystem" = None, +) -> Tuple[List[str], "pyarrow.fs.FileSystem"]: + """ + Resolves and normalizes all provided paths, infers a filesystem from the + paths and ensures that all paths use the same filesystem. + + Args: + paths: A single file/directory path or a list of file/directory paths. + A list of paths can contain both files and directories. + filesystem: The filesystem implementation that should be used for + reading these files. If None, a filesystem will be inferred. If not + None, the provided filesystem will still be validated against all + filesystems inferred from the provided paths to ensure + compatibility. + """ + import pyarrow as pa + from pyarrow.fs import ( + FileSystem, + FSSpecHandler, + PyFileSystem, + _resolve_filesystem_and_path, + ) + + if isinstance(paths, str): + paths = [paths] + if isinstance(paths, pathlib.Path): + paths = [str(paths)] + elif not isinstance(paths, list) or any(not isinstance(p, str) for p in paths): + raise ValueError( + "Expected `paths` to be a `str`, `pathlib.Path`, or `list[str]`, but got " + f"`{paths}`." + ) + elif len(paths) == 0: + raise ValueError("Must provide at least one path.") + + need_unwrap_path_protocol = True + if filesystem and not isinstance(filesystem, FileSystem): + err_msg = ( + f"The filesystem passed must either conform to " + f"pyarrow.fs.FileSystem, or " + f"fsspec.spec.AbstractFileSystem. 
The provided " + f"filesystem was: {filesystem}" + ) + try: + import fsspec + from fsspec.implementations.http import HTTPFileSystem + except ModuleNotFoundError: + # If filesystem is not a pyarrow filesystem and fsspec isn't + # installed, then filesystem is neither a pyarrow filesystem nor + # an fsspec filesystem, so we raise a TypeError. + raise TypeError(err_msg) from None + if not isinstance(filesystem, fsspec.spec.AbstractFileSystem): + raise TypeError(err_msg) from None + if isinstance(filesystem, HTTPFileSystem): + # If filesystem is fsspec HTTPFileSystem, the protocol/scheme of paths + # should not be unwrapped/removed, because HTTPFileSystem expects full file + # paths including protocol/scheme. This is different behavior compared to + # file systems implementation in pyarrow.fs.FileSystem. + need_unwrap_path_protocol = False + + filesystem = PyFileSystem(FSSpecHandler(filesystem)) + + resolved_paths = [] + for path in paths: + path = _resolve_custom_scheme(path) + try: + resolved_filesystem, resolved_path = _resolve_filesystem_and_path( + path, filesystem + ) + except pa.lib.ArrowInvalid as e: + if "Cannot parse URI" in str(e): + resolved_filesystem, resolved_path = _resolve_filesystem_and_path( + _encode_url(path), filesystem + ) + resolved_path = _decode_url(resolved_path) + elif "Unrecognized filesystem type in URI" in str(e): + scheme = urllib.parse.urlparse(path, allow_fragments=False).scheme + if scheme in ["http", "https"]: + # If scheme of path is HTTP and filesystem is not resolved, + # try to use fsspec HTTPFileSystem. This expects fsspec is + # installed. + try: + from fsspec.implementations.http import HTTPFileSystem + except ModuleNotFoundError: + raise ImportError( + "Please install fsspec to read files from HTTP." 
+ ) from None + + resolved_filesystem = PyFileSystem(FSSpecHandler(HTTPFileSystem())) + resolved_path = path + need_unwrap_path_protocol = False + else: + raise + else: + raise + if filesystem is None: + filesystem = resolved_filesystem + elif need_unwrap_path_protocol: + resolved_path = _unwrap_protocol(resolved_path) + resolved_path = filesystem.normalize_path(resolved_path) + resolved_paths.append(resolved_path) + + return resolved_paths, filesystem + + +def _unwrap_protocol(path): + """ + Slice off any protocol prefixes on path. + """ + if sys.platform == "win32" and _is_local_windows_path(path): + # Represent as posix path such that downstream functions properly handle it. + # This is executed when 'file://' is NOT included in the path. + return pathlib.Path(path).as_posix() + + parsed = urllib.parse.urlparse(path, allow_fragments=False) # support '#' in path + query = "?" + parsed.query if parsed.query else "" # support '?' in path + netloc = parsed.netloc + if parsed.scheme == "s3" and "@" in parsed.netloc: + # If the path contains an @, it is assumed to be an anonymous + # credentialed path, and we need to strip off the credentials. + netloc = parsed.netloc.split("@")[-1] + + parsed_path = parsed.path + # urlparse prepends the path with a '/'. This does not work on Windows + # so if this is the case strip the leading slash. + if ( + sys.platform == "win32" + and not netloc + and len(parsed_path) >= 3 + and parsed_path[0] == "/" # The problematic leading slash + and parsed_path[1].isalpha() # Ensure it is a drive letter. 
+ and parsed_path[2:4] in (":", ":/") + ): + parsed_path = parsed_path[1:] + + return netloc + parsed_path + query + + +def _is_url(path) -> bool: + return urllib.parse.urlparse(path).scheme != "" + + +def _is_local_windows_path(path: str) -> bool: + """Determines if path is a Windows file-system location.""" + if sys.platform != "win32": + return False + + if len(path) >= 1 and path[0] == "\\": + return True + if ( + len(path) >= 3 + and path[1] == ":" + and (path[2] == "/" or path[2] == "\\") + and path[0].isalpha() + ): + return True + return False + + +def _encode_url(path): + return urllib.parse.quote(path, safe="/:") + + +def _decode_url(path): + return urllib.parse.unquote(path) diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/extensions/__init__.py b/minigpt2/lib/python3.10/site-packages/ray/data/extensions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..517b4fe7a3a2b6274f35714c709625fade8ef46e --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/extensions/__init__.py @@ -0,0 +1,45 @@ +from ray.air.util.tensor_extensions.arrow import ( + ArrowTensorTypeV2, + get_arrow_extension_tensor_types, +) +from ray.data.extensions.object_extension import ( + ArrowPythonObjectArray, + ArrowPythonObjectScalar, + ArrowPythonObjectType, + PythonObjectArray, + PythonObjectDtype, + _object_extension_type_allowed, +) +from ray.data.extensions.tensor_extension import ( + ArrowConversionError, + ArrowTensorArray, + ArrowTensorType, + ArrowVariableShapedTensorArray, + ArrowVariableShapedTensorType, + TensorArray, + TensorArrayElement, + TensorDtype, + column_needs_tensor_extension, +) + +__all__ = [ + # Tensor array extension. 
+ "TensorDtype", + "TensorArray", + "TensorArrayElement", + "ArrowTensorType", + "ArrowTensorTypeV2", + "ArrowTensorArray", + "ArrowVariableShapedTensorType", + "ArrowVariableShapedTensorArray", + "column_needs_tensor_extension", + "ArrowConversionError", + # Object array extension + "ArrowPythonObjectArray", + "ArrowPythonObjectType", + "ArrowPythonObjectScalar", + "PythonObjectArray", + "PythonObjectDtype", + "_object_extension_type_allowed", + "get_arrow_extension_tensor_types", +] diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/__init__.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19fabe9f0dad961b3cd99a96b059cabf8739d50e Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/__init__.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/object_extension.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/object_extension.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8efd9c582c62633676332a4d1ab55de14bc8da4d Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/object_extension.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/tensor_extension.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/tensor_extension.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0534e555caa7060f7bb30aa2202bd61455b6002 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/tensor_extension.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/extensions/object_extension.py 
b/minigpt2/lib/python3.10/site-packages/ray/data/extensions/object_extension.py new file mode 100644 index 0000000000000000000000000000000000000000..42ab20a231c62e607ba531e1f25f0b2d91c96178 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/extensions/object_extension.py @@ -0,0 +1,10 @@ +from ray.air.util.object_extensions.arrow import ( # noqa: F401 + ArrowPythonObjectArray, + ArrowPythonObjectScalar, + ArrowPythonObjectType, + _object_extension_type_allowed, +) +from ray.air.util.object_extensions.pandas import ( # noqa: F401 + PythonObjectArray, + PythonObjectDtype, +) diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/extensions/tensor_extension.py b/minigpt2/lib/python3.10/site-packages/ray/data/extensions/tensor_extension.py new file mode 100644 index 0000000000000000000000000000000000000000..121685e4c5ad07f95b6d24f0b721e28c65590482 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/extensions/tensor_extension.py @@ -0,0 +1,15 @@ +from ray.air.util.tensor_extensions.arrow import ( # noqa: F401 + ArrowConversionError, + ArrowTensorArray, + ArrowTensorType, + ArrowTensorTypeV2, + ArrowVariableShapedTensorArray, + ArrowVariableShapedTensorType, +) +from ray.air.util.tensor_extensions.pandas import ( # noqa: F401 + TensorArray, + TensorArrayElement, + TensorDtype, + column_needs_tensor_extension, +) +from ray.air.util.tensor_extensions.utils import create_ragged_ndarray # noqa: F401 diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__init__.py b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef99a58ee113934da0162c86945c1275cb85455b --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__init__.py @@ -0,0 +1,50 @@ +from ray.data.preprocessors.chain import Chain +from ray.data.preprocessors.concatenator import Concatenator +from ray.data.preprocessors.discretizer import ( + 
CustomKBinsDiscretizer, + UniformKBinsDiscretizer, +) +from ray.data.preprocessors.encoder import ( + Categorizer, + LabelEncoder, + MultiHotEncoder, + OneHotEncoder, + OrdinalEncoder, +) +from ray.data.preprocessors.hasher import FeatureHasher +from ray.data.preprocessors.imputer import SimpleImputer +from ray.data.preprocessors.normalizer import Normalizer +from ray.data.preprocessors.scaler import ( + MaxAbsScaler, + MinMaxScaler, + RobustScaler, + StandardScaler, +) +from ray.data.preprocessors.tokenizer import Tokenizer +from ray.data.preprocessors.torch import TorchVisionPreprocessor +from ray.data.preprocessors.transformer import PowerTransformer +from ray.data.preprocessors.vectorizer import CountVectorizer, HashingVectorizer + +__all__ = [ + "Categorizer", + "CountVectorizer", + "Chain", + "FeatureHasher", + "HashingVectorizer", + "LabelEncoder", + "MaxAbsScaler", + "MinMaxScaler", + "MultiHotEncoder", + "Normalizer", + "OneHotEncoder", + "OrdinalEncoder", + "PowerTransformer", + "RobustScaler", + "SimpleImputer", + "StandardScaler", + "Concatenator", + "Tokenizer", + "TorchVisionPreprocessor", + "CustomKBinsDiscretizer", + "UniformKBinsDiscretizer", +] diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/__init__.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea5eb746d1fdd62514f0831710e6f61a57778644 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/__init__.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/chain.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/chain.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7915c77ab1b8f463f232ea094de8ab3e65f0c9a3 Binary files /dev/null and 
b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/chain.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/concatenator.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/concatenator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d79ddd6d2184d6e6fbbae70db2ad4f087692bac Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/concatenator.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/discretizer.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/discretizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c17e90d51fd87c5168e661bc451c39088ce88f36 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/discretizer.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/encoder.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/encoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..229c44c7a7c92395124539c8f01faf94dbac1e75 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/encoder.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/hasher.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/hasher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcc6e9d03d03d0195046968813a18e116186d5f4 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/hasher.cpython-310.pyc differ diff --git 
a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/imputer.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/imputer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66897a777aa2d2a904df5c598fdd8512a07615a0 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/imputer.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/normalizer.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/normalizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..418c8275269e574f2c8147dbd057513063c66d9c Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/normalizer.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/scaler.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/scaler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b855aac0a04f05a49065e8e39d009b6fd26d6c1b Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/scaler.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/tokenizer.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/tokenizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fac701a8baecd7ab654390bf2a31a139920eb052 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/tokenizer.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/torch.cpython-310.pyc 
b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/torch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ac80a5735be01c8a9ec60ea735f8d1b55acd7b0 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/torch.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/transformer.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e4064416948d36e24352bd439824b77cd75ac05 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/transformer.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/utils.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9845102dad31ec8adb38b2a32e29a8650b4a27c9 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/utils.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/vectorizer.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/vectorizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b3304124da200a9c868929e524088ef1fbe8dd3 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/vectorizer.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/chain.py b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/chain.py new file mode 100644 index 0000000000000000000000000000000000000000..e608f8cf2f86aaf86b418e01dd182fa1bb56365d --- /dev/null +++ 
b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/chain.py @@ -0,0 +1,101 @@ +from typing import TYPE_CHECKING + +from ray.air.util.data_batch_conversion import BatchFormat +from ray.data import Dataset +from ray.data.preprocessor import Preprocessor + +if TYPE_CHECKING: + from ray.air.data_batch_type import DataBatchType + + +class Chain(Preprocessor): + """Combine multiple preprocessors into a single :py:class:`Preprocessor`. + + When you call ``fit``, each preprocessor is fit on the dataset produced by the + preceeding preprocessor's ``fit_transform``. + + Example: + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import * + >>> + >>> df = pd.DataFrame({ + ... "X0": [0, 1, 2], + ... "X1": [3, 4, 5], + ... "Y": ["orange", "blue", "orange"], + ... }) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> + >>> preprocessor = Chain( + ... StandardScaler(columns=["X0", "X1"]), + ... Concatenator(columns=["X0", "X1"], output_column_name="X"), + ... LabelEncoder(label_column="Y") + ... ) + >>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP + Y X + 0 1 [-1.224744871391589, -1.224744871391589] + 1 0 [0.0, 0.0] + 2 1 [1.224744871391589, 1.224744871391589] + + Args: + preprocessors: The preprocessors to sequentially compose. 
+ """ + + def fit_status(self): + fittable_count = 0 + fitted_count = 0 + for p in self.preprocessors: + if p.fit_status() == Preprocessor.FitStatus.FITTED: + fittable_count += 1 + fitted_count += 1 + elif p.fit_status() in ( + Preprocessor.FitStatus.NOT_FITTED, + Preprocessor.FitStatus.PARTIALLY_FITTED, + ): + fittable_count += 1 + else: + assert p.fit_status() == Preprocessor.FitStatus.NOT_FITTABLE + if fittable_count > 0: + if fitted_count == fittable_count: + return Preprocessor.FitStatus.FITTED + elif fitted_count > 0: + return Preprocessor.FitStatus.PARTIALLY_FITTED + else: + return Preprocessor.FitStatus.NOT_FITTED + else: + return Preprocessor.FitStatus.NOT_FITTABLE + + def __init__(self, *preprocessors: Preprocessor): + self.preprocessors = preprocessors + + def _fit(self, ds: Dataset) -> Preprocessor: + for preprocessor in self.preprocessors[:-1]: + ds = preprocessor.fit_transform(ds) + self.preprocessors[-1].fit(ds) + return self + + def fit_transform(self, ds: Dataset) -> Dataset: + for preprocessor in self.preprocessors: + ds = preprocessor.fit_transform(ds) + return ds + + def _transform(self, ds: Dataset) -> Dataset: + for preprocessor in self.preprocessors: + ds = preprocessor.transform(ds) + return ds + + def _transform_batch(self, df: "DataBatchType") -> "DataBatchType": + for preprocessor in self.preprocessors: + df = preprocessor.transform_batch(df) + return df + + def __repr__(self): + arguments = ", ".join(repr(preprocessor) for preprocessor in self.preprocessors) + return f"{self.__class__.__name__}({arguments})" + + def _determine_transform_to_use(self) -> BatchFormat: + # This is relevant for BatchPrediction. + # For Chain preprocessor, we picked the first one as entry point. + # TODO (jiaodong): We should revisit if our Chain preprocessor is + # still optimal with context of lazy execution. 
+ return self.preprocessors[0]._determine_transform_to_use() diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/concatenator.py b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/concatenator.py new file mode 100644 index 0000000000000000000000000000000000000000..941834adbd16614366a870fa1d9a5d379a73926c --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/concatenator.py @@ -0,0 +1,125 @@ +import logging +from typing import List, Optional + +import numpy as np +import pandas as pd + +from ray.data.preprocessor import Preprocessor +from ray.util.annotations import PublicAPI + +logger = logging.getLogger(__name__) + + +@PublicAPI(stability="alpha") +class Concatenator(Preprocessor): + """Combine numeric columns into a column of type + :class:`~ray.air.util.tensor_extensions.pandas.TensorDtype`. Only columns + specified in ``columns`` will be concatenated. + + This preprocessor concatenates numeric columns and stores the result in a new + column. The new column contains + :class:`~ray.air.util.tensor_extensions.pandas.TensorArrayElement` objects of + shape :math:`(m,)`, where :math:`m` is the number of columns concatenated. + The :math:`m` concatenated columns are dropped after concatenation. + The preprocessor preserves the order of the columns provided in the ``colummns`` + argument and will use that order when calling ``transform()`` and ``transform_batch()``. + + Examples: + >>> import numpy as np + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import Concatenator + + :py:class:`Concatenator` combines numeric columns into a column of + :py:class:`~ray.air.util.tensor_extensions.pandas.TensorDtype`. 
+ + >>> df = pd.DataFrame({"X0": [0, 3, 1], "X1": [0.5, 0.2, 0.9]}) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> concatenator = Concatenator(columns=["X0", "X1"]) + >>> concatenator.transform(ds).to_pandas() # doctest: +SKIP + concat_out + 0 [0.0, 0.5] + 1 [3.0, 0.2] + 2 [1.0, 0.9] + + By default, the created column is called `"concat_out"`, but you can specify + a different name. + + >>> concatenator = Concatenator(columns=["X0", "X1"], output_column_name="tensor") + >>> concatenator.transform(ds).to_pandas() # doctest: +SKIP + tensor + 0 [0.0, 0.5] + 1 [3.0, 0.2] + 2 [1.0, 0.9] + + >>> concatenator = Concatenator(columns=["X0", "X1"], dtype=np.float32) + >>> concatenator.transform(ds) # doctest: +SKIP + Dataset(num_rows=3, schema={Y: object, concat_out: TensorDtype(shape=(2,), dtype=float32)}) + + Args: + output_column_name: The desired name for the new column. + Defaults to ``"concat_out"``. + columns: A list of columns to concatenate. The provided order of the columns + will be retained during concatenation. + dtype: The ``dtype`` to convert the output tensors to. If unspecified, + the ``dtype`` is determined by standard coercion rules. + raise_if_missing: If ``True``, an error is raised if any + of the columns in ``columns`` don't exist. + Defaults to ``False``. + + Raises: + ValueError: if `raise_if_missing` is `True` and a column in `columns` or + doesn't exist in the dataset. 
+ """ # noqa: E501 + + _is_fittable = False + + def __init__( + self, + columns: List[str], + output_column_name: str = "concat_out", + dtype: Optional[np.dtype] = None, + raise_if_missing: bool = False, + ): + self.columns = columns + + self.output_column_name = output_column_name + self.dtype = dtype + self.raise_if_missing = raise_if_missing + + def _validate(self, df: pd.DataFrame) -> None: + missing_columns = set(self.columns) - set(df) + if missing_columns: + message = ( + f"Missing columns specified in '{self.columns}': {missing_columns}" + ) + if self.raise_if_missing: + raise ValueError(message) + else: + logger.warning(message) + + def _transform_pandas(self, df: pd.DataFrame): + self._validate(df) + + concatenated = df[self.columns].to_numpy(dtype=self.dtype) + df = df.drop(columns=self.columns) + # Use a Pandas Series for column assignment to get more consistent + # behavior across Pandas versions. + df.loc[:, self.output_column_name] = pd.Series(list(concatenated)) + return df + + def __repr__(self): + default_values = { + "output_column_name": "concat_out", + "columns": None, + "dtype": None, + "raise_if_missing": False, + } + + non_default_arguments = [] + for parameter, default_value in default_values.items(): + value = getattr(self, parameter) + if value != default_value: + non_default_arguments.append(f"{parameter}={value}") + + return f"{self.__class__.__name__}({', '.join(non_default_arguments)})" diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/discretizer.py b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/discretizer.py new file mode 100644 index 0000000000000000000000000000000000000000..6ccd33fc8af407e8d3726979f87d4d89230f27a9 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/discretizer.py @@ -0,0 +1,363 @@ +from typing import Dict, Iterable, List, Optional, Type, Union + +import numpy as np +import pandas as pd + +from ray.data import Dataset +from 
ray.data._internal.aggregate import Max, Min +from ray.data.preprocessor import Preprocessor +from ray.util.annotations import PublicAPI + + +class _AbstractKBinsDiscretizer(Preprocessor): + """Abstract base class for all KBinsDiscretizers. + + Essentially a thin wraper around ``pd.cut``. + + Expects either ``self.stats_`` or ``self.bins`` to be set and + contain {column:list_of_bin_intervals}. + """ + + def _transform_pandas(self, df: pd.DataFrame): + def bin_values(s: pd.Series) -> pd.Series: + if s.name not in self.columns: + return s + labels = self.dtypes.get(s.name) if self.dtypes else False + ordered = True + if labels: + if isinstance(labels, pd.CategoricalDtype): + ordered = labels.ordered + labels = list(labels.categories) + else: + labels = False + + bins = self.stats_ if self._is_fittable else self.bins + return pd.cut( + s, + bins[s.name] if isinstance(bins, dict) else bins, + right=self.right, + labels=labels, + ordered=ordered, + retbins=False, + include_lowest=self.include_lowest, + duplicates=self.duplicates, + ) + + return df.apply(bin_values, axis=0) + + def _validate_bins_columns(self): + if isinstance(self.bins, dict) and not all( + col in self.bins for col in self.columns + ): + raise ValueError( + "If `bins` is a dictionary, all elements of `columns` must be present " + "in it." + ) + + def __repr__(self): + attr_str = ", ".join( + [ + f"{attr_name}={attr_value!r}" + for attr_name, attr_value in vars(self).items() + if not attr_name.startswith("_") + ] + ) + return f"{self.__class__.__name__}({attr_str})" + + +@PublicAPI(stability="alpha") +class CustomKBinsDiscretizer(_AbstractKBinsDiscretizer): + """Bin values into discrete intervals using custom bin edges. + + Columns must contain numerical values. + + Examples: + Use :class:`CustomKBinsDiscretizer` to bin continuous features. + + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import CustomKBinsDiscretizer + >>> df = pd.DataFrame({ + ... 
"value_1": [0.2, 1.4, 2.5, 6.2, 9.7, 2.1], + ... "value_2": [10, 15, 13, 12, 23, 25], + ... }) + >>> ds = ray.data.from_pandas(df) + >>> discretizer = CustomKBinsDiscretizer( + ... columns=["value_1", "value_2"], + ... bins=[0, 1, 4, 10, 25] + ... ) + >>> discretizer.transform(ds).to_pandas() + value_1 value_2 + 0 0 2 + 1 1 3 + 2 1 3 + 3 2 3 + 4 2 3 + 5 1 3 + + You can also specify different bin edges per column. + + >>> discretizer = CustomKBinsDiscretizer( + ... columns=["value_1", "value_2"], + ... bins={"value_1": [0, 1, 4], "value_2": [0, 18, 35, 70]}, + ... ) + >>> discretizer.transform(ds).to_pandas() + value_1 value_2 + 0 0.0 0 + 1 1.0 0 + 2 1.0 0 + 3 NaN 0 + 4 NaN 1 + 5 1.0 1 + + + Args: + columns: The columns to discretize. + bins: Defines custom bin edges. Can be an iterable of numbers, + a ``pd.IntervalIndex``, or a dict mapping columns to either of them. + Note that ``pd.IntervalIndex`` for bins must be non-overlapping. + right: Indicates whether bins include the rightmost edge. + include_lowest: Indicates whether the first interval should be left-inclusive. + duplicates: Can be either 'raise' or 'drop'. If bin edges are not unique, + raise ``ValueError`` or drop non-uniques. + dtypes: An optional dictionary that maps columns to ``pd.CategoricalDtype`` + objects or ``np.integer`` types. If you don't include a column in ``dtypes`` + or specify it as an integer dtype, the outputted column will consist of + ordered integers corresponding to bins. If you use a + ``pd.CategoricalDtype``, the outputted column will be a + ``pd.CategoricalDtype`` with the categories being mapped to bins. + You can use ``pd.CategoricalDtype(categories, ordered=True)`` to + preserve information about bin order. + + .. seealso:: + + :class:`UniformKBinsDiscretizer` + If you want to bin data into uniform width bins. 
+ """ + + def __init__( + self, + columns: List[str], + bins: Union[ + Iterable[float], + pd.IntervalIndex, + Dict[str, Union[Iterable[float], pd.IntervalIndex]], + ], + *, + right: bool = True, + include_lowest: bool = False, + duplicates: str = "raise", + dtypes: Optional[ + Dict[str, Union[pd.CategoricalDtype, Type[np.integer]]] + ] = None, + ): + self.columns = columns + self.bins = bins + self.right = right + self.include_lowest = include_lowest + self.duplicates = duplicates + self.dtypes = dtypes + + self._validate_bins_columns() + + _is_fittable = False + + +@PublicAPI(stability="alpha") +class UniformKBinsDiscretizer(_AbstractKBinsDiscretizer): + """Bin values into discrete intervals (bins) of uniform width. + + Columns must contain numerical values. + + Examples: + Use :class:`UniformKBinsDiscretizer` to bin continuous features. + + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import UniformKBinsDiscretizer + >>> df = pd.DataFrame({ + ... "value_1": [0.2, 1.4, 2.5, 6.2, 9.7, 2.1], + ... "value_2": [10, 15, 13, 12, 23, 25], + ... }) + >>> ds = ray.data.from_pandas(df) + >>> discretizer = UniformKBinsDiscretizer( + ... columns=["value_1", "value_2"], bins=4 + ... ) + >>> discretizer.fit_transform(ds).to_pandas() + value_1 value_2 + 0 0 0 + 1 0 1 + 2 0 0 + 3 2 0 + 4 3 3 + 5 0 3 + + You can also specify different number of bins per column. + + >>> discretizer = UniformKBinsDiscretizer( + ... columns=["value_1", "value_2"], bins={"value_1": 4, "value_2": 3} + ... ) + >>> discretizer.fit_transform(ds).to_pandas() + value_1 value_2 + 0 0 0 + 1 0 0 + 2 0 0 + 3 2 0 + 4 3 2 + 5 0 2 + + + Args: + columns: The columns to discretize. + bins: Defines the number of equal-width bins. + Can be either an integer (which will be applied to all columns), + or a dict that maps columns to integers. + The range is extended by .1% on each side to include + the minimum and maximum values. 
+ right: Indicates whether bins includes the rightmost edge or not. + include_lowest: Whether the first interval should be left-inclusive + or not. + duplicates: Can be either 'raise' or 'drop'. If bin edges are not unique, + raise ``ValueError`` or drop non-uniques. + dtypes: An optional dictionary that maps columns to ``pd.CategoricalDtype`` + objects or ``np.integer`` types. If you don't include a column in ``dtypes`` + or specify it as an integer dtype, the outputted column will consist of + ordered integers corresponding to bins. If you use a + ``pd.CategoricalDtype``, the outputted column will be a + ``pd.CategoricalDtype`` with the categories being mapped to bins. + You can use ``pd.CategoricalDtype(categories, ordered=True)`` to + preserve information about bin order. + + .. seealso:: + + :class:`CustomKBinsDiscretizer` + If you want to specify your own bin edges. + """ + + def __init__( + self, + columns: List[str], + bins: Union[int, Dict[str, int]], + *, + right: bool = True, + include_lowest: bool = False, + duplicates: str = "raise", + dtypes: Optional[ + Dict[str, Union[pd.CategoricalDtype, Type[np.integer]]] + ] = None, + ): + self.columns = columns + self.bins = bins + self.right = right + self.include_lowest = include_lowest + self.duplicates = duplicates + self.dtypes = dtypes + + def _fit(self, dataset: Dataset) -> Preprocessor: + self._validate_on_fit() + stats = {} + aggregates = [] + if isinstance(self.bins, dict): + columns = self.bins.keys() + else: + columns = self.columns + + for column in columns: + aggregates.extend( + self._fit_uniform_covert_bin_to_aggregate_if_needed(column) + ) + + aggregate_stats = dataset.aggregate(*aggregates) + mins = {} + maxes = {} + for key, value in aggregate_stats.items(): + column_name = key[4:-1] # min(column) -> column + if key.startswith("min"): + mins[column_name] = value + if key.startswith("max"): + maxes[column_name] = value + + for column in mins.keys(): + bins = self.bins[column] if 
isinstance(self.bins, dict) else self.bins + stats[column] = _translate_min_max_number_of_bins_to_bin_edges( + mins[column], maxes[column], bins, self.right + ) + + self.stats_ = stats + return self + + def _validate_on_fit(self): + self._validate_bins_columns() + + def _fit_uniform_covert_bin_to_aggregate_if_needed(self, column: str): + bins = self.bins[column] if isinstance(self.bins, dict) else self.bins + if isinstance(bins, int): + return (Min(column), Max(column)) + else: + raise TypeError( + f"`bins` must be an integer or a dict of integers, got {bins}" + ) + + +# Copied from +# https://github.com/pandas-dev/pandas/blob/v1.4.4/pandas/core/reshape/tile.py#L257 +# under +# BSD 3-Clause License +# +# Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. +# and PyData Development Team +# All rights reserved. +# +# Copyright (c) 2011-2022, Open source contributors. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +def _translate_min_max_number_of_bins_to_bin_edges( + mn: float, mx: float, bins: int, right: bool +) -> List[float]: + """Translates a range and desired number of bins into list of bin edges.""" + rng = (mn, mx) + mn, mx = (mi + 0.0 for mi in rng) + + if np.isinf(mn) or np.isinf(mx): + raise ValueError( + "Cannot specify integer `bins` when input data contains infinity." + ) + elif mn == mx: # adjust end points before binning + mn -= 0.001 * abs(mn) if mn != 0 else 0.001 + mx += 0.001 * abs(mx) if mx != 0 else 0.001 + bins = np.linspace(mn, mx, bins + 1, endpoint=True) + else: # adjust end points after binning + bins = np.linspace(mn, mx, bins + 1, endpoint=True) + adj = (mx - mn) * 0.001 # 0.1% of the range + if right: + bins[0] -= adj + else: + bins[-1] += adj + return bins + + +# TODO(ml-team) +# Add QuantileKBinsDiscretizer diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/encoder.py b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..8bd6af80f6b19ab7ac0fa23bb436a2410ec6d946 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/encoder.py @@ -0,0 +1,661 @@ +from collections import Counter, OrderedDict +from functools import partial +from typing import Dict, List, Optional + +import numpy as np +import pandas as pd +import pandas.api.types + +from ray.air.util.data_batch_conversion import 
BatchFormat +from ray.data import Dataset +from ray.data.preprocessor import Preprocessor, PreprocessorNotFittedException +from ray.util.annotations import PublicAPI + + +@PublicAPI(stability="alpha") +class OrdinalEncoder(Preprocessor): + """Encode values within columns as ordered integer values. + + :class:`OrdinalEncoder` encodes categorical features as integers that range from + :math:`0` to :math:`n - 1`, where :math:`n` is the number of categories. + + If you transform a value that isn't in the fitted datset, then the value is encoded + as ``float("nan")``. + + Columns must contain either hashable values or lists of hashable values. Also, you + can't have both scalars and lists in the same column. + + Examples: + Use :class:`OrdinalEncoder` to encode categorical features as integers. + + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import OrdinalEncoder + >>> df = pd.DataFrame({ + ... "sex": ["male", "female", "male", "female"], + ... "level": ["L4", "L5", "L3", "L4"], + ... }) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> encoder = OrdinalEncoder(columns=["sex", "level"]) + >>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP + sex level + 0 1 1 + 1 0 2 + 2 1 0 + 3 0 1 + + If you transform a value not present in the original dataset, then the value + is encoded as ``float("nan")``. + + >>> df = pd.DataFrame({"sex": ["female"], "level": ["L6"]}) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> encoder.transform(ds).to_pandas() # doctest: +SKIP + sex level + 0 0 NaN + + :class:`OrdinalEncoder` can also encode categories in a list. + + >>> df = pd.DataFrame({ + ... "name": ["Shaolin Soccer", "Moana", "The Smartest Guys in the Room"], + ... "genre": [ + ... ["comedy", "action", "sports"], + ... ["animation", "comedy", "action"], + ... ["documentary"], + ... ], + ... 
}) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> encoder = OrdinalEncoder(columns=["genre"]) + >>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP + name genre + 0 Shaolin Soccer [2, 0, 4] + 1 Moana [1, 2, 0] + 2 The Smartest Guys in the Room [3] + + Args: + columns: The columns to separately encode. + encode_lists: If ``True``, encode list elements. If ``False``, encode + whole lists (i.e., replace each list with an integer). ``True`` + by default. + + .. seealso:: + + :class:`OneHotEncoder` + Another preprocessor that encodes categorical data. + """ + + def __init__(self, columns: List[str], *, encode_lists: bool = True): + # TODO: allow user to specify order of values within each column. + self.columns = columns + self.encode_lists = encode_lists + + def _fit(self, dataset: Dataset) -> Preprocessor: + self.stats_ = _get_unique_value_indices( + dataset, self.columns, encode_lists=self.encode_lists + ) + return self + + def _transform_pandas(self, df: pd.DataFrame): + _validate_df(df, *self.columns) + + def encode_list(element: list, *, name: str): + return [self.stats_[f"unique_values({name})"].get(x) for x in element] + + def column_ordinal_encoder(s: pd.Series): + if _is_series_composed_of_lists(s): + if self.encode_lists: + return s.map(partial(encode_list, name=s.name)) + + # cannot simply use map here due to pandas thinking + # tuples are to be used for indices + def list_as_category(element): + element = tuple(element) + return self.stats_[f"unique_values({s.name})"].get(element) + + return s.apply(list_as_category) + + s_values = self.stats_[f"unique_values({s.name})"] + return s.map(s_values) + + df[self.columns] = df[self.columns].apply(column_ordinal_encoder) + return df + + def __repr__(self): + return ( + f"{self.__class__.__name__}(columns={self.columns!r}, " + f"encode_lists={self.encode_lists!r})" + ) + + +@PublicAPI(stability="alpha") +class OneHotEncoder(Preprocessor): + """`One-hot encode `_ + categorical data. 
+ + This preprocessor transforms each specified column into a one-hot encoded vector. + Each element in the vector corresponds to a unique category in the column, with a + value of 1 if the category matches and 0 otherwise. + + If a category is infrequent (based on ``max_categories``) or not present in the + fitted dataset, it is encoded as all 0s. + + Columns must contain hashable objects or lists of hashable objects. + + .. note:: + Lists are treated as categories. If you want to encode individual list + elements, use :class:`MultiHotEncoder`. + + Example: + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import OneHotEncoder + >>> + >>> df = pd.DataFrame({"color": ["red", "green", "red", "red", "blue", "green"]}) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> encoder = OneHotEncoder(columns=["color"]) + >>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP + color_blue color_green color_red + 0 0 0 1 + 1 0 1 0 + 2 0 0 1 + 3 0 0 1 + 4 1 0 0 + 5 0 1 0 + + If you one-hot encode a value that isn't in the fitted dataset, then the + value is encoded with zeros. + + >>> df = pd.DataFrame({"color": ["yellow"]}) + >>> batch = ray.data.from_pandas(df) # doctest: +SKIP + >>> encoder.transform(batch).to_pandas() # doctest: +SKIP + color_blue color_green color_red + 0 0 0 0 + + Likewise, if you one-hot encode an infrequent value, then the value is encoded + with zeros. + + >>> encoder = OneHotEncoder(columns=["color"], max_categories={"color": 2}) + >>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP + color_red color_green + 0 1 0 + 1 0 1 + 2 1 0 + 3 1 0 + 4 0 0 + 5 0 1 + + Args: + columns: The columns to separately encode. + max_categories: The maximum number of features to create for each column. + If a value isn't specified for a column, then a feature is created + for every category in that column. + + .. 
seealso:: + + :class:`MultiHotEncoder` + If you want to encode individual list elements, use + :class:`MultiHotEncoder`. + + :class:`OrdinalEncoder` + If your categories are ordered, you may want to use + :class:`OrdinalEncoder`. + """ # noqa: E501 + + def __init__( + self, columns: List[str], *, max_categories: Optional[Dict[str, int]] = None + ): + # TODO: add `drop` parameter. + self.columns = columns + self.max_categories = max_categories + + def _fit(self, dataset: Dataset) -> Preprocessor: + self.stats_ = _get_unique_value_indices( + dataset, + self.columns, + max_categories=self.max_categories, + encode_lists=False, + ) + return self + + def _transform_pandas(self, df: pd.DataFrame): + _validate_df(df, *self.columns) + + # Compute new one-hot encoded columns + for column in self.columns: + column_values = self.stats_[f"unique_values({column})"] + if _is_series_composed_of_lists(df[column]): + df[column] = df[column].map(lambda x: tuple(x)) + for column_value in column_values: + df[f"{column}_{column_value}"] = (df[column] == column_value).astype( + int + ) + # Concatenate the value columns + value_columns = [ + f"{column}_{column_value}" for column_value in column_values + ] + concatenated = df[value_columns].to_numpy() + df = df.drop(columns=value_columns) + # Use a Pandas Series for column assignment to get more consistent + # behavior across Pandas versions. + df.loc[:, column] = pd.Series(list(concatenated)) + return df + + def __repr__(self): + return ( + f"{self.__class__.__name__}(columns={self.columns!r}, " + f"max_categories={self.max_categories!r})" + ) + + +@PublicAPI(stability="alpha") +class MultiHotEncoder(Preprocessor): + """Multi-hot encode categorical data. + + This preprocessor replaces each list of categories with an :math:`m`-length binary + list, where :math:`m` is the number of unique categories in the column or the value + specified in ``max_categories``. 
The :math:`i\\text{-th}` element of the binary list + is :math:`1` if category :math:`i` is in the input list and :math:`0` otherwise. + + Columns must contain hashable objects or lists of hashable objects. + Also, you can't have both types in the same column. + + .. note:: + The logic is similar to scikit-learn's `MultiLabelBinarizer \ + `_. + + Examples: + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import MultiHotEncoder + >>> + >>> df = pd.DataFrame({ + ... "name": ["Shaolin Soccer", "Moana", "The Smartest Guys in the Room"], + ... "genre": [ + ... ["comedy", "action", "sports"], + ... ["animation", "comedy", "action"], + ... ["documentary"], + ... ], + ... }) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> + >>> encoder = MultiHotEncoder(columns=["genre"]) + >>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP + name genre + 0 Shaolin Soccer [1, 0, 1, 0, 1] + 1 Moana [1, 1, 1, 0, 0] + 2 The Smartest Guys in the Room [0, 0, 0, 1, 0] + + If you specify ``max_categories``, then :class:`MultiHotEncoder` + creates features for only the most frequent categories. + + >>> encoder = MultiHotEncoder(columns=["genre"], max_categories={"genre": 3}) + >>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP + name genre + 0 Shaolin Soccer [1, 1, 1] + 1 Moana [1, 1, 0] + 2 The Smartest Guys in the Room [0, 0, 0] + >>> encoder.stats_ # doctest: +SKIP + OrderedDict([('unique_values(genre)', {'comedy': 0, 'action': 1, 'sports': 2})]) + + Args: + columns: The columns to separately encode. + max_categories: The maximum number of features to create for each column. + If a value isn't specified for a column, then a feature is created + for every unique category in that column. + + .. seealso:: + + :class:`OneHotEncoder` + If you're encoding individual categories instead of lists of + categories, use :class:`OneHotEncoder`. + + :class:`OrdinalEncoder` + If your categories are ordered, you may want to use + :class:`OrdinalEncoder`. 
+ """ + + def __init__( + self, columns: List[str], *, max_categories: Optional[Dict[str, int]] = None + ): + # TODO: add `drop` parameter. + self.columns = columns + self.max_categories = max_categories + + def _fit(self, dataset: Dataset) -> Preprocessor: + self.stats_ = _get_unique_value_indices( + dataset, + self.columns, + max_categories=self.max_categories, + encode_lists=True, + ) + return self + + def _transform_pandas(self, df: pd.DataFrame): + _validate_df(df, *self.columns) + + def encode_list(element: list, *, name: str): + if isinstance(element, np.ndarray): + element = element.tolist() + elif not isinstance(element, list): + element = [element] + stats = self.stats_[f"unique_values({name})"] + counter = Counter(element) + return [counter.get(x, 0) for x in stats] + + for column in self.columns: + df[column] = df[column].map(partial(encode_list, name=column)) + + return df + + def __repr__(self): + return ( + f"{self.__class__.__name__}(columns={self.columns!r}, " + f"max_categories={self.max_categories!r})" + ) + + +@PublicAPI(stability="alpha") +class LabelEncoder(Preprocessor): + """Encode labels as integer targets. + + :class:`LabelEncoder` encodes labels as integer targets that range from + :math:`0` to :math:`n - 1`, where :math:`n` is the number of unique labels. + + If you transform a label that isn't in the fitted datset, then the label is encoded + as ``float("nan")``. + + Examples: + >>> import pandas as pd + >>> import ray + >>> df = pd.DataFrame({ + ... "sepal_width": [5.1, 7, 4.9, 6.2], + ... "sepal_height": [3.5, 3.2, 3, 3.4], + ... "species": ["setosa", "versicolor", "setosa", "virginica"] + ... 
}) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> + >>> from ray.data.preprocessors import LabelEncoder + >>> encoder = LabelEncoder(label_column="species") + >>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP + sepal_width sepal_height species + 0 5.1 3.5 0 + 1 7.0 3.2 1 + 2 4.9 3.0 0 + 3 6.2 3.4 2 + + If you transform a label not present in the original dataset, then the new + label is encoded as ``float("nan")``. + + >>> df = pd.DataFrame({ + ... "sepal_width": [4.2], + ... "sepal_height": [2.7], + ... "species": ["bracteata"] + ... }) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> encoder.transform(ds).to_pandas() # doctest: +SKIP + sepal_width sepal_height species + 0 4.2 2.7 NaN + + Args: + label_column: A column containing labels that you want to encode. + + .. seealso:: + + :class:`OrdinalEncoder` + If you're encoding ordered features, use :class:`OrdinalEncoder` instead of + :class:`LabelEncoder`. + """ + + def __init__(self, label_column: str): + self.label_column = label_column + + def _fit(self, dataset: Dataset) -> Preprocessor: + self.stats_ = _get_unique_value_indices(dataset, [self.label_column]) + return self + + def _transform_pandas(self, df: pd.DataFrame): + _validate_df(df, self.label_column) + + def column_label_encoder(s: pd.Series): + s_values = self.stats_[f"unique_values({s.name})"] + return s.map(s_values) + + df[self.label_column] = df[self.label_column].transform(column_label_encoder) + return df + + def inverse_transform(self, ds: "Dataset") -> "Dataset": + """Inverse transform the given dataset. + + Args: + ds: Input Dataset that has been fitted and/or transformed. + + Returns: + ray.data.Dataset: The inverse transformed Dataset. + + Raises: + PreprocessorNotFittedException: if ``fit`` is not called yet. 
+ """ + + fit_status = self.fit_status() + + if fit_status in ( + Preprocessor.FitStatus.PARTIALLY_FITTED, + Preprocessor.FitStatus.NOT_FITTED, + ): + raise PreprocessorNotFittedException( + "`fit` must be called before `inverse_transform`, " + ) + + kwargs = self._get_transform_config() + + return ds.map_batches( + self._inverse_transform_pandas, batch_format=BatchFormat.PANDAS, **kwargs + ) + + def _inverse_transform_pandas(self, df: pd.DataFrame): + def column_label_decoder(s: pd.Series): + inverse_values = { + value: key + for key, value in self.stats_[ + f"unique_values({self.label_column})" + ].items() + } + return s.map(inverse_values) + + df[self.label_column] = df[self.label_column].transform(column_label_decoder) + return df + + def __repr__(self): + return f"{self.__class__.__name__}(label_column={self.label_column!r})" + + +@PublicAPI(stability="alpha") +class Categorizer(Preprocessor): + """Convert columns to ``pd.CategoricalDtype``. + + Use this preprocessor with frameworks that have built-in support for + ``pd.CategoricalDtype`` like LightGBM. + + .. warning:: + + If you don't specify ``dtypes``, fit this preprocessor before splitting + your dataset into train and test splits. This ensures categories are + consistent across splits. + + Examples: + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import Categorizer + >>> + >>> df = pd.DataFrame( + ... { + ... "sex": ["male", "female", "male", "female"], + ... "level": ["L4", "L5", "L3", "L4"], + ... }) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> categorizer = Categorizer(columns=["sex", "level"]) + >>> categorizer.fit_transform(ds).schema().types # doctest: +SKIP + [CategoricalDtype(categories=['female', 'male'], ordered=False), CategoricalDtype(categories=['L3', 'L4', 'L5'], ordered=False)] + + If you know the categories in advance, you can specify the categories with the + ``dtypes`` parameter. + + >>> categorizer = Categorizer( + ... 
columns=["sex", "level"], + ... dtypes={"level": pd.CategoricalDtype(["L3", "L4", "L5", "L6"], ordered=True)}, + ... ) + >>> categorizer.fit_transform(ds).schema().types # doctest: +SKIP + [CategoricalDtype(categories=['female', 'male'], ordered=False), CategoricalDtype(categories=['L3', 'L4', 'L5', 'L6'], ordered=True)] + + Args: + columns: The columns to convert to ``pd.CategoricalDtype``. + dtypes: An optional dictionary that maps columns to ``pd.CategoricalDtype`` + objects. If you don't include a column in ``dtypes``, the categories + are inferred. + """ # noqa: E501 + + def __init__( + self, + columns: List[str], + dtypes: Optional[Dict[str, pd.CategoricalDtype]] = None, + ): + if not dtypes: + dtypes = {} + + self.columns = columns + self.dtypes = dtypes + + def _fit(self, dataset: Dataset) -> Preprocessor: + columns_to_get = [ + column for column in self.columns if column not in set(self.dtypes) + ] + if columns_to_get: + unique_indices = _get_unique_value_indices( + dataset, columns_to_get, drop_na_values=True, key_format="{0}" + ) + unique_indices = { + column: pd.CategoricalDtype(values_indices.keys()) + for column, values_indices in unique_indices.items() + } + else: + unique_indices = {} + unique_indices = {**self.dtypes, **unique_indices} + self.stats_: Dict[str, pd.CategoricalDtype] = unique_indices + return self + + def _transform_pandas(self, df: pd.DataFrame): + df = df.astype(self.stats_) + return df + + def __repr__(self): + return ( + f"{self.__class__.__name__}(columns={self.columns!r}, " + f"dtypes={self.dtypes!r})" + ) + + +def _get_unique_value_indices( + dataset: Dataset, + columns: List[str], + drop_na_values: bool = False, + key_format: str = "unique_values({0})", + max_categories: Optional[Dict[str, int]] = None, + encode_lists: bool = True, +) -> Dict[str, Dict[str, int]]: + """If drop_na_values is True, will silently drop NA values.""" + + if max_categories is None: + max_categories = {} + columns_set = set(columns) + for column in 
max_categories: + if column not in columns_set: + raise ValueError( + f"You set `max_categories` for {column}, which is not present in " + f"{columns}." + ) + + def get_pd_value_counts_per_column(col: pd.Series): + # special handling for lists + if _is_series_composed_of_lists(col): + if encode_lists: + counter = Counter() + + def update_counter(element): + counter.update(element) + return element + + col.map(update_counter) + return counter + else: + # convert to tuples to make lists hashable + col = col.map(lambda x: tuple(x)) + return Counter(col.value_counts(dropna=False).to_dict()) + + def get_pd_value_counts(df: pd.DataFrame) -> List[Dict[str, Counter]]: + df_columns = df.columns.tolist() + result = {} + for col in columns: + if col in df_columns: + result[col] = [get_pd_value_counts_per_column(df[col])] + else: + raise ValueError( + f"Column '{col}' does not exist in DataFrame, which has columns: {df_columns}" # noqa: E501 + ) + return result + + value_counts = dataset.map_batches(get_pd_value_counts, batch_format="pandas") + final_counters = {col: Counter() for col in columns} + for batch in value_counts.iter_batches(batch_size=None): + for col, counters in batch.items(): + for counter in counters: + final_counters[col] += counter + + # Inspect if there is any NA values. + for col in columns: + if drop_na_values: + counter = final_counters[col] + counter_dict = dict(counter) + sanitized_dict = {k: v for k, v in counter_dict.items() if not pd.isnull(k)} + final_counters[col] = Counter(sanitized_dict) + else: + if any(pd.isnull(k) for k in final_counters[col]): + raise ValueError( + f"Unable to fit column '{col}' because it contains null" + f" values. Consider imputing missing values first." + ) + + unique_values_with_indices = OrderedDict() + for column in columns: + if column in max_categories: + # Output sorted by freq. 
+ unique_values_with_indices[key_format.format(column)] = { + k[0]: j + for j, k in enumerate( + final_counters[column].most_common(max_categories[column]) + ) + } + else: + # Output sorted by column name. + unique_values_with_indices[key_format.format(column)] = { + k: j for j, k in enumerate(sorted(dict(final_counters[column]).keys())) + } + return unique_values_with_indices + + +def _validate_df(df: pd.DataFrame, *columns: str) -> None: + null_columns = [column for column in columns if df[column].isnull().values.any()] + if null_columns: + raise ValueError( + f"Unable to transform columns {null_columns} because they contain " + f"null values. Consider imputing missing values first." + ) + + +def _is_series_composed_of_lists(series: pd.Series) -> bool: + # we assume that all elements are a list here + first_not_none_element = next( + (element for element in series if element is not None), None + ) + return pandas.api.types.is_object_dtype(series.dtype) and isinstance( + first_not_none_element, (list, np.ndarray) + ) diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/hasher.py b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/hasher.py new file mode 100644 index 0000000000000000000000000000000000000000..364874b21d3f1e3b5053fb79b31f51e317db7441 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/hasher.py @@ -0,0 +1,106 @@ +import collections +from typing import List + +import pandas as pd + +from ray.data.preprocessor import Preprocessor +from ray.data.preprocessors.utils import simple_hash +from ray.util.annotations import PublicAPI + + +@PublicAPI(stability="alpha") +class FeatureHasher(Preprocessor): + """Apply the `hashing trick `_ to a + table that describes token frequencies. + + :class:`FeatureHasher` creates ``num_features`` columns named ``hash_{index}``, + where ``index`` ranges from :math:`0` to ``num_features``:math:`- 1`. 
The column + ``hash_{index}`` describes the frequency of tokens that hash to ``index``. + + Distinct tokens can correspond to the same index. However, if ``num_features`` is + large enough, then columns probably correspond to a unique token. + + This preprocessor is memory efficient and quick to pickle. However, given a + transformed column, you can't know which tokens correspond to it. This might make it + hard to determine which tokens are important to your model. + + .. warning:: + Sparse matrices aren't supported. If you use a large ``num_features``, this + preprocessor might behave poorly. + + Examples: + + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import FeatureHasher + + The data below describes the frequencies of tokens in ``"I like Python"`` and + ``"I dislike Python"``. + + >>> df = pd.DataFrame({ + ... "I": [1, 1], + ... "like": [1, 0], + ... "dislike": [0, 1], + ... "Python": [1, 1] + ... }) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + + :class:`FeatureHasher` hashes each token to determine its index. For example, + the index of ``"I"`` is :math:`hash(\\texttt{"I"}) \pmod 8 = 5`. + + >>> hasher = FeatureHasher(columns=["I", "like", "dislike", "Python"], num_features=8) + >>> hasher.fit_transform(ds).to_pandas().to_numpy() # doctest: +SKIP + array([[0, 0, 0, 2, 0, 1, 0, 0], + [0, 0, 0, 1, 0, 1, 1, 0]]) + + Notice the hash collision: both ``"like"`` and ``"Python"`` correspond to index + :math:`3`. You can avoid hash collisions like these by increasing + ``num_features``. + + Args: + columns: The columns to apply the hashing trick to. Each column should describe + the frequency of a token. + num_features: The number of features used to represent the vocabulary. You + should choose a value large enough to prevent hash collisions between + distinct tokens. + + .. seealso:: + :class:`~ray.data.preprocessors.CountVectorizer` + Use this preprocessor to generate inputs for :class:`FeatureHasher`. 
+ + :class:`ray.data.preprocessors.HashingVectorizer` + If your input data describes documents rather than token frequencies, + use :class:`~ray.data.preprocessors.HashingVectorizer`. + """ # noqa: E501 + + _is_fittable = False + + def __init__(self, columns: List[str], num_features: int): + self.columns = columns + # TODO(matt): Set default number of features. + # This likely requires sparse matrix support to avoid explosion of columns. + self.num_features = num_features + + def _transform_pandas(self, df: pd.DataFrame): + # TODO(matt): Use sparse matrix for efficiency. + def row_feature_hasher(row): + hash_counts = collections.defaultdict(int) + for column in self.columns: + hashed_value = simple_hash(column, self.num_features) + hash_counts[hashed_value] += row[column] + return {f"hash_{i}": hash_counts[i] for i in range(self.num_features)} + + feature_columns = df.loc[:, self.columns].apply( + row_feature_hasher, axis=1, result_type="expand" + ) + df = df.join(feature_columns) + + # Drop original unhashed columns. 
+ df.drop(columns=self.columns, inplace=True) + return df + + def __repr__(self): + return ( + f"{self.__class__.__name__}(columns={self.columns!r}, " + f"num_features={self.num_features!r})" + ) diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/imputer.py b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/imputer.py new file mode 100644 index 0000000000000000000000000000000000000000..dde460fb3b80c712e8c484e7e26d6c1619f202da --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/imputer.py @@ -0,0 +1,170 @@ +from collections import Counter +from numbers import Number +from typing import Dict, List, Optional, Union + +import pandas as pd +from pandas.api.types import is_categorical_dtype + +from ray.data import Dataset +from ray.data._internal.aggregate import Mean +from ray.data.preprocessor import Preprocessor +from ray.util.annotations import PublicAPI + + +@PublicAPI(stability="alpha") +class SimpleImputer(Preprocessor): + """Replace missing values with imputed values. If the column is missing from a + batch, it will be filled with the imputed value. + + Examples: + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import SimpleImputer + >>> df = pd.DataFrame({"X": [0, None, 3, 3], "Y": [None, "b", "c", "c"]}) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> ds.to_pandas() # doctest: +SKIP + X Y + 0 0.0 None + 1 NaN b + 2 3.0 c + 3 3.0 c + + The `"mean"` strategy imputes missing values with the mean of non-missing + values. This strategy doesn't work with categorical data. + + >>> preprocessor = SimpleImputer(columns=["X"], strategy="mean") + >>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP + X Y + 0 0.0 None + 1 2.0 b + 2 3.0 c + 3 3.0 c + + The `"most_frequent"` strategy imputes missing values with the most frequent + value in each column. 
+ + >>> preprocessor = SimpleImputer(columns=["X", "Y"], strategy="most_frequent") + >>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP + X Y + 0 0.0 c + 1 3.0 b + 2 3.0 c + 3 3.0 c + + The `"constant"` strategy imputes missing values with the value specified by + `fill_value`. + + >>> preprocessor = SimpleImputer( + ... columns=["Y"], + ... strategy="constant", + ... fill_value="?", + ... ) + >>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP + X Y + 0 0.0 ? + 1 NaN b + 2 3.0 c + 3 3.0 c + + Args: + columns: The columns to apply imputation to. + strategy: How imputed values are chosen. + + * ``"mean"``: The mean of non-missing values. This strategy only works with numeric columns. + * ``"most_frequent"``: The most common value. + * ``"constant"``: The value passed to ``fill_value``. + + fill_value: The value to use when ``strategy`` is ``"constant"``. + + Raises: + ValueError: if ``strategy`` is not ``"mean"``, ``"most_frequent"``, or + ``"constant"``. + """ # noqa: E501 + + _valid_strategies = ["mean", "most_frequent", "constant"] + + def __init__( + self, + columns: List[str], + strategy: str = "mean", + fill_value: Optional[Union[str, Number]] = None, + ): + self.columns = columns + self.strategy = strategy + self.fill_value = fill_value + + if strategy not in self._valid_strategies: + raise ValueError( + f"Strategy {strategy} is not supported." + f"Supported values are: {self._valid_strategies}" + ) + + if strategy == "constant": + # There is no information to be fitted. + self._is_fittable = False + if fill_value is None: + raise ValueError( + '`fill_value` must be set when using "constant" strategy.' 
+ ) + + def _fit(self, dataset: Dataset) -> Preprocessor: + if self.strategy == "mean": + aggregates = [Mean(col) for col in self.columns] + self.stats_ = dataset.aggregate(*aggregates) + elif self.strategy == "most_frequent": + self.stats_ = _get_most_frequent_values(dataset, *self.columns) + + return self + + def _transform_pandas(self, df: pd.DataFrame): + if self.strategy == "mean": + new_values = { + column: self.stats_[f"mean({column})"] for column in self.columns + } + elif self.strategy == "most_frequent": + new_values = { + column: self.stats_[f"most_frequent({column})"] + for column in self.columns + } + elif self.strategy == "constant": + new_values = {column: self.fill_value for column in self.columns} + for column, value in new_values.items(): + if is_categorical_dtype(df.dtypes[column]): + df[column] = df[column].cat.add_categories(value) + + for column_name in new_values: + if column_name not in df.columns: + # Create the column with the fill_value if it doesn't exist + df[column_name] = new_values[column_name] + else: + # Fill NaN (empty) values in the existing column with the fill_value + df[column_name].fillna(new_values[column_name], inplace=True) + + return df + + def __repr__(self): + return ( + f"{self.__class__.__name__}(columns={self.columns!r}, " + f"strategy={self.strategy!r}, fill_value={self.fill_value!r})" + ) + + +def _get_most_frequent_values( + dataset: Dataset, *columns: str +) -> Dict[str, Union[str, Number]]: + columns = list(columns) + + def get_pd_value_counts(df: pd.DataFrame) -> List[Dict[str, Counter]]: + return {col: [Counter(df[col].value_counts().to_dict())] for col in columns} + + value_counts = dataset.map_batches(get_pd_value_counts, batch_format="pandas") + final_counters = {col: Counter() for col in columns} + for batch in value_counts.iter_batches(batch_size=None): + for col, counters in batch.items(): + for counter in counters: + final_counters[col] += counter + + return { + f"most_frequent({column})": 
final_counters[column].most_common(1)[0][0] + for column in columns + } diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/normalizer.py b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/normalizer.py new file mode 100644 index 0000000000000000000000000000000000000000..430bf6ec6c09aecc9c18b4f2f21e409af0586b9e --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/normalizer.py @@ -0,0 +1,106 @@ +from typing import List + +import numpy as np +import pandas as pd + +from ray.data.preprocessor import Preprocessor +from ray.util.annotations import PublicAPI + + +@PublicAPI(stability="alpha") +class Normalizer(Preprocessor): + r"""Scales each sample to have unit norm. + + This preprocessor works by dividing each sample (i.e., row) by the sample's norm. + The general formula is given by + + .. math:: + + s' = \frac{s}{\lVert s \rVert_p} + + where :math:`s` is the sample, :math:`s'` is the transformed sample, + :math:\lVert s \rVert`, and :math:`p` is the norm type. + + The following norms are supported: + + * `"l1"` (:math:`L^1`): Sum of the absolute values. + * `"l2"` (:math:`L^2`): Square root of the sum of the squared values. + * `"max"` (:math:`L^\infty`): Maximum value. + + Examples: + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import Normalizer + >>> + >>> df = pd.DataFrame({"X1": [1, 1], "X2": [1, 0], "X3": [0, 1]}) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> ds.to_pandas() # doctest: +SKIP + X1 X2 X3 + 0 1 1 0 + 1 1 0 1 + + The :math:`L^2`-norm of the first sample is :math:`\sqrt{2}`, and the + :math:`L^2`-norm of the second sample is :math:`1`. + + >>> preprocessor = Normalizer(columns=["X1", "X2"]) + >>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP + X1 X2 X3 + 0 0.707107 0.707107 0 + 1 1.000000 0.000000 1 + + The :math:`L^1`-norm of the first sample is :math:`2`, and the + :math:`L^1`-norm of the second sample is :math:`1`. 
+ + >>> preprocessor = Normalizer(columns=["X1", "X2"], norm="l1") + >>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP + X1 X2 X3 + 0 0.5 0.5 0 + 1 1.0 0.0 1 + + The :math:`L^\infty`-norm of the both samples is :math:`1`. + + >>> preprocessor = Normalizer(columns=["X1", "X2"], norm="max") + >>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP + X1 X2 X3 + 0 1.0 1.0 0 + 1 1.0 0.0 1 + + Args: + columns: The columns to scale. For each row, these colmumns are scaled to + unit-norm. + norm: The norm to use. The supported values are ``"l1"``, ``"l2"``, or + ``"max"``. Defaults to ``"l2"``. + + Raises: + ValueError: if ``norm`` is not ``"l1"``, ``"l2"``, or ``"max"``. + """ + + _norm_fns = { + "l1": lambda cols: np.abs(cols).sum(axis=1), + "l2": lambda cols: np.sqrt(np.power(cols, 2).sum(axis=1)), + "max": lambda cols: np.max(abs(cols), axis=1), + } + + _is_fittable = False + + def __init__(self, columns: List[str], norm="l2"): + self.columns = columns + self.norm = norm + + if norm not in self._norm_fns: + raise ValueError( + f"Norm {norm} is not supported." 
@PublicAPI(stability="alpha")
class StandardScaler(Preprocessor):
    r"""Translate and scale each column by its mean and standard deviation.

    Each column ``x`` is transformed as ``x' = (x - mean(x)) / std(x)``,
    where ``std`` is the population standard deviation (``ddof=0``).
    Constant-valued columns (``std == 0``) are filled with zeros.

    .. warning::
        :class:`StandardScaler` works best when your data is normal. If your
        data isn't approximately normal, then the transformed features won't
        be meaningful.

    Args:
        columns: The columns to separately scale.
    """

    def __init__(self, columns: List[str]):
        # Each listed column is scaled independently of the others.
        self.columns = columns

    def _fit(self, dataset: Dataset) -> Preprocessor:
        # One Mean and one Std aggregate per column, computed in a single
        # aggregation pass over the dataset.
        aggregates = [Mean(col) for col in self.columns]
        aggregates += [Std(col, ddof=0) for col in self.columns]
        self.stats_ = dataset.aggregate(*aggregates)
        return self

    def _transform_pandas(self, df: pd.DataFrame):
        def scale(col: pd.Series):
            center = self.stats_[f"mean({col.name})"]
            spread = self.stats_[f"std({col.name})"]

            # A zero spread means the column is constant; dividing by 1
            # instead of 0 maps every entry to zero after centering.
            # TODO: extend this to handle near-zero values.
            if spread == 0:
                spread = 1

            return (col - center) / spread

        df.loc[:, self.columns] = df.loc[:, self.columns].transform(scale)
        return df

    def __repr__(self):
        return f"{self.__class__.__name__}(columns={self.columns!r})"
If + :math:`\max{x} - \min{x} = 0` (i.e., the column is constant-valued), then the + transformed column will get filled with zeros. + + Transformed values are always in the range :math:`[0, 1]`. + + .. tip:: + This can be used as an alternative to :py:class:`StandardScaler`. + + Examples: + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import MinMaxScaler + >>> + >>> df = pd.DataFrame({"X1": [-2, 0, 2], "X2": [-3, -3, 3], "X3": [1, 1, 1]}) # noqa: E501 + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> ds.to_pandas() # doctest: +SKIP + X1 X2 X3 + 0 -2 -3 1 + 1 0 -3 1 + 2 2 3 1 + + Columns are scaled separately. + + >>> preprocessor = MinMaxScaler(columns=["X1", "X2"]) + >>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP + X1 X2 X3 + 0 0.0 0.0 1 + 1 0.5 0.0 1 + 2 1.0 1.0 1 + + Constant-valued columns get filled with zeros. + + >>> preprocessor = MinMaxScaler(columns=["X3"]) + >>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP + X1 X2 X3 + 0 -2 -3 0.0 + 1 0 -3 0.0 + 2 2 3 0.0 + + Args: + columns: The columns to separately scale. + """ + + def __init__(self, columns: List[str]): + self.columns = columns + + def _fit(self, dataset: Dataset) -> Preprocessor: + aggregates = [Agg(col) for Agg in [Min, Max] for col in self.columns] + self.stats_ = dataset.aggregate(*aggregates) + return self + + def _transform_pandas(self, df: pd.DataFrame): + def column_min_max_scaler(s: pd.Series): + s_min = self.stats_[f"min({s.name})"] + s_max = self.stats_[f"max({s.name})"] + diff = s_max - s_min + + # Handle division by zero. + # TODO: extend this to handle near-zero values. 
@PublicAPI(stability="alpha")
class MaxAbsScaler(Preprocessor):
    r"""Scale each column by its absolute max value.

    Each column ``x`` is transformed as ``x' = x / max(|x|)``. Columns that
    contain only zeros are left unmodified.

    .. tip::
        This scaler preserves sparsity, which makes it the recommended choice
        for sparse data; otherwise you can use :class:`MinMaxScaler` or
        :class:`StandardScaler` instead.

    Args:
        columns: The columns to separately scale.
    """

    def __init__(self, columns: List[str]):
        self.columns = columns

    def _fit(self, dataset: Dataset) -> Preprocessor:
        # One AbsMax aggregate per column, all computed in a single pass.
        self.stats_ = dataset.aggregate(*(AbsMax(col) for col in self.columns))
        return self

    def _transform_pandas(self, df: pd.DataFrame):
        def scale(col: pd.Series):
            denominator = self.stats_[f"abs_max({col.name})"]

            # An all-zero column has abs_max == 0; dividing by 1 instead
            # leaves the column unchanged rather than producing NaNs.
            if denominator == 0:
                denominator = 1

            return col / denominator

        df.loc[:, self.columns] = df.loc[:, self.columns].transform(scale)
        return df

    def __repr__(self):
        return f"{self.__class__.__name__}(columns={self.columns!r})"
+ """ + + def __init__( + self, columns: List[str], quantile_range: Tuple[float, float] = (0.25, 0.75) + ): + self.columns = columns + self.quantile_range = quantile_range + + def _fit(self, dataset: Dataset) -> Preprocessor: + low = self.quantile_range[0] + med = 0.50 + high = self.quantile_range[1] + + num_records = dataset.count() + max_index = num_records - 1 + split_indices = [int(percentile * max_index) for percentile in (low, med, high)] + + self.stats_ = {} + + # TODO(matt): Handle case where quantile lands between 2 numbers. + # The current implementation will simply choose the closest index. + # This will affect the results of small datasets more than large datasets. + for col in self.columns: + filtered_dataset = dataset.map_batches( + lambda df: df[[col]], batch_format="pandas" + ) + sorted_dataset = filtered_dataset.sort(col) + _, low, med, high = sorted_dataset.split_at_indices(split_indices) + + def _get_first_value(ds: Dataset, c: str): + return ds.take(1)[0][c] + + low_val = _get_first_value(low, col) + med_val = _get_first_value(med, col) + high_val = _get_first_value(high, col) + + self.stats_[f"low_quantile({col})"] = low_val + self.stats_[f"median({col})"] = med_val + self.stats_[f"high_quantile({col})"] = high_val + + return self + + def _transform_pandas(self, df: pd.DataFrame): + def column_robust_scaler(s: pd.Series): + s_low_q = self.stats_[f"low_quantile({s.name})"] + s_median = self.stats_[f"median({s.name})"] + s_high_q = self.stats_[f"high_quantile({s.name})"] + diff = s_high_q - s_low_q + + # Handle division by zero. + # Return all zeros. 
@PublicAPI(stability="alpha")
class Tokenizer(Preprocessor):
    """Replace each string with a list of tokens.

    The default ``tokenization_fn`` delimits strings using the space
    character; pass a custom callable for anything more sophisticated
    (e.g. punctuation stripping).

    Args:
        columns: The columns to tokenize.
        tokenization_fn: The function used to generate tokens. This function
            should accept a string as input and return a list of tokens as
            output. If unspecified, the tokenizer uses a function equivalent
            to ``lambda s: s.split(" ")``.
    """

    # Tokenization is stateless, so there is nothing to fit.
    _is_fittable = False

    def __init__(
        self,
        columns: List[str],
        tokenization_fn: Optional[Callable[[str], List[str]]] = None,
    ):
        self.columns = columns
        # TODO(matt): Add a more robust default tokenizer.
        self.tokenization_fn = tokenization_fn or simple_split_tokenizer

    def _transform_pandas(self, df: pd.DataFrame):
        # Map the tokenizer over every cell of each configured column.
        df.loc[:, self.columns] = df.loc[:, self.columns].transform(
            lambda series: series.map(self.tokenization_fn)
        )
        return df

    def __repr__(self):
        fn_label = getattr(self.tokenization_fn, "__name__", self.tokenization_fn)
        return (
            f"{self.__class__.__name__}(columns={self.columns!r}, "
            f"tokenization_fn={fn_label})"
        )
@PublicAPI(stability="alpha")
class TorchVisionPreprocessor(Preprocessor):
    """Apply a TorchVision transform to image columns.

    The transform is applied to each listed column and the result is written
    to the corresponding entry of ``output_columns`` (by default, back to the
    same column).

    Args:
        columns: The columns to apply the TorchVision transform to.
        transform: The TorchVision transform you want to apply. This transform
            should accept a ``np.ndarray`` or ``torch.Tensor`` as input and
            return a ``torch.Tensor`` as output.
        output_columns: The output name for each input column. If not
            specified, this defaults to the same set of columns as the
            columns.
        batched: If ``True``, apply ``transform`` to batches of shape
            ``(B, H, W, C)``. Otherwise, apply ``transform`` to individual
            images.

    Raises:
        ValueError: if ``columns`` and ``output_columns`` have different
            lengths.
    """

    _is_fittable = False

    def __init__(
        self,
        columns: List[str],
        transform: Callable[[Union["np.ndarray", "torch.Tensor"]], "torch.Tensor"],
        output_columns: Optional[List[str]] = None,
        batched: bool = False,
    ):
        if not output_columns:
            output_columns = columns
        if len(columns) != len(output_columns):
            raise ValueError(
                "The length of columns should match the "
                f"length of output_columns: {columns} vs {output_columns}."
            )
        self._columns = columns
        self._output_columns = output_columns
        self._torchvision_transform = transform
        self._batched = batched

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}("
            f"columns={self._columns}, "
            f"output_columns={self._output_columns}, "
            f"transform={self._torchvision_transform!r})"
        )

    def _transform_numpy(
        self, data_batch: Dict[str, "np.ndarray"]
    ) -> Dict[str, "np.ndarray"]:
        """Apply the TorchVision transform to each configured column."""
        import torch

        from ray.air._internal.torch_utils import convert_ndarray_to_torch_tensor

        def apply_torchvision_transform(array: np.ndarray) -> np.ndarray:
            try:
                tensor = convert_ndarray_to_torch_tensor(array)
                output = self._torchvision_transform(tensor)
            except TypeError:
                # Transforms like `ToTensor` expect a `np.ndarray` as input.
                output = self._torchvision_transform(array)
            if isinstance(output, torch.Tensor):
                output = output.numpy()
            if not isinstance(output, np.ndarray):
                raise ValueError(
                    "`TorchVisionPreprocessor` expected your transform to return a "
                    "`torch.Tensor` or `np.ndarray`, but your transform returned a "
                    f"`{type(output).__name__}` instead."
                )
            return output

        def transform_batch(batch: np.ndarray) -> np.ndarray:
            if self._batched:
                return apply_torchvision_transform(batch)
            # Transform images one at a time; per-image outputs may differ in
            # shape, so allow a ragged result array.
            return _create_possibly_ragged_ndarray(
                [apply_torchvision_transform(array) for array in batch]
            )

        if isinstance(data_batch, Mapping):
            for input_col, output_col in zip(self._columns, self._output_columns):
                data_batch[output_col] = transform_batch(data_batch[input_col])
        else:
            # TODO(ekl) deprecate this code path. Unfortunately, predictors are
            # still sending schemaless arrays to preprocessors.
            data_batch = transform_batch(data_batch)

        return data_batch

    @classmethod
    def preferred_batch_format(cls) -> BatchFormat:
        # BUG FIX: this method takes `cls` but was previously declared without
        # the `@classmethod` decorator, so calling it on an instance would bind
        # the instance as `cls`.
        return BatchFormat.NUMPY
@PublicAPI(stability="alpha")
class PowerTransformer(Preprocessor):
    """Apply a power transform to make your data more normally distributed.

    Supports the Yeo-Johnson and Box-Cox transformations; Box-Cox requires
    all data to be positive. Some models expect normally distributed data, so
    making your data more Gaussian-like might improve your model's
    performance.

    .. warning::

        You need to manually specify the transform's power parameter. If you
        choose a bad value, the transformation might not work well.

    Args:
        columns: The columns to separately transform.
        power: A parameter that determines how your data is transformed.
            Practitioners typically set ``power`` between :math:`-2.5` and
            :math:`2.5`, although you may need to try different values to find
            one that works well.
        method: A string representing which transformation to apply. Supports
            ``"yeo-johnson"`` and ``"box-cox"``. If you choose ``"box-cox"``,
            your data needs to be positive. Defaults to ``"yeo-johnson"``.

    Raises:
        ValueError: if ``method`` is not ``"yeo-johnson"`` or ``"box-cox"``.
    """  # noqa: E501

    _valid_methods = ["yeo-johnson", "box-cox"]
    _is_fittable = False

    def __init__(self, columns: List[str], power: float, method: str = "yeo-johnson"):
        self.columns = columns
        self.method = method
        self.power = power

        if method not in self._valid_methods:
            # BUG FIX: the two message fragments previously ran together with
            # no separating space ("not supported.Supported values ...").
            raise ValueError(
                f"Method {method} is not supported. "
                f"Supported values are: {self._valid_methods}"
            )

    def _transform_pandas(self, df: pd.DataFrame):
        def column_power_transformer(s: pd.Series):
            if self.method == "yeo-johnson":
                result = np.zeros_like(s, dtype=np.float64)
                pos = s >= 0  # binary mask

                # Non-negative branch of Yeo-Johnson; the power == 0 case is
                # the limit of the general formula.
                if self.power != 0:
                    result[pos] = (np.power(s[pos] + 1, self.power) - 1) / self.power
                else:
                    result[pos] = np.log(s[pos] + 1)

                # Negative branch; here power == 2 is the degenerate case.
                if self.power != 2:
                    result[~pos] = -(np.power(-s[~pos] + 1, 2 - self.power) - 1) / (
                        2 - self.power
                    )
                else:
                    result[~pos] = -np.log(-s[~pos] + 1)
                return result

            else:  # box-cox
                if self.power != 0:
                    return (np.power(s, self.power) - 1) / self.power
                else:
                    return np.log(s)

        df.loc[:, self.columns] = df.loc[:, self.columns].transform(
            column_power_transformer
        )
        return df

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(columns={self.columns!r}, "
            f"power={self.power!r}, method={self.method!r})"
        )
@DeveloperAPI
def simple_split_tokenizer(value: str) -> List[str]:
    """Tokenize a string using a split on spaces."""
    return value.split(" ")


@DeveloperAPI
def simple_hash(value: object, num_features: int) -> int:
    """Deterministically hash a value into the integer space.

    The value's string representation is hashed with SHA-1 (used here for
    determinism across processes, not for security) and reduced modulo
    ``num_features`` so the result is a valid feature index.
    """
    digest = hashlib.sha1(str(value).encode()).hexdigest()
    return int(digest, 16) % num_features
note:: + + This preprocessor transforms each input column to a + `document-term matrix `_. + + A document-term matrix is a table that describes the frequency of tokens in a + collection of documents. For example, the strings `"I like Python"` and `"I + dislike Python"` might have the document-term matrix below: + + .. code-block:: + + corpus_I corpus_Python corpus_dislike corpus_like + 0 1 1 1 0 + 1 1 1 0 1 + + To generate the matrix, you typically map each token to a unique index. For + example: + + .. code-block:: + + token index + 0 I 0 + 1 Python 1 + 2 dislike 2 + 3 like 3 + + The problem with this approach is that memory use scales linearly with the size + of your vocabulary. :class:`HashingVectorizer` circumvents this problem by + computing indices with a hash function: + :math:`\\texttt{index} = hash(\\texttt{token})`. + + .. warning:: + Sparse matrices aren't currently supported. If you use a large ``num_features``, + this preprocessor might behave poorly. + + Examples: + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import HashingVectorizer + >>> + >>> df = pd.DataFrame({ + ... "corpus": [ + ... "Jimmy likes volleyball", + ... "Bob likes volleyball too", + ... "Bob also likes fruit jerky" + ... ] + ... }) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> + >>> vectorizer = HashingVectorizer(["corpus"], num_features=8) + >>> vectorizer.fit_transform(ds).to_pandas() # doctest: +SKIP + hash_corpus_0 hash_corpus_1 hash_corpus_2 hash_corpus_3 hash_corpus_4 hash_corpus_5 hash_corpus_6 hash_corpus_7 + 0 1 0 1 0 0 0 0 1 + 1 1 0 1 0 0 0 1 1 + 2 0 0 1 1 0 2 1 0 + + Args: + columns: The columns to separately tokenize and count. + num_features: The number of features used to represent the vocabulary. You + should choose a value large enough to prevent hash collisions between + distinct tokens. + tokenization_fn: The function used to generate tokens. 
This function + should accept a string as input and return a list of tokens as + output. If unspecified, the tokenizer uses a function equivalent to + ``lambda s: s.split(" ")``. + + .. seealso:: + + :class:`CountVectorizer` + Another method for counting token frequencies. Unlike :class:`HashingVectorizer`, + :class:`CountVectorizer` creates a feature for each unique token. This + enables you to compute the inverse transformation. + + :class:`FeatureHasher` + This preprocessor is similar to :class:`HashingVectorizer`, except it expects + a table describing token frequencies. In contrast, + :class:`FeatureHasher` expects a column containing documents. + """ # noqa: E501 + + _is_fittable = False + + def __init__( + self, + columns: List[str], + num_features: int, + tokenization_fn: Optional[Callable[[str], List[str]]] = None, + ): + self.columns = columns + # TODO(matt): Set default number of features. + # This likely requires sparse matrix support to avoid explosion of columns. + self.num_features = num_features + # TODO(matt): Add a more robust default tokenizer. + self.tokenization_fn = tokenization_fn or simple_split_tokenizer + + def _transform_pandas(self, df: pd.DataFrame): + # TODO(matt): Use sparse matrix for efficiency. + + def hash_count(tokens: List[str]) -> Counter: + hashed_tokens = [simple_hash(token, self.num_features) for token in tokens] + return Counter(hashed_tokens) + + for col in self.columns: + tokenized = df[col].map(self.tokenization_fn) + hashed = tokenized.map(hash_count) + for i in range(self.num_features): + df[f"hash_{col}_{i}"] = hashed.map(lambda counts: counts[i]) + + # Drop original columns. 
@PublicAPI(stability="alpha")
class CountVectorizer(Preprocessor):
    """Count the frequency of tokens in a column of strings.

    This preprocessor creates a column named like ``{column}_{token}`` for
    each unique token; the column contains that token's frequency per row.
    The transformed frame consists only of the generated count columns.

    Args:
        columns: The columns to separately tokenize and count.
        tokenization_fn: The function used to generate tokens. This function
            should accept a string as input and return a list of tokens as
            output. If unspecified, the tokenizer uses a function equivalent
            to ``lambda s: s.split(" ")``.
        max_features: The maximum number of tokens to encode in the
            transformed dataset. If specified, only the most frequent tokens
            are encoded.
    """  # noqa: E501

    def __init__(
        self,
        columns: List[str],
        tokenization_fn: Optional[Callable[[str], List[str]]] = None,
        max_features: Optional[int] = None,
    ):
        # TODO(matt): Add fit_transform to avoid recomputing tokenization step.
        self.columns = columns
        # TODO(matt): Add a more robust default tokenizer.
        self.tokenization_fn = tokenization_fn or simple_split_tokenizer
        self.max_features = max_features

    def _fit(self, dataset: Dataset) -> Preprocessor:
        from itertools import chain

        def get_pd_value_counts(df: pd.DataFrame) -> List[Counter]:
            def get_token_counts(col):
                token_series = df[col].apply(self.tokenization_fn)
                # BUG FIX: `token_series.sum()` concatenated the per-row token
                # lists pairwise (quadratic time) and raised a TypeError on an
                # empty batch (the sum of no lists is the int 0). Chaining the
                # lists is linear and yields an empty Counter for empty input.
                return Counter(chain.from_iterable(token_series))

            return {col: [get_token_counts(col)] for col in self.columns}

        value_counts = dataset.map_batches(get_pd_value_counts, batch_format="pandas")

        # Merge the per-batch counters into one Counter per column.
        total_counts = {col: Counter() for col in self.columns}
        for batch in value_counts.iter_batches(batch_size=None):
            for col, counters in batch.items():
                for counter in counters:
                    total_counts[col].update(counter)

        def most_common(counter: Counter, n: int):
            # `most_common(None)` returns every entry, so an unset
            # `max_features` keeps the full vocabulary.
            return Counter(dict(counter.most_common(n)))

        top_counts = [
            most_common(counter, self.max_features) for counter in total_counts.values()
        ]

        self.stats_ = {
            f"token_counts({col})": counts
            for (col, counts) in zip(self.columns, top_counts)
        }

        return self

    def _transform_pandas(self, df: pd.DataFrame):
        to_concat = []
        for col in self.columns:
            token_counts = self.stats_[f"token_counts({col})"]
            sorted_tokens = [token for (token, count) in token_counts.most_common()]
            tokenized = df[col].map(self.tokenization_fn).map(Counter)
            for token in sorted_tokens:
                # `Counter.__getitem__` returns 0 for tokens absent in a row.
                series = tokenized.map(lambda val: val[token])
                series.name = f"{col}_{token}"
                to_concat.append(series)

        # NOTE(review): only the generated count columns survive; any other
        # columns of `df` are dropped, matching the original behavior.
        df = pd.concat(to_concat, axis=1)
        return df

    def __repr__(self):
        fn_name = getattr(self.tokenization_fn, "__name__", self.tokenization_fn)
        return (
            f"{self.__class__.__name__}(columns={self.columns!r}, "
            f"tokenization_fn={fn_name}, max_features={self.max_features!r})"
        )
@PublicAPI(stability="alpha")
class EventListener:
    """Base class for defining a custom event listener.

    Event listeners provide an efficient way to listen for a custom event.
    They are instantiated from a coordinator actor and should therefore be
    stateless.

    Subclasses override :meth:`poll_for_event` to block until an event is
    received, and may override :meth:`event_checkpointed` to commit the event
    with its provider once it has been checkpointed (e.g. to be used with
    ``workflow.wait_for_event``).
    """

    def __init__(self):
        # Optional constructor. Only the zero-argument constructor is ever
        # invoked by the coordinator.
        pass

    async def poll_for_event(self, *args, **kwargs) -> Event:
        """Return only when the event is received."""
        raise NotImplementedError

    async def event_checkpointed(self, event: Event) -> None:
        """Optional hook invoked after ``event`` has been checkpointed.

        At this point a transaction can be safely committed.
        """
        pass
+ """ + + async def poll_for_event(self, timestamp): + await asyncio.sleep(timestamp - time.time()) diff --git a/minigpt2/lib/python3.10/site-packages/ray/workflow/serialization.py b/minigpt2/lib/python3.10/site-packages/ray/workflow/serialization.py new file mode 100644 index 0000000000000000000000000000000000000000..f858577bc9df875abb3dde26828eb0aab21047b7 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/workflow/serialization.py @@ -0,0 +1,235 @@ +import contextlib +from dataclasses import dataclass +import logging +import os + +import ray +from ray import cloudpickle +from ray.types import ObjectRef +from ray.workflow import common, workflow_storage +from typing import Any, Dict, Generator, List, Optional, Tuple, TYPE_CHECKING + +from collections import ChainMap +import io + +if TYPE_CHECKING: + from ray.actor import ActorHandle + +logger = logging.getLogger(__name__) + + +def init_manager() -> None: + get_or_create_manager(warn_on_creation=False) + + +def get_or_create_manager(warn_on_creation: bool = True) -> "ActorHandle": + """Get or create the storage manager.""" + # TODO(suquark): We should not get the actor everytime. We also need to + # resume the actor if it failed. Using a global variable to cache the + # actor seems not enough to resume the actor, because there is no + # aliveness detection for an actor. + try: + return ray.get_actor( + common.STORAGE_ACTOR_NAME, namespace=common.MANAGEMENT_ACTOR_NAMESPACE + ) + except ValueError: + if warn_on_creation: + logger.warning( + "Cannot access workflow serialization manager. It " + "could be because " + "the workflow manager exited unexpectedly. A new " + "workflow manager is being created. 
" + ) + handle = Manager.options( + name=common.STORAGE_ACTOR_NAME, + namespace=common.MANAGEMENT_ACTOR_NAMESPACE, + lifetime="detached", + ).remote() + ray.get(handle.ping.remote()) + return handle + + +@dataclass +class Upload: + identifier_ref: ObjectRef[str] + upload_task: ObjectRef[None] + + +@ray.remote(num_cpus=0) +class Manager: + """ + Responsible for deduping the serialization/upload of object references. + """ + + def __init__(self): + self._uploads: Dict[ray.ObjectRef, Upload] = {} + self._num_uploads = 0 + + def ping(self) -> None: + """ + Trivial function to ensure actor creation is successful. + """ + return None + + async def save_objectref( + self, ref_tuple: Tuple[ray.ObjectRef], workflow_id: "str" + ) -> Tuple[List[str], ray.ObjectRef]: + """Serialize and upload an object reference exactly once. + + Args: + ref_tuple: A 1-element tuple which wraps the reference. + + Returns: + A pair. The first element is the paths the ref will be uploaded to. + The second is an object reference to the upload task. + """ + (ref,) = ref_tuple + # Use the hex as the key to avoid holding a reference to the object. + key = (ref.hex(), workflow_id) + + if key not in self._uploads: + # TODO(Alex): We should probably eventually free these refs. 
+ identifier_ref = common.calculate_identifier.remote(ref) + upload_task = _put_helper.remote(identifier_ref, ref, workflow_id) + self._uploads[key] = Upload( + identifier_ref=identifier_ref, upload_task=upload_task + ) + self._num_uploads += 1 + + info = self._uploads[key] + identifer = await info.identifier_ref + key = _obj_id_to_key(identifer) + return key, info.upload_task + + async def export_stats(self) -> Dict[str, Any]: + return {"num_uploads": self._num_uploads} + + +OBJECTS_DIR = "objects" + + +def _obj_id_to_key(object_id: str) -> str: + return os.path.join(OBJECTS_DIR, object_id) + + +@ray.remote(num_cpus=0) +def _put_helper(identifier: str, obj: Any, workflow_id: str) -> None: + # TODO (Alex): This check isn't sufficient, it only works for directly + # nested object refs. + if isinstance(obj, ray.ObjectRef): + raise NotImplementedError( + "Workflow does not support checkpointing nested object references yet." + ) + key = _obj_id_to_key(identifier) + + dump_to_storage( + key, + obj, + workflow_id, + workflow_storage.WorkflowStorage(workflow_id), + update_existing=False, + ) + + +def _reduce_objectref( + workflow_id: str, + obj_ref: ObjectRef, + tasks: List[ObjectRef], +): + manager = get_or_create_manager() + key, task = ray.get(manager.save_objectref.remote((obj_ref,), workflow_id)) + + assert task + tasks.append(task) + + return _load_object_ref, (key, workflow_id) + + +def dump_to_storage( + key: str, + obj: Any, + workflow_id: str, + storage: "workflow_storage.WorkflowStorage", + update_existing=True, +) -> None: + """Serializes and puts arbitrary object, handling references. The object will + be uploaded at `paths`. Any object references will be uploaded to their + global, remote storage. + + Args: + key: The key of the object. + obj: The object to serialize. If it contains object references, those + will be serialized too. + workflow_id: The workflow id. + storage: The storage to use. 
If obj contains object references, + `storage.put` will be called on them individually. + update_existing: If False, the object will not be uploaded if the path + exists. + """ + if not update_existing: + if storage._exists(key): + return + + tasks = [] + + # NOTE: Cloudpickle doesn't support private dispatch tables, so we extend + # the cloudpickler instead to avoid changing cloudpickle's global dispatch + # table which is shared with `ray.put`. See + # https://github.com/cloudpipe/cloudpickle/issues/437 + class ObjectRefPickler(cloudpickle.CloudPickler): + _object_ref_reducer = { + ray.ObjectRef: lambda ref: _reduce_objectref(workflow_id, ref, tasks) + } + dispatch_table = ChainMap( + _object_ref_reducer, cloudpickle.CloudPickler.dispatch_table + ) + dispatch = dispatch_table + + ray.get(tasks) + + # TODO(Alex): We should be able to do this without the extra buffer. + with io.BytesIO() as f: + pickler = ObjectRefPickler(f) + pickler.dump(obj) + f.seek(0) + # use the underlying storage to avoid cyclic calls of "dump_to_storage" + storage._storage.put(key, f.read()) + + +@ray.remote +def _load_ref_helper(key: str, workflow_id: str): + # TODO(Alex): We should stream the data directly into `cloudpickle.load`. + storage = workflow_storage.WorkflowStorage(workflow_id) + return storage._get(key) + + +# TODO (Alex): We should use weakrefs here instead requiring a context manager. 
+_object_cache: Optional[Dict[str, ray.ObjectRef]] = None + + +def _load_object_ref(key: str, workflow_id: str) -> ray.ObjectRef: + global _object_cache + if _object_cache is None: + return _load_ref_helper.remote(key, workflow_id) + + if _object_cache is None: + return _load_ref_helper.remote(key, workflow_id) + + if key not in _object_cache: + _object_cache[key] = _load_ref_helper.remote(key, workflow_id) + + return _object_cache[key] + + +@contextlib.contextmanager +def objectref_cache() -> Generator: + """A reentrant caching context for object refs.""" + global _object_cache + clear_cache = _object_cache is None + if clear_cache: + _object_cache = {} + try: + yield + finally: + if clear_cache: + _object_cache = None